Merge "Fix integer overflow abort"
diff --git a/CleanSpec.mk b/CleanSpec.mk
index bbb0028..361686c 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -59,6 +59,14 @@
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/libaudiopolicymanager.so)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libaudiopolicyservice_intermediates)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libaudiopolicymanager_intermediates)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/liboboe.so)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib64/liboboe.so)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/STATIC_LIBRARIES/liboboe*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/liboboe*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj_arm/STATIC_LIBRARIES/liboboe*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj_arm/SHARED_LIBRARIES/liboboe*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/bin/mediacodec)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/etc/init/mediacodec.rc)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/libeffects.so)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib64/libeffects.so)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libeffects_intermediates)
@@ -69,8 +77,6 @@
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/soundfx/libreverbwrapper.so)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/soundfx/libbundlewrapper.so)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/soundfx/libaudiopreprocessing.so)
-$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/libeffects.so)
-$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib64/libeffects.so)
# ************************************************
# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
diff --git a/camera/Camera.cpp b/camera/Camera.cpp
index bf9904c..c6c35ef 100644
--- a/camera/Camera.cpp
+++ b/camera/Camera.cpp
@@ -240,6 +240,14 @@
c->releaseRecordingFrameHandle(handle);
}
+void Camera::releaseRecordingFrameHandleBatch(
+ const std::vector<native_handle_t*> handles) {
+ ALOGV("releaseRecordingFrameHandleBatch");
+ sp <::android::hardware::ICamera> c = mCamera;
+ if (c == 0) return;
+ c->releaseRecordingFrameHandleBatch(handles);
+}
+
// get preview state
bool Camera::previewEnabled()
{
@@ -418,6 +426,37 @@
}
}
+void Camera::recordingFrameHandleCallbackTimestampBatch(
+ const std::vector<nsecs_t>& timestamps,
+ const std::vector<native_handle_t*>& handles)
+{
+ // If recording proxy listener is registered, forward the frame and return.
+ // The other listener (mListener) is ignored because the receiver needs to
+ // call releaseRecordingFrameHandle.
+ sp<ICameraRecordingProxyListener> proxylistener;
+ {
+ Mutex::Autolock _l(mLock);
+ proxylistener = mRecordingProxyListener;
+ }
+ if (proxylistener != NULL) {
+ proxylistener->recordingFrameHandleCallbackTimestampBatch(timestamps, handles);
+ return;
+ }
+
+ sp<CameraListener> listener;
+ {
+ Mutex::Autolock _l(mLock);
+ listener = mListener;
+ }
+
+ if (listener != NULL) {
+ listener->postRecordingFrameHandleTimestampBatch(timestamps, handles);
+ } else {
+ ALOGW("No listener was set. Drop a batch of recording frames.");
+ releaseRecordingFrameHandleBatch(handles);
+ }
+}
+
sp<ICameraRecordingProxy> Camera::getRecordingProxy() {
ALOGV("getProxy");
return new RecordingProxy(this);
@@ -448,6 +487,12 @@
mCamera->releaseRecordingFrameHandle(handle);
}
+void Camera::RecordingProxy::releaseRecordingFrameHandleBatch(
+ const std::vector<native_handle_t*>& handles) {
+ ALOGV("RecordingProxy::releaseRecordingFrameHandleBatch");
+ mCamera->releaseRecordingFrameHandleBatch(handles);
+}
+
Camera::RecordingProxy::RecordingProxy(const sp<Camera>& camera)
{
mCamera = camera;
diff --git a/camera/CameraBase.cpp b/camera/CameraBase.cpp
index 2d291a8..c53e6c3 100644
--- a/camera/CameraBase.cpp
+++ b/camera/CameraBase.cpp
@@ -39,7 +39,7 @@
namespace hardware {
-status_t CameraInfo::writeToParcel(Parcel* parcel) const {
+status_t CameraInfo::writeToParcel(android::Parcel* parcel) const {
status_t res;
res = parcel->writeInt32(facing);
if (res != OK) return res;
@@ -47,7 +47,7 @@
return res;
}
-status_t CameraInfo::readFromParcel(const Parcel* parcel) {
+status_t CameraInfo::readFromParcel(const android::Parcel* parcel) {
status_t res;
res = parcel->readInt32(&facing);
if (res != OK) return res;
@@ -55,8 +55,26 @@
return res;
}
+status_t CameraStatus::writeToParcel(android::Parcel* parcel) const {
+ auto res = parcel->writeString16(String16(cameraId));
+ if (res != OK) return res;
+
+ res = parcel->writeInt32(status);
+ return res;
}
+status_t CameraStatus::readFromParcel(const android::Parcel* parcel) {
+ String16 tempCameraId;
+ auto res = parcel->readString16(&tempCameraId);
+ if (res != OK) return res;
+ cameraId = String8(tempCameraId);
+
+ res = parcel->readInt32(&status);
+ return res;
+}
+
+} // namespace hardware
+
namespace {
sp<::android::hardware::ICameraService> gCameraService;
const int kCameraServicePollDelay = 500000; // 0.5s
@@ -239,24 +257,6 @@
return res.isOk() ? OK : res.serviceSpecificErrorCode();
}
-template <typename TCam, typename TCamTraits>
-status_t CameraBase<TCam, TCamTraits>::addServiceListener(
- const sp<::android::hardware::ICameraServiceListener>& listener) {
- const sp<::android::hardware::ICameraService>& cs = getCameraService();
- if (cs == 0) return UNKNOWN_ERROR;
- binder::Status res = cs->addListener(listener);
- return res.isOk() ? OK : res.serviceSpecificErrorCode();
-}
-
-template <typename TCam, typename TCamTraits>
-status_t CameraBase<TCam, TCamTraits>::removeServiceListener(
- const sp<::android::hardware::ICameraServiceListener>& listener) {
- const sp<::android::hardware::ICameraService>& cs = getCameraService();
- if (cs == 0) return UNKNOWN_ERROR;
- binder::Status res = cs->removeListener(listener);
- return res.isOk() ? OK : res.serviceSpecificErrorCode();
-}
-
template class CameraBase<Camera>;
} // namespace android
diff --git a/camera/CameraMetadata.cpp b/camera/CameraMetadata.cpp
index 373b94e..e143e05 100644
--- a/camera/CameraMetadata.cpp
+++ b/camera/CameraMetadata.cpp
@@ -170,7 +170,7 @@
}
status_t CameraMetadata::checkType(uint32_t tag, uint8_t expectedType) {
- int tagType = get_camera_metadata_tag_type(tag);
+ int tagType = get_local_camera_metadata_tag_type(tag, mBuffer);
if ( CC_UNLIKELY(tagType == -1)) {
ALOGE("Update metadata entry: Unknown tag %d", tag);
return INVALID_OPERATION;
@@ -178,7 +178,7 @@
if ( CC_UNLIKELY(tagType != expectedType) ) {
ALOGE("Mismatched tag type when updating entry %s (%d) of type %s; "
"got type %s data instead ",
- get_camera_metadata_tag_name(tag), tag,
+ get_local_camera_metadata_tag_name(tag, mBuffer), tag,
camera_metadata_type_names[tagType],
camera_metadata_type_names[expectedType]);
return INVALID_OPERATION;
@@ -297,7 +297,7 @@
ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
return INVALID_OPERATION;
}
- int type = get_camera_metadata_tag_type(tag);
+ int type = get_local_camera_metadata_tag_type(tag, mBuffer);
if (type == -1) {
ALOGE("%s: Tag %d not found", __FUNCTION__, tag);
return BAD_VALUE;
@@ -332,8 +332,9 @@
if (res != OK) {
ALOGE("%s: Unable to update metadata entry %s.%s (%x): %s (%d)",
- __FUNCTION__, get_camera_metadata_section_name(tag),
- get_camera_metadata_tag_name(tag), tag, strerror(-res), res);
+ __FUNCTION__, get_local_camera_metadata_section_name(tag, mBuffer),
+ get_local_camera_metadata_tag_name(tag, mBuffer), tag,
+ strerror(-res), res);
}
IF_ALOGV() {
@@ -392,16 +393,18 @@
} else if (res != OK) {
ALOGE("%s: Error looking for entry %s.%s (%x): %s %d",
__FUNCTION__,
- get_camera_metadata_section_name(tag),
- get_camera_metadata_tag_name(tag), tag, strerror(-res), res);
+ get_local_camera_metadata_section_name(tag, mBuffer),
+ get_local_camera_metadata_tag_name(tag, mBuffer),
+ tag, strerror(-res), res);
return res;
}
res = delete_camera_metadata_entry(mBuffer, entry.index);
if (res != OK) {
ALOGE("%s: Error deleting entry %s.%s (%x): %s %d",
__FUNCTION__,
- get_camera_metadata_section_name(tag),
- get_camera_metadata_tag_name(tag), tag, strerror(-res), res);
+ get_local_camera_metadata_section_name(tag, mBuffer),
+ get_local_camera_metadata_tag_name(tag, mBuffer),
+ tag, strerror(-res), res);
}
return res;
}
diff --git a/camera/CaptureResult.cpp b/camera/CaptureResult.cpp
index 0a447e7..e6c0d00 100644
--- a/camera/CaptureResult.cpp
+++ b/camera/CaptureResult.cpp
@@ -26,7 +26,7 @@
return requestId >= 0;
}
-status_t CaptureResultExtras::readFromParcel(const Parcel *parcel) {
+status_t CaptureResultExtras::readFromParcel(const android::Parcel *parcel) {
if (parcel == NULL) {
ALOGE("%s: Null parcel", __FUNCTION__);
return BAD_VALUE;
@@ -43,7 +43,7 @@
return OK;
}
-status_t CaptureResultExtras::writeToParcel(Parcel *parcel) const {
+status_t CaptureResultExtras::writeToParcel(android::Parcel *parcel) const {
if (parcel == NULL) {
ALOGE("%s: Null parcel", __FUNCTION__);
return BAD_VALUE;
@@ -69,7 +69,7 @@
mMetadata = otherResult.mMetadata;
}
-status_t CaptureResult::readFromParcel(Parcel *parcel) {
+status_t CaptureResult::readFromParcel(android::Parcel *parcel) {
ALOGV("%s: parcel = %p", __FUNCTION__, parcel);
@@ -100,7 +100,7 @@
return OK;
}
-status_t CaptureResult::writeToParcel(Parcel *parcel) const {
+status_t CaptureResult::writeToParcel(android::Parcel *parcel) const {
ALOGV("%s: parcel = %p", __FUNCTION__, parcel);
diff --git a/camera/ICamera.cpp b/camera/ICamera.cpp
index 2bf956d..f0945c7 100644
--- a/camera/ICamera.cpp
+++ b/camera/ICamera.cpp
@@ -55,6 +55,7 @@
SET_VIDEO_BUFFER_MODE,
SET_VIDEO_BUFFER_TARGET,
RELEASE_RECORDING_FRAME_HANDLE,
+ RELEASE_RECORDING_FRAME_HANDLE_BATCH,
};
class BpCamera: public BpInterface<ICamera>
@@ -172,6 +173,24 @@
native_handle_delete(handle);
}
+ void releaseRecordingFrameHandleBatch(const std::vector<native_handle_t*>& handles) {
+ ALOGV("releaseRecordingFrameHandleBatch");
+ Parcel data, reply;
+ data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
+ uint32_t n = handles.size();
+ data.writeUint32(n);
+ for (auto& handle : handles) {
+ data.writeNativeHandle(handle);
+ }
+ remote()->transact(RELEASE_RECORDING_FRAME_HANDLE_BATCH, data, &reply);
+
+ // Close the native handle because camera received a dup copy.
+ for (auto& handle : handles) {
+ native_handle_close(handle);
+ native_handle_delete(handle);
+ }
+ }
+
status_t setVideoBufferMode(int32_t videoBufferMode)
{
ALOGV("setVideoBufferMode: %d", videoBufferMode);
@@ -378,6 +397,19 @@
releaseRecordingFrameHandle(data.readNativeHandle());
return NO_ERROR;
} break;
+ case RELEASE_RECORDING_FRAME_HANDLE_BATCH: {
+ ALOGV("RELEASE_RECORDING_FRAME_HANDLE_BATCH");
+ CHECK_INTERFACE(ICamera, data, reply);
+ // releaseRecordingFrameHandle will be responsible to close the native handle.
+ uint32_t n = data.readUint32();
+ std::vector<native_handle_t*> handles;
+ handles.reserve(n);
+ for (uint32_t i = 0; i < n; i++) {
+ handles.push_back(data.readNativeHandle());
+ }
+ releaseRecordingFrameHandleBatch(handles);
+ return NO_ERROR;
+ } break;
case SET_VIDEO_BUFFER_MODE: {
ALOGV("SET_VIDEO_BUFFER_MODE");
CHECK_INTERFACE(ICamera, data, reply);
diff --git a/camera/ICameraClient.cpp b/camera/ICameraClient.cpp
index 1b6fac4..7e6297c 100644
--- a/camera/ICameraClient.cpp
+++ b/camera/ICameraClient.cpp
@@ -32,6 +32,7 @@
DATA_CALLBACK,
DATA_CALLBACK_TIMESTAMP,
RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP,
+ RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH,
};
class BpCameraClient: public BpInterface<ICameraClient>
@@ -91,6 +92,29 @@
remote()->transact(RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP, data, &reply,
IBinder::FLAG_ONEWAY);
}
+
+ void recordingFrameHandleCallbackTimestampBatch(
+ const std::vector<nsecs_t>& timestamps,
+ const std::vector<native_handle_t*>& handles) {
+ ALOGV("recordingFrameHandleCallbackTimestampBatch");
+ Parcel data, reply;
+ data.writeInterfaceToken(ICameraClient::getInterfaceDescriptor());
+ uint32_t n = timestamps.size();
+ if (n != handles.size()) {
+ ALOGE("%s: size of timestamps(%zu) and handles(%zu) mismatch!",
+ __FUNCTION__, timestamps.size(), handles.size());
+ return;
+ }
+ data.writeUint32(n);
+ for (auto ts : timestamps) {
+ data.writeInt64(ts);
+ }
+ for (auto& handle : handles) {
+ data.writeNativeHandle(handle);
+ }
+ remote()->transact(RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH, data, &reply,
+ IBinder::FLAG_ONEWAY);
+ }
};
IMPLEMENT_META_INTERFACE(CameraClient, "android.hardware.ICameraClient");
@@ -154,6 +178,41 @@
recordingFrameHandleCallbackTimestamp(timestamp, handle);
return NO_ERROR;
} break;
+ case RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH: {
+ ALOGV("RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH");
+ CHECK_INTERFACE(ICameraClient, data, reply);
+ uint32_t n = 0;
+ status_t res = data.readUint32(&n);
+ if (res != OK) {
+ ALOGE("%s: Failed to read batch size: %s (%d)", __FUNCTION__, strerror(-res), res);
+ return BAD_VALUE;
+ }
+ std::vector<nsecs_t> timestamps;
+ std::vector<native_handle_t*> handles;
+ timestamps.reserve(n);
+ handles.reserve(n);
+ for (uint32_t i = 0; i < n; i++) {
+ res = data.readInt64(&timestamps[i]);
+ if (res != OK) {
+ ALOGE("%s: Failed to read timestamp[%d]: %s (%d)",
+ __FUNCTION__, i, strerror(-res), res);
+ return BAD_VALUE;
+ }
+ }
+ for (uint32_t i = 0; i < n; i++) {
+ native_handle_t* handle = data.readNativeHandle();
+ if (handle == nullptr) {
+ ALOGE("%s: Received a null native handle at handles[%d]",
+ __FUNCTION__, i);
+ return BAD_VALUE;
+ }
+ handles.push_back(handle);
+ }
+
+ // The native handle will be freed in BpCamera::releaseRecordingFrameHandleBatch.
+ recordingFrameHandleCallbackTimestampBatch(timestamps, handles);
+ return NO_ERROR;
+ } break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/camera/ICameraRecordingProxy.cpp b/camera/ICameraRecordingProxy.cpp
index c9f8b5c..bd6af75 100644
--- a/camera/ICameraRecordingProxy.cpp
+++ b/camera/ICameraRecordingProxy.cpp
@@ -32,6 +32,7 @@
STOP_RECORDING,
RELEASE_RECORDING_FRAME,
RELEASE_RECORDING_FRAME_HANDLE,
+ RELEASE_RECORDING_FRAME_HANDLE_BATCH,
};
@@ -82,6 +83,24 @@
native_handle_close(handle);
native_handle_delete(handle);
}
+
+ void releaseRecordingFrameHandleBatch(const std::vector<native_handle_t*>& handles) {
+ ALOGV("releaseRecordingFrameHandleBatch");
+ Parcel data, reply;
+ data.writeInterfaceToken(ICameraRecordingProxy::getInterfaceDescriptor());
+ uint32_t n = handles.size();
+ data.writeUint32(n);
+ for (auto& handle : handles) {
+ data.writeNativeHandle(handle);
+ }
+ remote()->transact(RELEASE_RECORDING_FRAME_HANDLE_BATCH, data, &reply);
+
+ // Close the native handle because camera received a dup copy.
+ for (auto& handle : handles) {
+ native_handle_close(handle);
+ native_handle_delete(handle);
+ }
+ }
};
IMPLEMENT_META_INTERFACE(CameraRecordingProxy, "android.hardware.ICameraRecordingProxy");
@@ -121,6 +140,31 @@
releaseRecordingFrameHandle(data.readNativeHandle());
return NO_ERROR;
} break;
+ case RELEASE_RECORDING_FRAME_HANDLE_BATCH: {
+ ALOGV("RELEASE_RECORDING_FRAME_HANDLE_BATCH");
+ CHECK_INTERFACE(ICameraRecordingProxy, data, reply);
+ uint32_t n = 0;
+ status_t res = data.readUint32(&n);
+ if (res != OK) {
+ ALOGE("%s: Failed to read batch size: %s (%d)", __FUNCTION__, strerror(-res), res);
+ return BAD_VALUE;
+ }
+ std::vector<native_handle_t*> handles;
+ handles.reserve(n);
+ for (uint32_t i = 0; i < n; i++) {
+ native_handle_t* handle = data.readNativeHandle();
+ if (handle == nullptr) {
+ ALOGE("%s: Received a null native handle at handles[%d]",
+ __FUNCTION__, i);
+ return BAD_VALUE;
+ }
+ handles.push_back(handle);
+ }
+
+ // releaseRecordingFrameHandleBatch will be responsible to close the native handle.
+ releaseRecordingFrameHandleBatch(handles);
+ return NO_ERROR;
+ } break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/camera/ICameraRecordingProxyListener.cpp b/camera/ICameraRecordingProxyListener.cpp
index 8529d3e..c954241 100644
--- a/camera/ICameraRecordingProxyListener.cpp
+++ b/camera/ICameraRecordingProxyListener.cpp
@@ -28,6 +28,7 @@
enum {
DATA_CALLBACK_TIMESTAMP = IBinder::FIRST_CALL_TRANSACTION,
RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP,
+ RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH
};
class BpCameraRecordingProxyListener: public BpInterface<ICameraRecordingProxyListener>
@@ -62,6 +63,36 @@
native_handle_close(handle);
native_handle_delete(handle);
}
+
+ void recordingFrameHandleCallbackTimestampBatch(
+ const std::vector<nsecs_t>& timestamps,
+ const std::vector<native_handle_t*>& handles) {
+ ALOGV("recordingFrameHandleCallbackTimestampBatch");
+ Parcel data, reply;
+ data.writeInterfaceToken(ICameraRecordingProxyListener::getInterfaceDescriptor());
+
+ uint32_t n = timestamps.size();
+ if (n != handles.size()) {
+ ALOGE("%s: size of timestamps(%zu) and handles(%zu) mismatch!",
+ __FUNCTION__, timestamps.size(), handles.size());
+ return;
+ }
+ data.writeUint32(n);
+ for (auto ts : timestamps) {
+ data.writeInt64(ts);
+ }
+ for (auto& handle : handles) {
+ data.writeNativeHandle(handle);
+ }
+ remote()->transact(RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH, data, &reply,
+ IBinder::FLAG_ONEWAY);
+
+ // The native handle is dupped in ICameraClient so we need to free it here.
+ for (auto& handle : handles) {
+ native_handle_close(handle);
+ native_handle_delete(handle);
+ }
+ }
};
IMPLEMENT_META_INTERFACE(CameraRecordingProxyListener, "android.hardware.ICameraRecordingProxyListener");
@@ -101,6 +132,41 @@
recordingFrameHandleCallbackTimestamp(timestamp, handle);
return NO_ERROR;
} break;
+ case RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH: {
+ ALOGV("RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH");
+ CHECK_INTERFACE(ICameraRecordingProxyListener, data, reply);
+ uint32_t n = 0;
+ status_t res = data.readUint32(&n);
+ if (res != OK) {
+ ALOGE("%s: Failed to read batch size: %s (%d)", __FUNCTION__, strerror(-res), res);
+ return BAD_VALUE;
+ }
+ std::vector<nsecs_t> timestamps;
+ std::vector<native_handle_t*> handles;
+ timestamps.reserve(n);
+ handles.reserve(n);
+ for (uint32_t i = 0; i < n; i++) {
+ res = data.readInt64(&timestamps[i]);
+ if (res != OK) {
+ ALOGE("%s: Failed to read timestamp[%d]: %s (%d)",
+ __FUNCTION__, i, strerror(-res), res);
+ return BAD_VALUE;
+ }
+ }
+ for (uint32_t i = 0; i < n; i++) {
+ native_handle_t* handle = data.readNativeHandle();
+ if (handle == nullptr) {
+ ALOGE("%s: Received a null native handle at handles[%d]",
+ __FUNCTION__, i);
+ return BAD_VALUE;
+ }
+ handles.push_back(handle);
+ }
+ // The native handle will be freed in
+ // BpCameraRecordingProxy::releaseRecordingFrameHandleBatch.
+ recordingFrameHandleCallbackTimestampBatch(timestamps, handles);
+ return NO_ERROR;
+ } break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/camera/VendorTagDescriptor.cpp b/camera/VendorTagDescriptor.cpp
index f3b3dbb..4c28789 100644
--- a/camera/VendorTagDescriptor.cpp
+++ b/camera/VendorTagDescriptor.cpp
@@ -29,6 +29,7 @@
#include <stdio.h>
#include <string.h>
+#include <inttypes.h>
namespace android {
@@ -40,11 +41,22 @@
static const char* vendor_tag_descriptor_get_tag_name(const vendor_tag_ops_t* v, uint32_t tag);
static int vendor_tag_descriptor_get_tag_type(const vendor_tag_ops_t* v, uint32_t tag);
+static int vendor_tag_descriptor_cache_get_tag_count(metadata_vendor_id_t id);
+static void vendor_tag_descriptor_cache_get_all_tags(uint32_t* tagArray,
+ metadata_vendor_id_t id);
+static const char* vendor_tag_descriptor_cache_get_section_name(uint32_t tag,
+ metadata_vendor_id_t id);
+static const char* vendor_tag_descriptor_cache_get_tag_name(uint32_t tag,
+ metadata_vendor_id_t id);
+static int vendor_tag_descriptor_cache_get_tag_type(uint32_t tag,
+ metadata_vendor_id_t id);
+
} /* extern "C" */
static Mutex sLock;
static sp<VendorTagDescriptor> sGlobalVendorTagDescriptor;
+static sp<VendorTagDescriptorCache> sGlobalVendorTagDescriptorCache;
namespace hardware {
namespace camera2 {
@@ -96,7 +108,7 @@
mVendorOps = src.mVendorOps;
}
-status_t VendorTagDescriptor::readFromParcel(const Parcel* parcel) {
+status_t VendorTagDescriptor::readFromParcel(const android::Parcel* parcel) {
status_t res = OK;
if (parcel == NULL) {
ALOGE("%s: parcel argument was NULL.", __FUNCTION__);
@@ -244,7 +256,7 @@
return mTagToTypeMap.valueFor(tag);
}
-status_t VendorTagDescriptor::writeToParcel(Parcel* parcel) const {
+status_t VendorTagDescriptor::writeToParcel(android::Parcel* parcel) const {
status_t res = OK;
if (parcel == NULL) {
ALOGE("%s: parcel argument was NULL.", __FUNCTION__);
@@ -333,11 +345,166 @@
}
+status_t VendorTagDescriptorCache::writeToParcel(Parcel* parcel) const {
+ status_t res = OK;
+ if (parcel == NULL) {
+ ALOGE("%s: parcel argument was NULL.", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ if ((res = parcel->writeInt32(mVendorMap.size())) != OK) {
+ return res;
+ }
+
+ for (const auto &iter : mVendorMap) {
+ if ((res = parcel->writeUint64(iter.first)) != OK) break;
+ if ((res = parcel->writeParcelable(*iter.second)) != OK) break;
+ }
+
+ return res;
+}
+
+
+status_t VendorTagDescriptorCache::readFromParcel(const Parcel* parcel) {
+ status_t res = OK;
+ if (parcel == NULL) {
+ ALOGE("%s: parcel argument was NULL.", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ int32_t vendorCount = 0;
+ if ((res = parcel->readInt32(&vendorCount)) != OK) {
+ ALOGE("%s: could not read vendor count from parcel", __FUNCTION__);
+ return res;
+ }
+
+ if (vendorCount < 0 || vendorCount > INT32_MAX) {
+ ALOGE("%s: vendor count %d from is invalid.", __FUNCTION__, vendorCount);
+ return BAD_VALUE;
+ }
+
+ metadata_vendor_id_t id;
+ for (int32_t i = 0; i < vendorCount; i++) {
+ if ((res = parcel->readUint64(&id)) != OK) {
+ ALOGE("%s: could not read vendor id from parcel for index %d",
+ __FUNCTION__, i);
+ break;
+ }
+ sp<android::VendorTagDescriptor> desc = new android::VendorTagDescriptor();
+ if ((res = parcel->readParcelable(desc.get())) != OK) {
+ ALOGE("%s: could not read vendor tag descriptor from parcel for index %d rc = %d",
+ __FUNCTION__, i, res);
+ break;
+ }
+
+ if ((res = addVendorDescriptor(id, desc)) != OK) {
+ ALOGE("%s: failed to add vendor tag descriptor for index: %d ",
+ __FUNCTION__, i);
+ break;
+ }
+ }
+
+ return res;
+}
+
+int VendorTagDescriptorCache::getTagCount(metadata_vendor_id_t id) const {
+ int ret = 0;
+ auto desc = mVendorMap.find(id);
+ if (desc != mVendorMap.end()) {
+ ret = desc->second->getTagCount();
+ } else {
+ ALOGE("%s: Vendor descriptor id is missing!", __func__);
+ }
+
+ return ret;
+}
+
+void VendorTagDescriptorCache::getTagArray(uint32_t* tagArray,
+ metadata_vendor_id_t id) const {
+ auto desc = mVendorMap.find(id);
+ if (desc != mVendorMap.end()) {
+ desc->second->getTagArray(tagArray);
+ } else {
+ ALOGE("%s: Vendor descriptor id is missing!", __func__);
+ }
+}
+
+const char* VendorTagDescriptorCache::getSectionName(uint32_t tag,
+ metadata_vendor_id_t id) const {
+ const char *ret = nullptr;
+ auto desc = mVendorMap.find(id);
+ if (desc != mVendorMap.end()) {
+ ret = desc->second->getSectionName(tag);
+ } else {
+ ALOGE("%s: Vendor descriptor id is missing!", __func__);
+ }
+
+ return ret;
+}
+
+const char* VendorTagDescriptorCache::getTagName(uint32_t tag,
+ metadata_vendor_id_t id) const {
+ const char *ret = nullptr;
+ auto desc = mVendorMap.find(id);
+ if (desc != mVendorMap.end()) {
+ ret = desc->second->getTagName(tag);
+ } else {
+ ALOGE("%s: Vendor descriptor id is missing!", __func__);
+ }
+
+ return ret;
+}
+
+int VendorTagDescriptorCache::getTagType(uint32_t tag,
+ metadata_vendor_id_t id) const {
+ int ret = 0;
+ auto desc = mVendorMap.find(id);
+ if (desc != mVendorMap.end()) {
+ ret = desc->second->getTagType(tag);
+ } else {
+ ALOGE("%s: Vendor descriptor id is missing!", __func__);
+ }
+
+ return ret;
+}
+
+void VendorTagDescriptorCache::dump(int fd, int verbosity,
+ int indentation) const {
+ for (const auto &desc : mVendorMap) {
+ dprintf(fd, "%*sDumping vendor tag descriptors for vendor with"
+ " id %" PRIu64 " \n", indentation, "", desc.first);
+ desc.second->dump(fd, verbosity, indentation);
+ }
+}
+
+int32_t VendorTagDescriptorCache::addVendorDescriptor(metadata_vendor_id_t id,
+ sp<android::VendorTagDescriptor> desc) {
+ auto entry = mVendorMap.find(id);
+ if (entry != mVendorMap.end()) {
+ ALOGE("%s: Vendor descriptor with same id already present!", __func__);
+ return BAD_VALUE;
+ }
+
+ mVendorMap.emplace(id, desc);
+ return NO_ERROR;
+}
+
+int32_t VendorTagDescriptorCache::getVendorTagDescriptor(
+ metadata_vendor_id_t id, sp<android::VendorTagDescriptor> *desc /*out*/) {
+ auto entry = mVendorMap.find(id);
+ if (entry == mVendorMap.end()) {
+ return NAME_NOT_FOUND;
+ }
+
+ *desc = entry->second;
+
+ return NO_ERROR;
+}
+
} // namespace params
} // namespace camera2
} // namespace hardware
-
status_t VendorTagDescriptor::createDescriptorFromOps(const vendor_tag_ops_t* vOps,
/*out*/
sp<VendorTagDescriptor>& descriptor) {
@@ -451,6 +618,39 @@
return sGlobalVendorTagDescriptor;
}
+status_t VendorTagDescriptorCache::setAsGlobalVendorTagCache(
+ const sp<VendorTagDescriptorCache>& cache) {
+ status_t res = OK;
+ Mutex::Autolock al(sLock);
+ sGlobalVendorTagDescriptorCache = cache;
+
+ struct vendor_tag_cache_ops* opsPtr = NULL;
+ if (cache != NULL) {
+ opsPtr = &(cache->mVendorCacheOps);
+ opsPtr->get_tag_count = vendor_tag_descriptor_cache_get_tag_count;
+ opsPtr->get_all_tags = vendor_tag_descriptor_cache_get_all_tags;
+ opsPtr->get_section_name = vendor_tag_descriptor_cache_get_section_name;
+ opsPtr->get_tag_name = vendor_tag_descriptor_cache_get_tag_name;
+ opsPtr->get_tag_type = vendor_tag_descriptor_cache_get_tag_type;
+ }
+ if((res = set_camera_metadata_vendor_cache_ops(opsPtr)) != OK) {
+ ALOGE("%s: Could not set vendor tag cache, received error %s (%d)."
+ , __FUNCTION__, strerror(-res), res);
+ }
+ return res;
+}
+
+void VendorTagDescriptorCache::clearGlobalVendorTagCache() {
+ Mutex::Autolock al(sLock);
+ set_camera_metadata_vendor_cache_ops(NULL);
+ sGlobalVendorTagDescriptorCache.clear();
+}
+
+sp<VendorTagDescriptorCache> VendorTagDescriptorCache::getGlobalVendorTagCache() {
+ Mutex::Autolock al(sLock);
+ return sGlobalVendorTagDescriptorCache;
+}
+
extern "C" {
int vendor_tag_descriptor_get_tag_count(const vendor_tag_ops_t* /*v*/) {
@@ -498,5 +698,53 @@
return sGlobalVendorTagDescriptor->getTagType(tag);
}
+int vendor_tag_descriptor_cache_get_tag_count(metadata_vendor_id_t id) {
+ Mutex::Autolock al(sLock);
+ if (sGlobalVendorTagDescriptorCache == NULL) {
+ ALOGE("%s: Vendor tag descriptor cache not initialized.", __FUNCTION__);
+ return VENDOR_TAG_COUNT_ERR;
+ }
+ return sGlobalVendorTagDescriptorCache->getTagCount(id);
+}
+
+void vendor_tag_descriptor_cache_get_all_tags(uint32_t* tagArray,
+ metadata_vendor_id_t id) {
+ Mutex::Autolock al(sLock);
+ if (sGlobalVendorTagDescriptorCache == NULL) {
+ ALOGE("%s: Vendor tag descriptor cache not initialized.", __FUNCTION__);
+ }
+ sGlobalVendorTagDescriptorCache->getTagArray(tagArray, id);
+}
+
+const char* vendor_tag_descriptor_cache_get_section_name(uint32_t tag,
+ metadata_vendor_id_t id) {
+ Mutex::Autolock al(sLock);
+ if (sGlobalVendorTagDescriptorCache == NULL) {
+ ALOGE("%s: Vendor tag descriptor cache not initialized.", __FUNCTION__);
+ return VENDOR_SECTION_NAME_ERR;
+ }
+ return sGlobalVendorTagDescriptorCache->getSectionName(tag, id);
+}
+
+const char* vendor_tag_descriptor_cache_get_tag_name(uint32_t tag,
+ metadata_vendor_id_t id) {
+ Mutex::Autolock al(sLock);
+ if (sGlobalVendorTagDescriptorCache == NULL) {
+ ALOGE("%s: Vendor tag descriptor cache not initialized.", __FUNCTION__);
+ return VENDOR_TAG_NAME_ERR;
+ }
+ return sGlobalVendorTagDescriptorCache->getTagName(tag, id);
+}
+
+int vendor_tag_descriptor_cache_get_tag_type(uint32_t tag,
+ metadata_vendor_id_t id) {
+ Mutex::Autolock al(sLock);
+ if (sGlobalVendorTagDescriptorCache == NULL) {
+ ALOGE("%s: Vendor tag descriptor cache not initialized.", __FUNCTION__);
+ return VENDOR_TAG_NAME_ERR;
+ }
+ return sGlobalVendorTagDescriptorCache->getTagType(tag, id);
+}
+
} /* extern "C" */
} /* namespace android */
diff --git a/camera/aidl/android/hardware/CameraStatus.aidl b/camera/aidl/android/hardware/CameraStatus.aidl
new file mode 100644
index 0000000..2089b8b
--- /dev/null
+++ b/camera/aidl/android/hardware/CameraStatus.aidl
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware;
+
+/** @hide */
+parcelable CameraStatus cpp_header "camera/CameraBase.h";
diff --git a/camera/aidl/android/hardware/ICameraService.aidl b/camera/aidl/android/hardware/ICameraService.aidl
index e94fd0c..9c0f28b 100644
--- a/camera/aidl/android/hardware/ICameraService.aidl
+++ b/camera/aidl/android/hardware/ICameraService.aidl
@@ -21,9 +21,11 @@
import android.hardware.camera2.ICameraDeviceUser;
import android.hardware.camera2.ICameraDeviceCallbacks;
import android.hardware.camera2.params.VendorTagDescriptor;
+import android.hardware.camera2.params.VendorTagDescriptorCache;
import android.hardware.camera2.impl.CameraMetadataNative;
import android.hardware.ICameraServiceListener;
import android.hardware.CameraInfo;
+import android.hardware.CameraStatus;
/**
* Binder interface for the native camera service running in mediaserver.
@@ -83,7 +85,7 @@
* Only supported for device HAL versions >= 3.2
*/
ICameraDeviceUser connectDevice(ICameraDeviceCallbacks callbacks,
- int cameraId,
+ String cameraId,
String opPackageName,
int clientUid);
@@ -102,16 +104,24 @@
int clientUid);
/**
- * Add/remove listeners for changes to camera device and flashlight state
+ * Add listener for changes to camera device and flashlight state.
+ *
+ * Also returns the set of currently-known camera IDs and state of each device.
+ * Adding a listener will trigger the torch status listener to fire for all
+ * devices that have a flash unit
*/
- void addListener(ICameraServiceListener listener);
+ CameraStatus[] addListener(ICameraServiceListener listener);
+
+ /**
+ * Remove listener for changes to camera device and flashlight state.
+ */
void removeListener(ICameraServiceListener listener);
/**
* Read the static camera metadata for a camera device.
* Only supported for device HAL versions >= 3.2
*/
- CameraMetadataNative getCameraCharacteristics(int cameraId);
+ CameraMetadataNative getCameraCharacteristics(String cameraId);
/**
* Read in the vendor tag descriptors from the camera module HAL.
@@ -121,6 +131,14 @@
VendorTagDescriptor getCameraVendorTagDescriptor();
/**
+ * Retrieve the vendor tag descriptor cache which can have multiple vendor
+ * providers.
+ * Intended to be used by the native code of CameraMetadataNative to correctly
+ * interpret camera metadata with vendor tags.
+ */
+ VendorTagDescriptorCache getCameraVendorTagCache();
+
+ /**
* Read the legacy camera1 parameters into a String
*/
String getLegacyParameters(int cameraId);
@@ -132,9 +150,9 @@
const int API_VERSION_2 = 2;
// Determines if a particular API version is supported directly
- boolean supportsCameraApi(int cameraId, int apiVersion);
+ boolean supportsCameraApi(String cameraId, int apiVersion);
- void setTorchMode(String CameraId, boolean enabled, IBinder clientBinder);
+ void setTorchMode(String cameraId, boolean enabled, IBinder clientBinder);
/**
* Notify the camera service of a system event. Should only be called from system_server.
diff --git a/camera/aidl/android/hardware/ICameraServiceListener.aidl b/camera/aidl/android/hardware/ICameraServiceListener.aidl
index 4e2a8c7..f871ce4 100644
--- a/camera/aidl/android/hardware/ICameraServiceListener.aidl
+++ b/camera/aidl/android/hardware/ICameraServiceListener.aidl
@@ -40,7 +40,7 @@
*/
// Device physically unplugged
const int STATUS_NOT_PRESENT = 0;
- // Device physically has been plugged in and the camera can be used exlusively
+ // Device physically has been plugged in and the camera can be used exclusively
const int STATUS_PRESENT = 1;
// Device physically has been plugged in but it will not be connect-able until enumeration is
// complete
@@ -51,7 +51,7 @@
// Use to initialize variables only
const int STATUS_UNKNOWN = -1;
- oneway void onStatusChanged(int status, int cameraId);
+ oneway void onStatusChanged(int status, String cameraId);
/**
* The torch mode status of a camera.
diff --git a/camera/aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl b/camera/aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl
index 755ec8e..8308095 100644
--- a/camera/aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl
+++ b/camera/aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl
@@ -44,4 +44,5 @@
* @param lastFrameNumber Frame number of the last frame of the streaming request.
*/
oneway void onRepeatingRequestError(in long lastFrameNumber);
+ oneway void onRequestQueueEmpty();
}
diff --git a/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl b/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
index 96ecfa0..0771fc8 100644
--- a/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
+++ b/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
@@ -54,6 +54,22 @@
void beginConfigure();
/**
+ * The standard operating mode for a camera device; all API guarantees are in force
+ */
+ const int NORMAL_MODE = 0;
+
+ /**
+ * High-speed recording mode; only two outputs targeting preview and video recording may be
+ * used, and requests must be batched.
+ */
+ const int CONSTRAINED_HIGH_SPEED_MODE = 1;
+
+ /**
+ * Start of custom vendor modes
+ */
+ const int VENDOR_MODE_START = 0x8000;
+
+ /**
* End the device configuration.
*
* <p>
@@ -61,8 +77,10 @@
* a call to beginConfigure and subsequent createStream/deleteStream calls). This
* must be called before any requests can be submitted.
* <p>
+ * @param operatingMode The kind of session to create; either NORMAL_MODE or
+ * CONSTRAINED_HIGH_SPEED_MODE. Must be a non-negative value.
*/
- void endConfigure(boolean isConstrainedHighSpeed);
+ void endConfigure(int operatingMode);
void deleteStream(int streamId);
@@ -122,5 +140,5 @@
void prepare2(int maxCount, int streamId);
- void setDeferredConfiguration(int streamId, in OutputConfiguration outputConfiguration);
+ void finalizeOutputConfigurations(int streamId, in OutputConfiguration outputConfiguration);
}
diff --git a/camera/aidl/android/hardware/camera2/params/VendorTagDescriptorCache.aidl b/camera/aidl/android/hardware/camera2/params/VendorTagDescriptorCache.aidl
new file mode 100644
index 0000000..d212207
--- /dev/null
+++ b/camera/aidl/android/hardware/camera2/params/VendorTagDescriptorCache.aidl
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.camera2.params;
+
+/** @hide */
+parcelable VendorTagDescriptorCache cpp_header "camera/VendorTagDescriptor.h";
diff --git a/camera/camera2/CaptureRequest.cpp b/camera/camera2/CaptureRequest.cpp
index 0d689a6..0597950 100644
--- a/camera/camera2/CaptureRequest.cpp
+++ b/camera/camera2/CaptureRequest.cpp
@@ -23,12 +23,20 @@
#include <binder/Parcel.h>
#include <gui/Surface.h>
+#include <gui/view/Surface.h>
namespace android {
namespace hardware {
namespace camera2 {
-status_t CaptureRequest::readFromParcel(const Parcel* parcel) {
+// These must be in the .cpp (to avoid inlining)
+CaptureRequest::CaptureRequest() = default;
+CaptureRequest::~CaptureRequest() = default;
+CaptureRequest::CaptureRequest(const CaptureRequest& rhs) = default;
+CaptureRequest::CaptureRequest(CaptureRequest&& rhs) noexcept = default;
+
+
+status_t CaptureRequest::readFromParcel(const android::Parcel* parcel) {
if (parcel == NULL) {
ALOGE("%s: Null parcel", __FUNCTION__);
return BAD_VALUE;
@@ -90,7 +98,7 @@
return OK;
}
-status_t CaptureRequest::writeToParcel(Parcel* parcel) const {
+status_t CaptureRequest::writeToParcel(android::Parcel* parcel) const {
if (parcel == NULL) {
ALOGE("%s: Null parcel", __FUNCTION__);
return BAD_VALUE;
diff --git a/camera/camera2/OutputConfiguration.cpp b/camera/camera2/OutputConfiguration.cpp
index 38e1c01..468a1eb 100644
--- a/camera/camera2/OutputConfiguration.cpp
+++ b/camera/camera2/OutputConfiguration.cpp
@@ -21,8 +21,9 @@
#include <utils/Log.h>
#include <camera/camera2/OutputConfiguration.h>
-#include <gui/Surface.h>
#include <binder/Parcel.h>
+#include <gui/view/Surface.h>
+#include <utils/String8.h>
namespace android {
@@ -30,8 +31,9 @@
const int OutputConfiguration::INVALID_ROTATION = -1;
const int OutputConfiguration::INVALID_SET_ID = -1;
-sp<IGraphicBufferProducer> OutputConfiguration::getGraphicBufferProducer() const {
- return mGbp;
+const std::vector<sp<IGraphicBufferProducer>>&
+ OutputConfiguration::getGraphicBufferProducers() const {
+ return mGbps;
}
int OutputConfiguration::getRotation() const {
@@ -54,21 +56,31 @@
return mHeight;
}
+bool OutputConfiguration::isDeferred() const {
+ return mIsDeferred;
+}
+
+bool OutputConfiguration::isShared() const {
+ return mIsShared;
+}
+
OutputConfiguration::OutputConfiguration() :
mRotation(INVALID_ROTATION),
mSurfaceSetID(INVALID_SET_ID),
mSurfaceType(SURFACE_TYPE_UNKNOWN),
mWidth(0),
- mHeight(0) {
+ mHeight(0),
+ mIsDeferred(false),
+ mIsShared(false) {
}
-OutputConfiguration::OutputConfiguration(const Parcel& parcel) :
+OutputConfiguration::OutputConfiguration(const android::Parcel& parcel) :
mRotation(INVALID_ROTATION),
mSurfaceSetID(INVALID_SET_ID) {
readFromParcel(&parcel);
}
-status_t OutputConfiguration::readFromParcel(const Parcel* parcel) {
+status_t OutputConfiguration::readFromParcel(const android::Parcel* parcel) {
status_t err = OK;
int rotation = 0;
@@ -103,42 +115,60 @@
return err;
}
- view::Surface surfaceShim;
- if ((err = surfaceShim.readFromParcel(parcel)) != OK) {
- // Read surface failure for deferred surface configuration is expected.
- if (surfaceType == SURFACE_TYPE_SURFACE_VIEW ||
- surfaceType == SURFACE_TYPE_SURFACE_TEXTURE) {
- ALOGV("%s: Get null surface from a deferred surface configuration (%dx%d)",
- __FUNCTION__, width, height);
- err = OK;
- } else {
- ALOGE("%s: Failed to read surface from parcel", __FUNCTION__);
- return err;
- }
+ int isDeferred = 0;
+ if ((err = parcel->readInt32(&isDeferred)) != OK) {
+ ALOGE("%s: Failed to read surface isDeferred flag from parcel", __FUNCTION__);
+ return err;
}
- mGbp = surfaceShim.graphicBufferProducer;
+ int isShared = 0;
+ if ((err = parcel->readInt32(&isShared)) != OK) {
+ ALOGE("%s: Failed to read surface isShared flag from parcel", __FUNCTION__);
+ return err;
+ }
+
+ if (isDeferred && surfaceType != SURFACE_TYPE_SURFACE_VIEW &&
+ surfaceType != SURFACE_TYPE_SURFACE_TEXTURE) {
+ ALOGE("%s: Invalid surface type for deferred configuration", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ std::vector<view::Surface> surfaceShims;
+ if ((err = parcel->readParcelableVector(&surfaceShims)) != OK) {
+ ALOGE("%s: Failed to read surface(s) from parcel", __FUNCTION__);
+ return err;
+ }
+
mRotation = rotation;
mSurfaceSetID = setID;
mSurfaceType = surfaceType;
mWidth = width;
mHeight = height;
+ mIsDeferred = isDeferred != 0;
+ mIsShared = isShared != 0;
+ for (auto& surface : surfaceShims) {
+ ALOGV("%s: OutputConfiguration: %p, name %s", __FUNCTION__,
+ surface.graphicBufferProducer.get(),
+ String8(surface.name).string());
+ mGbps.push_back(surface.graphicBufferProducer);
+ }
- ALOGV("%s: OutputConfiguration: bp = %p, name = %s, rotation = %d, setId = %d,"
- "surfaceType = %d", __FUNCTION__, mGbp.get(), String8(surfaceShim.name).string(),
- mRotation, mSurfaceSetID, mSurfaceType);
+ ALOGV("%s: OutputConfiguration: rotation = %d, setId = %d, surfaceType = %d",
+ __FUNCTION__, mRotation, mSurfaceSetID, mSurfaceType);
return err;
}
OutputConfiguration::OutputConfiguration(sp<IGraphicBufferProducer>& gbp, int rotation,
int surfaceSetID) {
- mGbp = gbp;
+ mGbps.push_back(gbp);
mRotation = rotation;
mSurfaceSetID = surfaceSetID;
+ mIsDeferred = false;
+ mIsShared = false;
}
-status_t OutputConfiguration::writeToParcel(Parcel* parcel) const {
+status_t OutputConfiguration::writeToParcel(android::Parcel* parcel) const {
if (parcel == nullptr) return BAD_VALUE;
status_t err = OK;
@@ -158,14 +188,56 @@
err = parcel->writeInt32(mHeight);
if (err != OK) return err;
- view::Surface surfaceShim;
- surfaceShim.name = String16("unknown_name"); // name of surface
- surfaceShim.graphicBufferProducer = mGbp;
+ err = parcel->writeInt32(mIsDeferred ? 1 : 0);
+ if (err != OK) return err;
- err = surfaceShim.writeToParcel(parcel);
+ err = parcel->writeInt32(mIsShared ? 1 : 0);
+ if (err != OK) return err;
+
+ std::vector<view::Surface> surfaceShims;
+ for (auto& gbp : mGbps) {
+ view::Surface surfaceShim;
+ surfaceShim.name = String16("unknown_name"); // name of surface
+ surfaceShim.graphicBufferProducer = gbp;
+ surfaceShims.push_back(surfaceShim);
+ }
+ err = parcel->writeParcelableVector(surfaceShims);
if (err != OK) return err;
return OK;
}
+bool OutputConfiguration::gbpsEqual(const OutputConfiguration& other) const {
+ const std::vector<sp<IGraphicBufferProducer> >& otherGbps =
+ other.getGraphicBufferProducers();
+
+ if (mGbps.size() != otherGbps.size()) {
+ return false;
+ }
+
+ for (size_t i = 0; i < mGbps.size(); i++) {
+ if (mGbps[i] != otherGbps[i]) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool OutputConfiguration::gbpsLessThan(const OutputConfiguration& other) const {
+ const std::vector<sp<IGraphicBufferProducer> >& otherGbps =
+ other.getGraphicBufferProducers();
+
+ if (mGbps.size() != otherGbps.size()) {
+ return mGbps.size() < otherGbps.size();
+ }
+
+ for (size_t i = 0; i < mGbps.size(); i++) {
+ if (mGbps[i] != otherGbps[i]) {
+ return mGbps[i] < otherGbps[i];
+ }
+ }
+
+ return false;
+}
}; // namespace android
diff --git a/camera/camera2/SubmitInfo.cpp b/camera/camera2/SubmitInfo.cpp
index d739c79..6ebd810 100644
--- a/camera/camera2/SubmitInfo.cpp
+++ b/camera/camera2/SubmitInfo.cpp
@@ -22,7 +22,7 @@
namespace camera2 {
namespace utils {
-status_t SubmitInfo::writeToParcel(Parcel *parcel) const {
+status_t SubmitInfo::writeToParcel(android::Parcel *parcel) const {
status_t res;
if (parcel == nullptr) return BAD_VALUE;
@@ -33,7 +33,7 @@
return res;
}
-status_t SubmitInfo::readFromParcel(const Parcel *parcel) {
+status_t SubmitInfo::readFromParcel(const android::Parcel *parcel) {
status_t res;
if (parcel == nullptr) return BAD_VALUE;
diff --git a/camera/cameraserver/Android.mk b/camera/cameraserver/Android.mk
index 888862a..b8c94e6 100644
--- a/camera/cameraserver/Android.mk
+++ b/camera/cameraserver/Android.mk
@@ -22,9 +22,15 @@
LOCAL_SHARED_LIBRARIES := \
libcameraservice \
liblog \
- libcutils \
libutils \
+ libui \
+ libgui \
libbinder \
+ libhidltransport \
+ android.hardware.camera.common@1.0 \
+ android.hardware.camera.provider@2.4 \
+ android.hardware.camera.device@1.0 \
+ android.hardware.camera.device@3.2
LOCAL_MODULE:= cameraserver
LOCAL_32_BIT_ONLY := true
diff --git a/camera/cameraserver/main_cameraserver.cpp b/camera/cameraserver/main_cameraserver.cpp
index f4be468..3972436 100644
--- a/camera/cameraserver/main_cameraserver.cpp
+++ b/camera/cameraserver/main_cameraserver.cpp
@@ -17,8 +17,8 @@
#define LOG_TAG "cameraserver"
//#define LOG_NDEBUG 0
-// from LOCAL_C_INCLUDES
#include "CameraService.h"
+#include <hidl/HidlTransportSupport.h>
using namespace android;
@@ -26,6 +26,9 @@
{
signal(SIGPIPE, SIG_IGN);
+ // Set 3 threads for HIDL calls
+ hardware::configureRpcThreadpool(3, /*willjoin*/ false);
+
sp<ProcessState> proc(ProcessState::self());
sp<IServiceManager> sm = defaultServiceManager();
ALOGI("ServiceManager: %p", sm.get());
diff --git a/camera/include/camera/Camera.h b/camera/include/camera/Camera.h
index 57dc228..430aa1c 100644
--- a/camera/include/camera/Camera.h
+++ b/camera/include/camera/Camera.h
@@ -44,6 +44,9 @@
camera_frame_metadata_t *metadata) = 0;
virtual void postDataTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr) = 0;
virtual void postRecordingFrameHandleTimestamp(nsecs_t timestamp, native_handle_t* handle) = 0;
+ virtual void postRecordingFrameHandleTimestampBatch(
+ const std::vector<nsecs_t>& timestamps,
+ const std::vector<native_handle_t*>& handles) = 0;
};
class Camera;
@@ -118,6 +121,10 @@
// release a recording frame handle
void releaseRecordingFrameHandle(native_handle_t *handle);
+ // release a batch of recording frame handles
+ void releaseRecordingFrameHandleBatch(
+ const std::vector<native_handle_t*> handles);
+
// autoFocus - status returned from callback
status_t autoFocus();
@@ -166,6 +173,10 @@
camera_frame_metadata_t *metadata);
virtual void dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr);
virtual void recordingFrameHandleCallbackTimestamp(nsecs_t timestamp, native_handle_t* handle);
+ virtual void recordingFrameHandleCallbackTimestampBatch(
+ const std::vector<nsecs_t>& timestamps,
+ const std::vector<native_handle_t*>& handles);
+
class RecordingProxy : public BnCameraRecordingProxy
{
@@ -177,6 +188,8 @@
virtual void stopRecording();
virtual void releaseRecordingFrame(const sp<IMemory>& mem);
virtual void releaseRecordingFrameHandle(native_handle_t* handle);
+ virtual void releaseRecordingFrameHandleBatch(
+ const std::vector<native_handle_t*>& handles);
private:
sp<Camera> mCamera;
diff --git a/camera/include/camera/CameraBase.h b/camera/include/camera/CameraBase.h
index 41f8621..74a2dce 100644
--- a/camera/include/camera/CameraBase.h
+++ b/camera/include/camera/CameraBase.h
@@ -17,7 +17,10 @@
#ifndef ANDROID_HARDWARE_CAMERA_BASE_H
#define ANDROID_HARDWARE_CAMERA_BASE_H
+#include <android/hardware/ICameraServiceListener.h>
+
#include <utils/Mutex.h>
+#include <binder/BinderService.h>
struct camera_frame_metadata;
@@ -29,6 +32,13 @@
class ICameraService;
class ICameraServiceListener;
+enum {
+ /** The facing of the camera is opposite to that of the screen. */
+ CAMERA_FACING_BACK = 0,
+ /** The facing of the camera is the same as that of the screen. */
+ CAMERA_FACING_FRONT = 1,
+};
+
struct CameraInfo : public android::Parcelable {
/**
* The direction that the camera faces to. It should be CAMERA_FACING_BACK
@@ -50,11 +60,33 @@
*/
int orientation;
- virtual status_t writeToParcel(Parcel* parcel) const;
- virtual status_t readFromParcel(const Parcel* parcel);
+ virtual status_t writeToParcel(android::Parcel* parcel) const;
+ virtual status_t readFromParcel(const android::Parcel* parcel);
};
+/**
+ * Basic status information about a camera device - its name and its current
+ * state.
+ */
+struct CameraStatus : public android::Parcelable {
+ /**
+ * The name of the camera device
+ */
+ String8 cameraId;
+
+ /**
+ * Its current status, one of the ICameraService::STATUS_* fields
+ */
+ int32_t status;
+
+ virtual status_t writeToParcel(android::Parcel* parcel) const;
+ virtual status_t readFromParcel(const android::Parcel* parcel);
+
+ CameraStatus(String8 id, int32_t s) : cameraId(id), status(s) {}
+ CameraStatus() : status(ICameraServiceListener::STATUS_PRESENT) {}
+};
+
} // namespace hardware
using hardware::CameraInfo;
@@ -86,12 +118,6 @@
/*out*/
struct hardware::CameraInfo* cameraInfo);
- static status_t addServiceListener(
- const sp<::android::hardware::ICameraServiceListener>& listener);
-
- static status_t removeServiceListener(
- const sp<::android::hardware::ICameraServiceListener>& listener);
-
sp<TCamUser> remote();
// Status is set to 'UNKNOWN_ERROR' after successful (re)connection
diff --git a/camera/include/camera/CaptureResult.h b/camera/include/camera/CaptureResult.h
index 45e4518..917d953 100644
--- a/camera/include/camera/CaptureResult.h
+++ b/camera/include/camera/CaptureResult.h
@@ -88,8 +88,8 @@
*/
bool isValid();
- virtual status_t readFromParcel(const Parcel* parcel) override;
- virtual status_t writeToParcel(Parcel* parcel) const override;
+ virtual status_t readFromParcel(const android::Parcel* parcel) override;
+ virtual status_t writeToParcel(android::Parcel* parcel) const override;
};
} // namespace impl
} // namespace camera2
@@ -105,8 +105,8 @@
CaptureResult(const CaptureResult& otherResult);
- status_t readFromParcel(Parcel* parcel);
- status_t writeToParcel(Parcel* parcel) const;
+ status_t readFromParcel(android::Parcel* parcel);
+ status_t writeToParcel(android::Parcel* parcel) const;
};
}
diff --git a/camera/include/camera/ICameraRecordingProxy.h b/camera/include/camera/ICameraRecordingProxy.h
index cb6824a..02af2f3 100644
--- a/camera/include/camera/ICameraRecordingProxy.h
+++ b/camera/include/camera/ICameraRecordingProxy.h
@@ -17,6 +17,7 @@
#ifndef ANDROID_HARDWARE_ICAMERA_RECORDING_PROXY_H
#define ANDROID_HARDWARE_ICAMERA_RECORDING_PROXY_H
+#include <vector>
#include <binder/IInterface.h>
#include <cutils/native_handle.h>
#include <utils/RefBase.h>
@@ -85,6 +86,8 @@
virtual void stopRecording() = 0;
virtual void releaseRecordingFrame(const sp<IMemory>& mem) = 0;
virtual void releaseRecordingFrameHandle(native_handle_t *handle) = 0;
+ virtual void releaseRecordingFrameHandleBatch(
+ const std::vector<native_handle_t*>& handles) = 0;
};
// ----------------------------------------------------------------------------
diff --git a/camera/include/camera/ICameraRecordingProxyListener.h b/camera/include/camera/ICameraRecordingProxyListener.h
index 1fee5b9..da03c56 100644
--- a/camera/include/camera/ICameraRecordingProxyListener.h
+++ b/camera/include/camera/ICameraRecordingProxyListener.h
@@ -17,6 +17,7 @@
#ifndef ANDROID_HARDWARE_ICAMERA_RECORDING_PROXY_LISTENER_H
#define ANDROID_HARDWARE_ICAMERA_RECORDING_PROXY_LISTENER_H
+#include <vector>
#include <binder/IInterface.h>
#include <cutils/native_handle.h>
#include <stdint.h>
@@ -38,6 +39,10 @@
virtual void recordingFrameHandleCallbackTimestamp(nsecs_t timestamp,
native_handle_t* handle) = 0;
+
+ virtual void recordingFrameHandleCallbackTimestampBatch(
+ const std::vector<nsecs_t>& timestamps,
+ const std::vector<native_handle_t*>& handles) = 0;
};
// ----------------------------------------------------------------------------
diff --git a/camera/include/camera/VendorTagDescriptor.h b/camera/include/camera/VendorTagDescriptor.h
index bfc8c96..904fba2 100644
--- a/camera/include/camera/VendorTagDescriptor.h
+++ b/camera/include/camera/VendorTagDescriptor.h
@@ -22,7 +22,7 @@
#include <utils/String8.h>
#include <utils/RefBase.h>
#include <system/camera_vendor_tags.h>
-
+#include <unordered_map>
#include <stdint.h>
namespace android {
@@ -76,7 +76,7 @@
*/
virtual status_t writeToParcel(
/*out*/
- Parcel* parcel) const override;
+ android::Parcel* parcel) const override;
/**
* Convenience method to get a vector containing all vendor tag
@@ -103,7 +103,7 @@
*
* Returns OK on success, or a negative error code.
*/
- virtual status_t readFromParcel(const Parcel* parcel) override;
+ virtual status_t readFromParcel(const android::Parcel* parcel) override;
protected:
KeyedVector<String8, KeyedVector<String8, uint32_t>*> mReverseMapping;
@@ -166,8 +166,84 @@
};
-} /* namespace android */
+namespace hardware {
+namespace camera2 {
+namespace params {
+class VendorTagDescriptorCache : public Parcelable {
+ public:
+
+ VendorTagDescriptorCache() {};
+
+ int32_t addVendorDescriptor(metadata_vendor_id_t id,
+ sp<android::VendorTagDescriptor> desc);
+
+ int32_t getVendorTagDescriptor(
+ metadata_vendor_id_t id,
+ sp<android::VendorTagDescriptor> *desc /*out*/);
+
+ // Parcelable interface
+ status_t writeToParcel(Parcel* parcel) const override;
+ status_t readFromParcel(const Parcel* parcel) override;
+
+ // Returns the number of vendor tags defined.
+ int getTagCount(metadata_vendor_id_t id) const;
+
+ // Returns an array containing the id's of vendor tags defined.
+ void getTagArray(uint32_t* tagArray, metadata_vendor_id_t id) const;
+
+ // Returns the section name string for a given vendor tag id.
+ const char* getSectionName(uint32_t tag, metadata_vendor_id_t id) const;
+
+ // Returns the tag name string for a given vendor tag id.
+ const char* getTagName(uint32_t tag, metadata_vendor_id_t id) const;
+
+ // Returns the tag type for a given vendor tag id.
+ int getTagType(uint32_t tag, metadata_vendor_id_t id) const;
+
+ /**
+ * Dump the currently configured vendor tags to a file descriptor.
+ */
+ void dump(int fd, int verbosity, int indentation) const;
+
+ protected:
+ std::unordered_map<metadata_vendor_id_t, sp<android::VendorTagDescriptor>> mVendorMap;
+ struct vendor_tag_cache_ops mVendorCacheOps;
+};
+
+} /* namespace params */
+} /* namespace camera2 */
+} /* namespace hardware */
+
+class VendorTagDescriptorCache :
+ public ::android::hardware::camera2::params::VendorTagDescriptorCache,
+ public LightRefBase<VendorTagDescriptorCache> {
+ public:
+
+ /**
+ * Sets the global vendor tag descriptor cache to use for this process.
+ * Camera metadata operations that access vendor tags will use the
+ * vendor tag definitions set this way.
+ *
+ * Returns OK on success, or a negative error code.
+ */
+ static status_t setAsGlobalVendorTagCache(
+ const sp<VendorTagDescriptorCache>& cache);
+
+ /**
+ * Returns the global vendor tag cache used by this process.
+ * This will contain NULL if no vendor tags are defined.
+ */
+ static sp<VendorTagDescriptorCache> getGlobalVendorTagCache();
+
+ /**
+ * Clears the global vendor tag cache used by this process.
+ */
+ static void clearGlobalVendorTagCache();
+
+};
+
+} /* namespace android */
#define VENDOR_TAG_DESCRIPTOR_H
#endif /* VENDOR_TAG_DESCRIPTOR_H */
diff --git a/camera/include/camera/android/hardware/ICamera.h b/camera/include/camera/android/hardware/ICamera.h
index 3b12afe..80823d6 100644
--- a/camera/include/camera/android/hardware/ICamera.h
+++ b/camera/include/camera/android/hardware/ICamera.h
@@ -33,7 +33,7 @@
class ICameraClient;
-class ICamera: public IInterface
+class ICamera: public android::IInterface
{
/**
* Keep up-to-date with ICamera.aidl in frameworks/base
@@ -101,6 +101,11 @@
// ICameraClient::recordingFrameHandleCallbackTimestamp.
virtual void releaseRecordingFrameHandle(native_handle_t *handle) = 0;
+ // Release a batch of recording frame handles that was received via
+ // ICameraClient::recordingFrameHandleCallbackTimestampBatch
+ virtual void releaseRecordingFrameHandleBatch(
+ const std::vector<native_handle_t*>& handles) = 0;
+
// auto focus
virtual status_t autoFocus() = 0;
@@ -139,7 +144,7 @@
// ----------------------------------------------------------------------------
-class BnCamera: public BnInterface<ICamera>
+class BnCamera: public android::BnInterface<ICamera>
{
public:
virtual status_t onTransact( uint32_t code,
diff --git a/camera/include/camera/android/hardware/ICameraClient.h b/camera/include/camera/android/hardware/ICameraClient.h
index 3f835a9..8e46d17 100644
--- a/camera/include/camera/android/hardware/ICameraClient.h
+++ b/camera/include/camera/android/hardware/ICameraClient.h
@@ -27,7 +27,7 @@
namespace android {
namespace hardware {
-class ICameraClient: public IInterface
+class ICameraClient: public android::IInterface
{
public:
DECLARE_META_INTERFACE(CameraClient);
@@ -41,11 +41,18 @@
// ICamera::releaseRecordingFrameHandle to release the frame handle.
virtual void recordingFrameHandleCallbackTimestamp(nsecs_t timestamp,
native_handle_t* handle) = 0;
+
+ // Invoked to send a batch of recording frame handles with timestamp. Call
+ // ICamera::releaseRecordingFrameHandleBatch to release the frame handles.
+ // Size of timestamps and handles must match
+ virtual void recordingFrameHandleCallbackTimestampBatch(
+ const std::vector<nsecs_t>& timestamps,
+ const std::vector<native_handle_t*>& handles) = 0;
};
// ----------------------------------------------------------------------------
-class BnCameraClient: public BnInterface<ICameraClient>
+class BnCameraClient: public android::BnInterface<ICameraClient>
{
public:
virtual status_t onTransact( uint32_t code,
diff --git a/camera/include/camera/camera2/CaptureRequest.h b/camera/include/camera/camera2/CaptureRequest.h
index c989f26..0180183 100644
--- a/camera/include/camera/camera2/CaptureRequest.h
+++ b/camera/include/camera/camera2/CaptureRequest.h
@@ -30,6 +30,16 @@
namespace camera2 {
struct CaptureRequest : public Parcelable {
+
+ // those are needed so we can use a forward declaration of Surface, otherwise
+ // the type is incomplete when the ctor/dtors are generated. This has the added
+ // benefit that ctor/dtors are not inlined, which is good because they're not trivial
+ // (because of the vtable and Vector<>)
+ CaptureRequest();
+ CaptureRequest(const CaptureRequest& rhs);
+ CaptureRequest(CaptureRequest&& rhs) noexcept;
+ virtual ~CaptureRequest();
+
CameraMetadata mMetadata;
Vector<sp<Surface> > mSurfaceList;
bool mIsReprocess;
@@ -37,8 +47,8 @@
/**
* Keep impl up-to-date with CaptureRequest.java in frameworks/base
*/
- status_t readFromParcel(const Parcel* parcel) override;
- status_t writeToParcel(Parcel* parcel) const override;
+ status_t readFromParcel(const android::Parcel* parcel) override;
+ status_t writeToParcel(android::Parcel* parcel) const override;
};
} // namespace camera2
diff --git a/camera/include/camera/camera2/OutputConfiguration.h b/camera/include/camera/camera2/OutputConfiguration.h
index cf8f3c6..8e641c7 100644
--- a/camera/include/camera/camera2/OutputConfiguration.h
+++ b/camera/include/camera/camera2/OutputConfiguration.h
@@ -38,18 +38,20 @@
SURFACE_TYPE_SURFACE_VIEW = 0,
SURFACE_TYPE_SURFACE_TEXTURE = 1
};
- sp<IGraphicBufferProducer> getGraphicBufferProducer() const;
+ const std::vector<sp<IGraphicBufferProducer>>& getGraphicBufferProducers() const;
int getRotation() const;
int getSurfaceSetID() const;
int getSurfaceType() const;
int getWidth() const;
int getHeight() const;
+ bool isDeferred() const;
+ bool isShared() const;
/**
* Keep impl up-to-date with OutputConfiguration.java in frameworks/base
*/
- virtual status_t writeToParcel(Parcel* parcel) const override;
+ virtual status_t writeToParcel(android::Parcel* parcel) const override;
- virtual status_t readFromParcel(const Parcel* parcel) override;
+ virtual status_t readFromParcel(const android::Parcel* parcel) override;
// getGraphicBufferProducer will be NULL
// getRotation will be INVALID_ROTATION
@@ -59,25 +61,26 @@
// getGraphicBufferProducer will be NULL if error occurred
// getRotation will be INVALID_ROTATION if error occurred
// getSurfaceSetID will be INVALID_SET_ID if error occurred
- OutputConfiguration(const Parcel& parcel);
+ OutputConfiguration(const android::Parcel& parcel);
OutputConfiguration(sp<IGraphicBufferProducer>& gbp, int rotation,
int surfaceSetID = INVALID_SET_ID);
bool operator == (const OutputConfiguration& other) const {
- return (mGbp == other.mGbp &&
- mRotation == other.mRotation &&
+ return ( mRotation == other.mRotation &&
mSurfaceSetID == other.mSurfaceSetID &&
mSurfaceType == other.mSurfaceType &&
mWidth == other.mWidth &&
- mHeight == other.mHeight);
+ mHeight == other.mHeight &&
+ mIsDeferred == other.mIsDeferred &&
+ mIsShared == other.mIsShared &&
+ gbpsEqual(other));
}
bool operator != (const OutputConfiguration& other) const {
return !(*this == other);
}
bool operator < (const OutputConfiguration& other) const {
if (*this == other) return false;
- if (mGbp != other.mGbp) return mGbp < other.mGbp;
if (mSurfaceSetID != other.mSurfaceSetID) {
return mSurfaceSetID < other.mSurfaceSetID;
}
@@ -90,22 +93,34 @@
if (mHeight != other.mHeight) {
return mHeight < other.mHeight;
}
-
- return mRotation < other.mRotation;
+ if (mRotation != other.mRotation) {
+ return mRotation < other.mRotation;
+ }
+ if (mIsDeferred != other.mIsDeferred) {
+ return mIsDeferred < other.mIsDeferred;
+ }
+ if (mIsShared != other.mIsShared) {
+ return mIsShared < other.mIsShared;
+ }
+ return gbpsLessThan(other);
}
bool operator > (const OutputConfiguration& other) const {
return (*this != other && !(*this < other));
}
+ bool gbpsEqual(const OutputConfiguration& other) const;
+ bool gbpsLessThan(const OutputConfiguration& other) const;
private:
- sp<IGraphicBufferProducer> mGbp;
+ std::vector<sp<IGraphicBufferProducer>> mGbps;
int mRotation;
int mSurfaceSetID;
int mSurfaceType;
int mWidth;
int mHeight;
+ bool mIsDeferred;
+ bool mIsShared;
// helper function
- static String16 readMaybeEmptyString16(const Parcel* parcel);
+ static String16 readMaybeEmptyString16(const android::Parcel* parcel);
};
} // namespace params
} // namespace camera2
diff --git a/camera/include/camera/camera2/SubmitInfo.h b/camera/include/camera/camera2/SubmitInfo.h
index 3b47b32..8f271c0 100644
--- a/camera/include/camera/camera2/SubmitInfo.h
+++ b/camera/include/camera/camera2/SubmitInfo.h
@@ -31,8 +31,8 @@
int32_t mRequestId;
int64_t mLastFrameNumber;
- virtual status_t writeToParcel(Parcel *parcel) const override;
- virtual status_t readFromParcel(const Parcel* parcel) override;
+ virtual status_t writeToParcel(android::Parcel *parcel) const override;
+ virtual status_t readFromParcel(const android::Parcel* parcel) override;
};
diff --git a/camera/ndk/impl/ACameraDevice.cpp b/camera/ndk/impl/ACameraDevice.cpp
index 7d78e2b..229b159 100644
--- a/camera/ndk/impl/ACameraDevice.cpp
+++ b/camera/ndk/impl/ACameraDevice.cpp
@@ -1347,6 +1347,12 @@
}
binder::Status
+CameraDevice::ServiceCallback::onRequestQueueEmpty() {
+ // onRequestQueueEmpty not yet implemented in NDK
+ return binder::Status::ok();
+}
+
+binder::Status
CameraDevice::ServiceCallback::onRepeatingRequestError(int64_t lastFrameNumber) {
binder::Status ret = binder::Status::ok();
diff --git a/camera/ndk/impl/ACameraDevice.h b/camera/ndk/impl/ACameraDevice.h
index 27c7498..c566cd2 100644
--- a/camera/ndk/impl/ACameraDevice.h
+++ b/camera/ndk/impl/ACameraDevice.h
@@ -74,6 +74,7 @@
binder::Status onResultReceived(const CameraMetadata& metadata,
const CaptureResultExtras& resultExtras) override;
binder::Status onPrepared(int streamId) override;
+ binder::Status onRequestQueueEmpty() override;
binder::Status onRepeatingRequestError(int64_t lastFrameNumber) override;
private:
const wp<CameraDevice> mDevice;
diff --git a/camera/ndk/impl/ACameraManager.cpp b/camera/ndk/impl/ACameraManager.cpp
index 35555ff..3f64bcc 100644
--- a/camera/ndk/impl/ACameraManager.cpp
+++ b/camera/ndk/impl/ACameraManager.cpp
@@ -28,11 +28,6 @@
using namespace android;
-//constants shared between ACameraManager and CameraManagerGlobal
-namespace {
- const int kMaxCameraIdLen = 32;
-}
-
namespace android {
// Static member definitions
const char* CameraManagerGlobal::kCameraIdKey = "CameraId";
@@ -125,17 +120,47 @@
if (mCameraServiceListener == nullptr) {
mCameraServiceListener = new CameraServiceListener(this);
}
- mCameraService->addListener(mCameraServiceListener);
+ std::vector<hardware::CameraStatus> cameraStatuses{};
+ mCameraService->addListener(mCameraServiceListener, &cameraStatuses);
+ for (auto& c : cameraStatuses) {
+ onStatusChangedLocked(c.status, c.cameraId);
+ }
// setup vendor tags
sp<VendorTagDescriptor> desc = new VendorTagDescriptor();
binder::Status ret = mCameraService->getCameraVendorTagDescriptor(/*out*/desc.get());
if (ret.isOk()) {
- status_t err = VendorTagDescriptor::setAsGlobalVendorTagDescriptor(desc);
- if (err != OK) {
- ALOGE("%s: Failed to set vendor tag descriptors, received error %s (%d)",
- __FUNCTION__, strerror(-err), err);
+ if (0 < desc->getTagCount()) {
+ status_t err = VendorTagDescriptor::setAsGlobalVendorTagDescriptor(desc);
+ if (err != OK) {
+ ALOGE("%s: Failed to set vendor tag descriptors, received error %s (%d)",
+ __FUNCTION__, strerror(-err), err);
+ }
+ } else {
+ sp<VendorTagDescriptorCache> cache =
+ new VendorTagDescriptorCache();
+ binder::Status res =
+ mCameraService->getCameraVendorTagCache(
+ /*out*/cache.get());
+ if (res.serviceSpecificErrorCode() ==
+ hardware::ICameraService::ERROR_DISCONNECTED) {
+ // No camera module available, not an error on devices with no cameras
+ VendorTagDescriptorCache::clearGlobalVendorTagCache();
+ } else if (res.isOk()) {
+ status_t err =
+ VendorTagDescriptorCache::setAsGlobalVendorTagCache(
+ cache);
+ if (err != OK) {
+ ALOGE("%s: Failed to set vendor tag cache,"
+ "received error %s (%d)", __FUNCTION__,
+ strerror(-err), err);
+ }
+ } else {
+ VendorTagDescriptorCache::clearGlobalVendorTagCache();
+ ALOGE("%s: Failed to setup vendor tag cache: %s",
+ __FUNCTION__, res.toString8().string());
+ }
}
} else if (ret.serviceSpecificErrorCode() ==
hardware::ICameraService::ERROR_DEPRECATED_HAL) {
@@ -157,8 +182,8 @@
sp<CameraManagerGlobal> cm = mCameraManager.promote();
if (cm != nullptr) {
AutoMutex lock(cm->mLock);
- for (auto pair : cm->mDeviceStatusMap) {
- int32_t cameraId = pair.first;
+ for (auto& pair : cm->mDeviceStatusMap) {
+ const String8 &cameraId = pair.first;
cm->onStatusChangedLocked(
CameraServiceListener::STATUS_NOT_PRESENT, cameraId);
}
@@ -174,8 +199,8 @@
auto pair = mCallbacks.insert(cb);
// Send initial callbacks if callback is newly registered
if (pair.second) {
- for (auto pair : mDeviceStatusMap) {
- int32_t cameraId = pair.first;
+ for (auto& pair : mDeviceStatusMap) {
+ const String8& cameraId = pair.first;
int32_t status = pair.second;
sp<AMessage> msg = new AMessage(kWhatSendSingleCallback, mHandler);
@@ -183,7 +208,7 @@
callback->onCameraAvailable : callback->onCameraUnavailable;
msg->setPointer(kCallbackFpKey, (void *) cb);
msg->setPointer(kContextKey, callback->context);
- msg->setInt32(kCameraIdKey, cameraId);
+ msg->setString(kCameraIdKey, AString(cameraId));
msg->post();
}
}
@@ -196,6 +221,26 @@
mCallbacks.erase(cb);
}
+void CameraManagerGlobal::getCameraIdList(std::vector<String8> *cameraIds) {
+ // Ensure that we have initialized/refreshed the list of available devices
+ auto cs = getCameraService();
+ Mutex::Autolock _l(mLock);
+
+ for(auto& deviceStatus : mDeviceStatusMap) {
+ if (deviceStatus.second == hardware::ICameraServiceListener::STATUS_NOT_PRESENT ||
+ deviceStatus.second == hardware::ICameraServiceListener::STATUS_ENUMERATING) {
+ continue;
+ }
+ bool camera2Support = false;
+ binder::Status serviceRet = cs->supportsCameraApi(String16(deviceStatus.first),
+ hardware::ICameraService::API_VERSION_2, &camera2Support);
+ if (!serviceRet.isOk() || !camera2Support) {
+ continue;
+ }
+ cameraIds->push_back(deviceStatus.first);
+ }
+}
+
bool CameraManagerGlobal::validStatus(int32_t status) {
switch (status) {
case hardware::ICameraServiceListener::STATUS_NOT_PRESENT:
@@ -217,14 +262,6 @@
}
}
-void CameraManagerGlobal::CallbackHandler::sendSingleCallback(
- int32_t cameraId, void* context,
- ACameraManager_AvailabilityCallback cb) const {
- char cameraIdStr[kMaxCameraIdLen];
- snprintf(cameraIdStr, sizeof(cameraIdStr), "%d", cameraId);
- (*cb)(context, cameraIdStr);
-}
-
void CameraManagerGlobal::CallbackHandler::onMessageReceived(
const sp<AMessage> &msg) {
switch (msg->what()) {
@@ -232,7 +269,7 @@
{
ACameraManager_AvailabilityCallback cb;
void* context;
- int32_t cameraId;
+ AString cameraId;
bool found = msg->findPointer(kCallbackFpKey, (void**) &cb);
if (!found) {
ALOGE("%s: Cannot find camera callback fp!", __FUNCTION__);
@@ -243,12 +280,12 @@
ALOGE("%s: Cannot find callback context!", __FUNCTION__);
return;
}
- found = msg->findInt32(kCameraIdKey, &cameraId);
+ found = msg->findString(kCameraIdKey, &cameraId);
if (!found) {
ALOGE("%s: Cannot find camera ID!", __FUNCTION__);
return;
}
- sendSingleCallback(cameraId, context, cb);
+ (*cb)(context, cameraId.c_str());
break;
}
default:
@@ -258,10 +295,10 @@
}
binder::Status CameraManagerGlobal::CameraServiceListener::onStatusChanged(
- int32_t status, int32_t cameraId) {
+ int32_t status, const String16& cameraId) {
sp<CameraManagerGlobal> cm = mCameraManager.promote();
if (cm != nullptr) {
- cm->onStatusChanged(status, cameraId);
+ cm->onStatusChanged(status, String8(cameraId));
} else {
ALOGE("Cannot deliver status change. Global camera manager died");
}
@@ -269,40 +306,40 @@
}
void CameraManagerGlobal::onStatusChanged(
- int32_t status, int32_t cameraId) {
+ int32_t status, const String8& cameraId) {
Mutex::Autolock _l(mLock);
onStatusChangedLocked(status, cameraId);
}
void CameraManagerGlobal::onStatusChangedLocked(
- int32_t status, int32_t cameraId) {
- if (!validStatus(status)) {
- ALOGE("%s: Invalid status %d", __FUNCTION__, status);
- return;
- }
+ int32_t status, const String8& cameraId) {
+ if (!validStatus(status)) {
+ ALOGE("%s: Invalid status %d", __FUNCTION__, status);
+ return;
+ }
- bool firstStatus = (mDeviceStatusMap.count(cameraId) == 0);
- int32_t oldStatus = firstStatus ?
- status : // first status
- mDeviceStatusMap[cameraId];
+ bool firstStatus = (mDeviceStatusMap.count(cameraId) == 0);
+ int32_t oldStatus = firstStatus ?
+ status : // first status
+ mDeviceStatusMap[cameraId];
- if (!firstStatus &&
- isStatusAvailable(status) == isStatusAvailable(oldStatus)) {
- // No status update. No need to send callback
- return;
- }
+ if (!firstStatus &&
+ isStatusAvailable(status) == isStatusAvailable(oldStatus)) {
+ // No status update. No need to send callback
+ return;
+ }
- // Iterate through all registered callbacks
- mDeviceStatusMap[cameraId] = status;
- for (auto cb : mCallbacks) {
- sp<AMessage> msg = new AMessage(kWhatSendSingleCallback, mHandler);
- ACameraManager_AvailabilityCallback cbFp = isStatusAvailable(status) ?
- cb.mAvailable : cb.mUnavailable;
- msg->setPointer(kCallbackFpKey, (void *) cbFp);
- msg->setPointer(kContextKey, cb.mContext);
- msg->setInt32(kCameraIdKey, cameraId);
- msg->post();
- }
+ // Iterate through all registered callbacks
+ mDeviceStatusMap[cameraId] = status;
+ for (auto cb : mCallbacks) {
+ sp<AMessage> msg = new AMessage(kWhatSendSingleCallback, mHandler);
+ ACameraManager_AvailabilityCallback cbFp = isStatusAvailable(status) ?
+ cb.mAvailable : cb.mUnavailable;
+ msg->setPointer(kCallbackFpKey, (void *) cbFp);
+ msg->setPointer(kContextKey, cb.mContext);
+ msg->setString(kCameraIdKey, AString(cameraId));
+ msg->post();
+ }
}
} // namespace android
@@ -311,77 +348,13 @@
* ACameraManger Implementation
*/
camera_status_t
-ACameraManager::getOrCreateCameraIdListLocked(ACameraIdList** cameraIdList) {
- if (mCachedCameraIdList.numCameras == kCameraIdListNotInit) {
- if (isCameraServiceDisabled()) {
- mCachedCameraIdList.numCameras = 0;
- mCachedCameraIdList.cameraIds = new const char*[0];
- *cameraIdList = &mCachedCameraIdList;
- return ACAMERA_OK;
- }
-
- int numCameras = 0;
- Vector<char *> cameraIds;
- sp<hardware::ICameraService> cs = CameraManagerGlobal::getInstance().getCameraService();
- if (cs == nullptr) {
- ALOGE("%s: Cannot reach camera service!", __FUNCTION__);
- return ACAMERA_ERROR_CAMERA_DISCONNECTED;
- }
- // Get number of cameras
- int numAllCameras = 0;
- binder::Status serviceRet = cs->getNumberOfCameras(hardware::ICameraService::CAMERA_TYPE_ALL,
- &numAllCameras);
- if (!serviceRet.isOk()) {
- ALOGE("%s: Error getting camera count: %s", __FUNCTION__,
- serviceRet.toString8().string());
- numAllCameras = 0;
- }
- // Filter API2 compatible cameras and push to cameraIds
- for (int i = 0; i < numAllCameras; i++) {
- // TODO: Only suppot HALs that supports API2 directly now
- bool camera2Support = false;
- serviceRet = cs->supportsCameraApi(i, hardware::ICameraService::API_VERSION_2,
- &camera2Support);
- char buf[kMaxCameraIdLen];
- if (camera2Support) {
- numCameras++;
- mCameraIds.insert(i);
- snprintf(buf, sizeof(buf), "%d", i);
- size_t cameraIdSize = strlen(buf) + 1;
- char *cameraId = new char[cameraIdSize];
- if (!cameraId) {
- ALOGE("Allocate memory for ACameraIdList failed!");
- return ACAMERA_ERROR_NOT_ENOUGH_MEMORY;
- }
- strlcpy(cameraId, buf, cameraIdSize);
- cameraIds.push(cameraId);
- }
- }
- mCachedCameraIdList.numCameras = numCameras;
- mCachedCameraIdList.cameraIds = new const char*[numCameras];
- if (!mCachedCameraIdList.cameraIds) {
- ALOGE("Allocate memory for ACameraIdList failed!");
- return ACAMERA_ERROR_NOT_ENOUGH_MEMORY;
- }
- for (int i = 0; i < numCameras; i++) {
- mCachedCameraIdList.cameraIds[i] = cameraIds[i];
- }
- }
- *cameraIdList = &mCachedCameraIdList;
- return ACAMERA_OK;
-}
-
-camera_status_t
ACameraManager::getCameraIdList(ACameraIdList** cameraIdList) {
Mutex::Autolock _l(mLock);
- ACameraIdList* cachedList;
- camera_status_t ret = getOrCreateCameraIdListLocked(&cachedList);
- if (ret != ACAMERA_OK) {
- ALOGE("Get camera ID list failed! err: %d", ret);
- return ret;
- }
- int numCameras = cachedList->numCameras;
+ std::vector<String8> idList;
+ CameraManagerGlobal::getInstance().getCameraIdList(&idList);
+
+ int numCameras = idList.size();
ACameraIdList *out = new ACameraIdList;
if (!out) {
ALOGE("Allocate memory for ACameraIdList failed!");
@@ -391,14 +364,16 @@
out->cameraIds = new const char*[numCameras];
if (!out->cameraIds) {
ALOGE("Allocate memory for ACameraIdList failed!");
+ deleteCameraIdList(out);
return ACAMERA_ERROR_NOT_ENOUGH_MEMORY;
}
for (int i = 0; i < numCameras; i++) {
- const char* src = cachedList->cameraIds[i];
+ const char* src = idList[i].string();
size_t dstSize = strlen(src) + 1;
char* dst = new char[dstSize];
if (!dst) {
ALOGE("Allocate memory for ACameraIdList failed!");
+ deleteCameraIdList(out);
return ACAMERA_ERROR_NOT_ENOUGH_MEMORY;
}
strlcpy(dst, src, dstSize);
@@ -413,7 +388,9 @@
if (cameraIdList != nullptr) {
if (cameraIdList->cameraIds != nullptr) {
for (int i = 0; i < cameraIdList->numCameras; i ++) {
- delete[] cameraIdList->cameraIds[i];
+ if (cameraIdList->cameraIds[i] != nullptr) {
+ delete[] cameraIdList->cameraIds[i];
+ }
}
delete[] cameraIdList->cameraIds;
}
@@ -424,29 +401,27 @@
camera_status_t ACameraManager::getCameraCharacteristics(
const char *cameraIdStr, ACameraMetadata **characteristics) {
Mutex::Autolock _l(mLock);
- ACameraIdList* cachedList;
- // Make sure mCameraIds is initialized
- camera_status_t ret = getOrCreateCameraIdListLocked(&cachedList);
- if (ret != ACAMERA_OK) {
- ALOGE("%s: Get camera ID list failed! err: %d", __FUNCTION__, ret);
- return ret;
- }
- int cameraId = atoi(cameraIdStr);
- if (mCameraIds.count(cameraId) == 0) {
- ALOGE("%s: Camera ID %s does not exist!", __FUNCTION__, cameraIdStr);
- return ACAMERA_ERROR_INVALID_PARAMETER;
- }
+
sp<hardware::ICameraService> cs = CameraManagerGlobal::getInstance().getCameraService();
if (cs == nullptr) {
ALOGE("%s: Cannot reach camera service!", __FUNCTION__);
return ACAMERA_ERROR_CAMERA_DISCONNECTED;
}
CameraMetadata rawMetadata;
- binder::Status serviceRet = cs->getCameraCharacteristics(cameraId, &rawMetadata);
+ binder::Status serviceRet = cs->getCameraCharacteristics(String16(cameraIdStr), &rawMetadata);
if (!serviceRet.isOk()) {
- ALOGE("Get camera characteristics from camera service failed: %s",
- serviceRet.toString8().string());
- return ACAMERA_ERROR_UNKNOWN; // should not reach here
+ switch(serviceRet.serviceSpecificErrorCode()) {
+ case hardware::ICameraService::ERROR_DISCONNECTED:
+ ALOGE("%s: Camera %s has been disconnected", __FUNCTION__, cameraIdStr);
+ return ACAMERA_ERROR_CAMERA_DISCONNECTED;
+ case hardware::ICameraService::ERROR_ILLEGAL_ARGUMENT:
+ ALOGE("%s: Camera ID %s does not exist!", __FUNCTION__, cameraIdStr);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ default:
+ ALOGE("Get camera characteristics from camera service failed: %s",
+ serviceRet.toString8().string());
+ return ACAMERA_ERROR_UNKNOWN; // should not reach here
+ }
}
*characteristics = new ACameraMetadata(
@@ -475,16 +450,16 @@
sp<hardware::ICameraService> cs = CameraManagerGlobal::getInstance().getCameraService();
if (cs == nullptr) {
ALOGE("%s: Cannot reach camera service!", __FUNCTION__);
+ delete device;
return ACAMERA_ERROR_CAMERA_DISCONNECTED;
}
- int id = atoi(cameraId);
sp<hardware::camera2::ICameraDeviceCallbacks> callbacks = device->getServiceCallback();
sp<hardware::camera2::ICameraDeviceUser> deviceRemote;
// No way to get package name from native.
// Send a zero length package name and let camera service figure it out from UID
binder::Status serviceRet = cs->connectDevice(
- callbacks, id, String16(""),
+ callbacks, String16(cameraId), String16(""),
hardware::ICameraService::USE_CALLING_UID, /*out*/&deviceRemote);
if (!serviceRet.isOk()) {
@@ -533,11 +508,5 @@
}
ACameraManager::~ACameraManager() {
- Mutex::Autolock _l(mLock);
- if (mCachedCameraIdList.numCameras != kCameraIdListNotInit) {
- for (int i = 0; i < mCachedCameraIdList.numCameras; i++) {
- delete[] mCachedCameraIdList.cameraIds[i];
- }
- delete[] mCachedCameraIdList.cameraIds;
- }
+
}
diff --git a/camera/ndk/impl/ACameraManager.h b/camera/ndk/impl/ACameraManager.h
index fc22b3a..4a172f3 100644
--- a/camera/ndk/impl/ACameraManager.h
+++ b/camera/ndk/impl/ACameraManager.h
@@ -40,7 +40,7 @@
* instances. Created when first ACameraManager is created and destroyed when
* all ACameraManager instances are deleted.
*
- * TODO: maybe CameraManagerGlobal is better sutied in libcameraclient?
+ * TODO: maybe CameraManagerGlobal is better suited in libcameraclient?
*/
class CameraManagerGlobal final : public RefBase {
public:
@@ -52,6 +52,11 @@
void unregisterAvailabilityCallback(
const ACameraManager_AvailabilityCallbacks *callback);
+ /**
+ * Return camera IDs that support camera2
+ */
+ void getCameraIdList(std::vector<String8> *cameraIds);
+
private:
sp<hardware::ICameraService> mCameraService;
const int kCameraServicePollDelay = 500000; // 0.5s
@@ -72,7 +77,7 @@
class CameraServiceListener final : public hardware::BnCameraServiceListener {
public:
explicit CameraServiceListener(CameraManagerGlobal* cm) : mCameraManager(cm) {}
- virtual binder::Status onStatusChanged(int32_t status, int32_t cameraId);
+ virtual binder::Status onStatusChanged(int32_t status, const String16& cameraId);
// Torch API not implemented yet
virtual binder::Status onTorchStatusChanged(int32_t, const String16&) {
@@ -125,22 +130,18 @@
public:
CallbackHandler() {}
void onMessageReceived(const sp<AMessage> &msg) override;
- private:
- inline void sendSingleCallback(
- int32_t cameraId, void* context,
- ACameraManager_AvailabilityCallback cb) const;
};
sp<CallbackHandler> mHandler;
sp<ALooper> mCbLooper; // Looper thread where callbacks actually happen on
- void onStatusChanged(int32_t status, int32_t cameraId);
- void onStatusChangedLocked(int32_t status, int32_t cameraId);
+ void onStatusChanged(int32_t status, const String8& cameraId);
+ void onStatusChangedLocked(int32_t status, const String8& cameraId);
// Utils for status
static bool validStatus(int32_t status);
static bool isStatusAvailable(int32_t status);
// Map camera_id -> status
- std::map<int32_t, int32_t> mDeviceStatusMap;
+ std::map<String8, int32_t> mDeviceStatusMap;
// For the singleton instance
static Mutex sLock;
@@ -157,7 +158,6 @@
*/
struct ACameraManager {
ACameraManager() :
- mCachedCameraIdList({kCameraIdListNotInit, nullptr}),
mGlobalManager(&(android::CameraManagerGlobal::getInstance())) {}
~ACameraManager();
camera_status_t getCameraIdList(ACameraIdList** cameraIdList);
@@ -170,14 +170,10 @@
/*out*/ACameraDevice** device);
private:
- camera_status_t getOrCreateCameraIdListLocked(ACameraIdList** cameraIdList);
-
enum {
kCameraIdListNotInit = -1
};
android::Mutex mLock;
- std::set<int> mCameraIds; // Init by getOrCreateCameraIdListLocked
- ACameraIdList mCachedCameraIdList; // Init by getOrCreateCameraIdListLocked
android::sp<android::CameraManagerGlobal> mGlobalManager;
};
diff --git a/camera/ndk/impl/ACameraMetadata.cpp b/camera/ndk/impl/ACameraMetadata.cpp
index d5d56d6..7b33c32 100644
--- a/camera/ndk/impl/ACameraMetadata.cpp
+++ b/camera/ndk/impl/ACameraMetadata.cpp
@@ -278,6 +278,7 @@
case ACAMERA_CONTROL_SCENE_MODE:
case ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE:
case ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST:
+ case ACAMERA_CONTROL_ENABLE_ZSL:
case ACAMERA_EDGE_MODE:
case ACAMERA_FLASH_MODE:
case ACAMERA_HOT_PIXEL_MODE:
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 0fec983..8b76cdf 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -1515,6 +1515,45 @@
*/
ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST = // int32
ACAMERA_CONTROL_START + 40,
+ /**
+ * <p>Allow camera device to enable zero-shutter-lag mode for requests with
+ * ACAMERA_CONTROL_CAPTURE_INTENT == STILL_CAPTURE.</p>
+ *
+ * @see ACAMERA_CONTROL_CAPTURE_INTENT
+ *
+ * <p>This tag may appear in:</p>
+ * <ul>
+ * <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+ * <li>ACaptureRequest</li>
+ * </ul>
+ *
+ * <p>If enableZsl is <code>true</code>, the camera device may enable zero-shutter-lag mode for requests with
+ * STILL_CAPTURE capture intent. The camera device may use images captured in the past to
+ * produce output images for a zero-shutter-lag request. The result metadata including the
+ * ACAMERA_SENSOR_TIMESTAMP reflects the source frames used to produce output images.
+ * Therefore, the contents of the output images and the result metadata may be out of order
+ * compared to previous regular requests. enableZsl does not affect requests with other
+ * capture intents.</p>
+ * <p>For example, when requests are submitted in the following order:
+ * Request A: enableZsl is <code>true</code>, ACAMERA_CONTROL_CAPTURE_INTENT is PREVIEW
+ * Request B: enableZsl is <code>true</code>, ACAMERA_CONTROL_CAPTURE_INTENT is STILL_CAPTURE</p>
+ * <p>The output images for request B may have contents captured before the output images for
+ * request A, and the result metadata for request B may be older than the result metadata for
+ * request A.</p>
+ * <p>Note that when enableZsl is <code>true</code>, it is not guaranteed to get output images captured in the
+ * past for requests with STILL_CAPTURE capture intent.</p>
+ * <p>For applications targeting SDK versions O and newer, the value of enableZsl in
+ * TEMPLATE_STILL_CAPTURE template may be <code>true</code>. The value in other templates is always
+ * <code>false</code> if present.</p>
+ * <p>For applications targeting SDK versions older than O, the value of enableZsl in all
+ * capture templates is always <code>false</code> if present.</p>
+ * <p>For application-operated ZSL, use CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG template.</p>
+ *
+ * @see ACAMERA_CONTROL_CAPTURE_INTENT
+ * @see ACAMERA_SENSOR_TIMESTAMP
+ */
+ ACAMERA_CONTROL_ENABLE_ZSL = // byte (enum)
+ ACAMERA_CONTROL_START + 41,
ACAMERA_CONTROL_END,
/**
@@ -5762,6 +5801,26 @@
} acamera_metadata_enum_android_control_awb_lock_available_t;
+// ACAMERA_CONTROL_ENABLE_ZSL
+typedef enum acamera_metadata_enum_acamera_control_enable_zsl {
+ /**
+ * <p>Requests with ACAMERA_CONTROL_CAPTURE_INTENT == STILL_CAPTURE must be captured
+ * after previous requests.</p>
+ *
+ * @see ACAMERA_CONTROL_CAPTURE_INTENT
+ */
+ ACAMERA_CONTROL_ENABLE_ZSL_FALSE = 0,
+
+ /**
+ * <p>Requests with ACAMERA_CONTROL_CAPTURE_INTENT == STILL_CAPTURE may or may not be
+ * captured before previous requests.</p>
+ *
+ * @see ACAMERA_CONTROL_CAPTURE_INTENT
+ */
+ ACAMERA_CONTROL_ENABLE_ZSL_TRUE = 1,
+
+} acamera_metadata_enum_android_control_enable_zsl_t;
+
// ACAMERA_EDGE_MODE
diff --git a/camera/tests/Android.mk b/camera/tests/Android.mk
index 0978a81..659484f 100644
--- a/camera/tests/Android.mk
+++ b/camera/tests/Android.mk
@@ -18,7 +18,8 @@
LOCAL_SRC_FILES:= \
VendorTagDescriptorTests.cpp \
- CameraBinderTests.cpp
+ CameraBinderTests.cpp \
+ CameraZSLTests.cpp
LOCAL_SHARED_LIBRARIES := \
liblog \
diff --git a/camera/tests/CameraBinderTests.cpp b/camera/tests/CameraBinderTests.cpp
index 828a758..946e3b8 100644
--- a/camera/tests/CameraBinderTests.cpp
+++ b/camera/tests/CameraBinderTests.cpp
@@ -65,14 +65,14 @@
// Stub listener implementation
class TestCameraServiceListener : public hardware::BnCameraServiceListener {
std::map<String16, int32_t> mCameraTorchStatuses;
- std::map<int32_t, int32_t> mCameraStatuses;
+ std::map<String16, int32_t> mCameraStatuses;
mutable Mutex mLock;
mutable Condition mCondition;
mutable Condition mTorchCondition;
public:
virtual ~TestCameraServiceListener() {};
- virtual binder::Status onStatusChanged(int32_t status, int32_t cameraId) {
+ virtual binder::Status onStatusChanged(int32_t status, const String16& cameraId) {
Mutex::Autolock l(mLock);
mCameraStatuses[cameraId] = status;
mCondition.broadcast();
@@ -130,7 +130,7 @@
return iter->second;
};
- int32_t getStatus(int32_t cameraId) const {
+ int32_t getStatus(const String16& cameraId) const {
Mutex::Autolock l(mLock);
const auto& iter = mCameraStatuses.find(cameraId);
if (iter == mCameraStatuses.end()) {
@@ -151,6 +151,7 @@
SENT_RESULT,
UNINITIALIZED,
REPEATING_REQUEST_ERROR,
+ REQUEST_QUEUE_EMPTY,
};
protected:
@@ -225,6 +226,14 @@
return binder::Status::ok();
}
+ virtual binder::Status onRequestQueueEmpty() {
+ Mutex::Autolock l(mLock);
+ mLastStatus = REQUEST_QUEUE_EMPTY;
+ mStatusesHit.push_back(mLastStatus);
+ mStatusCondition.broadcast();
+ return binder::Status::ok();
+ }
+
// Test helper functions:
bool hadError() const {
@@ -301,14 +310,16 @@
// Check listener binder calls
sp<TestCameraServiceListener> listener(new TestCameraServiceListener());
- res = service->addListener(listener);
+ std::vector<hardware::CameraStatus> statuses;
+ res = service->addListener(listener, &statuses);
EXPECT_TRUE(res.isOk()) << res;
- EXPECT_TRUE(listener->waitForNumCameras(numCameras));
+ EXPECT_EQ(numCameras, static_cast<const int>(statuses.size()));
for (int32_t i = 0; i < numCameras; i++) {
+ String16 cameraId = String16(String8::format("%d", i));
bool isSupported = false;
- res = service->supportsCameraApi(i,
+ res = service->supportsCameraApi(cameraId,
hardware::ICameraService::API_VERSION_2, &isSupported);
EXPECT_TRUE(res.isOk()) << res;
@@ -319,12 +330,12 @@
// Check metadata binder call
CameraMetadata metadata;
- res = service->getCameraCharacteristics(i, &metadata);
+ res = service->getCameraCharacteristics(cameraId, &metadata);
EXPECT_TRUE(res.isOk()) << res;
EXPECT_FALSE(metadata.isEmpty());
// Make sure we're available, or skip device tests otherwise
- int32_t s = listener->getStatus(i);
+ int32_t s = listener->getStatus(cameraId);
EXPECT_EQ(::android::hardware::ICameraServiceListener::STATUS_PRESENT, s);
if (s != ::android::hardware::ICameraServiceListener::STATUS_PRESENT) {
continue;
@@ -333,7 +344,7 @@
// Check connect binder calls
sp<TestCameraDeviceCallbacks> callbacks(new TestCameraDeviceCallbacks());
sp<hardware::camera2::ICameraDeviceUser> device;
- res = service->connectDevice(callbacks, i, String16("meeeeeeeee!"),
+ res = service->connectDevice(callbacks, cameraId, String16("meeeeeeeee!"),
hardware::ICameraService::USE_CALLING_UID, /*out*/&device);
EXPECT_TRUE(res.isOk()) << res;
ASSERT_NE(nullptr, device.get());
@@ -343,12 +354,12 @@
int32_t torchStatus = listener->getTorchStatus(i);
if (torchStatus == hardware::ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF) {
// Check torch calls
- res = service->setTorchMode(String16(String8::format("%d", i)),
+ res = service->setTorchMode(cameraId,
/*enabled*/true, callbacks);
EXPECT_TRUE(res.isOk()) << res;
EXPECT_TRUE(listener->waitForTorchState(
hardware::ICameraServiceListener::TORCH_STATUS_AVAILABLE_ON, i));
- res = service->setTorchMode(String16(String8::format("%d", i)),
+ res = service->setTorchMode(cameraId,
/*enabled*/false, callbacks);
EXPECT_TRUE(res.isOk()) << res;
EXPECT_TRUE(listener->waitForTorchState(
@@ -370,7 +381,7 @@
sp<TestCameraServiceListener> serviceListener;
std::pair<sp<TestCameraDeviceCallbacks>, sp<hardware::camera2::ICameraDeviceUser>>
- openNewDevice(int deviceId) {
+ openNewDevice(const String16& deviceId) {
sp<TestCameraDeviceCallbacks> callbacks(new TestCameraDeviceCallbacks());
sp<hardware::camera2::ICameraDeviceUser> device;
{
@@ -406,7 +417,8 @@
sp<IBinder> binder = sm->getService(String16("media.camera"));
service = interface_cast<hardware::ICameraService>(binder);
serviceListener = new TestCameraServiceListener();
- service->addListener(serviceListener);
+ std::vector<hardware::CameraStatus> statuses;
+ service->addListener(serviceListener, &statuses);
service->getNumberOfCameras(hardware::ICameraService::CAMERA_TYPE_BACKWARD_COMPATIBLE,
&numCameras);
}
@@ -426,14 +438,14 @@
EXPECT_TRUE(serviceListener->waitForNumCameras(numCameras));
for (int32_t i = 0; i < numCameras; i++) {
// Make sure we're available, or skip device tests otherwise
- int32_t s = serviceListener->getStatus(i);
+ String16 cameraId(String8::format("%d",i));
+ int32_t s = serviceListener->getStatus(cameraId);
EXPECT_EQ(hardware::ICameraServiceListener::STATUS_PRESENT, s);
if (s != hardware::ICameraServiceListener::STATUS_PRESENT) {
continue;
}
binder::Status res;
-
- auto p = openNewDevice(i);
+ auto p = openNewDevice(cameraId);
sp<TestCameraDeviceCallbacks> callbacks = p.first;
sp<hardware::camera2::ICameraDeviceUser> device = p.second;
diff --git a/camera/tests/CameraZSLTests.cpp b/camera/tests/CameraZSLTests.cpp
new file mode 100644
index 0000000..ecca354
--- /dev/null
+++ b/camera/tests/CameraZSLTests.cpp
@@ -0,0 +1,295 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "CameraZSLTests"
+
+#include <gtest/gtest.h>
+
+#include <binder/ProcessState.h>
+#include <utils/Errors.h>
+#include <utils/Log.h>
+#include <gui/Surface.h>
+#include <gui/SurfaceComposerClient.h>
+#include <camera/CameraParameters.h>
+#include <camera/CameraMetadata.h>
+#include <camera/Camera.h>
+#include <android/hardware/ICameraService.h>
+
+using namespace android;
+using namespace android::hardware;
+
+class CameraZSLTests : public ::testing::Test,
+ public ::android::hardware::BnCameraClient {
+protected:
+
+ CameraZSLTests() : numCameras(0), mPreviewBufferCount(0),
+ mAutoFocusMessage(false), mSnapshotNotification(false) {}
+
+ //Gtest interface
+ void SetUp() override;
+ void TearDown() override;
+
+ //CameraClient interface
+ void notifyCallback(int32_t msgType, int32_t, int32_t) override;
+ void dataCallback(int32_t msgType, const sp<IMemory>&,
+ camera_frame_metadata_t *) override;
+ void dataCallbackTimestamp(nsecs_t, int32_t,
+ const sp<IMemory>&) override {};
+ void recordingFrameHandleCallbackTimestamp(nsecs_t,
+ native_handle_t*) override {};
+ void recordingFrameHandleCallbackTimestampBatch(
+ const std::vector<nsecs_t>&,
+ const std::vector<native_handle_t*>&) override {};
+
+ status_t waitForPreviewStart();
+ status_t waitForEvent(Mutex &mutex, Condition &condition, bool &flag);
+
+ mutable Mutex mPreviewLock;
+ mutable Condition mPreviewCondition;
+ mutable Mutex mAutoFocusLock;
+ mutable Condition mAutoFocusCondition;
+ mutable Mutex mSnapshotLock;
+ mutable Condition mSnapshotCondition;
+
+ int32_t numCameras;
+ size_t mPreviewBufferCount;
+ sp<ICameraService> mCameraService;
+ sp<SurfaceComposerClient> mComposerClient;
+ bool mAutoFocusMessage;
+ bool mSnapshotNotification;
+ static const int32_t kPreviewThreshold = 8;
+ static const nsecs_t kPreviewTimeout = 5000000000; // 5 [s.]
+ static const nsecs_t kEventTimeout = 10000000000; // 10 [s.]
+};
+
+void CameraZSLTests::SetUp() {
+ ::android::binder::Status rc;
+ ProcessState::self()->startThreadPool();
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<IBinder> binder = sm->getService(String16("media.camera"));
+ mCameraService = interface_cast<ICameraService>(binder);
+ rc = mCameraService->getNumberOfCameras(
+ hardware::ICameraService::CAMERA_TYPE_ALL, &numCameras);
+ EXPECT_TRUE(rc.isOk());
+
+ mComposerClient = new SurfaceComposerClient;
+ ASSERT_EQ(NO_ERROR, mComposerClient->initCheck());
+}
+
+void CameraZSLTests::TearDown() {
+ mCameraService.clear();
+ mComposerClient->dispose();
+}
+
+void CameraZSLTests::notifyCallback(int32_t msgType, int32_t,
+ int32_t) {
+ if (CAMERA_MSG_FOCUS == msgType) {
+ Mutex::Autolock l(mAutoFocusLock);
+ mAutoFocusMessage = true;
+ mAutoFocusCondition.broadcast();
+ } else {
+ ALOGV("%s: msgType: %d", __FUNCTION__, msgType);
+ }
+};
+
+void CameraZSLTests::dataCallback(int32_t msgType, const sp<IMemory>& /*data*/,
+ camera_frame_metadata_t *) {
+
+ switch (msgType) {
+ case CAMERA_MSG_PREVIEW_FRAME: {
+ Mutex::Autolock l(mPreviewLock);
+ mPreviewBufferCount++;
+ mPreviewCondition.broadcast();
+ break;
+ }
+ case CAMERA_MSG_COMPRESSED_IMAGE: {
+ Mutex::Autolock l(mSnapshotLock);
+ mSnapshotNotification = true;
+ //TODO: Add checks on incoming Jpeg
+ mSnapshotCondition.broadcast();
+ break;
+ }
+ default:
+ ALOGV("%s: msgType: %d", __FUNCTION__, msgType);
+ }
+};
+
+status_t CameraZSLTests::waitForPreviewStart() {
+ status_t rc = NO_ERROR;
+ Mutex::Autolock l(mPreviewLock);
+ mPreviewBufferCount = 0;
+
+ while (mPreviewBufferCount < kPreviewThreshold) {
+ rc = mPreviewCondition.waitRelative(mPreviewLock,
+ kPreviewTimeout);
+ if (NO_ERROR != rc) {
+ break;
+ }
+ }
+
+ return rc;
+}
+
+status_t CameraZSLTests::waitForEvent(Mutex &mutex,
+ Condition &condition, bool &flag) {
+ status_t rc = NO_ERROR;
+ Mutex::Autolock l(mutex);
+ flag = false;
+
+ while (!flag) {
+ rc = condition.waitRelative(mutex,
+ kEventTimeout);
+ if (NO_ERROR != rc) {
+ break;
+ }
+ }
+
+ return rc;
+}
+
+TEST_F(CameraZSLTests, TestAllPictureSizes) {
+ ::android::binder::Status rc;
+
+ for (int32_t cameraId = 0; cameraId < numCameras; cameraId++) {
+ sp<Surface> previewSurface;
+ sp<SurfaceControl> surfaceControl;
+ sp<ICamera> cameraDevice;
+
+ String16 cameraIdStr = String16(String8::format("%d", cameraId));
+ bool isSupported = false;
+ rc = mCameraService->supportsCameraApi(cameraIdStr,
+ hardware::ICameraService::API_VERSION_1, &isSupported);
+ EXPECT_TRUE(rc.isOk());
+
+ // We only care about camera Camera1 ZSL support.
+ if (!isSupported) {
+ continue;
+ }
+
+ CameraMetadata metadata;
+ rc = mCameraService->getCameraCharacteristics(cameraIdStr, &metadata);
+ if (!rc.isOk()) {
+ // The test is relevant only for cameras with Hal 3.x
+ // support.
+ continue;
+ }
+ EXPECT_FALSE(metadata.isEmpty());
+ camera_metadata_entry_t availableCapabilities =
+ metadata.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+ EXPECT_TRUE(0 < availableCapabilities.count);
+ bool isReprocessSupported = false;
+ const uint8_t *caps = availableCapabilities.data.u8;
+ for (size_t i = 0; i < availableCapabilities.count; i++) {
+ if (ANDROID_REQUEST_AVAILABLE_CAPABILITIES_PRIVATE_REPROCESSING ==
+ caps[i]) {
+ isReprocessSupported = true;
+ break;
+ }
+ }
+ if (!isReprocessSupported) {
+ // ZSL relies on this feature
+ continue;
+ }
+
+ rc = mCameraService->connect(this, cameraId,
+ String16("ZSLTest"), hardware::ICameraService::USE_CALLING_UID,
+ hardware::ICameraService::USE_CALLING_PID, &cameraDevice);
+ EXPECT_TRUE(rc.isOk());
+
+ CameraParameters params(cameraDevice->getParameters());
+
+ String8 focusModes(params.get(
+ CameraParameters::KEY_SUPPORTED_FOCUS_MODES));
+ bool isAFSupported = false;
+ const char *focusMode = nullptr;
+ if (focusModes.contains(CameraParameters::FOCUS_MODE_AUTO)) {
+ // If supported 'auto' should be set by default
+ isAFSupported = true;
+ } else if (focusModes.contains(
+ CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE)) {
+ isAFSupported = true;
+ focusMode = CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE;
+ } else if (focusModes.contains(
+ CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO)) {
+ isAFSupported = true;
+ focusMode = CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO;
+ } else if (focusModes.contains(CameraParameters::FOCUS_MODE_MACRO)) {
+ isAFSupported = true;
+ focusMode = CameraParameters::FOCUS_MODE_MACRO;
+ }
+
+ if (!isAFSupported) {
+ // AF state is needed
+ continue;
+ }
+
+ if (nullptr != focusMode) {
+ params.set(CameraParameters::KEY_FOCUS_MODE, focusMode);
+ ASSERT_EQ(NO_ERROR, cameraDevice->setParameters(params.flatten()));
+ }
+
+ int previewWidth, previewHeight;
+ params.getPreviewSize(&previewWidth, &previewHeight);
+ ASSERT_TRUE((0 < previewWidth) && (0 < previewHeight));
+
+ surfaceControl = mComposerClient->createSurface(
+ String8("Test Surface"),
+ previewWidth, previewHeight,
+ CameraParameters::previewFormatToEnum(
+ params.getPreviewFormat()),
+ GRALLOC_USAGE_HW_RENDER);
+
+ ASSERT_TRUE(nullptr != surfaceControl.get());
+ ASSERT_TRUE(surfaceControl->isValid());
+
+ SurfaceComposerClient::openGlobalTransaction();
+ ASSERT_EQ(NO_ERROR, surfaceControl->setLayer(0x7fffffff));
+ ASSERT_EQ(NO_ERROR, surfaceControl->show());
+ SurfaceComposerClient::closeGlobalTransaction();
+
+ previewSurface = surfaceControl->getSurface();
+ ASSERT_TRUE(previewSurface != NULL);
+ ASSERT_EQ(NO_ERROR, cameraDevice->setPreviewTarget(
+ previewSurface->getIGraphicBufferProducer()));
+
+ cameraDevice->setPreviewCallbackFlag(
+ CAMERA_FRAME_CALLBACK_FLAG_CAMCORDER);
+
+ Vector<Size> pictureSizes;
+ params.getSupportedPictureSizes(pictureSizes);
+ for (size_t i = 0; i < pictureSizes.size(); i++) {
+ params.setPictureSize(pictureSizes[i].width,
+ pictureSizes[i].height);
+ ASSERT_EQ(NO_ERROR, cameraDevice->setParameters(params.flatten()));
+ ASSERT_EQ(NO_ERROR, cameraDevice->startPreview());
+ ASSERT_EQ(NO_ERROR, waitForPreviewStart());
+
+ ASSERT_EQ(NO_ERROR, cameraDevice->autoFocus());
+ ASSERT_EQ(NO_ERROR, waitForEvent(mAutoFocusLock,
+ mAutoFocusCondition, mAutoFocusMessage));
+
+ ASSERT_EQ(NO_ERROR,
+ cameraDevice->takePicture(CAMERA_MSG_COMPRESSED_IMAGE));
+ ASSERT_EQ(NO_ERROR, waitForEvent(mSnapshotLock, mSnapshotCondition,
+ mSnapshotNotification));
+ }
+
+ cameraDevice->stopPreview();
+ rc = cameraDevice->disconnect();
+ EXPECT_TRUE(rc.isOk());
+ }
+}
diff --git a/cmds/screenrecord/Android.mk b/cmds/screenrecord/Android.mk
index 6f210e6..7aa684a 100644
--- a/cmds/screenrecord/Android.mk
+++ b/cmds/screenrecord/Android.mk
@@ -34,6 +34,7 @@
frameworks/native/include/media/openmax \
external/jpeg
+LOCAL_CFLAGS := -Werror -Wall
LOCAL_CFLAGS += -Wno-multichar
#LOCAL_CFLAGS += -UNDEBUG
diff --git a/cmds/screenrecord/EglWindow.cpp b/cmds/screenrecord/EglWindow.cpp
index c16f2ad..5ea0706 100644
--- a/cmds/screenrecord/EglWindow.cpp
+++ b/cmds/screenrecord/EglWindow.cpp
@@ -21,7 +21,6 @@
#define EGL_EGLEXT_PROTOTYPES
#include <gui/BufferQueue.h>
-#include <gui/GraphicBufferAlloc.h>
#include <gui/Surface.h>
#include "EglWindow.h"
diff --git a/cmds/screenrecord/Overlay.cpp b/cmds/screenrecord/Overlay.cpp
index 9fd192c..17d7046 100644
--- a/cmds/screenrecord/Overlay.cpp
+++ b/cmds/screenrecord/Overlay.cpp
@@ -23,7 +23,6 @@
#include <utils/Log.h>
#include <gui/BufferQueue.h>
-#include <gui/GraphicBufferAlloc.h>
#include <gui/Surface.h>
#include <cutils/properties.h>
#include <utils/misc.h>
@@ -203,7 +202,6 @@
mGlConsumer->getTransformMatrix(texMatrix);
nsecs_t monotonicNsec = mGlConsumer->getTimestamp();
nsecs_t frameNumber = mGlConsumer->getFrameNumber();
- int64_t droppedFrames = 0;
if (mLastFrameNumber > 0) {
mTotalDroppedFrames += size_t(frameNumber - mLastFrameNumber) - 1;
@@ -259,6 +257,11 @@
const char* format = "%T";
struct tm tm;
+ if (mUseMonotonicTimestamps) {
+ snprintf(buf, bufLen, "%" PRId64, monotonicNsec);
+ return;
+ }
+
// localtime/strftime is not the fastest way to do this, but a trivial
// benchmark suggests that the cost is negligible.
int64_t realTime = mStartRealtimeNsecs +
diff --git a/cmds/screenrecord/Overlay.h b/cmds/screenrecord/Overlay.h
index ee3444d..1d8a569 100644
--- a/cmds/screenrecord/Overlay.h
+++ b/cmds/screenrecord/Overlay.h
@@ -37,7 +37,7 @@
*/
class Overlay : public GLConsumer::FrameAvailableListener, Thread {
public:
- Overlay() : Thread(false),
+ Overlay(bool monotonicTimestamps) : Thread(false),
mThreadResult(UNKNOWN_ERROR),
mState(UNINITIALIZED),
mFrameAvailable(false),
@@ -45,7 +45,8 @@
mStartMonotonicNsecs(0),
mStartRealtimeNsecs(0),
mLastFrameNumber(-1),
- mTotalDroppedFrames(0)
+ mTotalDroppedFrames(0),
+ mUseMonotonicTimestamps(monotonicTimestamps)
{}
// Creates a thread that performs the overlay. Pass in the surface that
@@ -151,6 +152,8 @@
nsecs_t mLastFrameNumber;
size_t mTotalDroppedFrames;
+ bool mUseMonotonicTimestamps;
+
static const char* kPropertyNames[];
};
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index 59d5661..de0167a 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -51,6 +51,7 @@
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MediaMuxer.h>
#include <media/ICrypto.h>
+#include <media/MediaCodecBuffer.h>
#include "screenrecord.h"
#include "Overlay.h"
@@ -68,6 +69,7 @@
// Command-line parameters.
static bool gVerbose = false; // chatty on stdout
static bool gRotate = false; // rotate 90 degrees
+static bool gMonotonicTime = false; // use system monotonic time for timestamps
static enum {
FORMAT_MP4, FORMAT_H264, FORMAT_FRAMES, FORMAT_RAW_FRAMES
} gOutputFormat = FORMAT_MP4; // data format for output
@@ -213,7 +215,6 @@
*/
static status_t setDisplayProjection(const sp<IBinder>& dpy,
const DisplayInfo& mainDpyInfo) {
- status_t err;
// Set the region of the layer stack we're interested in, which in our
// case is "all of it". If the app is rotated (so that the width of the
@@ -327,7 +328,7 @@
assert((rawFp == NULL && muxer != NULL) || (rawFp != NULL && muxer == NULL));
- Vector<sp<ABuffer> > buffers;
+ Vector<sp<MediaCodecBuffer> > buffers;
err = encoder->getOutputBuffers(&buffers);
if (err != NO_ERROR) {
fprintf(stderr, "Unable to get output buffers (err=%d)\n", err);
@@ -410,7 +411,10 @@
// want to queue these up and do them on a different thread.
ATRACE_NAME("write sample");
assert(trackIdx != -1);
- err = muxer->writeSampleData(buffers[bufIndex], trackIdx,
+ // TODO
+ sp<ABuffer> buffer = new ABuffer(
+ buffers[bufIndex]->data(), buffers[bufIndex]->size());
+ err = muxer->writeSampleData(buffer, trackIdx,
ptsUsec, flags);
if (err != NO_ERROR) {
fprintf(stderr,
@@ -609,7 +613,7 @@
sp<Overlay> overlay;
if (gWantFrameTime) {
// Send virtual display frames to an external texture.
- overlay = new Overlay();
+ overlay = new Overlay(gMonotonicTime);
err = overlay->start(encoderInputSurface, &bufferProducer);
if (err != NO_ERROR) {
if (encoder != NULL) encoder->release();
@@ -892,6 +896,7 @@
{ "show-frame-time", no_argument, NULL, 'f' },
{ "rotate", no_argument, NULL, 'r' },
{ "output-format", required_argument, NULL, 'o' },
+ { "monotonic-time", no_argument, NULL, 'm' },
{ NULL, 0, NULL, 0 }
};
@@ -971,6 +976,9 @@
return 2;
}
break;
+ case 'm':
+ gMonotonicTime = true;
+ break;
default:
if (ic != '?') {
fprintf(stderr, "getopt_long returned unexpected value 0x%x\n", ic);
diff --git a/cmds/stagefright/Android.mk b/cmds/stagefright/Android.mk
index d952428..f647ffd 100644
--- a/cmds/stagefright/Android.mk
+++ b/cmds/stagefright/Android.mk
@@ -3,19 +3,21 @@
include $(CLEAR_VARS)
LOCAL_SRC_FILES:= \
- stagefright.cpp \
- jpeg.cpp \
- SineSource.cpp
+ stagefright.cpp \
+ jpeg.cpp \
+ SineSource.cpp
LOCAL_SHARED_LIBRARIES := \
- libstagefright libmedia libutils libbinder libstagefright_foundation \
- libjpeg libgui libcutils liblog
+ libstagefright libmedia libutils libbinder libstagefright_foundation \
+ libjpeg libgui libcutils liblog \
+ libhidlmemory \
+ android.hardware.media.omx@1.0 \
LOCAL_C_INCLUDES:= \
- frameworks/av/media/libstagefright \
- frameworks/av/media/libstagefright/include \
- frameworks/native/include/media/openmax \
- external/jpeg \
+ frameworks/av/media/libstagefright \
+ frameworks/av/media/libstagefright/include \
+ frameworks/native/include/media/openmax \
+ external/jpeg \
LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
@@ -30,16 +32,16 @@
include $(CLEAR_VARS)
LOCAL_SRC_FILES:= \
- SineSource.cpp \
- record.cpp
+ SineSource.cpp \
+ record.cpp
LOCAL_SHARED_LIBRARIES := \
- libstagefright libmedia liblog libutils libbinder libstagefright_foundation
+ libstagefright libmedia liblog libutils libbinder libstagefright_foundation
LOCAL_C_INCLUDES:= \
- frameworks/av/media/libstagefright \
- frameworks/native/include/media/openmax \
- frameworks/native/include/media/hardware
+ frameworks/av/media/libstagefright \
+ frameworks/native/include/media/openmax \
+ frameworks/native/include/media/hardware
LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
@@ -54,16 +56,16 @@
include $(CLEAR_VARS)
LOCAL_SRC_FILES:= \
- SineSource.cpp \
- recordvideo.cpp
+ SineSource.cpp \
+ recordvideo.cpp
LOCAL_SHARED_LIBRARIES := \
- libstagefright libmedia liblog libutils libbinder libstagefright_foundation
+ libstagefright libmedia liblog libutils libbinder libstagefright_foundation
LOCAL_C_INCLUDES:= \
- frameworks/av/media/libstagefright \
- frameworks/native/include/media/openmax \
- frameworks/native/include/media/hardware
+ frameworks/av/media/libstagefright \
+ frameworks/native/include/media/openmax \
+ frameworks/native/include/media/hardware
LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
@@ -79,15 +81,15 @@
include $(CLEAR_VARS)
LOCAL_SRC_FILES:= \
- SineSource.cpp \
- audioloop.cpp
+ SineSource.cpp \
+ audioloop.cpp
LOCAL_SHARED_LIBRARIES := \
- libstagefright libmedia liblog libutils libbinder libstagefright_foundation
+ libstagefright libmedia liblog libutils libbinder libstagefright_foundation
LOCAL_C_INCLUDES:= \
- frameworks/av/media/libstagefright \
- frameworks/native/include/media/openmax
+ frameworks/av/media/libstagefright \
+ frameworks/native/include/media/openmax
LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
@@ -105,12 +107,12 @@
stream.cpp \
LOCAL_SHARED_LIBRARIES := \
- libstagefright liblog libutils libbinder libgui \
- libstagefright_foundation libmedia libcutils
+ libstagefright liblog libutils libbinder libgui \
+ libstagefright_foundation libmedia libcutils
LOCAL_C_INCLUDES:= \
- frameworks/av/media/libstagefright \
- frameworks/native/include/media/openmax
+ frameworks/av/media/libstagefright \
+ frameworks/native/include/media/openmax
LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
@@ -124,40 +126,17 @@
include $(CLEAR_VARS)
-LOCAL_SRC_FILES:= \
- sf2.cpp \
-
-LOCAL_SHARED_LIBRARIES := \
- libstagefright liblog libutils libbinder libstagefright_foundation \
- libmedia libgui libcutils
-
-LOCAL_C_INCLUDES:= \
- frameworks/av/media/libstagefright \
- frameworks/native/include/media/openmax
-
-LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_MODULE:= sf2
-
-include $(BUILD_EXECUTABLE)
-
-################################################################################
-
-include $(CLEAR_VARS)
-
LOCAL_SRC_FILES:= \
- codec.cpp \
- SimplePlayer.cpp \
+ codec.cpp \
+ SimplePlayer.cpp \
LOCAL_SHARED_LIBRARIES := \
- libstagefright liblog libutils libbinder libstagefright_foundation \
- libmedia libaudioclient libgui libcutils
+ libstagefright liblog libutils libbinder libstagefright_foundation \
+ libmedia libaudioclient libgui libcutils
LOCAL_C_INCLUDES:= \
- frameworks/av/media/libstagefright \
- frameworks/native/include/media/openmax
+ frameworks/av/media/libstagefright \
+ frameworks/native/include/media/openmax
LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
@@ -172,33 +151,33 @@
include $(CLEAR_VARS)
LOCAL_SRC_FILES:= \
- filters/argbtorgba.rs \
- filters/nightvision.rs \
- filters/saturation.rs \
- mediafilter.cpp \
+ filters/argbtorgba.rs \
+ filters/nightvision.rs \
+ filters/saturation.rs \
+ mediafilter.cpp \
LOCAL_SHARED_LIBRARIES := \
- libstagefright \
- liblog \
- libutils \
- libbinder \
- libstagefright_foundation \
- libmedia \
- libgui \
- libcutils \
- libRScpp \
+ libstagefright \
+ liblog \
+ libutils \
+ libbinder \
+ libstagefright_foundation \
+ libmedia \
+ libgui \
+ libcutils \
+ libRScpp \
LOCAL_C_INCLUDES:= \
- frameworks/av/media/libstagefright \
- frameworks/native/include/media/openmax \
- frameworks/rs/cpp \
- frameworks/rs \
+ frameworks/av/media/libstagefright \
+ frameworks/native/include/media/openmax \
+ frameworks/rs/cpp \
+ frameworks/rs \
intermediates := $(call intermediates-dir-for,STATIC_LIBRARIES,libRS,TARGET,)
LOCAL_C_INCLUDES += $(intermediates)
LOCAL_STATIC_LIBRARIES:= \
- libstagefright_mediafilter
+ libstagefright_mediafilter
LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
@@ -206,6 +185,9 @@
LOCAL_MODULE:= mediafilter
+LOCAL_SANITIZE := cfi
+LOCAL_SANITIZE_DIAG := cfi
+
include $(BUILD_EXECUTABLE)
################################################################################
@@ -213,15 +195,15 @@
include $(CLEAR_VARS)
LOCAL_SRC_FILES:= \
- muxer.cpp \
+ muxer.cpp \
LOCAL_SHARED_LIBRARIES := \
- libstagefright liblog libutils libbinder libstagefright_foundation \
- libcutils libc
+ libstagefright liblog libutils libbinder libstagefright_foundation \
+ libcutils libc
LOCAL_C_INCLUDES:= \
- frameworks/av/media/libstagefright \
- frameworks/native/include/media/openmax
+ frameworks/av/media/libstagefright \
+ frameworks/native/include/media/openmax
LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
diff --git a/cmds/stagefright/SimplePlayer.cpp b/cmds/stagefright/SimplePlayer.cpp
index 50913cd..afb7db3 100644
--- a/cmds/stagefright/SimplePlayer.cpp
+++ b/cmds/stagefright/SimplePlayer.cpp
@@ -25,6 +25,7 @@
#include <media/AudioTrack.h>
#include <media/ICrypto.h>
#include <media/IMediaHTTPService.h>
+#include <media/MediaCodecBuffer.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -355,7 +356,7 @@
err = state->mCodec->dequeueInputBuffer(&index, -1ll);
CHECK_EQ(err, (status_t)OK);
- const sp<ABuffer> &dstBuffer = state->mBuffers[0].itemAt(index);
+ const sp<MediaCodecBuffer> &dstBuffer = state->mBuffers[0].itemAt(index);
CHECK_LE(srcBuffer->size(), dstBuffer->capacity());
dstBuffer->setRange(0, srcBuffer->size());
@@ -482,11 +483,13 @@
state->mAvailInputBufferIndices.erase(
state->mAvailInputBufferIndices.begin());
- const sp<ABuffer> &dstBuffer =
+ const sp<MediaCodecBuffer> &dstBuffer =
state->mBuffers[0].itemAt(index);
+ sp<ABuffer> abuffer = new ABuffer(dstBuffer->base(), dstBuffer->capacity());
- err = mExtractor->readSampleData(dstBuffer);
+ err = mExtractor->readSampleData(abuffer);
CHECK_EQ(err, (status_t)OK);
+ dstBuffer->setRange(abuffer->offset(), abuffer->size());
int64_t timeUs;
CHECK_EQ(mExtractor->getSampleTime(&timeUs), (status_t)OK);
@@ -530,7 +533,7 @@
state->mCodec->releaseOutputBuffer(info->mIndex);
} else {
if (state->mAudioTrack != NULL) {
- const sp<ABuffer> &srcBuffer =
+ const sp<MediaCodecBuffer> &srcBuffer =
state->mBuffers[1].itemAt(info->mIndex);
renderAudio(state, info, srcBuffer);
@@ -597,7 +600,7 @@
}
void SimplePlayer::renderAudio(
- CodecState *state, BufferInfo *info, const sp<ABuffer> &buffer) {
+ CodecState *state, BufferInfo *info, const sp<MediaCodecBuffer> &buffer) {
CHECK(state->mAudioTrack != NULL);
if (state->mAudioTrack->stopped()) {
diff --git a/cmds/stagefright/SimplePlayer.h b/cmds/stagefright/SimplePlayer.h
index 0d8c74a..1269162 100644
--- a/cmds/stagefright/SimplePlayer.h
+++ b/cmds/stagefright/SimplePlayer.h
@@ -25,6 +25,7 @@
class AudioTrack;
class IGraphicBufferProducer;
struct MediaCodec;
+class MediaCodecBuffer;
struct NuMediaExtractor;
class Surface;
@@ -73,7 +74,7 @@
{
sp<MediaCodec> mCodec;
Vector<sp<ABuffer> > mCSD;
- Vector<sp<ABuffer> > mBuffers[2];
+ Vector<sp<MediaCodecBuffer> > mBuffers[2];
List<size_t> mAvailInputBufferIndices;
List<BufferInfo> mAvailOutputBufferInfos;
@@ -101,7 +102,7 @@
status_t onOutputFormatChanged(size_t trackIndex, CodecState *state);
void renderAudio(
- CodecState *state, BufferInfo *info, const sp<ABuffer> &buffer);
+ CodecState *state, BufferInfo *info, const sp<MediaCodecBuffer> &buffer);
DISALLOW_EVIL_CONSTRUCTORS(SimplePlayer);
};
diff --git a/cmds/stagefright/codec.cpp b/cmds/stagefright/codec.cpp
index dae9bbe..3108a67 100644
--- a/cmds/stagefright/codec.cpp
+++ b/cmds/stagefright/codec.cpp
@@ -26,12 +26,12 @@
#include <media/ICrypto.h>
#include <media/IMediaHTTPService.h>
#include <media/IMediaPlayerService.h>
+#include <media/MediaCodecBuffer.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AString.h>
-#include <media/stagefright/DataSource.h>
#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/MediaCodecList.h>
#include <media/stagefright/MediaDefs.h>
@@ -56,8 +56,8 @@
struct CodecState {
sp<MediaCodec> mCodec;
- Vector<sp<ABuffer> > mInBuffers;
- Vector<sp<ABuffer> > mOutBuffers;
+ Vector<sp<MediaCodecBuffer> > mInBuffers;
+ Vector<sp<MediaCodecBuffer> > mOutBuffers;
bool mSignalledInputEOS;
bool mSawOutputEOS;
int64_t mNumBuffersDecoded;
@@ -174,10 +174,12 @@
if (err == OK) {
ALOGV("filling input buffer %zu", index);
- const sp<ABuffer> &buffer = state->mInBuffers.itemAt(index);
+ const sp<MediaCodecBuffer> &buffer = state->mInBuffers.itemAt(index);
+ sp<ABuffer> abuffer = new ABuffer(buffer->base(), buffer->capacity());
- err = extractor->readSampleData(buffer);
+ err = extractor->readSampleData(abuffer);
CHECK_EQ(err, (status_t)OK);
+ buffer->setRange(abuffer->offset(), abuffer->size());
int64_t timeUs;
err = extractor->getSampleTime(&timeUs);
@@ -398,8 +400,6 @@
ProcessState::self()->startThreadPool();
- DataSource::RegisterDefaultSniffers();
-
sp<ALooper> looper = new ALooper;
looper->start();
diff --git a/cmds/stagefright/mediafilter.cpp b/cmds/stagefright/mediafilter.cpp
index d829df0..f219e69 100644
--- a/cmds/stagefright/mediafilter.cpp
+++ b/cmds/stagefright/mediafilter.cpp
@@ -26,10 +26,10 @@
#include <gui/Surface.h>
#include <media/ICrypto.h>
#include <media/IMediaHTTPService.h>
+#include <media/MediaCodecBuffer.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/DataSource.h>
#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/NuMediaExtractor.h>
#include <media/stagefright/RenderScriptWrapper.h>
@@ -131,8 +131,8 @@
struct CodecState {
sp<MediaCodec> mCodec;
- Vector<sp<ABuffer> > mInBuffers;
- Vector<sp<ABuffer> > mOutBuffers;
+ Vector<sp<MediaCodecBuffer> > mInBuffers;
+ Vector<sp<MediaCodecBuffer> > mOutBuffers;
bool mSignalledInputEOS;
bool mSawOutputEOS;
int64_t mNumBuffersDecoded;
@@ -183,9 +183,9 @@
}
size_t outIndex = frame.index;
- const sp<ABuffer> &srcBuffer =
+ const sp<MediaCodecBuffer> &srcBuffer =
vidState->mOutBuffers.itemAt(outIndex);
- const sp<ABuffer> &destBuffer =
+ const sp<MediaCodecBuffer> &destBuffer =
filterState->mInBuffers.itemAt(filterIndex);
sp<AMessage> srcFormat, destFormat;
@@ -532,10 +532,12 @@
if (err == OK) {
ALOGV("filling input buffer %zu", index);
- const sp<ABuffer> &buffer = state->mInBuffers.itemAt(index);
+ const sp<MediaCodecBuffer> &buffer = state->mInBuffers.itemAt(index);
+ sp<ABuffer> abuffer = new ABuffer(buffer->base(), buffer->capacity());
- err = extractor->readSampleData(buffer);
+ err = extractor->readSampleData(abuffer);
CHECK(err == OK);
+ buffer->setRange(abuffer->offset(), abuffer->size());
int64_t timeUs;
err = extractor->getSampleTime(&timeUs);
@@ -735,8 +737,6 @@
ProcessState::self()->startThreadPool();
- DataSource::RegisterDefaultSniffers();
-
android::sp<ALooper> looper = new ALooper;
looper->start();
diff --git a/cmds/stagefright/muxer.cpp b/cmds/stagefright/muxer.cpp
index 0a3bdf3..4a83a4a 100644
--- a/cmds/stagefright/muxer.cpp
+++ b/cmds/stagefright/muxer.cpp
@@ -29,7 +29,6 @@
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AString.h>
-#include <media/stagefright/DataSource.h>
#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaMuxer.h>
@@ -319,9 +318,6 @@
}
ProcessState::self()->startThreadPool();
- // Make sure setDataSource() works.
- DataSource::RegisterDefaultSniffers();
-
sp<ALooper> looper = new ALooper;
looper->start();
diff --git a/cmds/stagefright/record.cpp b/cmds/stagefright/record.cpp
index 9aa0156..94c2e96 100644
--- a/cmds/stagefright/record.cpp
+++ b/cmds/stagefright/record.cpp
@@ -170,8 +170,6 @@
int main(int argc, char **argv) {
android::ProcessState::self()->startThreadPool();
- DataSource::RegisterDefaultSniffers();
-
#if 1
if (argc != 3) {
fprintf(stderr, "usage: %s <filename> <input_color_format>\n", argv[0]);
diff --git a/cmds/stagefright/sf2.cpp b/cmds/stagefright/sf2.cpp
deleted file mode 100644
index 1a4bf08..0000000
--- a/cmds/stagefright/sf2.cpp
+++ /dev/null
@@ -1,682 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "sf2"
-#include <inttypes.h>
-#include <utils/Log.h>
-
-#include <signal.h>
-
-#include <binder/ProcessState.h>
-
-#include <media/IMediaHTTPService.h>
-
-#include <media/stagefright/foundation/hexdump.h>
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/ALooper.h>
-#include <media/stagefright/foundation/AMessage.h>
-
-#include <media/stagefright/ACodec.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaExtractor.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/Utils.h>
-
-#include <gui/SurfaceComposerClient.h>
-#include <gui/Surface.h>
-
-#include "include/ESDS.h"
-
-using namespace android;
-
-volatile static bool ctrlc = false;
-
-static sighandler_t oldhandler = NULL;
-
-static void mysighandler(int signum) {
- if (signum == SIGINT) {
- ctrlc = true;
- return;
- }
- oldhandler(signum);
-}
-
-struct Controller : public AHandler {
- Controller(const char *uri, bool decodeAudio,
- const sp<Surface> &surface, bool renderToSurface)
- : mURI(uri),
- mDecodeAudio(decodeAudio),
- mSurface(surface),
- mRenderToSurface(renderToSurface),
- mCodec(new ACodec),
- mIsVorbis(false) {
- CHECK(!mDecodeAudio || mSurface == NULL);
- }
-
- void startAsync() {
- (new AMessage(kWhatStart, this))->post();
- }
-
-protected:
- virtual ~Controller() {
- }
-
- virtual void printStatistics() {
- int64_t delayUs = ALooper::GetNowUs() - mStartTimeUs;
-
- if (mDecodeAudio) {
- printf("%" PRId64 " bytes received. %.2f KB/sec\n",
- mTotalBytesReceived,
- mTotalBytesReceived * 1E6 / 1024 / delayUs);
- } else {
- printf("%d frames decoded, %.2f fps. %" PRId64 " bytes "
- "received. %.2f KB/sec\n",
- mNumOutputBuffersReceived,
- mNumOutputBuffersReceived * 1E6 / delayUs,
- mTotalBytesReceived,
- mTotalBytesReceived * 1E6 / 1024 / delayUs);
- }
- }
-
- virtual void onMessageReceived(const sp<AMessage> &msg) {
- if (ctrlc) {
- printf("\n");
- printStatistics();
- (new AMessage(kWhatStop, this))->post();
- ctrlc = false;
- }
- switch (msg->what()) {
- case kWhatStart:
- {
-#if 1
- mDecodeLooper = looper();
-#else
- mDecodeLooper = new ALooper;
- mDecodeLooper->setName("sf2 decode looper");
- mDecodeLooper->start();
-#endif
-
- sp<DataSource> dataSource =
- DataSource::CreateFromURI(
- NULL /* httpService */, mURI.c_str());
-
- sp<IMediaExtractor> extractor =
- MediaExtractor::Create(dataSource);
-
- for (size_t i = 0; i < extractor->countTracks(); ++i) {
- sp<MetaData> meta = extractor->getTrackMetaData(i);
-
- const char *mime;
- CHECK(meta->findCString(kKeyMIMEType, &mime));
-
- if (!strncasecmp(mDecodeAudio ? "audio/" : "video/",
- mime, 6)) {
- mSource = extractor->getTrack(i);
-
- if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS)) {
- mIsVorbis = true;
- } else {
- mIsVorbis = false;
- }
- break;
- }
- }
- if (mSource == NULL) {
- printf("no %s track found\n", mDecodeAudio ? "audio" : "video");
- exit (1);
- }
-
- CHECK_EQ(mSource->start(), (status_t)OK);
-
- mDecodeLooper->registerHandler(mCodec);
-
- mCodec->setNotificationMessage(
- new AMessage(kWhatCodecNotify, this));
-
- sp<AMessage> format = makeFormat(mSource->getFormat());
-
- if (mSurface != NULL) {
- format->setObject("surface", mSurface);
- }
-
- mCodec->initiateSetup(format);
-
- mCSDIndex = 0;
- mStartTimeUs = ALooper::GetNowUs();
- mNumOutputBuffersReceived = 0;
- mTotalBytesReceived = 0;
- mLeftOverBuffer = NULL;
- mFinalResult = OK;
- mSeekState = SEEK_NONE;
-
- // (new AMessage(kWhatSeek, this))->post(5000000ll);
- break;
- }
-
- case kWhatSeek:
- {
- printf("+");
- fflush(stdout);
-
- CHECK(mSeekState == SEEK_NONE
- || mSeekState == SEEK_FLUSH_COMPLETED);
-
- if (mLeftOverBuffer != NULL) {
- mLeftOverBuffer->release();
- mLeftOverBuffer = NULL;
- }
-
- mSeekState = SEEK_FLUSHING;
- mSeekTimeUs = 30000000ll;
-
- mCodec->signalFlush();
- break;
- }
-
- case kWhatStop:
- {
- if (mLeftOverBuffer != NULL) {
- mLeftOverBuffer->release();
- mLeftOverBuffer = NULL;
- }
-
- CHECK_EQ(mSource->stop(), (status_t)OK);
- mSource.clear();
-
- mCodec->initiateShutdown();
- break;
- }
-
- case kWhatCodecNotify:
- {
- int32_t what;
- CHECK(msg->findInt32("what", &what));
-
- if (what == CodecBase::kWhatFillThisBuffer) {
- onFillThisBuffer(msg);
- } else if (what == CodecBase::kWhatDrainThisBuffer) {
- if ((mNumOutputBuffersReceived++ % 16) == 0) {
- printf(".");
- fflush(stdout);
- }
-
- onDrainThisBuffer(msg);
- } else if (what == CodecBase::kWhatEOS
- || what == CodecBase::kWhatError) {
- printf((what == CodecBase::kWhatEOS) ? "$\n" : "E\n");
-
- printStatistics();
- (new AMessage(kWhatStop, this))->post();
- } else if (what == CodecBase::kWhatFlushCompleted) {
- mSeekState = SEEK_FLUSH_COMPLETED;
- mCodec->signalResume();
-
- (new AMessage(kWhatSeek, this))->post(5000000ll);
- } else if (what == CodecBase::kWhatOutputFormatChanged) {
- } else if (what == CodecBase::kWhatShutdownCompleted) {
- mDecodeLooper->unregisterHandler(mCodec->id());
-
- if (mDecodeLooper != looper()) {
- mDecodeLooper->stop();
- }
-
- looper()->stop();
- }
- break;
- }
-
- default:
- TRESPASS();
- break;
- }
- }
-
-private:
- enum {
- kWhatStart = 'strt',
- kWhatStop = 'stop',
- kWhatCodecNotify = 'noti',
- kWhatSeek = 'seek',
- };
-
- sp<ALooper> mDecodeLooper;
-
- AString mURI;
- bool mDecodeAudio;
- sp<Surface> mSurface;
- bool mRenderToSurface;
- sp<ACodec> mCodec;
- sp<IMediaSource> mSource;
- bool mIsVorbis;
-
- Vector<sp<ABuffer> > mCSD;
- size_t mCSDIndex;
-
- MediaBuffer *mLeftOverBuffer;
- status_t mFinalResult;
-
- int64_t mStartTimeUs;
- int32_t mNumOutputBuffersReceived;
- int64_t mTotalBytesReceived;
-
- enum SeekState {
- SEEK_NONE,
- SEEK_FLUSHING,
- SEEK_FLUSH_COMPLETED,
- };
- SeekState mSeekState;
- int64_t mSeekTimeUs;
-
- sp<AMessage> makeFormat(const sp<MetaData> &meta) {
- CHECK(mCSD.isEmpty());
-
- const char *mime;
- CHECK(meta->findCString(kKeyMIMEType, &mime));
-
- sp<AMessage> msg = new AMessage;
- msg->setString("mime", mime);
-
- if (!strncasecmp("video/", mime, 6)) {
- int32_t width, height;
- CHECK(meta->findInt32(kKeyWidth, &width));
- CHECK(meta->findInt32(kKeyHeight, &height));
-
- msg->setInt32("width", width);
- msg->setInt32("height", height);
- } else {
- CHECK(!strncasecmp("audio/", mime, 6));
-
- int32_t numChannels, sampleRate;
- CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
- CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
-
- msg->setInt32("channel-count", numChannels);
- msg->setInt32("sample-rate", sampleRate);
-
- int32_t isADTS;
- if (meta->findInt32(kKeyIsADTS, &isADTS) && isADTS != 0) {
- msg->setInt32("is-adts", true);
- }
- }
-
- uint32_t type;
- const void *data;
- size_t size;
- if (meta->findData(kKeyAVCC, &type, &data, &size)) {
- // Parse the AVCDecoderConfigurationRecord
-
- const uint8_t *ptr = (const uint8_t *)data;
-
- CHECK(size >= 7);
- CHECK_EQ((unsigned)ptr[0], 1u); // configurationVersion == 1
- uint8_t profile __unused = ptr[1];
- uint8_t level __unused = ptr[3];
-
- // There is decodable content out there that fails the following
- // assertion, let's be lenient for now...
- // CHECK((ptr[4] >> 2) == 0x3f); // reserved
-
- size_t lengthSize __unused = 1 + (ptr[4] & 3);
-
- // commented out check below as H264_QVGA_500_NO_AUDIO.3gp
- // violates it...
- // CHECK((ptr[5] >> 5) == 7); // reserved
-
- size_t numSeqParameterSets = ptr[5] & 31;
-
- ptr += 6;
- size -= 6;
-
- sp<ABuffer> buffer = new ABuffer(1024);
- buffer->setRange(0, 0);
-
- for (size_t i = 0; i < numSeqParameterSets; ++i) {
- CHECK(size >= 2);
- size_t length = U16_AT(ptr);
-
- ptr += 2;
- size -= 2;
-
- CHECK(size >= length);
-
- memcpy(buffer->data() + buffer->size(), "\x00\x00\x00\x01", 4);
- memcpy(buffer->data() + buffer->size() + 4, ptr, length);
- buffer->setRange(0, buffer->size() + 4 + length);
-
- ptr += length;
- size -= length;
- }
-
- buffer->meta()->setInt32("csd", true);
- mCSD.push(buffer);
-
- buffer = new ABuffer(1024);
- buffer->setRange(0, 0);
-
- CHECK(size >= 1);
- size_t numPictureParameterSets = *ptr;
- ++ptr;
- --size;
-
- for (size_t i = 0; i < numPictureParameterSets; ++i) {
- CHECK(size >= 2);
- size_t length = U16_AT(ptr);
-
- ptr += 2;
- size -= 2;
-
- CHECK(size >= length);
-
- memcpy(buffer->data() + buffer->size(), "\x00\x00\x00\x01", 4);
- memcpy(buffer->data() + buffer->size() + 4, ptr, length);
- buffer->setRange(0, buffer->size() + 4 + length);
-
- ptr += length;
- size -= length;
- }
-
- buffer->meta()->setInt32("csd", true);
- mCSD.push(buffer);
-
- msg->setBuffer("csd", buffer);
- } else if (meta->findData(kKeyESDS, &type, &data, &size)) {
- ESDS esds((const char *)data, size);
- CHECK_EQ(esds.InitCheck(), (status_t)OK);
-
- const void *codec_specific_data;
- size_t codec_specific_data_size;
- esds.getCodecSpecificInfo(
- &codec_specific_data, &codec_specific_data_size);
-
- sp<ABuffer> buffer = new ABuffer(codec_specific_data_size);
-
- memcpy(buffer->data(), codec_specific_data,
- codec_specific_data_size);
-
- buffer->meta()->setInt32("csd", true);
- mCSD.push(buffer);
- } else if (meta->findData(kKeyVorbisInfo, &type, &data, &size)) {
- sp<ABuffer> buffer = new ABuffer(size);
- memcpy(buffer->data(), data, size);
-
- buffer->meta()->setInt32("csd", true);
- mCSD.push(buffer);
-
- CHECK(meta->findData(kKeyVorbisBooks, &type, &data, &size));
-
- buffer = new ABuffer(size);
- memcpy(buffer->data(), data, size);
-
- buffer->meta()->setInt32("csd", true);
- mCSD.push(buffer);
- }
-
- int32_t maxInputSize;
- if (meta->findInt32(kKeyMaxInputSize, &maxInputSize)) {
- msg->setInt32("max-input-size", maxInputSize);
- }
-
- return msg;
- }
-
- void onFillThisBuffer(const sp<AMessage> &msg) {
- sp<AMessage> reply;
- CHECK(msg->findMessage("reply", &reply));
-
- if (mSource == NULL || mSeekState == SEEK_FLUSHING) {
- reply->setInt32("err", ERROR_END_OF_STREAM);
- reply->post();
- return;
- }
-
- sp<ABuffer> outBuffer;
- CHECK(msg->findBuffer("buffer", &outBuffer));
-
- if (mCSDIndex < mCSD.size()) {
- outBuffer = mCSD.editItemAt(mCSDIndex++);
- outBuffer->meta()->setInt64("timeUs", 0);
- } else {
- size_t sizeLeft = outBuffer->capacity();
- outBuffer->setRange(0, 0);
-
- int32_t n = 0;
-
- for (;;) {
- MediaBuffer *inBuffer;
-
- if (mLeftOverBuffer != NULL) {
- inBuffer = mLeftOverBuffer;
- mLeftOverBuffer = NULL;
- } else if (mFinalResult != OK) {
- break;
- } else {
- MediaSource::ReadOptions options;
- if (mSeekState == SEEK_FLUSH_COMPLETED) {
- options.setSeekTo(mSeekTimeUs);
- mSeekState = SEEK_NONE;
- }
- status_t err = mSource->read(&inBuffer, &options);
-
- if (err != OK) {
- mFinalResult = err;
- break;
- }
- }
-
- size_t sizeNeeded = inBuffer->range_length();
- if (mIsVorbis) {
- // Vorbis data is suffixed with the number of
- // valid samples on the page.
- sizeNeeded += sizeof(int32_t);
- }
-
- if (sizeNeeded > sizeLeft) {
- if (outBuffer->size() == 0) {
- ALOGE("Unable to fit even a single input buffer of size %zu.",
- sizeNeeded);
- }
- CHECK_GT(outBuffer->size(), 0u);
-
- mLeftOverBuffer = inBuffer;
- break;
- }
-
- ++n;
-
- if (outBuffer->size() == 0) {
- int64_t timeUs;
- CHECK(inBuffer->meta_data()->findInt64(kKeyTime, &timeUs));
-
- outBuffer->meta()->setInt64("timeUs", timeUs);
- }
-
- memcpy(outBuffer->data() + outBuffer->size(),
- (const uint8_t *)inBuffer->data()
- + inBuffer->range_offset(),
- inBuffer->range_length());
-
- if (mIsVorbis) {
- int32_t numPageSamples;
- if (!inBuffer->meta_data()->findInt32(
- kKeyValidSamples, &numPageSamples)) {
- numPageSamples = -1;
- }
-
- memcpy(outBuffer->data()
- + outBuffer->size() + inBuffer->range_length(),
- &numPageSamples, sizeof(numPageSamples));
- }
-
- outBuffer->setRange(
- 0, outBuffer->size() + sizeNeeded);
-
- sizeLeft -= sizeNeeded;
-
- inBuffer->release();
- inBuffer = NULL;
-
- break; // Don't coalesce
- }
-
- ALOGV("coalesced %d input buffers", n);
-
- if (outBuffer->size() == 0) {
- CHECK_NE(mFinalResult, (status_t)OK);
-
- reply->setInt32("err", mFinalResult);
- reply->post();
- return;
- }
- }
-
- reply->setBuffer("buffer", outBuffer);
- reply->post();
- }
-
- void onDrainThisBuffer(const sp<AMessage> &msg) {
- sp<ABuffer> buffer;
- CHECK(msg->findBuffer("buffer", &buffer));
-
- mTotalBytesReceived += buffer->size();
-
- sp<AMessage> reply;
- CHECK(msg->findMessage("reply", &reply));
-
- if (mRenderToSurface) {
- reply->setInt32("render", 1);
- }
-
- reply->post();
- }
-
- DISALLOW_EVIL_CONSTRUCTORS(Controller);
-};
-
-static void usage(const char *me) {
- fprintf(stderr, "usage: %s\n", me);
- fprintf(stderr, " -h(elp)\n");
- fprintf(stderr, " -a(udio)\n");
-
- fprintf(stderr,
- " -S(urface) Allocate output buffers on a surface.\n"
- " -R(ender) Render surface-allocated buffers.\n");
-}
-
-int main(int argc, char **argv) {
- android::ProcessState::self()->startThreadPool();
-
- bool decodeAudio = false;
- bool useSurface = false;
- bool renderToSurface = false;
-
- int res;
- while ((res = getopt(argc, argv, "haSR")) >= 0) {
- switch (res) {
- case 'a':
- decodeAudio = true;
- break;
-
- case 'S':
- useSurface = true;
- break;
-
- case 'R':
- renderToSurface = true;
- break;
-
- case '?':
- case 'h':
- default:
- {
- usage(argv[0]);
- return 1;
- }
- }
- }
-
- argc -= optind;
- argv += optind;
-
- if (argc != 1) {
- usage(argv[-optind]);
- return 1;
- }
-
- DataSource::RegisterDefaultSniffers();
-
- sp<ALooper> looper = new ALooper;
- looper->setName("sf2");
-
- sp<SurfaceComposerClient> composerClient;
- sp<SurfaceControl> control;
- sp<Surface> surface;
-
- if (!decodeAudio && useSurface) {
- composerClient = new SurfaceComposerClient;
- CHECK_EQ(composerClient->initCheck(), (status_t)OK);
-
- control = composerClient->createSurface(
- String8("A Surface"),
- 1280,
- 800,
- PIXEL_FORMAT_RGB_565,
- 0);
-
- CHECK(control != NULL);
- CHECK(control->isValid());
-
- SurfaceComposerClient::openGlobalTransaction();
- CHECK_EQ(control->setLayer(INT_MAX), (status_t)OK);
- CHECK_EQ(control->show(), (status_t)OK);
- SurfaceComposerClient::closeGlobalTransaction();
-
- surface = control->getSurface();
- CHECK(surface != NULL);
-
- CHECK_EQ((status_t)OK,
- native_window_api_connect(
- surface.get(), NATIVE_WINDOW_API_MEDIA));
- }
-
- sp<Controller> controller =
- new Controller(argv[0], decodeAudio, surface, renderToSurface);
-
- looper->registerHandler(controller);
-
- signal(SIGINT, mysighandler);
-
- controller->startAsync();
-
- CHECK_EQ(looper->start(true /* runOnCallingThread */), (status_t)OK);
-
- looper->unregisterHandler(controller->id());
-
- if (!decodeAudio && useSurface) {
- CHECK_EQ((status_t)OK,
- native_window_api_disconnect(
- surface.get(), NATIVE_WINDOW_API_MEDIA));
-
- composerClient->dispose();
- }
-
- return 0;
-}
-
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index 08d2064..d7c2e87 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -64,6 +64,9 @@
#include <gui/Surface.h>
#include <gui/SurfaceComposerClient.h>
+#include <android/hardware/media/omx/1.0/IOmx.h>
+#include <media/omx/1.0/WOmx.h>
+
using namespace android;
static long gNumRepetitions;
@@ -904,13 +907,23 @@
}
if (listComponents) {
- sp<IServiceManager> sm = defaultServiceManager();
- sp<IBinder> binder = sm->getService(String16("media.codec"));
- sp<IMediaCodecService> service = interface_cast<IMediaCodecService>(binder);
+ sp<IOMX> omx;
+ if (property_get_bool("persist.media.treble_omx", true)) {
+ using namespace ::android::hardware::media::omx::V1_0;
+ sp<IOmx> tOmx = IOmx::getService();
- CHECK(service.get() != NULL);
+ CHECK(tOmx.get() != NULL);
- sp<IOMX> omx = service->getOMX();
+ omx = new utils::LWOmx(tOmx);
+ } else {
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<IBinder> binder = sm->getService(String16("media.codec"));
+ sp<IMediaCodecService> service = interface_cast<IMediaCodecService>(binder);
+
+ CHECK(service.get() != NULL);
+
+ omx = service->getOMX();
+ }
CHECK(omx.get() != NULL);
List<IOMX::ComponentInfo> list;
@@ -965,8 +978,6 @@
}
}
- DataSource::RegisterDefaultSniffers();
-
status_t err = OK;
for (int k = 0; k < argc && err == OK; ++k) {
@@ -1037,6 +1048,10 @@
bool haveVideo = false;
for (size_t i = 0; i < numTracks; ++i) {
sp<IMediaSource> source = extractor->getTrack(i);
+ if (source == nullptr) {
+ fprintf(stderr, "skip NULL track %zu, track count %zu.\n", i, numTracks);
+ continue;
+ }
const char *mime;
CHECK(source->getFormat()->findCString(
@@ -1099,6 +1114,10 @@
}
mediaSource = extractor->getTrack(i);
+ if (mediaSource == nullptr) {
+ fprintf(stderr, "skip NULL track %zu, total tracks %zu.\n", i, numTracks);
+ return -1;
+ }
}
}
diff --git a/cmds/stagefright/stream.cpp b/cmds/stagefright/stream.cpp
index 16ff39d..2e1d240 100644
--- a/cmds/stagefright/stream.cpp
+++ b/cmds/stagefright/stream.cpp
@@ -171,7 +171,8 @@
mWriter = new MPEG2TSWriter(
this, &MyConvertingStreamSource::WriteDataWrapper);
- for (size_t i = 0; i < extractor->countTracks(); ++i) {
+ size_t numTracks = extractor->countTracks();
+ for (size_t i = 0; i < numTracks; ++i) {
const sp<MetaData> &meta = extractor->getTrackMetaData(i);
const char *mime;
@@ -181,7 +182,12 @@
continue;
}
- CHECK_EQ(mWriter->addSource(extractor->getTrack(i)), (status_t)OK);
+ sp<IMediaSource> track = extractor->getTrack(i);
+ if (track == nullptr) {
+ fprintf(stderr, "skip NULL track %zu, total tracks %zu\n", i, numTracks);
+ continue;
+ }
+ CHECK_EQ(mWriter->addSource(track), (status_t)OK);
}
CHECK_EQ(mWriter->start(), (status_t)OK);
@@ -301,8 +307,6 @@
int main(int argc, char **argv) {
android::ProcessState::self()->startThreadPool();
- DataSource::RegisterDefaultSniffers();
-
if (argc != 2) {
fprintf(stderr, "Usage: %s filename\n", argv[0]);
return 1;
@@ -349,9 +353,7 @@
sp<IStreamSource> source;
- char prop[PROPERTY_VALUE_MAX];
- bool usemp4 = property_get("media.stagefright.use-mp4source", prop, NULL) &&
- (!strcmp(prop, "1") || !strcasecmp(prop, "true"));
+ bool usemp4 = property_get_bool("media.stagefright.use-mp4source", false);
size_t len = strlen(argv[1]);
if ((!usemp4 && len >= 3 && !strcasecmp(".ts", &argv[1][len - 3])) ||
diff --git a/drm/drmserver/DrmManager.cpp b/drm/drmserver/DrmManager.cpp
index 1d835f9..bf04a89 100644
--- a/drm/drmserver/DrmManager.cpp
+++ b/drm/drmserver/DrmManager.cpp
@@ -88,14 +88,9 @@
}
status_t DrmManager::loadPlugIns() {
-
- String8 vendorPluginDirPath("/vendor/lib/drm");
- loadPlugIns(vendorPluginDirPath);
-
String8 pluginDirPath("/system/lib/drm");
loadPlugIns(pluginDirPath);
return DRM_NO_ERROR;
-
}
status_t DrmManager::loadPlugIns(const String8& plugInDirPath) {
diff --git a/drm/libdrmframework/plugins/forward-lock/internal-format/doc/FwdLock.html b/drm/libdrmframework/plugins/forward-lock/internal-format/doc/FwdLock.html
old mode 100755
new mode 100644
diff --git a/drm/libmediadrm/Android.bp b/drm/libmediadrm/Android.bp
index 32f487e..66f5fc2 100644
--- a/drm/libmediadrm/Android.bp
+++ b/drm/libmediadrm/Android.bp
@@ -5,15 +5,29 @@
cc_library_shared {
name: "libmediadrm",
+ aidl: {
+ local_include_dirs: ["aidl"],
+ export_aidl_headers: true,
+ },
+
srcs: [
- "Crypto.cpp",
- "Drm.cpp",
+ "aidl/android/media/ICas.aidl",
+ "aidl/android/media/ICasListener.aidl",
+ "aidl/android/media/IDescrambler.aidl",
+ "aidl/android/media/IMediaCasService.aidl",
+
+ "CasImpl.cpp",
+ "DescramblerImpl.cpp",
+ "DrmPluginPath.cpp",
"DrmSessionManager.cpp",
"ICrypto.cpp",
"IDrm.cpp",
"IDrmClient.cpp",
"IMediaDrmService.cpp",
+ "MediaCasDefs.cpp",
"SharedLibrary.cpp",
+ "DrmHal.cpp",
+ "CryptoHal.cpp",
],
shared_libs: [
@@ -24,6 +38,10 @@
"libmediautils",
"libstagefright_foundation",
"libutils",
+ "android.hardware.drm@1.0",
+ "libhidlbase",
+ "libhidlmemory",
+ "libhidltransport",
],
cflags: [
diff --git a/drm/libmediadrm/CasImpl.cpp b/drm/libmediadrm/CasImpl.cpp
new file mode 100644
index 0000000..1a33bb0
--- /dev/null
+++ b/drm/libmediadrm/CasImpl.cpp
@@ -0,0 +1,224 @@
+
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+//#define LOG_NDEBUG 0
+#define LOG_TAG "CasImpl"
+
+#include <android/media/ICasListener.h>
+#include <media/cas/CasAPI.h>
+#include <media/CasImpl.h>
+#include <media/SharedLibrary.h>
+#include <utils/Log.h>
+
+namespace android {
+
+static Status getBinderStatus(status_t err) {
+ if (err == OK) {
+ return Status::ok();
+ }
+ if (err == BAD_VALUE) {
+ return Status::fromExceptionCode(Status::EX_ILLEGAL_ARGUMENT);
+ }
+ if (err == INVALID_OPERATION) {
+ return Status::fromExceptionCode(Status::EX_ILLEGAL_STATE);
+ }
+ return Status::fromServiceSpecificError(err);
+}
+
+static String8 sessionIdToString(const CasSessionId &sessionId) {
+ String8 result;
+ for (size_t i = 0; i < sessionId.size(); i++) {
+ result.appendFormat("%02x ", sessionId[i]);
+ }
+ if (result.isEmpty()) {
+ result.append("(null)");
+ }
+ return result;
+}
+
+struct CasImpl::PluginHolder : public RefBase {
+public:
+ explicit PluginHolder(CasPlugin *plugin) : mPlugin(plugin) {}
+ ~PluginHolder() { if (mPlugin != NULL) delete mPlugin; }
+ CasPlugin* get() { return mPlugin; }
+
+private:
+ CasPlugin *mPlugin;
+ DISALLOW_EVIL_CONSTRUCTORS(PluginHolder);
+};
+
+CasImpl::CasImpl(const sp<ICasListener> &listener)
+ : mPluginHolder(NULL), mListener(listener) {
+ ALOGV("CTOR");
+}
+
+CasImpl::~CasImpl() {
+ ALOGV("DTOR");
+ release();
+}
+
+//static
+void CasImpl::OnEvent(
+ void *appData,
+ int32_t event,
+ int32_t arg,
+ uint8_t *data,
+ size_t size) {
+ if (appData == NULL) {
+ ALOGE("Invalid appData!");
+ return;
+ }
+ CasImpl *casImpl = static_cast<CasImpl *>(appData);
+ casImpl->onEvent(event, arg, data, size);
+}
+
+void CasImpl::init(const sp<SharedLibrary>& library, CasPlugin *plugin) {
+ mLibrary = library;
+ mPluginHolder = new PluginHolder(plugin);
+}
+
+void CasImpl::onEvent(
+ int32_t event, int32_t arg, uint8_t *data, size_t size) {
+ if (mListener == NULL) {
+ return;
+ }
+
+ std::unique_ptr<CasData> eventData;
+ if (data != NULL && size > 0) {
+ eventData.reset(new CasData(data, data + size));
+ }
+
+ mListener->onEvent(event, arg, eventData);
+}
+
+Status CasImpl::setPrivateData(const CasData& pvtData) {
+ ALOGV("setPrivateData");
+ sp<PluginHolder> holder = mPluginHolder;
+ if (holder == NULL) {
+ return getBinderStatus(INVALID_OPERATION);
+ }
+ return getBinderStatus(holder->get()->setPrivateData(pvtData));
+}
+
+Status CasImpl::openSession(CasSessionId* sessionId) {
+ ALOGV("openSession");
+ sp<PluginHolder> holder = mPluginHolder;
+ if (holder == NULL) {
+ return getBinderStatus(INVALID_OPERATION);
+ }
+ status_t err = holder->get()->openSession(sessionId);
+
+ ALOGV("openSession: session opened, sessionId=%s",
+ sessionIdToString(*sessionId).string());
+
+ return getBinderStatus(err);
+}
+
+Status CasImpl::setSessionPrivateData(
+ const CasSessionId &sessionId, const CasData& pvtData) {
+ ALOGV("setSessionPrivateData: sessionId=%s",
+ sessionIdToString(sessionId).string());
+ sp<PluginHolder> holder = mPluginHolder;
+ if (holder == NULL) {
+ return getBinderStatus(INVALID_OPERATION);
+ }
+ return getBinderStatus(holder->get()->setSessionPrivateData(sessionId, pvtData));
+}
+
+Status CasImpl::closeSession(const CasSessionId &sessionId) {
+ ALOGV("closeSession: sessionId=%s",
+ sessionIdToString(sessionId).string());
+ sp<PluginHolder> holder = mPluginHolder;
+ if (holder == NULL) {
+ return getBinderStatus(INVALID_OPERATION);
+ }
+ return getBinderStatus(holder->get()->closeSession(sessionId));
+}
+
+Status CasImpl::processEcm(const CasSessionId &sessionId, const ParcelableCasData& ecm) {
+ ALOGV("processEcm: sessionId=%s",
+ sessionIdToString(sessionId).string());
+ sp<PluginHolder> holder = mPluginHolder;
+ if (holder == NULL) {
+ return getBinderStatus(INVALID_OPERATION);
+ }
+
+ return getBinderStatus(holder->get()->processEcm(sessionId, ecm));
+}
+
+Status CasImpl::processEmm(const ParcelableCasData& emm) {
+ ALOGV("processEmm");
+ sp<PluginHolder> holder = mPluginHolder;
+ if (holder == NULL) {
+ return getBinderStatus(INVALID_OPERATION);
+ }
+
+ return getBinderStatus(holder->get()->processEmm(emm));
+}
+
+Status CasImpl::sendEvent(
+ int32_t event, int32_t arg, const ::std::unique_ptr<CasData> &eventData) {
+ ALOGV("sendEvent");
+ sp<PluginHolder> holder = mPluginHolder;
+ if (holder == NULL) {
+ return getBinderStatus(INVALID_OPERATION);
+ }
+
+ status_t err;
+ if (eventData == nullptr) {
+ err = holder->get()->sendEvent(event, arg, CasData());
+ } else {
+ err = holder->get()->sendEvent(event, arg, *eventData);
+ }
+ return getBinderStatus(err);
+}
+
+Status CasImpl::provision(const String16& provisionString) {
+ ALOGV("provision: provisionString=%s", String8(provisionString).string());
+ sp<PluginHolder> holder = mPluginHolder;
+ if (holder == NULL) {
+ return getBinderStatus(INVALID_OPERATION);
+ }
+
+ return getBinderStatus(holder->get()->provision(String8(provisionString)));
+}
+
+Status CasImpl::refreshEntitlements(
+ int32_t refreshType, const ::std::unique_ptr<CasData> &refreshData) {
+ ALOGV("refreshEntitlements");
+ sp<PluginHolder> holder = mPluginHolder;
+ if (holder == NULL) {
+ return getBinderStatus(INVALID_OPERATION);
+ }
+
+ status_t err;
+ if (refreshData == nullptr) {
+ err = holder->get()->refreshEntitlements(refreshType, CasData());
+ } else {
+ err = holder->get()->refreshEntitlements(refreshType, *refreshData);
+ }
+ return getBinderStatus(err);
+}
+
+Status CasImpl::release() {
+ ALOGV("release: plugin=%p",
+            mPluginHolder != NULL ? mPluginHolder->get() : NULL);
+ mPluginHolder.clear();
+ return Status::ok();
+}
+
+} // namespace android
+
diff --git a/drm/libmediadrm/Crypto.cpp b/drm/libmediadrm/Crypto.cpp
deleted file mode 100644
index 79633cb..0000000
--- a/drm/libmediadrm/Crypto.cpp
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "Crypto"
-#include <utils/Log.h>
-#include <dirent.h>
-#include <dlfcn.h>
-
-#include <binder/IMemory.h>
-#include <media/Crypto.h>
-#include <media/hardware/CryptoAPI.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AString.h>
-#include <media/stagefright/foundation/hexdump.h>
-#include <media/stagefright/MediaErrors.h>
-
-namespace android {
-
-KeyedVector<Vector<uint8_t>, String8> Crypto::mUUIDToLibraryPathMap;
-KeyedVector<String8, wp<SharedLibrary> > Crypto::mLibraryPathToOpenLibraryMap;
-Mutex Crypto::mMapLock;
-
-static bool operator<(const Vector<uint8_t> &lhs, const Vector<uint8_t> &rhs) {
- if (lhs.size() < rhs.size()) {
- return true;
- } else if (lhs.size() > rhs.size()) {
- return false;
- }
-
- return memcmp((void *)lhs.array(), (void *)rhs.array(), rhs.size()) < 0;
-}
-
-Crypto::Crypto()
- : mInitCheck(NO_INIT),
- mFactory(NULL),
- mPlugin(NULL) {
-}
-
-Crypto::~Crypto() {
- delete mPlugin;
- mPlugin = NULL;
- closeFactory();
-}
-
-void Crypto::closeFactory() {
- delete mFactory;
- mFactory = NULL;
- mLibrary.clear();
-}
-
-status_t Crypto::initCheck() const {
- return mInitCheck;
-}
-
-/*
- * Search the plugins directory for a plugin that supports the scheme
- * specified by uuid
- *
- * If found:
- * mLibrary holds a strong pointer to the dlopen'd library
- * mFactory is set to the library's factory method
- * mInitCheck is set to OK
- *
- * If not found:
- * mLibrary is cleared and mFactory are set to NULL
- * mInitCheck is set to an error (!OK)
- */
-void Crypto::findFactoryForScheme(const uint8_t uuid[16]) {
-
- closeFactory();
-
- // lock static maps
- Mutex::Autolock autoLock(mMapLock);
-
- // first check cache
- Vector<uint8_t> uuidVector;
- uuidVector.appendArray(uuid, sizeof(uuid[0]) * 16);
- ssize_t index = mUUIDToLibraryPathMap.indexOfKey(uuidVector);
- if (index >= 0) {
- if (loadLibraryForScheme(mUUIDToLibraryPathMap[index], uuid)) {
- mInitCheck = OK;
- return;
- } else {
- ALOGE("Failed to load from cached library path!");
- mInitCheck = ERROR_UNSUPPORTED;
- return;
- }
- }
-
- // no luck, have to search
- String8 dirPath("/vendor/lib/mediadrm");
- String8 pluginPath;
-
- DIR* pDir = opendir(dirPath.string());
- if (pDir) {
- struct dirent* pEntry;
- while ((pEntry = readdir(pDir))) {
-
- pluginPath = dirPath + "/" + pEntry->d_name;
-
- if (pluginPath.getPathExtension() == ".so") {
-
- if (loadLibraryForScheme(pluginPath, uuid)) {
- mUUIDToLibraryPathMap.add(uuidVector, pluginPath);
- mInitCheck = OK;
- closedir(pDir);
- return;
- }
- }
- }
-
- closedir(pDir);
- }
-
- // try the legacy libdrmdecrypt.so
- pluginPath = "libdrmdecrypt.so";
- if (loadLibraryForScheme(pluginPath, uuid)) {
- mUUIDToLibraryPathMap.add(uuidVector, pluginPath);
- mInitCheck = OK;
- return;
- }
-
- mInitCheck = ERROR_UNSUPPORTED;
-}
-
-bool Crypto::loadLibraryForScheme(const String8 &path, const uint8_t uuid[16]) {
-
- // get strong pointer to open shared library
- ssize_t index = mLibraryPathToOpenLibraryMap.indexOfKey(path);
- if (index >= 0) {
- mLibrary = mLibraryPathToOpenLibraryMap[index].promote();
- } else {
- index = mLibraryPathToOpenLibraryMap.add(path, NULL);
- }
-
- if (!mLibrary.get()) {
- mLibrary = new SharedLibrary(path);
- if (!*mLibrary) {
- ALOGE("loadLibraryForScheme failed:%s", mLibrary->lastError());
- return false;
- }
-
- mLibraryPathToOpenLibraryMap.replaceValueAt(index, mLibrary);
- }
-
- typedef CryptoFactory *(*CreateCryptoFactoryFunc)();
-
- CreateCryptoFactoryFunc createCryptoFactory =
- (CreateCryptoFactoryFunc)mLibrary->lookup("createCryptoFactory");
-
- if (createCryptoFactory == NULL ||
- (mFactory = createCryptoFactory()) == NULL ||
- !mFactory->isCryptoSchemeSupported(uuid)) {
- ALOGE("createCryptoFactory failed:%s", mLibrary->lastError());
- closeFactory();
- return false;
- }
- return true;
-}
-
-bool Crypto::isCryptoSchemeSupported(const uint8_t uuid[16]) {
- Mutex::Autolock autoLock(mLock);
-
- if (mFactory && mFactory->isCryptoSchemeSupported(uuid)) {
- return true;
- }
-
- findFactoryForScheme(uuid);
- return (mInitCheck == OK);
-}
-
-status_t Crypto::createPlugin(
- const uint8_t uuid[16], const void *data, size_t size) {
- Mutex::Autolock autoLock(mLock);
-
- if (mPlugin != NULL) {
- return -EINVAL;
- }
-
- if (!mFactory || !mFactory->isCryptoSchemeSupported(uuid)) {
- findFactoryForScheme(uuid);
- }
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- return mFactory->createPlugin(uuid, data, size, &mPlugin);
-}
-
-status_t Crypto::destroyPlugin() {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- delete mPlugin;
- mPlugin = NULL;
-
- return OK;
-}
-
-bool Crypto::requiresSecureDecoderComponent(const char *mime) const {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- return mPlugin->requiresSecureDecoderComponent(mime);
-}
-
-ssize_t Crypto::decrypt(
- DestinationType dstType,
- const uint8_t key[16],
- const uint8_t iv[16],
- CryptoPlugin::Mode mode,
- const CryptoPlugin::Pattern &pattern,
- const sp<IMemory> &sharedBuffer, size_t offset,
- const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
- void *dstPtr,
- AString *errorDetailMsg) {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- const void *srcPtr = static_cast<uint8_t *>(sharedBuffer->pointer()) + offset;
-
- return mPlugin->decrypt(
- dstType != kDestinationTypeVmPointer,
- key, iv, mode, pattern, srcPtr, subSamples, numSubSamples, dstPtr,
- errorDetailMsg);
-}
-
-void Crypto::notifyResolution(uint32_t width, uint32_t height) {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck == OK && mPlugin != NULL) {
- mPlugin->notifyResolution(width, height);
- }
-}
-
-status_t Crypto::setMediaDrmSession(const Vector<uint8_t> &sessionId) {
- Mutex::Autolock autoLock(mLock);
-
- status_t result = NO_INIT;
- if (mInitCheck == OK && mPlugin != NULL) {
- result = mPlugin->setMediaDrmSession(sessionId);
- }
- return result;
-}
-
-} // namespace android
diff --git a/drm/libmediadrm/CryptoHal.cpp b/drm/libmediadrm/CryptoHal.cpp
new file mode 100644
index 0000000..d613a5b
--- /dev/null
+++ b/drm/libmediadrm/CryptoHal.cpp
@@ -0,0 +1,390 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "CryptoHal"
+#include <utils/Log.h>
+
+#include <android/hardware/drm/1.0/types.h>
+#include <android/hidl/manager/1.0/IServiceManager.h>
+
+#include <binder/IMemory.h>
+#include <cutils/native_handle.h>
+#include <media/CryptoHal.h>
+#include <media/hardware/CryptoAPI.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/MediaErrors.h>
+
+using ::android::hardware::drm::V1_0::BufferType;
+using ::android::hardware::drm::V1_0::DestinationBuffer;
+using ::android::hardware::drm::V1_0::ICryptoFactory;
+using ::android::hardware::drm::V1_0::ICryptoPlugin;
+using ::android::hardware::drm::V1_0::Mode;
+using ::android::hardware::drm::V1_0::Pattern;
+using ::android::hardware::drm::V1_0::SharedBuffer;
+using ::android::hardware::drm::V1_0::Status;
+using ::android::hardware::drm::V1_0::SubSample;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_handle;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::hidl::manager::V1_0::IServiceManager;
+using ::android::sp;
+
+
+namespace android {
+
+static status_t toStatusT(Status status) {
+ switch (status) {
+ case Status::OK:
+ return OK;
+ case Status::ERROR_DRM_NO_LICENSE:
+ return ERROR_DRM_NO_LICENSE;
+ case Status::ERROR_DRM_LICENSE_EXPIRED:
+ return ERROR_DRM_LICENSE_EXPIRED;
+ case Status::ERROR_DRM_RESOURCE_BUSY:
+ return ERROR_DRM_RESOURCE_BUSY;
+ case Status::ERROR_DRM_INSUFFICIENT_OUTPUT_PROTECTION:
+ return ERROR_DRM_INSUFFICIENT_OUTPUT_PROTECTION;
+ case Status::ERROR_DRM_SESSION_NOT_OPENED:
+ return ERROR_DRM_SESSION_NOT_OPENED;
+ case Status::ERROR_DRM_CANNOT_HANDLE:
+ return ERROR_DRM_CANNOT_HANDLE;
+ case Status::ERROR_DRM_DECRYPT:
+ return ERROR_DRM_DECRYPT;
+ default:
+ return UNKNOWN_ERROR;
+ }
+}
+
+
+static hidl_vec<uint8_t> toHidlVec(const Vector<uint8_t> &vector) {
+ hidl_vec<uint8_t> vec;
+ vec.setToExternal(const_cast<uint8_t *>(vector.array()), vector.size());
+ return vec;
+}
+
+static hidl_vec<uint8_t> toHidlVec(const void *ptr, size_t size) {
+ hidl_vec<uint8_t> vec;
+ vec.resize(size);
+ memcpy(vec.data(), ptr, size);
+ return vec;
+}
+
+static hidl_array<uint8_t, 16> toHidlArray16(const uint8_t *ptr) {
+ if (!ptr) {
+ return hidl_array<uint8_t, 16>();
+ }
+ return hidl_array<uint8_t, 16>(ptr);
+}
+
+
+static String8 toString8(hidl_string hString) {
+ return String8(hString.c_str());
+}
+
+
+CryptoHal::CryptoHal()
+ : mFactories(makeCryptoFactories()),
+ mInitCheck((mFactories.size() == 0) ? ERROR_UNSUPPORTED : NO_INIT),
+ mNextBufferId(0),
+ mHeapSeqNum(0) {
+}
+
+CryptoHal::~CryptoHal() {
+}
+
+Vector<sp<ICryptoFactory>> CryptoHal::makeCryptoFactories() {
+ Vector<sp<ICryptoFactory>> factories;
+
+ auto manager = ::IServiceManager::getService();
+ if (manager != NULL) {
+ manager->listByInterface(ICryptoFactory::descriptor,
+ [&factories](const hidl_vec<hidl_string> ®istered) {
+ for (const auto &instance : registered) {
+ auto factory = ICryptoFactory::getService(instance);
+ if (factory != NULL) {
+ factories.push_back(factory);
+ ALOGI("makeCryptoFactories: factory instance %s is %s",
+ instance.c_str(),
+ factory->isRemote() ? "Remote" : "Not Remote");
+ }
+ }
+ }
+ );
+ }
+
+ if (factories.size() == 0) {
+ // must be in passthrough mode, load the default passthrough service
+ auto passthrough = ICryptoFactory::getService();
+ if (passthrough != NULL) {
+ ALOGI("makeCryptoFactories: using default crypto instance");
+ factories.push_back(passthrough);
+ } else {
+ ALOGE("Failed to find any crypto factories");
+ }
+ }
+ return factories;
+}
+
+sp<ICryptoPlugin> CryptoHal::makeCryptoPlugin(const sp<ICryptoFactory>& factory,
+ const uint8_t uuid[16], const void *initData, size_t initDataSize) {
+
+ sp<ICryptoPlugin> plugin;
+ Return<void> hResult = factory->createPlugin(toHidlArray16(uuid),
+ toHidlVec(initData, initDataSize),
+ [&](Status status, const sp<ICryptoPlugin>& hPlugin) {
+ if (status != Status::OK) {
+ ALOGE("Failed to make crypto plugin");
+ return;
+ }
+ plugin = hPlugin;
+ }
+ );
+ return plugin;
+}
+
+
+status_t CryptoHal::initCheck() const {
+ return mInitCheck;
+}
+
+
+bool CryptoHal::isCryptoSchemeSupported(const uint8_t uuid[16]) {
+ Mutex::Autolock autoLock(mLock);
+
+ for (size_t i = 0; i < mFactories.size(); i++) {
+ if (mFactories[i]->isCryptoSchemeSupported(uuid)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+status_t CryptoHal::createPlugin(const uint8_t uuid[16], const void *data,
+ size_t size) {
+ Mutex::Autolock autoLock(mLock);
+
+ for (size_t i = 0; i < mFactories.size(); i++) {
+ if (mFactories[i]->isCryptoSchemeSupported(uuid)) {
+ mPlugin = makeCryptoPlugin(mFactories[i], uuid, data, size);
+ }
+ }
+
+ if (mPlugin == NULL) {
+ mInitCheck = ERROR_UNSUPPORTED;
+ } else {
+ mInitCheck = OK;
+ }
+
+ return mInitCheck;
+}
+
+status_t CryptoHal::destroyPlugin() {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ mPlugin.clear();
+ return OK;
+}
+
+bool CryptoHal::requiresSecureDecoderComponent(const char *mime) const {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ return mPlugin->requiresSecureDecoderComponent(hidl_string(mime));
+}
+
+
+/**
+ * If the heap base isn't set, get the heap base from the IMemory
+ * and send it to the HAL so it can map a remote heap of the same
+ * size. Once the heap base is established, shared memory buffers
+ * are sent by providing an offset into the heap and a buffer size.
+ */
+int32_t CryptoHal::setHeapBase(const sp<IMemoryHeap>& heap) {
+ if (heap == NULL) {
+ ALOGE("setHeapBase(): heap is NULL");
+ return -1;
+ }
+ native_handle_t* nativeHandle = native_handle_create(1, 0);
+ if (!nativeHandle) {
+ ALOGE("setHeapBase(), failed to create native handle");
+ return -1;
+ }
+
+ Mutex::Autolock autoLock(mLock);
+
+ int32_t seqNum = mHeapSeqNum++;
+ int fd = heap->getHeapID();
+ nativeHandle->data[0] = fd;
+ auto hidlHandle = hidl_handle(nativeHandle);
+ auto hidlMemory = hidl_memory("ashmem", hidlHandle, heap->getSize());
+ mHeapBases.add(seqNum, mNextBufferId);
+ Return<void> hResult = mPlugin->setSharedBufferBase(hidlMemory, mNextBufferId++);
+ ALOGE_IF(!hResult.isOk(), "setSharedBufferBase(): remote call failed");
+ return seqNum;
+}
+
+void CryptoHal::clearHeapBase(int32_t seqNum) {
+ Mutex::Autolock autoLock(mLock);
+
+ mHeapBases.removeItem(seqNum);
+}
+
+status_t CryptoHal::toSharedBuffer(const sp<IMemory>& memory, int32_t seqNum, ::SharedBuffer* buffer) {
+ ssize_t offset;
+ size_t size;
+
+    if (memory == NULL || buffer == NULL) {
+ return UNEXPECTED_NULL;
+ }
+
+ sp<IMemoryHeap> heap = memory->getMemory(&offset, &size);
+ if (heap == NULL) {
+ return UNEXPECTED_NULL;
+ }
+
+ // memory must be in the declared heap
+ CHECK(mHeapBases.indexOfKey(seqNum) >= 0);
+
+ buffer->bufferId = mHeapBases.valueFor(seqNum);
+ buffer->offset = offset >= 0 ? offset : 0;
+ buffer->size = size;
+ return OK;
+}
+
+ssize_t CryptoHal::decrypt(const uint8_t keyId[16], const uint8_t iv[16],
+ CryptoPlugin::Mode mode, const CryptoPlugin::Pattern &pattern,
+ const ICrypto::SourceBuffer &source, size_t offset,
+ const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
+ const ICrypto::DestinationBuffer &destination, AString *errorDetailMsg) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ Mode hMode;
+ switch(mode) {
+ case CryptoPlugin::kMode_Unencrypted:
+ hMode = Mode::UNENCRYPTED ;
+ break;
+ case CryptoPlugin::kMode_AES_CTR:
+ hMode = Mode::AES_CTR;
+ break;
+ case CryptoPlugin::kMode_AES_WV:
+ hMode = Mode::AES_CBC_CTS;
+ break;
+ case CryptoPlugin::kMode_AES_CBC:
+ hMode = Mode::AES_CBC;
+ break;
+ default:
+ return UNKNOWN_ERROR;
+ }
+
+ Pattern hPattern;
+ hPattern.encryptBlocks = pattern.mEncryptBlocks;
+ hPattern.skipBlocks = pattern.mSkipBlocks;
+
+ std::vector<SubSample> stdSubSamples;
+ for (size_t i = 0; i < numSubSamples; i++) {
+ SubSample subSample;
+ subSample.numBytesOfClearData = subSamples[i].mNumBytesOfClearData;
+ subSample.numBytesOfEncryptedData = subSamples[i].mNumBytesOfEncryptedData;
+ stdSubSamples.push_back(subSample);
+ }
+ auto hSubSamples = hidl_vec<SubSample>(stdSubSamples);
+
+ int32_t heapSeqNum = source.mHeapSeqNum;
+ bool secure;
+ ::DestinationBuffer hDestination;
+ if (destination.mType == kDestinationTypeSharedMemory) {
+ hDestination.type = BufferType::SHARED_MEMORY;
+ status_t status = toSharedBuffer(destination.mSharedMemory, heapSeqNum,
+ &hDestination.nonsecureMemory);
+ if (status != OK) {
+ return status;
+ }
+ secure = false;
+ } else {
+ hDestination.type = BufferType::NATIVE_HANDLE;
+ hDestination.secureMemory = hidl_handle(destination.mHandle);
+ secure = true;
+ }
+
+ ::SharedBuffer hSource;
+ status_t status = toSharedBuffer(source.mSharedMemory, heapSeqNum, &hSource);
+ if (status != OK) {
+ return status;
+ }
+
+ status_t err = UNKNOWN_ERROR;
+ uint32_t bytesWritten = 0;
+
+ Return<void> hResult = mPlugin->decrypt(secure, toHidlArray16(keyId), toHidlArray16(iv), hMode,
+ hPattern, hSubSamples, hSource, offset, hDestination,
+ [&](Status status, uint32_t hBytesWritten, hidl_string hDetailedError) {
+ if (status == Status::OK) {
+ bytesWritten = hBytesWritten;
+ *errorDetailMsg = toString8(hDetailedError);
+ }
+ err = toStatusT(status);
+ }
+ );
+
+ if (!hResult.isOk()) {
+ err = DEAD_OBJECT;
+ }
+
+ if (err == OK) {
+ return bytesWritten;
+ }
+ return err;
+}
+
+void CryptoHal::notifyResolution(uint32_t width, uint32_t height) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return;
+ }
+
+ mPlugin->notifyResolution(width, height);
+}
+
+status_t CryptoHal::setMediaDrmSession(const Vector<uint8_t> &sessionId) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ return toStatusT(mPlugin->setMediaDrmSession(toHidlVec(sessionId)));
+}
+
+} // namespace android
diff --git a/drm/libmediadrm/DescramblerImpl.cpp b/drm/libmediadrm/DescramblerImpl.cpp
new file mode 100644
index 0000000..94e09e2
--- /dev/null
+++ b/drm/libmediadrm/DescramblerImpl.cpp
@@ -0,0 +1,107 @@
+
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+//#define LOG_NDEBUG 0
+#define LOG_TAG "DescramblerImpl"
+
+#include <media/cas/DescramblerAPI.h>
+#include <media/DescramblerImpl.h>
+#include <media/SharedLibrary.h>
+#include <utils/Log.h>
+#include <binder/IMemory.h>
+
+namespace android {
+
+static Status getBinderStatus(status_t err) {
+ if (err == OK) {
+ return Status::ok();
+ }
+ if (err == BAD_VALUE) {
+ return Status::fromExceptionCode(Status::EX_ILLEGAL_ARGUMENT);
+ }
+ if (err == INVALID_OPERATION) {
+ return Status::fromExceptionCode(Status::EX_ILLEGAL_STATE);
+ }
+ return Status::fromServiceSpecificError(err);
+}
+
+static String8 sessionIdToString(const CasSessionId &sessionId) {
+ String8 result;
+ for (size_t i = 0; i < sessionId.size(); i++) {
+ result.appendFormat("%02x ", sessionId[i]);
+ }
+ if (result.isEmpty()) {
+ result.append("(null)");
+ }
+ return result;
+}
+
+DescramblerImpl::DescramblerImpl(
+ const sp<SharedLibrary>& library, DescramblerPlugin *plugin) :
+ mLibrary(library), mPlugin(plugin) {
+ ALOGV("CTOR: mPlugin=%p", mPlugin);
+}
+
+DescramblerImpl::~DescramblerImpl() {
+ ALOGV("DTOR: mPlugin=%p", mPlugin);
+ release();
+}
+
+Status DescramblerImpl::setMediaCasSession(const CasSessionId& sessionId) {
+ ALOGV("setMediaCasSession: sessionId=%s",
+ sessionIdToString(sessionId).string());
+
+ return getBinderStatus(mPlugin->setMediaCasSession(sessionId));
+}
+
+Status DescramblerImpl::requiresSecureDecoderComponent(
+ const String16& mime, bool *result) {
+ *result = mPlugin->requiresSecureDecoderComponent(String8(mime));
+
+ return getBinderStatus(OK);
+}
+
+Status DescramblerImpl::descramble(
+ const DescrambleInfo& info, int32_t *result) {
+ ALOGV("descramble");
+
+ *result = mPlugin->descramble(
+ info.dstType != DescrambleInfo::kDestinationTypeVmPointer,
+ info.scramblingControl,
+ info.numSubSamples,
+ info.subSamples,
+ info.srcMem->pointer(),
+ info.srcOffset,
+ info.dstType == DescrambleInfo::kDestinationTypeVmPointer ?
+ info.srcMem->pointer() : info.dstPtr,
+ info.dstOffset,
+ NULL);
+
+ return getBinderStatus(*result >= 0 ? OK : *result);
+}
+
+Status DescramblerImpl::release() {
+ ALOGV("release: mPlugin=%p", mPlugin);
+
+ if (mPlugin != NULL) {
+ delete mPlugin;
+ mPlugin = NULL;
+ }
+ return Status::ok();
+}
+
+} // namespace android
+
diff --git a/drm/libmediadrm/Drm.cpp b/drm/libmediadrm/Drm.cpp
deleted file mode 100644
index 9ab08db..0000000
--- a/drm/libmediadrm/Drm.cpp
+++ /dev/null
@@ -1,791 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "Drm"
-#include <utils/Log.h>
-
-#include <dirent.h>
-#include <dlfcn.h>
-
-#include <media/DrmSessionClientInterface.h>
-#include <media/DrmSessionManager.h>
-#include <media/Drm.h>
-#include <media/drm/DrmAPI.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AString.h>
-#include <media/stagefright/foundation/hexdump.h>
-#include <media/stagefright/MediaErrors.h>
-#include <binder/IServiceManager.h>
-#include <binder/IPCThreadState.h>
-
-namespace android {
-
-static inline int getCallingPid() {
- return IPCThreadState::self()->getCallingPid();
-}
-
-static bool checkPermission(const char* permissionString) {
- if (getpid() == IPCThreadState::self()->getCallingPid()) return true;
- bool ok = checkCallingPermission(String16(permissionString));
- if (!ok) ALOGE("Request requires %s", permissionString);
- return ok;
-}
-
-KeyedVector<Vector<uint8_t>, String8> Drm::mUUIDToLibraryPathMap;
-KeyedVector<String8, wp<SharedLibrary> > Drm::mLibraryPathToOpenLibraryMap;
-Mutex Drm::mMapLock;
-Mutex Drm::mLock;
-
-static bool operator<(const Vector<uint8_t> &lhs, const Vector<uint8_t> &rhs) {
- if (lhs.size() < rhs.size()) {
- return true;
- } else if (lhs.size() > rhs.size()) {
- return false;
- }
-
- return memcmp((void *)lhs.array(), (void *)rhs.array(), rhs.size()) < 0;
-}
-
-struct DrmSessionClient : public DrmSessionClientInterface {
- explicit DrmSessionClient(Drm* drm) : mDrm(drm) {}
-
- virtual bool reclaimSession(const Vector<uint8_t>& sessionId) {
- sp<Drm> drm = mDrm.promote();
- if (drm == NULL) {
- return true;
- }
- status_t err = drm->closeSession(sessionId);
- if (err != OK) {
- return false;
- }
- drm->sendEvent(DrmPlugin::kDrmPluginEventSessionReclaimed, 0, &sessionId, NULL);
- return true;
- }
-
-protected:
- virtual ~DrmSessionClient() {}
-
-private:
- wp<Drm> mDrm;
-
- DISALLOW_EVIL_CONSTRUCTORS(DrmSessionClient);
-};
-
-Drm::Drm()
- : mInitCheck(NO_INIT),
- mDrmSessionClient(new DrmSessionClient(this)),
- mListener(NULL),
- mFactory(NULL),
- mPlugin(NULL) {
-}
-
-Drm::~Drm() {
- DrmSessionManager::Instance()->removeDrm(mDrmSessionClient);
- delete mPlugin;
- mPlugin = NULL;
- closeFactory();
-}
-
-void Drm::closeFactory() {
- delete mFactory;
- mFactory = NULL;
- mLibrary.clear();
-}
-
-status_t Drm::initCheck() const {
- return mInitCheck;
-}
-
-status_t Drm::setListener(const sp<IDrmClient>& listener)
-{
- Mutex::Autolock lock(mEventLock);
- if (mListener != NULL){
- IInterface::asBinder(mListener)->unlinkToDeath(this);
- }
- if (listener != NULL) {
- IInterface::asBinder(listener)->linkToDeath(this);
- }
- mListener = listener;
- return NO_ERROR;
-}
-
-void Drm::sendEvent(DrmPlugin::EventType eventType, int extra,
- Vector<uint8_t> const *sessionId,
- Vector<uint8_t> const *data)
-{
- mEventLock.lock();
- sp<IDrmClient> listener = mListener;
- mEventLock.unlock();
-
- if (listener != NULL) {
- Parcel obj;
- writeByteArray(obj, sessionId);
- writeByteArray(obj, data);
-
- Mutex::Autolock lock(mNotifyLock);
- listener->notify(eventType, extra, &obj);
- }
-}
-
-void Drm::sendExpirationUpdate(Vector<uint8_t> const *sessionId,
- int64_t expiryTimeInMS)
-{
- mEventLock.lock();
- sp<IDrmClient> listener = mListener;
- mEventLock.unlock();
-
- if (listener != NULL) {
- Parcel obj;
- writeByteArray(obj, sessionId);
- obj.writeInt64(expiryTimeInMS);
-
- Mutex::Autolock lock(mNotifyLock);
- listener->notify(DrmPlugin::kDrmPluginEventExpirationUpdate, 0, &obj);
- }
-}
-
-void Drm::sendKeysChange(Vector<uint8_t> const *sessionId,
- Vector<DrmPlugin::KeyStatus> const *keyStatusList,
- bool hasNewUsableKey)
-{
- mEventLock.lock();
- sp<IDrmClient> listener = mListener;
- mEventLock.unlock();
-
- if (listener != NULL) {
- Parcel obj;
- writeByteArray(obj, sessionId);
-
- size_t nkeys = keyStatusList->size();
- obj.writeInt32(keyStatusList->size());
- for (size_t i = 0; i < nkeys; ++i) {
- const DrmPlugin::KeyStatus *keyStatus = &keyStatusList->itemAt(i);
- writeByteArray(obj, &keyStatus->mKeyId);
- obj.writeInt32(keyStatus->mType);
- }
- obj.writeInt32(hasNewUsableKey);
-
- Mutex::Autolock lock(mNotifyLock);
- listener->notify(DrmPlugin::kDrmPluginEventKeysChange, 0, &obj);
- }
-}
-
-/*
- * Search the plugins directory for a plugin that supports the scheme
- * specified by uuid
- *
- * If found:
- * mLibrary holds a strong pointer to the dlopen'd library
- * mFactory is set to the library's factory method
- * mInitCheck is set to OK
- *
- * If not found:
- * mLibrary is cleared and mFactory are set to NULL
- * mInitCheck is set to an error (!OK)
- */
-void Drm::findFactoryForScheme(const uint8_t uuid[16]) {
-
- closeFactory();
-
- // lock static maps
- Mutex::Autolock autoLock(mMapLock);
-
- // first check cache
- Vector<uint8_t> uuidVector;
- uuidVector.appendArray(uuid, sizeof(uuid[0]) * 16);
- ssize_t index = mUUIDToLibraryPathMap.indexOfKey(uuidVector);
- if (index >= 0) {
- if (loadLibraryForScheme(mUUIDToLibraryPathMap[index], uuid)) {
- mInitCheck = OK;
- return;
- } else {
- ALOGE("Failed to load from cached library path!");
- mInitCheck = ERROR_UNSUPPORTED;
- return;
- }
- }
-
- // no luck, have to search
- String8 dirPath("/vendor/lib/mediadrm");
- DIR* pDir = opendir(dirPath.string());
-
- if (pDir == NULL) {
- mInitCheck = ERROR_UNSUPPORTED;
- ALOGE("Failed to open plugin directory %s", dirPath.string());
- return;
- }
-
-
- struct dirent* pEntry;
- while ((pEntry = readdir(pDir))) {
-
- String8 pluginPath = dirPath + "/" + pEntry->d_name;
-
- if (pluginPath.getPathExtension() == ".so") {
-
- if (loadLibraryForScheme(pluginPath, uuid)) {
- mUUIDToLibraryPathMap.add(uuidVector, pluginPath);
- mInitCheck = OK;
- closedir(pDir);
- return;
- }
- }
- }
-
- closedir(pDir);
-
- ALOGE("Failed to find drm plugin");
- mInitCheck = ERROR_UNSUPPORTED;
-}
-
-bool Drm::loadLibraryForScheme(const String8 &path, const uint8_t uuid[16]) {
-
- // get strong pointer to open shared library
- ssize_t index = mLibraryPathToOpenLibraryMap.indexOfKey(path);
- if (index >= 0) {
- mLibrary = mLibraryPathToOpenLibraryMap[index].promote();
- } else {
- index = mLibraryPathToOpenLibraryMap.add(path, NULL);
- }
-
- if (!mLibrary.get()) {
- mLibrary = new SharedLibrary(path);
- if (!*mLibrary) {
- return false;
- }
-
- mLibraryPathToOpenLibraryMap.replaceValueAt(index, mLibrary);
- }
-
- typedef DrmFactory *(*CreateDrmFactoryFunc)();
-
- CreateDrmFactoryFunc createDrmFactory =
- (CreateDrmFactoryFunc)mLibrary->lookup("createDrmFactory");
-
- if (createDrmFactory == NULL ||
- (mFactory = createDrmFactory()) == NULL ||
- !mFactory->isCryptoSchemeSupported(uuid)) {
- closeFactory();
- return false;
- }
- return true;
-}
-
-bool Drm::isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType) {
-
- Mutex::Autolock autoLock(mLock);
-
- if (!mFactory || !mFactory->isCryptoSchemeSupported(uuid)) {
- findFactoryForScheme(uuid);
- if (mInitCheck != OK) {
- return false;
- }
- }
-
- if (mimeType != "") {
- return mFactory->isContentTypeSupported(mimeType);
- }
-
- return true;
-}
-
-status_t Drm::createPlugin(const uint8_t uuid[16]) {
- Mutex::Autolock autoLock(mLock);
-
- if (mPlugin != NULL) {
- return -EINVAL;
- }
-
- if (!mFactory || !mFactory->isCryptoSchemeSupported(uuid)) {
- findFactoryForScheme(uuid);
- }
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- status_t result = mFactory->createDrmPlugin(uuid, &mPlugin);
- mPlugin->setListener(this);
- return result;
-}
-
-status_t Drm::destroyPlugin() {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- delete mPlugin;
- mPlugin = NULL;
-
- return OK;
-}
-
-status_t Drm::openSession(Vector<uint8_t> &sessionId) {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- status_t err = mPlugin->openSession(sessionId);
- if (err == ERROR_DRM_RESOURCE_BUSY) {
- bool retry = false;
- mLock.unlock();
- // reclaimSession may call back to closeSession, since mLock is shared between Drm
- // instances, we should unlock here to avoid deadlock.
- retry = DrmSessionManager::Instance()->reclaimSession(getCallingPid());
- mLock.lock();
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
- if (retry) {
- err = mPlugin->openSession(sessionId);
- }
- }
- if (err == OK) {
- DrmSessionManager::Instance()->addSession(getCallingPid(), mDrmSessionClient, sessionId);
- }
- return err;
-}
-
-status_t Drm::closeSession(Vector<uint8_t> const &sessionId) {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- status_t err = mPlugin->closeSession(sessionId);
- if (err == OK) {
- DrmSessionManager::Instance()->removeSession(sessionId);
- }
- return err;
-}
-
-status_t Drm::getKeyRequest(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &initData,
- String8 const &mimeType, DrmPlugin::KeyType keyType,
- KeyedVector<String8, String8> const &optionalParameters,
- Vector<uint8_t> &request, String8 &defaultUrl,
- DrmPlugin::KeyRequestType *keyRequestType) {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- DrmSessionManager::Instance()->useSession(sessionId);
-
- return mPlugin->getKeyRequest(sessionId, initData, mimeType, keyType,
- optionalParameters, request, defaultUrl,
- keyRequestType);
-}
-
-status_t Drm::provideKeyResponse(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &response,
- Vector<uint8_t> &keySetId) {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- DrmSessionManager::Instance()->useSession(sessionId);
-
- return mPlugin->provideKeyResponse(sessionId, response, keySetId);
-}
-
-status_t Drm::removeKeys(Vector<uint8_t> const &keySetId) {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- return mPlugin->removeKeys(keySetId);
-}
-
-status_t Drm::restoreKeys(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keySetId) {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- DrmSessionManager::Instance()->useSession(sessionId);
-
- return mPlugin->restoreKeys(sessionId, keySetId);
-}
-
-status_t Drm::queryKeyStatus(Vector<uint8_t> const &sessionId,
- KeyedVector<String8, String8> &infoMap) const {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- DrmSessionManager::Instance()->useSession(sessionId);
-
- return mPlugin->queryKeyStatus(sessionId, infoMap);
-}
-
-status_t Drm::getProvisionRequest(String8 const &certType, String8 const &certAuthority,
- Vector<uint8_t> &request, String8 &defaultUrl) {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- return mPlugin->getProvisionRequest(certType, certAuthority,
- request, defaultUrl);
-}
-
-status_t Drm::provideProvisionResponse(Vector<uint8_t> const &response,
- Vector<uint8_t> &certificate,
- Vector<uint8_t> &wrappedKey) {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- return mPlugin->provideProvisionResponse(response, certificate, wrappedKey);
-}
-
-status_t Drm::getSecureStops(List<Vector<uint8_t> > &secureStops) {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- return mPlugin->getSecureStops(secureStops);
-}
-
-status_t Drm::getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop) {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- return mPlugin->getSecureStop(ssid, secureStop);
-}
-
-status_t Drm::releaseSecureStops(Vector<uint8_t> const &ssRelease) {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- return mPlugin->releaseSecureStops(ssRelease);
-}
-
-status_t Drm::releaseAllSecureStops() {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- return mPlugin->releaseAllSecureStops();
-}
-
-status_t Drm::getPropertyString(String8 const &name, String8 &value ) const {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- return mPlugin->getPropertyString(name, value);
-}
-
-status_t Drm::getPropertyByteArray(String8 const &name, Vector<uint8_t> &value ) const {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- return mPlugin->getPropertyByteArray(name, value);
-}
-
-status_t Drm::setPropertyString(String8 const &name, String8 const &value ) const {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- return mPlugin->setPropertyString(name, value);
-}
-
-status_t Drm::setPropertyByteArray(String8 const &name,
- Vector<uint8_t> const &value ) const {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- return mPlugin->setPropertyByteArray(name, value);
-}
-
-
-status_t Drm::setCipherAlgorithm(Vector<uint8_t> const &sessionId,
- String8 const &algorithm) {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- DrmSessionManager::Instance()->useSession(sessionId);
-
- return mPlugin->setCipherAlgorithm(sessionId, algorithm);
-}
-
-status_t Drm::setMacAlgorithm(Vector<uint8_t> const &sessionId,
- String8 const &algorithm) {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- DrmSessionManager::Instance()->useSession(sessionId);
-
- return mPlugin->setMacAlgorithm(sessionId, algorithm);
-}
-
-status_t Drm::encrypt(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &input,
- Vector<uint8_t> const &iv,
- Vector<uint8_t> &output) {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- DrmSessionManager::Instance()->useSession(sessionId);
-
- return mPlugin->encrypt(sessionId, keyId, input, iv, output);
-}
-
-status_t Drm::decrypt(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &input,
- Vector<uint8_t> const &iv,
- Vector<uint8_t> &output) {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- DrmSessionManager::Instance()->useSession(sessionId);
-
- return mPlugin->decrypt(sessionId, keyId, input, iv, output);
-}
-
-status_t Drm::sign(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &message,
- Vector<uint8_t> &signature) {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- DrmSessionManager::Instance()->useSession(sessionId);
-
- return mPlugin->sign(sessionId, keyId, message, signature);
-}
-
-status_t Drm::verify(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &message,
- Vector<uint8_t> const &signature,
- bool &match) {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- DrmSessionManager::Instance()->useSession(sessionId);
-
- return mPlugin->verify(sessionId, keyId, message, signature, match);
-}
-
-status_t Drm::signRSA(Vector<uint8_t> const &sessionId,
- String8 const &algorithm,
- Vector<uint8_t> const &message,
- Vector<uint8_t> const &wrappedKey,
- Vector<uint8_t> &signature) {
- Mutex::Autolock autoLock(mLock);
-
- if (mInitCheck != OK) {
- return mInitCheck;
- }
-
- if (mPlugin == NULL) {
- return -EINVAL;
- }
-
- if (!checkPermission("android.permission.ACCESS_DRM_CERTIFICATES")) {
- return -EPERM;
- }
-
- DrmSessionManager::Instance()->useSession(sessionId);
-
- return mPlugin->signRSA(sessionId, algorithm, message, wrappedKey, signature);
-}
-
-void Drm::binderDied(const wp<IBinder> &the_late_who __unused)
-{
- mEventLock.lock();
- mListener.clear();
- mEventLock.unlock();
-
- Mutex::Autolock autoLock(mLock);
- delete mPlugin;
- mPlugin = NULL;
- closeFactory();
-}
-
-void Drm::writeByteArray(Parcel &obj, Vector<uint8_t> const *array)
-{
- if (array && array->size()) {
- obj.writeInt32(array->size());
- obj.write(array->array(), array->size());
- } else {
- obj.writeInt32(0);
- }
-}
-
-} // namespace android
diff --git a/drm/libmediadrm/DrmHal.cpp b/drm/libmediadrm/DrmHal.cpp
new file mode 100644
index 0000000..386546f
--- /dev/null
+++ b/drm/libmediadrm/DrmHal.cpp
@@ -0,0 +1,983 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "DrmHal"
+#include <utils/Log.h>
+
+#include <binder/IPCThreadState.h>
+#include <binder/IServiceManager.h>
+
+#include <android/hardware/drm/1.0/IDrmFactory.h>
+#include <android/hardware/drm/1.0/IDrmPlugin.h>
+#include <android/hardware/drm/1.0/types.h>
+#include <android/hidl/manager/1.0/IServiceManager.h>
+#include <hidl/ServiceManagement.h>
+
+#include <media/DrmHal.h>
+#include <media/DrmSessionClientInterface.h>
+#include <media/DrmSessionManager.h>
+#include <media/drm/DrmAPI.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/MediaErrors.h>
+
+using ::android::hardware::drm::V1_0::EventType;
+using ::android::hardware::drm::V1_0::IDrmFactory;
+using ::android::hardware::drm::V1_0::IDrmPlugin;
+using ::android::hardware::drm::V1_0::KeyedVector;
+using ::android::hardware::drm::V1_0::KeyRequestType;
+using ::android::hardware::drm::V1_0::KeyStatus;
+using ::android::hardware::drm::V1_0::KeyStatusType;
+using ::android::hardware::drm::V1_0::KeyType;
+using ::android::hardware::drm::V1_0::KeyValue;
+using ::android::hardware::drm::V1_0::SecureStop;
+using ::android::hardware::drm::V1_0::Status;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::hidl::manager::V1_0::IServiceManager;
+using ::android::sp;
+
+namespace android {
+
+static inline int getCallingPid() {
+ return IPCThreadState::self()->getCallingPid();
+}
+
+static bool checkPermission(const char* permissionString) {
+ if (getpid() == IPCThreadState::self()->getCallingPid()) return true;
+ bool ok = checkCallingPermission(String16(permissionString));
+ if (!ok) ALOGE("Request requires %s", permissionString);
+ return ok;
+}
+
+static const Vector<uint8_t> toVector(const hidl_vec<uint8_t> &vec) {
+ Vector<uint8_t> vector;
+ vector.appendArray(vec.data(), vec.size());
+ return *const_cast<const Vector<uint8_t> *>(&vector);
+}
+
+static hidl_vec<uint8_t> toHidlVec(const Vector<uint8_t> &vector) {
+ hidl_vec<uint8_t> vec;
+ vec.setToExternal(const_cast<uint8_t *>(vector.array()), vector.size());
+ return vec;
+}
+
+static String8 toString8(const hidl_string &string) {
+ return String8(string.c_str());
+}
+
+static hidl_string toHidlString(const String8& string) {
+ return hidl_string(string.string());
+}
+
+
+static ::KeyedVector toHidlKeyedVector(const KeyedVector<String8, String8>&
+ keyedVector) {
+ std::vector<KeyValue> stdKeyedVector;
+ for (size_t i = 0; i < keyedVector.size(); i++) {
+ KeyValue keyValue;
+ keyValue.key = toHidlString(keyedVector.keyAt(i));
+ keyValue.value = toHidlString(keyedVector.valueAt(i));
+ stdKeyedVector.push_back(keyValue);
+ }
+ return ::KeyedVector(stdKeyedVector);
+}
+
+static KeyedVector<String8, String8> toKeyedVector(const ::KeyedVector&
+ hKeyedVector) {
+ KeyedVector<String8, String8> keyedVector;
+ for (size_t i = 0; i < hKeyedVector.size(); i++) {
+ keyedVector.add(toString8(hKeyedVector[i].key),
+ toString8(hKeyedVector[i].value));
+ }
+ return keyedVector;
+}
+
+static List<Vector<uint8_t>> toSecureStops(const hidl_vec<SecureStop>&
+ hSecureStops) {
+ List<Vector<uint8_t>> secureStops;
+ for (size_t i = 0; i < hSecureStops.size(); i++) {
+ secureStops.push_back(toVector(hSecureStops[i].opaqueData));
+ }
+ return secureStops;
+}
+
+static status_t toStatusT(Status status) {
+ switch (status) {
+ case Status::OK:
+ return OK;
+ break;
+ case Status::ERROR_DRM_NO_LICENSE:
+ return ERROR_DRM_NO_LICENSE;
+ break;
+ case Status::ERROR_DRM_LICENSE_EXPIRED:
+ return ERROR_DRM_LICENSE_EXPIRED;
+ break;
+ case Status::ERROR_DRM_SESSION_NOT_OPENED:
+ return ERROR_DRM_SESSION_NOT_OPENED;
+ break;
+ case Status::ERROR_DRM_CANNOT_HANDLE:
+ return ERROR_DRM_CANNOT_HANDLE;
+ break;
+ case Status::ERROR_DRM_INVALID_STATE:
+ return ERROR_DRM_TAMPER_DETECTED;
+ break;
+ case Status::BAD_VALUE:
+ return BAD_VALUE;
+ break;
+ case Status::ERROR_DRM_NOT_PROVISIONED:
+ return ERROR_DRM_NOT_PROVISIONED;
+ break;
+ case Status::ERROR_DRM_RESOURCE_BUSY:
+ return ERROR_DRM_RESOURCE_BUSY;
+ break;
+ case Status::ERROR_DRM_DEVICE_REVOKED:
+ return ERROR_DRM_DEVICE_REVOKED;
+ break;
+ case Status::ERROR_DRM_UNKNOWN:
+ default:
+ return ERROR_DRM_UNKNOWN;
+ break;
+ }
+}
+
+
+Mutex DrmHal::mLock;
+
+struct DrmSessionClient : public DrmSessionClientInterface {
+ explicit DrmSessionClient(DrmHal* drm) : mDrm(drm) {}
+
+ virtual bool reclaimSession(const Vector<uint8_t>& sessionId) {
+ sp<DrmHal> drm = mDrm.promote();
+ if (drm == NULL) {
+ return true;
+ }
+ status_t err = drm->closeSession(sessionId);
+ if (err != OK) {
+ return false;
+ }
+ drm->sendEvent(EventType::SESSION_RECLAIMED,
+ toHidlVec(sessionId), hidl_vec<uint8_t>());
+ return true;
+ }
+
+protected:
+ virtual ~DrmSessionClient() {}
+
+private:
+ wp<DrmHal> mDrm;
+
+ DISALLOW_EVIL_CONSTRUCTORS(DrmSessionClient);
+};
+
+DrmHal::DrmHal()
+ : mDrmSessionClient(new DrmSessionClient(this)),
+ mFactories(makeDrmFactories()),
+ mInitCheck((mFactories.size() == 0) ? ERROR_UNSUPPORTED : NO_INIT) {
+}
+
+DrmHal::~DrmHal() {
+ DrmSessionManager::Instance()->removeDrm(mDrmSessionClient);
+}
+
+Vector<sp<IDrmFactory>> DrmHal::makeDrmFactories() {
+ Vector<sp<IDrmFactory>> factories;
+
+ auto manager = hardware::defaultServiceManager();
+
+ if (manager != NULL) {
+ manager->listByInterface(IDrmFactory::descriptor,
+ [&factories](const hidl_vec<hidl_string> ®istered) {
+ for (const auto &instance : registered) {
+ auto factory = IDrmFactory::getService(instance);
+ if (factory != NULL) {
+ factories.push_back(factory);
+ ALOGI("makeDrmFactories: factory instance %s is %s",
+ instance.c_str(),
+ factory->isRemote() ? "Remote" : "Not Remote");
+ }
+ }
+ }
+ );
+ }
+
+ if (factories.size() == 0) {
+ // must be in passthrough mode, load the default passthrough service
+ auto passthrough = IDrmFactory::getService();
+ if (passthrough != NULL) {
+ ALOGI("makeDrmFactories: using default drm instance");
+ factories.push_back(passthrough);
+ } else {
+ ALOGE("Failed to find any drm factories");
+ }
+ }
+ return factories;
+}
+
+sp<IDrmPlugin> DrmHal::makeDrmPlugin(const sp<IDrmFactory>& factory,
+ const uint8_t uuid[16], const String8& appPackageName) {
+
+ sp<IDrmPlugin> plugin;
+ Return<void> hResult = factory->createPlugin(uuid, appPackageName.string(),
+ [&](Status status, const sp<IDrmPlugin>& hPlugin) {
+ if (status != Status::OK) {
+ ALOGE("Failed to make drm plugin");
+ return;
+ }
+ plugin = hPlugin;
+ }
+ );
+ return plugin;
+}
+
+status_t DrmHal::initCheck() const {
+ return mInitCheck;
+}
+
+status_t DrmHal::setListener(const sp<IDrmClient>& listener)
+{
+ Mutex::Autolock lock(mEventLock);
+ if (mListener != NULL){
+ IInterface::asBinder(mListener)->unlinkToDeath(this);
+ }
+ if (listener != NULL) {
+ IInterface::asBinder(listener)->linkToDeath(this);
+ }
+ mListener = listener;
+ return NO_ERROR;
+}
+
+Return<void> DrmHal::sendEvent(EventType hEventType,
+ const hidl_vec<uint8_t>& sessionId, const hidl_vec<uint8_t>& data) {
+
+ mEventLock.lock();
+ sp<IDrmClient> listener = mListener;
+ mEventLock.unlock();
+
+ if (listener != NULL) {
+ Parcel obj;
+ writeByteArray(obj, sessionId);
+ writeByteArray(obj, data);
+
+ Mutex::Autolock lock(mNotifyLock);
+ DrmPlugin::EventType eventType;
+ switch(hEventType) {
+ case EventType::PROVISION_REQUIRED:
+ eventType = DrmPlugin::kDrmPluginEventProvisionRequired;
+ break;
+ case EventType::KEY_NEEDED:
+ eventType = DrmPlugin::kDrmPluginEventKeyNeeded;
+ break;
+ case EventType::KEY_EXPIRED:
+ eventType = DrmPlugin::kDrmPluginEventKeyExpired;
+ break;
+ case EventType::VENDOR_DEFINED:
+ eventType = DrmPlugin::kDrmPluginEventVendorDefined;
+ break;
+ case EventType::SESSION_RECLAIMED:
+ eventType = DrmPlugin::kDrmPluginEventSessionReclaimed;
+ break;
+ default:
+ return Void();
+ }
+ listener->notify(eventType, 0, &obj);
+ }
+ return Void();
+}
+
+Return<void> DrmHal::sendExpirationUpdate(const hidl_vec<uint8_t>& sessionId,
+ int64_t expiryTimeInMS) {
+
+ mEventLock.lock();
+ sp<IDrmClient> listener = mListener;
+ mEventLock.unlock();
+
+ if (listener != NULL) {
+ Parcel obj;
+ writeByteArray(obj, sessionId);
+ obj.writeInt64(expiryTimeInMS);
+
+ Mutex::Autolock lock(mNotifyLock);
+ listener->notify(DrmPlugin::kDrmPluginEventExpirationUpdate, 0, &obj);
+ }
+ return Void();
+}
+
+Return<void> DrmHal::sendKeysChange(const hidl_vec<uint8_t>& sessionId,
+ const hidl_vec<KeyStatus>& keyStatusList, bool hasNewUsableKey) {
+
+ mEventLock.lock();
+ sp<IDrmClient> listener = mListener;
+ mEventLock.unlock();
+
+ if (listener != NULL) {
+ Parcel obj;
+ writeByteArray(obj, sessionId);
+
+ size_t nKeys = keyStatusList.size();
+ obj.writeInt32(nKeys);
+ for (size_t i = 0; i < nKeys; ++i) {
+ const KeyStatus &keyStatus = keyStatusList[i];
+ writeByteArray(obj, keyStatus.keyId);
+ uint32_t type;
+ switch(keyStatus.type) {
+ case KeyStatusType::USABLE:
+ type = DrmPlugin::kKeyStatusType_Usable;
+ break;
+ case KeyStatusType::EXPIRED:
+ type = DrmPlugin::kKeyStatusType_Expired;
+ break;
+ case KeyStatusType::OUTPUTNOTALLOWED:
+ type = DrmPlugin::kKeyStatusType_OutputNotAllowed;
+ break;
+ case KeyStatusType::STATUSPENDING:
+ type = DrmPlugin::kKeyStatusType_StatusPending;
+ break;
+ case KeyStatusType::INTERNALERROR:
+ default:
+ type = DrmPlugin::kKeyStatusType_InternalError;
+ break;
+ }
+ obj.writeInt32(type);
+ }
+ obj.writeInt32(hasNewUsableKey);
+
+ Mutex::Autolock lock(mNotifyLock);
+ listener->notify(DrmPlugin::kDrmPluginEventKeysChange, 0, &obj);
+ }
+ return Void();
+}
+
+bool DrmHal::isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType) {
+ Mutex::Autolock autoLock(mLock);
+
+ for (size_t i = 0; i < mFactories.size(); i++) {
+ if (mFactories[i]->isCryptoSchemeSupported(uuid)) {
+ if (mimeType != "") {
+ if (mFactories[i]->isContentTypeSupported(mimeType.string())) {
+ return true;
+ }
+ } else {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+status_t DrmHal::createPlugin(const uint8_t uuid[16],
+ const String8& appPackageName) {
+ Mutex::Autolock autoLock(mLock);
+
+ for (size_t i = 0; i < mFactories.size(); i++) {
+ if (mFactories[i]->isCryptoSchemeSupported(uuid)) {
+ mPlugin = makeDrmPlugin(mFactories[i], uuid, appPackageName);
+ }
+ }
+
+ if (mPlugin == NULL) {
+ mInitCheck = ERROR_UNSUPPORTED;
+ } else {
+ mInitCheck = OK;
+ mPlugin->setListener(this);
+ }
+
+ return mInitCheck;
+}
+
+status_t DrmHal::destroyPlugin() {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ setListener(NULL);
+ if (mPlugin != NULL) {
+ mPlugin->setListener(NULL);
+ }
+ mPlugin.clear();
+ mInitCheck = NO_INIT;
+
+ return OK;
+}
+
+status_t DrmHal::openSession(Vector<uint8_t> &sessionId) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ status_t err = UNKNOWN_ERROR;
+
+ bool retry = true;
+ do {
+ hidl_vec<uint8_t> hSessionId;
+
+ Return<void> hResult = mPlugin->openSession(
+ [&](Status status, const hidl_vec<uint8_t>& id) {
+ if (status == Status::OK) {
+ sessionId = toVector(id);
+ }
+ err = toStatusT(status);
+ }
+ );
+
+ if (!hResult.isOk()) {
+ err = DEAD_OBJECT;
+ }
+
+ if (err == ERROR_DRM_RESOURCE_BUSY && retry) {
+ mLock.unlock();
+ // reclaimSession may call back to closeSession, since mLock is
+ // shared between Drm instances, we should unlock here to avoid
+ // deadlock.
+ retry = DrmSessionManager::Instance()->reclaimSession(getCallingPid());
+ mLock.lock();
+ } else {
+ retry = false;
+ }
+ } while (retry);
+
+ if (err == OK) {
+ DrmSessionManager::Instance()->addSession(getCallingPid(),
+ mDrmSessionClient, sessionId);
+ }
+ return err;
+}
+
+status_t DrmHal::closeSession(Vector<uint8_t> const &sessionId) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ Status status = mPlugin->closeSession(toHidlVec(sessionId));
+ if (status == Status::OK) {
+ DrmSessionManager::Instance()->removeSession(sessionId);
+ }
+ return toStatusT(status);
+}
+
+status_t DrmHal::getKeyRequest(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &initData, String8 const &mimeType,
+ DrmPlugin::KeyType keyType, KeyedVector<String8,
+ String8> const &optionalParameters, Vector<uint8_t> &request,
+ String8 &defaultUrl, DrmPlugin::KeyRequestType *keyRequestType) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ DrmSessionManager::Instance()->useSession(sessionId);
+
+ KeyType hKeyType;
+ if (keyType == DrmPlugin::kKeyType_Streaming) {
+ hKeyType = KeyType::STREAMING;
+ } else if (keyType == DrmPlugin::kKeyType_Offline) {
+ hKeyType = KeyType::OFFLINE;
+ } else if (keyType == DrmPlugin::kKeyType_Release) {
+ hKeyType = KeyType::RELEASE;
+ } else {
+ return BAD_VALUE;
+ }
+
+ ::KeyedVector hOptionalParameters = toHidlKeyedVector(optionalParameters);
+
+ status_t err = UNKNOWN_ERROR;
+
+ Return<void> hResult = mPlugin->getKeyRequest(toHidlVec(sessionId),
+ toHidlVec(initData), toHidlString(mimeType), hKeyType, hOptionalParameters,
+ [&](Status status, const hidl_vec<uint8_t>& hRequest,
+ KeyRequestType hKeyRequestType, const hidl_string& hDefaultUrl) {
+
+ if (status == Status::OK) {
+ request = toVector(hRequest);
+ defaultUrl = toString8(hDefaultUrl);
+
+ switch (hKeyRequestType) {
+ case KeyRequestType::INITIAL:
+ *keyRequestType = DrmPlugin::kKeyRequestType_Initial;
+ break;
+ case KeyRequestType::RENEWAL:
+ *keyRequestType = DrmPlugin::kKeyRequestType_Renewal;
+ break;
+ case KeyRequestType::RELEASE:
+ *keyRequestType = DrmPlugin::kKeyRequestType_Release;
+ break;
+ default:
+ *keyRequestType = DrmPlugin::kKeyRequestType_Unknown;
+ break;
+ }
+ err = toStatusT(status);
+ }
+ });
+
+ return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHal::provideKeyResponse(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &response, Vector<uint8_t> &keySetId) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ DrmSessionManager::Instance()->useSession(sessionId);
+
+ status_t err = UNKNOWN_ERROR;
+
+ Return<void> hResult = mPlugin->provideKeyResponse(toHidlVec(sessionId),
+ toHidlVec(response),
+ [&](Status status, const hidl_vec<uint8_t>& hKeySetId) {
+ if (status == Status::OK) {
+ keySetId = toVector(hKeySetId);
+ }
+ err = toStatusT(status);
+ }
+ );
+
+ return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHal::removeKeys(Vector<uint8_t> const &keySetId) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ return toStatusT(mPlugin->removeKeys(toHidlVec(keySetId)));
+}
+
+status_t DrmHal::restoreKeys(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keySetId) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ DrmSessionManager::Instance()->useSession(sessionId);
+
+ return toStatusT(mPlugin->restoreKeys(toHidlVec(sessionId),
+ toHidlVec(keySetId)));
+}
+
+status_t DrmHal::queryKeyStatus(Vector<uint8_t> const &sessionId,
+ KeyedVector<String8, String8> &infoMap) const {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ DrmSessionManager::Instance()->useSession(sessionId);
+
+ ::KeyedVector hInfoMap;
+
+ status_t err = UNKNOWN_ERROR;
+
+ Return<void> hResult = mPlugin->queryKeyStatus(toHidlVec(sessionId),
+ [&](Status status, const hidl_vec<KeyValue>& map) {
+ if (status == Status::OK) {
+ infoMap = toKeyedVector(map);
+ }
+ err = toStatusT(status);
+ }
+ );
+
+ return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHal::getProvisionRequest(String8 const &certType,
+ String8 const &certAuthority, Vector<uint8_t> &request,
+ String8 &defaultUrl) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ status_t err = UNKNOWN_ERROR;
+
+ Return<void> hResult = mPlugin->getProvisionRequest(
+ toHidlString(certType), toHidlString(certAuthority),
+ [&](Status status, const hidl_vec<uint8_t>& hRequest,
+ const hidl_string& hDefaultUrl) {
+ if (status == Status::OK) {
+ request = toVector(hRequest);
+ defaultUrl = toString8(hDefaultUrl);
+ }
+ err = toStatusT(status);
+ }
+ );
+
+ return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHal::provideProvisionResponse(Vector<uint8_t> const &response,
+ Vector<uint8_t> &certificate, Vector<uint8_t> &wrappedKey) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ status_t err = UNKNOWN_ERROR;
+
+ Return<void> hResult = mPlugin->provideProvisionResponse(toHidlVec(response),
+ [&](Status status, const hidl_vec<uint8_t>& hCertificate,
+ const hidl_vec<uint8_t>& hWrappedKey) {
+ if (status == Status::OK) {
+ certificate = toVector(hCertificate);
+ wrappedKey = toVector(hWrappedKey);
+ }
+ err = toStatusT(status);
+ }
+ );
+
+ return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHal::getSecureStops(List<Vector<uint8_t>> &secureStops) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ status_t err = UNKNOWN_ERROR;
+
+ Return<void> hResult = mPlugin->getSecureStops(
+ [&](Status status, const hidl_vec<SecureStop>& hSecureStops) {
+ if (status == Status::OK) {
+ secureStops = toSecureStops(hSecureStops);
+ }
+ err = toStatusT(status);
+ }
+ );
+
+ return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+
+status_t DrmHal::getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ status_t err = UNKNOWN_ERROR;
+
+ Return<void> hResult = mPlugin->getSecureStop(toHidlVec(ssid),
+ [&](Status status, const SecureStop& hSecureStop) {
+ if (status == Status::OK) {
+ secureStop = toVector(hSecureStop.opaqueData);
+ }
+ err = toStatusT(status);
+ }
+ );
+
+ return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHal::releaseSecureStops(Vector<uint8_t> const &ssRelease) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ return toStatusT(mPlugin->releaseSecureStop(toHidlVec(ssRelease)));
+}
+
+status_t DrmHal::releaseAllSecureStops() {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ return toStatusT(mPlugin->releaseAllSecureStops());
+}
+
+status_t DrmHal::getPropertyString(String8 const &name, String8 &value ) const {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ status_t err = UNKNOWN_ERROR;
+
+ Return<void> hResult = mPlugin->getPropertyString(toHidlString(name),
+ [&](Status status, const hidl_string& hValue) {
+ if (status == Status::OK) {
+ value = toString8(hValue);
+ }
+ err = toStatusT(status);
+ }
+ );
+
+ return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHal::getPropertyByteArray(String8 const &name, Vector<uint8_t> &value ) const {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ status_t err = UNKNOWN_ERROR;
+
+ Return<void> hResult = mPlugin->getPropertyByteArray(toHidlString(name),
+ [&](Status status, const hidl_vec<uint8_t>& hValue) {
+ if (status == Status::OK) {
+ value = toVector(hValue);
+ }
+ err = toStatusT(status);
+ }
+ );
+
+ return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHal::setPropertyString(String8 const &name, String8 const &value ) const {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ Status status = mPlugin->setPropertyString(toHidlString(name),
+ toHidlString(value));
+ return toStatusT(status);
+}
+
+status_t DrmHal::setPropertyByteArray(String8 const &name,
+ Vector<uint8_t> const &value ) const {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ Status status = mPlugin->setPropertyByteArray(toHidlString(name),
+ toHidlVec(value));
+ return toStatusT(status);
+}
+
+
+status_t DrmHal::setCipherAlgorithm(Vector<uint8_t> const &sessionId,
+ String8 const &algorithm) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ DrmSessionManager::Instance()->useSession(sessionId);
+
+ Status status = mPlugin->setCipherAlgorithm(toHidlVec(sessionId),
+ toHidlString(algorithm));
+ return toStatusT(status);
+}
+
+status_t DrmHal::setMacAlgorithm(Vector<uint8_t> const &sessionId,
+ String8 const &algorithm) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ DrmSessionManager::Instance()->useSession(sessionId);
+
+ Status status = mPlugin->setMacAlgorithm(toHidlVec(sessionId),
+ toHidlString(algorithm));
+ return toStatusT(status);
+}
+
+status_t DrmHal::encrypt(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keyId, Vector<uint8_t> const &input,
+ Vector<uint8_t> const &iv, Vector<uint8_t> &output) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ DrmSessionManager::Instance()->useSession(sessionId);
+
+ status_t err = UNKNOWN_ERROR;
+
+ Return<void> hResult = mPlugin->encrypt(toHidlVec(sessionId),
+ toHidlVec(keyId), toHidlVec(input), toHidlVec(iv),
+ [&](Status status, const hidl_vec<uint8_t>& hOutput) {
+ if (status == Status::OK) {
+ output = toVector(hOutput);
+ }
+ err = toStatusT(status);
+ }
+ );
+
+ return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHal::decrypt(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keyId, Vector<uint8_t> const &input,
+ Vector<uint8_t> const &iv, Vector<uint8_t> &output) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ DrmSessionManager::Instance()->useSession(sessionId);
+
+ status_t err = UNKNOWN_ERROR;
+
+ Return<void> hResult = mPlugin->decrypt(toHidlVec(sessionId),
+ toHidlVec(keyId), toHidlVec(input), toHidlVec(iv),
+ [&](Status status, const hidl_vec<uint8_t>& hOutput) {
+ if (status == Status::OK) {
+ output = toVector(hOutput);
+ }
+ err = toStatusT(status);
+ }
+ );
+
+ return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHal::sign(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keyId, Vector<uint8_t> const &message,
+ Vector<uint8_t> &signature) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ DrmSessionManager::Instance()->useSession(sessionId);
+
+ status_t err = UNKNOWN_ERROR;
+
+ Return<void> hResult = mPlugin->sign(toHidlVec(sessionId),
+ toHidlVec(keyId), toHidlVec(message),
+ [&](Status status, const hidl_vec<uint8_t>& hSignature) {
+ if (status == Status::OK) {
+ signature = toVector(hSignature);
+ }
+ err = toStatusT(status);
+ }
+ );
+
+ return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHal::verify(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keyId, Vector<uint8_t> const &message,
+ Vector<uint8_t> const &signature, bool &match) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ DrmSessionManager::Instance()->useSession(sessionId);
+
+ status_t err = UNKNOWN_ERROR;
+
+ Return<void> hResult = mPlugin->verify(toHidlVec(sessionId),toHidlVec(keyId),
+ toHidlVec(message), toHidlVec(signature),
+ [&](Status status, bool hMatch) {
+ if (status == Status::OK) {
+ match = hMatch;
+ } else {
+ match = false;
+ }
+ err = toStatusT(status);
+ }
+ );
+
+ return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+status_t DrmHal::signRSA(Vector<uint8_t> const &sessionId,
+ String8 const &algorithm, Vector<uint8_t> const &message,
+ Vector<uint8_t> const &wrappedKey, Vector<uint8_t> &signature) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ if (!checkPermission("android.permission.ACCESS_DRM_CERTIFICATES")) {
+ return -EPERM;
+ }
+
+ DrmSessionManager::Instance()->useSession(sessionId);
+
+ status_t err = UNKNOWN_ERROR;
+
+ Return<void> hResult = mPlugin->signRSA(toHidlVec(sessionId),
+ toHidlString(algorithm), toHidlVec(message), toHidlVec(wrappedKey),
+ [&](Status status, const hidl_vec<uint8_t>& hSignature) {
+ if (status == Status::OK) {
+ signature = toVector(hSignature);
+ }
+ err = toStatusT(status);
+ }
+ );
+
+ return hResult.isOk() ? err : DEAD_OBJECT;
+}
+
+void DrmHal::binderDied(const wp<IBinder> &the_late_who __unused)
+{
+ Mutex::Autolock autoLock(mLock);
+ setListener(NULL);
+ if (mPlugin != NULL) {
+ mPlugin->setListener(NULL);
+ }
+ mPlugin.clear();
+ mInitCheck = NO_INIT;
+}
+
+void DrmHal::writeByteArray(Parcel &obj, hidl_vec<uint8_t> const &vec)
+{
+ if (vec.size()) {
+ obj.writeInt32(vec.size());
+ obj.write(vec.data(), vec.size());
+ } else {
+ obj.writeInt32(0);
+ }
+}
+
+} // namespace android
diff --git a/drm/libmediadrm/DrmPluginPath.cpp b/drm/libmediadrm/DrmPluginPath.cpp
new file mode 100644
index 0000000..c760825
--- /dev/null
+++ b/drm/libmediadrm/DrmPluginPath.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "DrmPluginPath"
+#include <utils/Log.h>
+
+#include <cutils/properties.h>
+#include <media/DrmPluginPath.h>
+
+namespace android {
+
+const char* getDrmPluginPath() {
+ char value[PROPERTY_VALUE_MAX];
+ if (property_get("drm.64bit.enabled", value, NULL) == 0) {
+ return "/vendor/lib/mediadrm";
+ } else {
+ return "/vendor/lib64/mediadrm";
+ }
+}
+
+} // namespace android
diff --git a/drm/libmediadrm/ICrypto.cpp b/drm/libmediadrm/ICrypto.cpp
index 7b261be..8506d95 100644
--- a/drm/libmediadrm/ICrypto.cpp
+++ b/drm/libmediadrm/ICrypto.cpp
@@ -36,6 +36,8 @@
DECRYPT,
NOTIFY_RESOLUTION,
SET_MEDIADRM_SESSION,
+ SET_HEAP,
+ UNSET_HEAP,
};
struct BpCrypto : public BpInterface<ICrypto> {
@@ -94,18 +96,13 @@
return reply.readInt32() != 0;
}
- virtual ssize_t decrypt(
- DestinationType dstType,
- const uint8_t key[16],
- const uint8_t iv[16],
+ virtual ssize_t decrypt(const uint8_t key[16], const uint8_t iv[16],
CryptoPlugin::Mode mode, const CryptoPlugin::Pattern &pattern,
- const sp<IMemory> &sharedBuffer, size_t offset,
+ const SourceBuffer &source, size_t offset,
const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
- void *dstPtr,
- AString *errorDetailMsg) {
+ const DestinationBuffer &destination, AString *errorDetailMsg) {
Parcel data, reply;
data.writeInterfaceToken(ICrypto::getInterfaceDescriptor());
- data.writeInt32((int32_t)dstType);
data.writeInt32(mode);
data.writeInt32(pattern.mEncryptBlocks);
data.writeInt32(pattern.mSkipBlocks);
@@ -130,18 +127,24 @@
}
data.writeInt32(totalSize);
- data.writeStrongBinder(IInterface::asBinder(sharedBuffer));
+ data.writeStrongBinder(IInterface::asBinder(source.mSharedMemory));
+ data.writeInt32(source.mHeapSeqNum);
data.writeInt32(offset);
data.writeInt32(numSubSamples);
data.write(subSamples, sizeof(CryptoPlugin::SubSample) * numSubSamples);
- if (dstType == kDestinationTypeNativeHandle) {
- data.writeNativeHandle(static_cast<native_handle_t *>(dstPtr));
- } else if (dstType == kDestinationTypeOpaqueHandle) {
- data.writeInt64(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(dstPtr)));
+ data.writeInt32((int32_t)destination.mType);
+ if (destination.mType == kDestinationTypeNativeHandle) {
+ if (destination.mHandle == NULL) {
+ return BAD_VALUE;
+ }
+ data.writeNativeHandle(destination.mHandle);
} else {
- dstType = kDestinationTypeVmPointer;
+ if (destination.mSharedMemory == NULL) {
+ return BAD_VALUE;
+ }
+ data.writeStrongBinder(IInterface::asBinder(destination.mSharedMemory));
}
remote()->transact(DECRYPT, data, &reply);
@@ -149,11 +152,10 @@
ssize_t result = reply.readInt32();
if (isCryptoError(result)) {
- errorDetailMsg->setTo(reply.readCString());
- } else if (dstType == kDestinationTypeVmPointer) {
- // For the non-secure case, copy the decrypted
- // data from shared memory to its final destination
- memcpy(dstPtr, sharedBuffer->pointer(), result);
+ AString msg = reply.readCString();
+ if (errorDetailMsg) {
+ *errorDetailMsg = msg;
+ }
}
return result;
@@ -178,6 +180,30 @@
return reply.readInt32();
}
+ virtual int32_t setHeap(const sp<IMemoryHeap> &heap) {
+ Parcel data, reply;
+ data.writeInterfaceToken(ICrypto::getInterfaceDescriptor());
+ data.writeStrongBinder(IInterface::asBinder(heap));
+ status_t err = remote()->transact(SET_HEAP, data, &reply);
+ if (err != NO_ERROR) {
+ return -1;
+ }
+ int32_t seqNum;
+ if (reply.readInt32(&seqNum) != NO_ERROR) {
+ return -1;
+ }
+ return seqNum;
+ }
+
+ virtual void unsetHeap(int32_t seqNum) {
+ Parcel data, reply;
+ data.writeInterfaceToken(ICrypto::getInterfaceDescriptor());
+ data.writeInt32(seqNum);
+ remote()->transact(UNSET_HEAP, data, &reply);
+ return;
+ }
+
+
private:
void readVector(Parcel &reply, Vector<uint8_t> &vector) const {
uint32_t size = reply.readInt32();
@@ -239,17 +265,21 @@
size_t opaqueSize = data.readInt32();
void *opaqueData = NULL;
- if (opaqueSize > 0) {
- opaqueData = malloc(opaqueSize);
- data.read(opaqueData, opaqueSize);
+ const size_t kMaxOpaqueSize = 100 * 1024;
+ if (opaqueSize > kMaxOpaqueSize) {
+ return BAD_VALUE;
}
+ opaqueData = malloc(opaqueSize);
+ if (NULL == opaqueData) {
+ return NO_MEMORY;
+ }
+
+ data.read(opaqueData, opaqueSize);
reply->writeInt32(createPlugin(uuid, opaqueData, opaqueSize));
- if (opaqueData != NULL) {
- free(opaqueData);
- opaqueData = NULL;
- }
+ free(opaqueData);
+ opaqueData = NULL;
return OK;
}
@@ -280,7 +310,6 @@
{
CHECK_INTERFACE(ICrypto, data, reply);
- DestinationType dstType = (DestinationType)data.readInt32();
CryptoPlugin::Mode mode = (CryptoPlugin::Mode)data.readInt32();
CryptoPlugin::Pattern pattern;
pattern.mEncryptBlocks = data.readInt32();
@@ -293,34 +322,46 @@
data.read(iv, sizeof(iv));
size_t totalSize = data.readInt32();
- sp<IMemory> sharedBuffer =
+
+ SourceBuffer source;
+
+ source.mSharedMemory =
interface_cast<IMemory>(data.readStrongBinder());
- if (sharedBuffer == NULL) {
+ if (source.mSharedMemory == NULL) {
reply->writeInt32(BAD_VALUE);
return OK;
}
+ source.mHeapSeqNum = data.readInt32();
+
int32_t offset = data.readInt32();
int32_t numSubSamples = data.readInt32();
+ if (numSubSamples < 0 || numSubSamples > 0xffff) {
+ reply->writeInt32(BAD_VALUE);
+ return OK;
+ }
CryptoPlugin::SubSample *subSamples =
- new CryptoPlugin::SubSample[numSubSamples];
+ new CryptoPlugin::SubSample[numSubSamples];
- data.read(
- subSamples,
+ data.read(subSamples,
sizeof(CryptoPlugin::SubSample) * numSubSamples);
- native_handle_t *nativeHandle = NULL;
- void *secureBufferId = NULL, *dstPtr;
- if (dstType == kDestinationTypeNativeHandle) {
- nativeHandle = data.readNativeHandle();
- dstPtr = static_cast<void *>(nativeHandle);
- } else if (dstType == kDestinationTypeOpaqueHandle) {
- secureBufferId = reinterpret_cast<void *>(static_cast<uintptr_t>(data.readInt64()));
- dstPtr = secureBufferId;
- } else {
- dstType = kDestinationTypeVmPointer;
- dstPtr = malloc(totalSize);
+ DestinationBuffer destination;
+ destination.mType = (DestinationType)data.readInt32();
+ if (destination.mType == kDestinationTypeNativeHandle) {
+ destination.mHandle = data.readNativeHandle();
+ if (destination.mHandle == NULL) {
+ reply->writeInt32(BAD_VALUE);
+ return OK;
+ }
+ } else if (destination.mType == kDestinationTypeSharedMemory) {
+ destination.mSharedMemory =
+ interface_cast<IMemory>(data.readStrongBinder());
+ if (destination.mSharedMemory == NULL) {
+ reply->writeInt32(BAD_VALUE);
+ return OK;
+ }
}
AString errorDetailMsg;
@@ -344,20 +385,13 @@
if (overflow || sumSubsampleSizes != totalSize) {
result = -EINVAL;
- } else if (totalSize > sharedBuffer->size()) {
+ } else if (totalSize > source.mSharedMemory->size()) {
result = -EINVAL;
- } else if ((size_t)offset > sharedBuffer->size() - totalSize) {
+ } else if ((size_t)offset > source.mSharedMemory->size() - totalSize) {
result = -EINVAL;
} else {
- result = decrypt(
- dstType,
- key,
- iv,
- mode, pattern,
- sharedBuffer, offset,
- subSamples, numSubSamples,
- dstPtr,
- &errorDetailMsg);
+ result = decrypt(key, iv, mode, pattern, source, offset,
+ subSamples, numSubSamples, destination, &errorDetailMsg);
}
reply->writeInt32(result);
@@ -366,23 +400,12 @@
reply->writeCString(errorDetailMsg.c_str());
}
- if (dstType == kDestinationTypeVmPointer) {
- if (result >= 0) {
- CHECK_LE(result, static_cast<ssize_t>(totalSize));
- // For the non-secure case, pass the decrypted
- // data back via the shared buffer rather than
- // copying it separately over binder to avoid
- // binder's 1MB limit.
- memcpy(sharedBuffer->pointer(), dstPtr, result);
- }
- free(dstPtr);
- dstPtr = NULL;
- } else if (dstType == kDestinationTypeNativeHandle) {
+ if (destination.mType == kDestinationTypeNativeHandle) {
int err;
- if ((err = native_handle_close(nativeHandle)) < 0) {
+ if ((err = native_handle_close(destination.mHandle)) < 0) {
ALOGW("secure buffer native_handle_close failed: %d", err);
}
- if ((err = native_handle_delete(nativeHandle)) < 0) {
+ if ((err = native_handle_delete(destination.mHandle)) < 0) {
ALOGW("secure buffer native_handle_delete failed: %d", err);
}
}
@@ -413,6 +436,23 @@
return OK;
}
+ case SET_HEAP:
+ {
+ CHECK_INTERFACE(ICrypto, data, reply);
+ sp<IMemoryHeap> heap =
+ interface_cast<IMemoryHeap>(data.readStrongBinder());
+ reply->writeInt32(setHeap(heap));
+ return OK;
+ }
+
+ case UNSET_HEAP:
+ {
+ CHECK_INTERFACE(ICrypto, data, reply);
+ int32_t seqNum = data.readInt32();
+ unsetHeap(seqNum);
+ return OK;
+ }
+
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/drm/libmediadrm/IDrm.cpp b/drm/libmediadrm/IDrm.cpp
index c4558c6..4e47112 100644
--- a/drm/libmediadrm/IDrm.cpp
+++ b/drm/libmediadrm/IDrm.cpp
@@ -88,13 +88,15 @@
return reply.readInt32() != 0;
}
- virtual status_t createPlugin(const uint8_t uuid[16]) {
+ virtual status_t createPlugin(const uint8_t uuid[16],
+ const String8& appPackageName) {
Parcel data, reply;
data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
data.write(uuid, 16);
-
+ data.writeString8(appPackageName);
status_t status = remote()->transact(CREATE_PLUGIN, data, &reply);
if (status != OK) {
+ ALOGE("createPlugin: binder call failed: %d", status);
return status;
}
@@ -585,7 +587,6 @@
data.read(uuid, sizeof(uuid));
String8 mimeType = data.readString8();
reply->writeInt32(isCryptoSchemeSupported(uuid, mimeType));
-
return OK;
}
@@ -594,7 +595,8 @@
CHECK_INTERFACE(IDrm, data, reply);
uint8_t uuid[16];
data.read(uuid, sizeof(uuid));
- reply->writeInt32(createPlugin(uuid));
+ String8 appPackageName = data.readString8();
+ reply->writeInt32(createPlugin(uuid, appPackageName));
return OK;
}
diff --git a/drm/libmediadrm/IMediaDrmService.cpp b/drm/libmediadrm/IMediaDrmService.cpp
index 9b6ecfd..84812dc 100644
--- a/drm/libmediadrm/IMediaDrmService.cpp
+++ b/drm/libmediadrm/IMediaDrmService.cpp
@@ -37,7 +37,7 @@
class BpMediaDrmService: public BpInterface<IMediaDrmService>
{
public:
- BpMediaDrmService(const sp<IBinder>& impl)
+ explicit BpMediaDrmService(const sp<IBinder>& impl)
: BpInterface<IMediaDrmService>(impl)
{
}
diff --git a/drm/libmediadrm/MediaCasDefs.cpp b/drm/libmediadrm/MediaCasDefs.cpp
new file mode 100644
index 0000000..9c2ba38
--- /dev/null
+++ b/drm/libmediadrm/MediaCasDefs.cpp
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaCas"
+
+#include <media/MediaCasDefs.h>
+#include <utils/Log.h>
+#include <binder/IMemory.h>
+
+namespace android {
+namespace media {
+
+///////////////////////////////////////////////////////////////////////////////
+namespace MediaCas {
+
+status_t ParcelableCasData::readFromParcel(const Parcel* parcel) {
+ return parcel->readByteVector(this);
+}
+
+status_t ParcelableCasData::writeToParcel(Parcel* parcel) const {
+ return parcel->writeByteVector(*this);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+status_t ParcelableCasPluginDescriptor::readFromParcel(const Parcel* /*parcel*/) {
+ ALOGE("CAPluginDescriptor::readFromParcel() shouldn't be called");
+ return INVALID_OPERATION;
+}
+
+status_t ParcelableCasPluginDescriptor::writeToParcel(Parcel* parcel) const {
+ status_t err = parcel->writeInt32(mCASystemId);
+ if (err != NO_ERROR) {
+ return err;
+ }
+ return parcel->writeString16(mName);
+}
+
+} // namespace MediaCas
+///////////////////////////////////////////////////////////////////////////////
+
+namespace MediaDescrambler {
+
+DescrambleInfo::DescrambleInfo() {}
+
+DescrambleInfo::~DescrambleInfo() {}
+
+status_t DescrambleInfo::readFromParcel(const Parcel* parcel) {
+ status_t err = parcel->readInt32((int32_t*)&dstType);
+ if (err != OK) {
+ return err;
+ }
+ if (dstType != kDestinationTypeNativeHandle
+ && dstType != kDestinationTypeVmPointer) {
+ return BAD_VALUE;
+ }
+
+ err = parcel->readInt32((int32_t*)&scramblingControl);
+ if (err != OK) {
+ return err;
+ }
+
+ err = parcel->readUint32((uint32_t*)&numSubSamples);
+ if (err != OK) {
+ return err;
+ }
+ if (numSubSamples > 0xffff) {
+ return BAD_VALUE;
+ }
+
+ subSamples = new DescramblerPlugin::SubSample[numSubSamples];
+ if (subSamples == NULL) {
+ return NO_MEMORY;
+ }
+
+ for (size_t i = 0; i < numSubSamples; i++) {
+ err = parcel->readUint32(&subSamples[i].mNumBytesOfClearData);
+ if (err != OK) {
+ return err;
+ }
+ err = parcel->readUint32(&subSamples[i].mNumBytesOfEncryptedData);
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ srcMem = interface_cast<IMemory>(parcel->readStrongBinder());
+ if (srcMem == NULL) {
+ return BAD_VALUE;
+ }
+
+ err = parcel->readInt32(&srcOffset);
+ if (err != OK) {
+ return err;
+ }
+
+ native_handle_t *nativeHandle = NULL;
+ if (dstType == kDestinationTypeNativeHandle) {
+ nativeHandle = parcel->readNativeHandle();
+ dstPtr = static_cast<void *>(nativeHandle);
+ } else {
+ dstPtr = NULL;
+ }
+
+ err = parcel->readInt32(&dstOffset);
+ if (err != OK) {
+ return err;
+ }
+
+ return OK;
+}
+
+status_t DescrambleInfo::writeToParcel(Parcel* parcel) const {
+ if (dstType != kDestinationTypeNativeHandle
+ && dstType != kDestinationTypeVmPointer) {
+ return BAD_VALUE;
+ }
+
+ status_t err = parcel->writeInt32((int32_t)dstType);
+ if (err != OK) {
+ return err;
+ }
+
+ err = parcel->writeInt32(scramblingControl);
+ if (err != OK) {
+ return err;
+ }
+
+ err = parcel->writeUint32(numSubSamples);
+ if (err != OK) {
+ return err;
+ }
+
+ for (size_t i = 0; i < numSubSamples; i++) {
+ err = parcel->writeUint32(subSamples[i].mNumBytesOfClearData);
+ if (err != OK) {
+ return err;
+ }
+ err = parcel->writeUint32(subSamples[i].mNumBytesOfEncryptedData);
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ err = parcel->writeStrongBinder(IInterface::asBinder(srcMem));
+ if (err != OK) {
+ return err;
+ }
+
+ err = parcel->writeInt32(srcOffset);
+ if (err != OK) {
+ return err;
+ }
+
+ if (dstType == kDestinationTypeNativeHandle) {
+ parcel->writeNativeHandle(static_cast<native_handle_t *>(dstPtr));
+ }
+
+ err = parcel->writeInt32(dstOffset);
+ if (err != OK) {
+ return err;
+ }
+
+ return OK;
+}
+
+} // namespace MediaDescrambler
+
+} // namespace media
+} // namespace android
+
diff --git a/drm/libmediadrm/SharedLibrary.cpp b/drm/libmediadrm/SharedLibrary.cpp
index 74b3a71..bebafa8 100644
--- a/drm/libmediadrm/SharedLibrary.cpp
+++ b/drm/libmediadrm/SharedLibrary.cpp
@@ -43,6 +43,9 @@
if (!mLibHandle) {
return NULL;
}
+ // Clear last error before we load the symbol again,
+ // in case the caller didn't retrieve it.
+ (void)dlerror();
return dlsym(mLibHandle, symbol);
}
diff --git a/drm/libmediadrm/aidl/android/media/ICas.aidl b/drm/libmediadrm/aidl/android/media/ICas.aidl
new file mode 100644
index 0000000..9746593
--- /dev/null
+++ b/drm/libmediadrm/aidl/android/media/ICas.aidl
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.MediaCas;
+
+/** @hide */
+interface ICas {
+ void setPrivateData(in byte[] pvtData);
+ byte[] openSession();
+ void closeSession(in byte[] sessionId);
+ void setSessionPrivateData(in byte[] sessionId, in byte[] pvtData);
+ void processEcm(in byte[] sessionId, in MediaCas.ParcelableCasData ecm);
+ void processEmm(in MediaCas.ParcelableCasData emm);
+ void sendEvent(int event, int arg, in @nullable byte[] eventData);
+ void provision(String provisionString);
+ void refreshEntitlements(int refreshType, in @nullable byte[] refreshData);
+ void release();
+}
\ No newline at end of file
diff --git a/drm/libmediadrm/aidl/android/media/ICasListener.aidl b/drm/libmediadrm/aidl/android/media/ICasListener.aidl
new file mode 100644
index 0000000..01a5abc
--- /dev/null
+++ b/drm/libmediadrm/aidl/android/media/ICasListener.aidl
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/** @hide */
+interface ICasListener {
+ void onEvent(int event, int arg, in @nullable byte[] data);
+}
\ No newline at end of file
diff --git a/drm/libmediadrm/aidl/android/media/IDescrambler.aidl b/drm/libmediadrm/aidl/android/media/IDescrambler.aidl
new file mode 100644
index 0000000..fdf99eb
--- /dev/null
+++ b/drm/libmediadrm/aidl/android/media/IDescrambler.aidl
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.MediaDescrambler;
+
+/** @hide */
+interface IDescrambler {
+ void setMediaCasSession(in byte[] sessionId);
+ boolean requiresSecureDecoderComponent(String mime);
+ int descramble(in MediaDescrambler.DescrambleInfo descrambleInfo);
+ void release();
+}
\ No newline at end of file
diff --git a/drm/libmediadrm/aidl/android/media/IMediaCasService.aidl b/drm/libmediadrm/aidl/android/media/IMediaCasService.aidl
new file mode 100644
index 0000000..44f6825
--- /dev/null
+++ b/drm/libmediadrm/aidl/android/media/IMediaCasService.aidl
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.IDescrambler;
+import android.media.ICas;
+import android.media.ICasListener;
+import android.media.MediaCas;
+
+/** @hide */
+interface IMediaCasService {
+ MediaCas.ParcelableCasPluginDescriptor[] enumeratePlugins();
+ boolean isSystemIdSupported(int CA_system_id);
+ ICas createPlugin(int CA_system_id, ICasListener listener);
+ boolean isDescramblerSupported(int CA_system_id);
+ IDescrambler createDescrambler(int CA_system_id);
+}
+
diff --git a/drm/libmediadrm/aidl/android/media/MediaCas.aidl b/drm/libmediadrm/aidl/android/media/MediaCas.aidl
new file mode 100644
index 0000000..cb8d0c6
--- /dev/null
+++ b/drm/libmediadrm/aidl/android/media/MediaCas.aidl
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/** @hide */
+parcelable MediaCas.ParcelableCasPluginDescriptor cpp_header "media/MediaCasDefs.h";
+
+/** @hide */
+parcelable MediaCas.ParcelableCasData cpp_header "media/MediaCasDefs.h";
\ No newline at end of file
diff --git a/drm/libmediadrm/aidl/android/media/MediaDescrambler.aidl b/drm/libmediadrm/aidl/android/media/MediaDescrambler.aidl
new file mode 100644
index 0000000..e789244
--- /dev/null
+++ b/drm/libmediadrm/aidl/android/media/MediaDescrambler.aidl
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/** @hide */
+parcelable MediaDescrambler.DescrambleInfo cpp_header "media/MediaCasDefs.h";
\ No newline at end of file
diff --git a/drm/mediacas/plugins/clearkey/Android.mk b/drm/mediacas/plugins/clearkey/Android.mk
new file mode 100644
index 0000000..8fd866c
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/Android.mk
@@ -0,0 +1,69 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+ ClearKeyCasPlugin.cpp \
+ ClearKeyFetcher.cpp \
+ ClearKeyLicenseFetcher.cpp \
+ ClearKeySessionLibrary.cpp \
+ ecm.cpp \
+ ecm_generator.cpp \
+ JsonAssetLoader.cpp \
+ protos/license_protos.proto \
+
+LOCAL_MODULE := libclearkeycasplugin
+
+#TODO: move this back to /vendor/lib after conversion to treble
+#LOCAL_PROPRIETARY_MODULE := true
+LOCAL_MODULE_RELATIVE_PATH := mediacas
+
+LOCAL_SHARED_LIBRARIES := \
+ libutils \
+ liblog \
+ libcrypto \
+ libstagefright_foundation \
+ libprotobuf-cpp-lite \
+
+LOCAL_STATIC_LIBRARIES := \
+ libjsmn \
+
+LOCAL_MODULE_CLASS := SHARED_LIBRARIES
+
+LOCAL_PROTOC_OPTIMIZE_TYPE := full
+
+define proto_includes
+$(call local-generated-sources-dir)/proto/$(LOCAL_PATH)
+endef
+
+LOCAL_C_INCLUDES += \
+ external/jsmn \
+ frameworks/av/include \
+ frameworks/native/include/media \
+ $(call proto_includes)
+
+LOCAL_EXPORT_C_INCLUDE_DIRS := \
+ $(call proto_includes)
+
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_SHARED_LIBRARY)
+
+#########################################################################
+# Build unit tests
+
+include $(LOCAL_PATH)/tests/Android.mk
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
new file mode 100644
index 0000000..4ed5fce
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
@@ -0,0 +1,485 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ClearKeyCasPlugin"
+
+#include "ClearKeyFetcher.h"
+#include "ecm.h"
+#include "ClearKeyLicenseFetcher.h"
+#include "ClearKeyCasPlugin.h"
+#include "ClearKeySessionLibrary.h"
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/MediaErrors.h>
+#include <utils/Log.h>
+
+android::CasFactory* createCasFactory() {
+ return new android::clearkeycas::ClearKeyCasFactory();
+}
+
+android::DescramblerFactory *createDescramblerFactory()
+{
+ return new android::clearkeycas::ClearKeyDescramblerFactory();
+}
+
+namespace android {
+namespace clearkeycas {
+
+static const int32_t sClearKeySystemId = 0xF6D8;
+
+bool ClearKeyCasFactory::isSystemIdSupported(int32_t CA_system_id) const {
+ return CA_system_id == sClearKeySystemId;
+}
+
+status_t ClearKeyCasFactory::queryPlugins(
+ std::vector<CasPluginDescriptor> *descriptors) const {
+ descriptors->clear();
+ descriptors->push_back({sClearKeySystemId, String8("Clear Key CAS")});
+ return OK;
+}
+
+status_t ClearKeyCasFactory::createPlugin(
+ int32_t CA_system_id,
+ uint64_t appData,
+ CasPluginCallback callback,
+ CasPlugin **plugin) {
+ if (!isSystemIdSupported(CA_system_id)) {
+ return BAD_VALUE;
+ }
+
+ *plugin = new ClearKeyCasPlugin(appData, callback);
+ return OK;
+}
+///////////////////////////////////////////////////////////////////////////////
+bool ClearKeyDescramblerFactory::isSystemIdSupported(
+ int32_t CA_system_id) const {
+ return CA_system_id == sClearKeySystemId;
+}
+
+status_t ClearKeyDescramblerFactory::createPlugin(
+ int32_t CA_system_id, DescramblerPlugin** plugin) {
+ if (!isSystemIdSupported(CA_system_id)) {
+ return BAD_VALUE;
+ }
+
+ *plugin = new ClearKeyDescramblerPlugin();
+ return OK;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+ClearKeyCasPlugin::ClearKeyCasPlugin(
+ uint64_t appData, CasPluginCallback callback)
+ : mCallback(callback), mAppData(appData) {
+ ALOGV("CTOR");
+}
+
+ClearKeyCasPlugin::~ClearKeyCasPlugin() {
+ ALOGV("DTOR");
+ ClearKeySessionLibrary::get()->destroyPlugin(this);
+}
+
+status_t ClearKeyCasPlugin::setPrivateData(const CasData &/*data*/) {
+ ALOGV("setPrivateData");
+
+ return OK;
+}
+
+static String8 sessionIdToString(const std::vector<uint8_t> &array) {
+ String8 result;
+ for (size_t i = 0; i < array.size(); i++) {
+ result.appendFormat("%02x ", array[i]);
+ }
+ if (result.isEmpty()) {
+ result.append("(null)");
+ }
+ return result;
+}
+
+status_t ClearKeyCasPlugin::openSession(CasSessionId* sessionId) {
+ ALOGV("openSession");
+
+ return ClearKeySessionLibrary::get()->addSession(this, sessionId);
+}
+
+status_t ClearKeyCasPlugin::closeSession(const CasSessionId &sessionId) {
+ ALOGV("closeSession: sessionId=%s", sessionIdToString(sessionId).string());
+ sp<ClearKeyCasSession> session =
+ ClearKeySessionLibrary::get()->findSession(sessionId);
+ if (session == NULL) {
+ return ERROR_DRM_SESSION_NOT_OPENED;
+ }
+
+ ClearKeySessionLibrary::get()->destroySession(sessionId);
+ return OK;
+}
+
+status_t ClearKeyCasPlugin::setSessionPrivateData(
+ const CasSessionId &sessionId, const CasData & /*data*/) {
+ ALOGV("setSessionPrivateData: sessionId=%s",
+ sessionIdToString(sessionId).string());
+ sp<ClearKeyCasSession> session =
+ ClearKeySessionLibrary::get()->findSession(sessionId);
+ if (session == NULL) {
+ return ERROR_DRM_SESSION_NOT_OPENED;
+ }
+ return OK;
+}
+
+status_t ClearKeyCasPlugin::processEcm(
+ const CasSessionId &sessionId, const CasEcm& ecm) {
+ ALOGV("processEcm: sessionId=%s", sessionIdToString(sessionId).string());
+ sp<ClearKeyCasSession> session =
+ ClearKeySessionLibrary::get()->findSession(sessionId);
+ if (session == NULL) {
+ return ERROR_DRM_SESSION_NOT_OPENED;
+ }
+
+ Mutex::Autolock lock(mKeyFetcherLock);
+
+ return session->updateECM(mKeyFetcher.get(), (void*)ecm.data(), ecm.size());
+}
+
+status_t ClearKeyCasPlugin::processEmm(const CasEmm& /*emm*/) {
+ ALOGV("processEmm");
+ Mutex::Autolock lock(mKeyFetcherLock);
+
+ return OK;
+}
+
+status_t ClearKeyCasPlugin::sendEvent(
+ int32_t event, int32_t arg, const CasData &eventData) {
+ ALOGV("sendEvent: event=%d, arg=%d", event, arg);
+ // Echo the received event to the callback.
+ // Clear key plugin doesn't use any event, echo'ing for testing only.
+ if (mCallback != NULL) {
+ mCallback((void*)mAppData, event, arg, (uint8_t*)eventData.data(), eventData.size());
+ }
+ return OK;
+}
+
+status_t ClearKeyCasPlugin::provision(const String8 &str) {
+ ALOGV("provision: provisionString=%s", str.string());
+ Mutex::Autolock lock(mKeyFetcherLock);
+
+ std::unique_ptr<ClearKeyLicenseFetcher> license_fetcher;
+ license_fetcher.reset(new ClearKeyLicenseFetcher());
+ status_t err = license_fetcher->Init(str.string());
+ if (err != OK) {
+ ALOGE("provision: failed to init ClearKeyLicenseFetcher (err=%d)", err);
+ return err;
+ }
+
+ std::unique_ptr<ClearKeyFetcher> key_fetcher;
+ key_fetcher.reset(new ClearKeyFetcher(std::move(license_fetcher)));
+ err = key_fetcher->Init();
+ if (err != OK) {
+ ALOGE("provision: failed to init ClearKeyFetcher (err=%d)", err);
+ return err;
+ }
+
+ ALOGV("provision: using ClearKeyFetcher");
+ mKeyFetcher = std::move(key_fetcher);
+
+ return OK;
+}
+
+status_t ClearKeyCasPlugin::refreshEntitlements(
+ int32_t refreshType, const CasData &/*refreshData*/) {
+ ALOGV("refreshEntitlements: refreshType=%d", refreshType);
+ Mutex::Autolock lock(mKeyFetcherLock);
+
+ return OK;
+}
+
+///////////////////////////////////////////////////////////////////////
+
+// AES-128 CBC-CTS decrypt optimized for Transport Packets. |key| is the AES
+// key (odd key or even key), |length| is the data size, and |buffer| is the
+// ciphertext to be decrypted in place.
+status_t TpBlockCtsDecrypt(const AES_KEY& key, size_t length, char* buffer) {
+ CHECK(buffer);
+
+ // Invariant: Packet must be at least 16 bytes.
+ CHECK(length >= AES_BLOCK_SIZE);
+
+ // OpenSSL uses unsigned char.
+ unsigned char* data = reinterpret_cast<unsigned char*>(buffer);
+
+ // Start with zero-filled initialization vector.
+ unsigned char iv[AES_BLOCK_SIZE];
+ memset(iv, 0, AES_BLOCK_SIZE);
+
+ // Size of partial last block handled via CTS.
+ int cts_byte_count = length % AES_BLOCK_SIZE;
+
+ // If there is no partial last block, then process using normal CBC.
+ if (cts_byte_count == 0) {
+ AES_cbc_encrypt(data, data, length, &key, iv, 0);
+ return OK;
+ }
+
+ // Cipher text stealing (CTS) - Schneier Figure 9.5 p 196.
+ // In CTS mode, the last two blocks have been swapped. Block[n-1] is really
+ // the original block[n] combined with the low-order bytes of the original
+ // block[n-1], while block[n] is the high-order bytes of the original
+ // block[n-1] padded with zeros.
+
+ // Block[0] - block[n-2] are handled with normal CBC.
+ int cbc_byte_count = length - cts_byte_count - AES_BLOCK_SIZE;
+ if (cbc_byte_count > 0) {
+ AES_cbc_encrypt(data, data, cbc_byte_count, &key, iv, 0);
+ // |data| points to block[n-1].
+ data += cbc_byte_count;
+ }
+
+ // Save block[n] to use as IV when decrypting block[n-1].
+ unsigned char block_n[AES_BLOCK_SIZE];
+ memset(block_n, 0, AES_BLOCK_SIZE);
+ memcpy(block_n, data + AES_BLOCK_SIZE, cts_byte_count);
+
+ // Decrypt block[n-1] using block[n] as IV, consistent with the original
+ // block order.
+ AES_cbc_encrypt(data, data, AES_BLOCK_SIZE, &key, block_n, 0);
+
+ // Return the stolen ciphertext: swap the high-order bytes of block[n]
+ // and block[n-1].
+ for (int i = 0; i < cts_byte_count; i++) {
+ unsigned char temp = *(data + i);
+ *(data + i) = *(data + AES_BLOCK_SIZE + i);
+ *(data + AES_BLOCK_SIZE + i) = temp;
+ }
+
+ // Decrypt block[n-1] using previous IV.
+ AES_cbc_encrypt(data, data, AES_BLOCK_SIZE, &key, iv, 0);
+ return OK;
+}
+
+// PES header and ECM stream header layout
+//
+// processEcm() receives the data_byte portion from the transport packet.
+// Below is the layout of the first 16 bytes of the ECM PES packet. Here
+// we don't parse them, we skip them and go to the ECM container directly.
+// The layout is included here only for reference.
+//
+// 0-2: 0x00 00 01 = start code prefix.
+// 3: 0xf0 = stream type (90 = ECM).
+// 4-5: 0x00 00 = PES length (filled in later, this is the length of the
+// PES header (16) plus the length of the ECM container).
+// 6-7: 0x00 00 = ECM major version.
+// 8-9: 0x00 01 = ECM minor version.
+// 10-11: 0x00 00 = Crypto period ID (filled in later).
+// 12-13: 0x00 00 = ECM container length (filled in later, either 84 or
+// 166).
+// 14-15: 0x00 00 = offset = 0.
+
+const static size_t kEcmHeaderLength = 16;
+const static size_t kUserKeyLength = 16;
+
+status_t ClearKeyCasSession::updateECM(
+ KeyFetcher *keyFetcher, void *ecm, size_t size) {
+ if (keyFetcher == nullptr) {
+ return ERROR_DRM_NOT_PROVISIONED;
+ }
+
+ if (size < kEcmHeaderLength) {
+ ALOGE("updateECM: invalid ecm size %zu", size);
+ return BAD_VALUE;
+ }
+
+ Mutex::Autolock _lock(mKeyLock);
+
+ if (mEcmBuffer != NULL && mEcmBuffer->capacity() == size
+ && !memcmp(mEcmBuffer->base(), ecm, size)) {
+ return OK;
+ }
+
+ mEcmBuffer = ABuffer::CreateAsCopy(ecm, size);
+ mEcmBuffer->setRange(kEcmHeaderLength, size - kEcmHeaderLength);
+
+ uint64_t asset_id;
+ std::vector<KeyFetcher::KeyInfo> keys;
+ status_t err = keyFetcher->ObtainKey(mEcmBuffer, &asset_id, &keys);
+ if (err != OK) {
+ ALOGE("updateECM: failed to obtain key (err=%d)", err);
+ return err;
+ }
+
+ ALOGV("updateECM: %zu key(s) found", keys.size());
+ for (size_t keyIndex = 0; keyIndex < keys.size(); keyIndex++) {
+ String8 str;
+
+ const sp<ABuffer>& keyBytes = keys[keyIndex].key_bytes;
+ CHECK(keyBytes->size() == kUserKeyLength);
+
+ int result = AES_set_decrypt_key(
+ reinterpret_cast<const uint8_t*>(keyBytes->data()),
+ AES_BLOCK_SIZE * 8, &mKeyInfo[keyIndex].contentKey);
+ mKeyInfo[keyIndex].valid = (result == 0);
+ if (!mKeyInfo[keyIndex].valid) {
+ ALOGE("updateECM: failed to set key %zu, key_id=%d",
+ keyIndex, keys[keyIndex].key_id);
+ }
+ }
+ return OK;
+}
+
+// Decryption of a set of sub-samples
+ssize_t ClearKeyCasSession::decrypt(
+ bool secure, DescramblerPlugin::ScramblingControl scramblingControl,
+ size_t numSubSamples, const DescramblerPlugin::SubSample *subSamples,
+ const void *srcPtr, void *dstPtr, AString * /* errorDetailMsg */) {
+ if (secure) {
+ return ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ AES_KEY contentKey;
+
+ if (scramblingControl != DescramblerPlugin::kScrambling_Unscrambled) {
+ // Hold lock to get the key only to avoid contention for decryption
+ Mutex::Autolock _lock(mKeyLock);
+
+ int32_t keyIndex = (scramblingControl & 1);
+ if (!mKeyInfo[keyIndex].valid) {
+ ALOGE("decrypt: key %d is invalid", keyIndex);
+ return ERROR_DRM_DECRYPT;
+ }
+ contentKey = mKeyInfo[keyIndex].contentKey;
+ }
+
+ uint8_t *src = (uint8_t*)srcPtr;
+ uint8_t *dst = (uint8_t*)dstPtr;
+
+ for (size_t i = 0; i < numSubSamples; i++) {
+ size_t numBytesinSubSample = subSamples[i].mNumBytesOfClearData
+ + subSamples[i].mNumBytesOfEncryptedData;
+ if (src != dst) {
+ memcpy(dst, src, numBytesinSubSample);
+ }
+ status_t err = OK;
+ // Don't decrypt if len < AES_BLOCK_SIZE.
+ // The last chunk shorter than AES_BLOCK_SIZE is not encrypted.
+ if (scramblingControl != DescramblerPlugin::kScrambling_Unscrambled
+ && subSamples[i].mNumBytesOfEncryptedData >= AES_BLOCK_SIZE) {
+ err = decryptPayload(
+ contentKey,
+ numBytesinSubSample,
+ subSamples[i].mNumBytesOfClearData,
+ (char *)dst);
+ }
+
+ dst += numBytesinSubSample;
+ src += numBytesinSubSample;
+ }
+ return dst - (uint8_t *)dstPtr;
+}
+
+// Decryption of a TS payload
+status_t ClearKeyCasSession::decryptPayload(
+ const AES_KEY& key, size_t length, size_t offset, char* buffer) const {
+ CHECK(buffer);
+
+ // Invariant: only call decryptPayload with TS packets with at least 16
+ // bytes of payload (AES_BLOCK_SIZE).
+
+ CHECK(length >= offset + AES_BLOCK_SIZE);
+
+ return TpBlockCtsDecrypt(key, length - offset, buffer + offset);
+}
+
+///////////////////////////////////////////////////////////////////////////
+#undef LOG_TAG
+#define LOG_TAG "ClearKeyDescramblerPlugin"
+
+bool ClearKeyDescramblerPlugin::requiresSecureDecoderComponent(
+ const char *mime) const {
+ ALOGV("requiresSecureDecoderComponent: mime=%s", mime);
+ return false;
+}
+
+status_t ClearKeyDescramblerPlugin::setMediaCasSession(
+ const CasSessionId &sessionId) {
+ ALOGV("setMediaCasSession: sessionId=%s", sessionIdToString(sessionId).string());
+
+ sp<ClearKeyCasSession> session =
+ ClearKeySessionLibrary::get()->findSession(sessionId);
+
+ if (session == NULL) {
+ ALOGE("ClearKeyDescramblerPlugin: session not found");
+ return ERROR_DRM_SESSION_NOT_OPENED;
+ }
+
+ mCASSession = session;
+ return OK;
+}
+
+ssize_t ClearKeyDescramblerPlugin::descramble(
+ bool secure,
+ ScramblingControl scramblingControl,
+ size_t numSubSamples,
+ const SubSample *subSamples,
+ const void *srcPtr,
+ int32_t srcOffset,
+ void *dstPtr,
+ int32_t dstOffset,
+ AString *errorDetailMsg) {
+
+ ALOGV("descramble: secure=%d, sctrl=%d, subSamples=%s, "
+ "srcPtr=%p, dstPtr=%p, srcOffset=%d, dstOffset=%d",
+ (int)secure, (int)scramblingControl,
+ subSamplesToString(subSamples, numSubSamples).string(),
+ srcPtr, dstPtr, srcOffset, dstOffset);
+
+ if (mCASSession == NULL) {
+ ALOGE("Uninitialized CAS session!");
+ return ERROR_DRM_DECRYPT_UNIT_NOT_INITIALIZED;
+ }
+
+ return mCASSession->decrypt(
+ secure, scramblingControl,
+ numSubSamples, subSamples,
+ (uint8_t*)srcPtr + srcOffset,
+ dstPtr == NULL ? NULL : ((uint8_t*)dstPtr + dstOffset),
+ errorDetailMsg);
+}
+
+// Conversion utilities
+String8 ClearKeyDescramblerPlugin::arrayToString(
+ uint8_t const *array, size_t len) const
+{
+ String8 result("{ ");
+ for (size_t i = 0; i < len; i++) {
+ result.appendFormat("0x%02x ", array[i]);
+ }
+ result += "}";
+ return result;
+}
+
+String8 ClearKeyDescramblerPlugin::subSamplesToString(
+ SubSample const *subSamples, size_t numSubSamples) const
+{
+ String8 result;
+ for (size_t i = 0; i < numSubSamples; i++) {
+ result.appendFormat("[%zu] {clear:%u, encrypted:%u} ", i,
+ subSamples[i].mNumBytesOfClearData,
+ subSamples[i].mNumBytesOfEncryptedData);
+ }
+ return result;
+}
+
+} // namespace clearkeycas
+} // namespace android
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h
new file mode 100644
index 0000000..b7134e4
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEARKEY_CAS_PLUGIN_H_
+#define CLEARKEY_CAS_PLUGIN_H_
+
+#include <media/cas/CasAPI.h>
+#include <media/cas/DescramblerAPI.h>
+#include <utils/Mutex.h>
+#include <utils/StrongPointer.h>
+
+extern "C" {
+ android::CasFactory *createCasFactory();
+ android::DescramblerFactory *createDescramblerFactory();
+}
+
+namespace android {
+namespace clearkeycas {
+
+class KeyFetcher;
+class ClearKeyCasSession;
+
+class ClearKeyCasFactory : public CasFactory {
+public:
+ ClearKeyCasFactory() {}
+ virtual ~ClearKeyCasFactory() {}
+
+ virtual bool isSystemIdSupported(
+ int32_t CA_system_id) const override;
+ virtual status_t queryPlugins(
+ std::vector<CasPluginDescriptor> *descriptors) const override;
+ virtual status_t createPlugin(
+ int32_t CA_system_id,
+ uint64_t appData,
+ CasPluginCallback callback,
+ CasPlugin **plugin) override;
+};
+
+class ClearKeyDescramblerFactory : public DescramblerFactory {
+public:
+ ClearKeyDescramblerFactory() {}
+ virtual ~ClearKeyDescramblerFactory() {}
+
+ virtual bool isSystemIdSupported(
+ int32_t CA_system_id) const override;
+ virtual status_t createPlugin(
+ int32_t CA_system_id, DescramblerPlugin **plugin) override;
+};
+
+class ClearKeyCasPlugin : public CasPlugin {
+public:
+ ClearKeyCasPlugin(uint64_t appData, CasPluginCallback callback);
+ virtual ~ClearKeyCasPlugin();
+
+ virtual status_t setPrivateData(
+ const CasData &data) override;
+
+ virtual status_t openSession(CasSessionId *sessionId) override;
+
+ virtual status_t closeSession(
+ const CasSessionId &sessionId) override;
+
+ virtual status_t setSessionPrivateData(
+ const CasSessionId &sessionId,
+ const CasData &data) override;
+
+ virtual status_t processEcm(
+ const CasSessionId &sessionId, const CasEcm &ecm) override;
+
+ virtual status_t processEmm(const CasEmm &emm) override;
+
+ virtual status_t sendEvent(
+ int32_t event, int32_t arg, const CasData &eventData) override;
+
+ virtual status_t provision(const String8 &str) override;
+
+ virtual status_t refreshEntitlements(
+ int32_t refreshType, const CasData &refreshData) override;
+
+private:
+ Mutex mKeyFetcherLock;
+ std::unique_ptr<KeyFetcher> mKeyFetcher;
+ CasPluginCallback mCallback;
+ uint64_t mAppData;
+};
+
+class ClearKeyDescramblerPlugin : public DescramblerPlugin {
+public:
+ ClearKeyDescramblerPlugin() {}
+ virtual ~ClearKeyDescramblerPlugin() {};
+
+ virtual bool requiresSecureDecoderComponent(
+ const char *mime) const override;
+
+ virtual status_t setMediaCasSession(
+ const CasSessionId &sessionId) override;
+
+ virtual ssize_t descramble(
+ bool secure,
+ ScramblingControl scramblingControl,
+ size_t numSubSamples,
+ const SubSample *subSamples,
+ const void *srcPtr,
+ int32_t srcOffset,
+ void *dstPtr,
+ int32_t dstOffset,
+ AString *errorDetailMsg) override;
+
+private:
+ sp<ClearKeyCasSession> mCASSession;
+
+ String8 subSamplesToString(
+ SubSample const *subSamples,
+ size_t numSubSamples) const;
+ String8 arrayToString(uint8_t const *array, size_t len) const;
+};
+
+} // namespace clearkeycas
+} // namespace android
+
+#endif // CLEARKEY_CAS_PLUGIN_H_
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyFetcher.cpp b/drm/mediacas/plugins/clearkey/ClearKeyFetcher.cpp
new file mode 100644
index 0000000..cb69f91
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/ClearKeyFetcher.cpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ClearKeyFetcher"
+
+#include <algorithm>
+#include <inttypes.h>
+#include <string>
+
+#include "ClearKeyFetcher.h"
+#include "ecm.h"
+#include "LicenseFetcher.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <utils/Log.h>
+
+namespace android {
+namespace clearkeycas {
+
+ClearKeyFetcher::ClearKeyFetcher(
+ std::unique_ptr<LicenseFetcher> license_fetcher) :
+ initialized_(false),
+ license_fetcher_(std::move(license_fetcher)) {
+ CHECK(license_fetcher_);
+}
+
+ClearKeyFetcher::~ClearKeyFetcher() {}
+
+// This is a no-op but other KeyFetcher subclasses require initialization
+// so this is necessary to preserve the contract.
+status_t ClearKeyFetcher::Init() {
+ initialized_ = true;
+ return OK;
+}
+
+status_t ClearKeyFetcher::ObtainKey(const sp<ABuffer>& buffer,
+ uint64_t* asset_id, std::vector<KeyInfo>* keys) {
+ CHECK(asset_id);
+ CHECK(keys);
+ CHECK(initialized_);
+ *asset_id = 0;
+ keys->clear();
+
+ EcmContainer container;
+ status_t status = container.Parse(buffer);
+ if (status != OK) {
+ return status;
+ }
+ ALOGV("descriptor_size=%zu", container.descriptor_size());
+
+ // Sanity check to verify that the BroadcastEncryptor is sending a properly
+ // formed EcmContainer. If it contains two Ecms, the ids should have different
+ // parity (one odd, one even). This does not necessarily affect decryption
+ // but indicates a problem with Ecm generation.
+ if (container.descriptor_size() == 2) {
+ // XOR the least significant bits to verify different parity.
+ bool same_parity = (((container.descriptor(0).id() & 0x01) ^
+ (container.descriptor(1).id() & 0x01)) == 0);
+ if (same_parity) {
+ ALOGW("asset_id=%" PRIu64 ": malformed Ecm, "
+ "content keys have same parity, id0=%d, id1=%d",
+ container.descriptor(0).ecm().asset_id(),
+ container.descriptor(0).id(),
+ container.descriptor(1).id());
+ }
+ }
+
+ *asset_id = container.descriptor(0).ecm().asset_id();
+
+ // Detect asset_id change. This could be caused by a configuration change
+ // in the BroadcastEncryptor. This is unusual so log it in case it is an
+ // operational mistake. This invalidates the current asset_key causing a
+ // new license to be fetched.
+ // TODO(rkint): test against BroadcastEncryptor to verify what BE sends on
+ // asset_id change. If it sends an EcmContainer with 2 Ecms with different
+ // asset_ids (old and new) then it might be best to prefetch the Emm.
+ if ((asset_.id() != 0) && (*asset_id != asset_.id())) {
+ ALOGW("Asset_id change from %" PRIu64 " to %" PRIu64, asset_.id(), *asset_id);
+ asset_.Clear();
+ }
+
+ // Fetch license to get asset_id
+ if (!asset_.has_id()) {
+ status = license_fetcher_->FetchLicense(*asset_id, &asset_);
+ if (status != OK) {
+ *asset_id = 0;
+ return status;
+ }
+ ALOGV("FetchLicense succeeded, has_id=%d", asset_.has_id());
+ }
+ keys->resize(container.descriptor_size());
+
+ for (size_t i = 0; i < container.descriptor_size(); ++i) {
+ status = container.mutable_descriptor(i)->mutable_ecm()->Decrypt(
+ container.descriptor(i).ecm().buffer(), asset_);
+ if (status != OK) {
+ *asset_id = 0;
+ keys->clear();
+ return status;
+ }
+ // TODO: if 2 Ecms have same parity, key from Ecm with higher id
+ // should be keys[1].
+ KeyInfo key;
+ key.key_id = container.descriptor(i).id();
+ key.key_bytes = container.descriptor(i).ecm().content_key();
+
+ keys->at(key.key_id & 1) = key;
+ }
+ return OK;
+}
+
+} // namespace clearkeycas
+} // namespace android
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyFetcher.h b/drm/mediacas/plugins/clearkey/ClearKeyFetcher.h
new file mode 100644
index 0000000..d58b9df
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/ClearKeyFetcher.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEAR_KEY_FETCHER_H_
+#define CLEAR_KEY_FETCHER_H_
+
+#include <vector>
+
+#include "protos/license_protos.pb.h"
+
+#include <media/stagefright/foundation/ABase.h>
+#include "KeyFetcher.h"
+
+namespace android {
+namespace clearkeycas {
+
+class LicenseFetcher;
+
+class ClearKeyFetcher : public KeyFetcher {
+public:
+ // ClearKeyFetcher takes ownership of |license_fetcher|.
+ explicit ClearKeyFetcher(
+ std::unique_ptr<LicenseFetcher> license_fetcher);
+
+ virtual ~ClearKeyFetcher();
+
+ // Initializes the fetcher. Must be called before ObtainKey.
+ status_t Init() override;
+
+ // Obtains the |asset_id| and |keys| from the Ecm contained in |ecm|.
+ // Returns
+ // - errors returned by EcmContainer::Parse.
+ // - errors returned by ClassicLicenseFetcher::FetchLicense.
+ // - errors returned by Ecm::Decrypt.
+ // |asset_id| and |keys| are owned by the caller and cannot be null.
+ // Init() must have been called.
+ status_t ObtainKey(const sp<ABuffer>& ecm, uint64_t* asset_id,
+ std::vector<KeyInfo>* keys) override;
+
+private:
+ clearkeycas::Asset asset_;
+ bool initialized_;
+ std::unique_ptr<LicenseFetcher> license_fetcher_;
+
+ DISALLOW_EVIL_CONSTRUCTORS(ClearKeyFetcher);
+};
+
+} // namespace clearkeycas
+} // namespace android
+
+#endif // CLEAR_KEY_FETCHER_H_
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyLicenseFetcher.cpp b/drm/mediacas/plugins/clearkey/ClearKeyLicenseFetcher.cpp
new file mode 100644
index 0000000..603337d
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/ClearKeyLicenseFetcher.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ClearKeyLicenseFetcher"
+
+#include "ClearKeyLicenseFetcher.h"
+#include "protos/license_protos.pb.h"
+
+#include <utils/Log.h>
+#include <utils/String8.h>
+#include "JsonAssetLoader.h"
+
+namespace android {
+namespace clearkeycas {
+
+// Parses |input| (a JSON asset string) and caches the resulting asset for
+// later FetchLicense() calls. Returns the status of
+// JsonAssetLoader::extractAssetFromString.
+status_t ClearKeyLicenseFetcher::Init(const char *input) {
+    // Use a stack-allocated loader: the original heap-allocated one with
+    // `new` was never deleted, leaking one JsonAssetLoader per Init() call.
+    JsonAssetLoader extractor;
+    return extractor.extractAssetFromString(String8(input), &asset_);
+}
+
+// Returns a copy of the asset cached by Init(). The |asset_id| argument is
+// ignored: this clear-key implementation serves the single pre-loaded asset
+// regardless of which id is requested.
+status_t ClearKeyLicenseFetcher::FetchLicense(
+ uint64_t /* asset_id */, Asset* asset) {
+ *asset = asset_;
+ return OK;
+}
+
+} // namespace clearkeycas
+} // namespace android
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyLicenseFetcher.h b/drm/mediacas/plugins/clearkey/ClearKeyLicenseFetcher.h
new file mode 100644
index 0000000..ebbcbeb
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/ClearKeyLicenseFetcher.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEAR_KEY_LICENSE_FETCHER_H_
+#define CLEAR_KEY_LICENSE_FETCHER_H_
+
+#include "KeyFetcher.h"
+#include "LicenseFetcher.h"
+
+namespace android {
+namespace clearkeycas {
+
+// LicenseFetcher that holds a single asset parsed from a JSON string at
+// Init() time and returns it for every FetchLicense() request.
+class ClearKeyLicenseFetcher : public LicenseFetcher {
+public:
+ ClearKeyLicenseFetcher() {}
+ virtual ~ClearKeyLicenseFetcher() {}
+
+ // |input| is a JSON asset string; see JsonAssetLoader.
+ virtual status_t Init(const char *input);
+
+ // |asset_id| is ignored; always returns the asset cached by Init().
+ virtual status_t FetchLicense(uint64_t asset_id, Asset* asset);
+
+private:
+ Asset asset_; // asset parsed by Init()
+};
+
+} // namespace clearkeycas
+} // namespace android
+
+#endif // CLEAR_KEY_LICENSE_FETCHER_H_
diff --git a/drm/mediacas/plugins/clearkey/ClearKeySessionLibrary.cpp b/drm/mediacas/plugins/clearkey/ClearKeySessionLibrary.cpp
new file mode 100644
index 0000000..faea008
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/ClearKeySessionLibrary.cpp
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ClearKeySessionLibrary"
+#include <utils/Log.h>
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+
+#include "ClearKeySessionLibrary.h"
+
+namespace android {
+namespace clearkeycas {
+
+Mutex ClearKeySessionLibrary::sSingletonLock;
+ClearKeySessionLibrary* ClearKeySessionLibrary::sSingleton = NULL;
+
+// A new session starts with both key slots marked invalid; keys become
+// valid only after a successful updateECM().
+ClearKeyCasSession::ClearKeyCasSession(CasPlugin *plugin)
+ : mPlugin(plugin) {
+ mKeyInfo[0].valid = mKeyInfo[1].valid = false;
+}
+
+ClearKeyCasSession::~ClearKeyCasSession() {
+}
+
+// Lazily creates and returns the process-wide session library singleton.
+ClearKeySessionLibrary* ClearKeySessionLibrary::get() {
+    Mutex::Autolock lock(sSingletonLock);
+
+    if (sSingleton != NULL) {
+        return sSingleton;
+    }
+    ALOGV("Instantiating Session Library Singleton.");
+    sSingleton = new ClearKeySessionLibrary();
+    return sSingleton;
+}
+
+ClearKeySessionLibrary::ClearKeySessionLibrary() : mNextSessionId(1) {}
+
+// Creates a new session owned by |plugin| and writes its 4-byte session id
+// (most-significant byte first) into |sessionId|, which must be non-null.
+status_t ClearKeySessionLibrary::addSession(
+        CasPlugin *plugin, CasSessionId *sessionId) {
+    CHECK(sessionId);
+
+    Mutex::Autolock lock(mSessionsLock);
+
+    sp<ClearKeyCasSession> session = new ClearKeyCasSession(plugin);
+
+    // Serialize the id with shifts rather than by reinterpreting the object
+    // representation: the original (uint8_t*)&mNextSessionId indexing only
+    // produced big-endian ids on little-endian hosts. This form is
+    // byte-order independent (and identical on little-endian devices).
+    sessionId->push_back((mNextSessionId >> 24) & 0xff);
+    sessionId->push_back((mNextSessionId >> 16) & 0xff);
+    sessionId->push_back((mNextSessionId >> 8) & 0xff);
+    sessionId->push_back(mNextSessionId & 0xff);
+    mNextSessionId++;
+
+    mIDToSessionMap.add(*sessionId, session);
+    return OK;
+}
+
+// Returns the session registered under |sessionId|, or NULL if unknown.
+sp<ClearKeyCasSession> ClearKeySessionLibrary::findSession(
+        const CasSessionId& sessionId) {
+    Mutex::Autolock lock(mSessionsLock);
+
+    ssize_t index = mIDToSessionMap.indexOfKey(sessionId);
+    if (index < 0) {
+        return NULL;
+    }
+    // Reuse the index from indexOfKey() instead of searching the map a
+    // second time with valueFor().
+    return mIDToSessionMap.valueAt(index);
+}
+
+// Removes the session registered under |sessionId|, if any. The session
+// object is released when the map drops its strong reference.
+void ClearKeySessionLibrary::destroySession(const CasSessionId& sessionId) {
+    Mutex::Autolock lock(mSessionsLock);
+
+    ssize_t index = mIDToSessionMap.indexOfKey(sessionId);
+    if (index < 0) {
+        return;
+    }
+    // The original fetched an unused local sp<> copy before removing;
+    // removeItemsAt() alone is sufficient.
+    mIDToSessionMap.removeItemsAt(index);
+}
+
+// Drops every session that was created by |plugin|.
+void ClearKeySessionLibrary::destroyPlugin(CasPlugin *plugin) {
+    Mutex::Autolock lock(mSessionsLock);
+
+    // Iterate backwards so removals do not shift the indices still to visit.
+    for (ssize_t i = mIDToSessionMap.size() - 1; i >= 0; i--) {
+        if (mIDToSessionMap.valueAt(i)->getPlugin() == plugin) {
+            mIDToSessionMap.removeItemsAt(i);
+        }
+    }
+}
+
+} // namespace clearkeycas
+} // namespace android
diff --git a/drm/mediacas/plugins/clearkey/ClearKeySessionLibrary.h b/drm/mediacas/plugins/clearkey/ClearKeySessionLibrary.h
new file mode 100644
index 0000000..01f5f47
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/ClearKeySessionLibrary.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEARKEY_SESSION_LIBRARY_H_
+#define CLEARKEY_SESSION_LIBRARY_H_
+
+#include <media/cas/CasAPI.h>
+#include <media/cas/DescramblerAPI.h>
+#include <openssl/aes.h>
+#include <utils/KeyedVector.h>
+#include <utils/Mutex.h>
+#include <utils/RefBase.h>
+
+namespace android {
+struct ABuffer;
+
+namespace clearkeycas {
+class KeyFetcher;
+
+// One descrambling session. Holds up to two content keys (kNumKeys slots)
+// installed via updateECM() and used by decrypt(). Construction and
+// destruction are restricted to ClearKeySessionLibrary (friend).
+class ClearKeyCasSession : public RefBase {
+public:
+ // Descrambles |numSubSamples| subsamples from |srcPtr| into |dstPtr|.
+ ssize_t decrypt(
+ bool secure,
+ DescramblerPlugin::ScramblingControl scramblingControl,
+ size_t numSubSamples,
+ const DescramblerPlugin::SubSample *subSamples,
+ const void *srcPtr,
+ void *dstPtr,
+ AString * /* errorDetailMsg */);
+
+ // Installs new content keys from the |size|-byte Ecm at |ecm|.
+ status_t updateECM(KeyFetcher *keyFetcher, void *ecm, size_t size);
+
+private:
+ enum {
+ kNumKeys = 2,
+ };
+ struct KeyInfo {
+ bool valid; // false until a key has been installed in this slot
+ AES_KEY contentKey;
+ };
+ sp<ABuffer> mEcmBuffer;
+ Mutex mKeyLock; // NOTE(review): presumably guards mKeyInfo — confirm
+ CasPlugin* mPlugin; // non-owning; see getPlugin()
+ KeyInfo mKeyInfo[kNumKeys];
+
+ friend class ClearKeySessionLibrary;
+
+ explicit ClearKeyCasSession(CasPlugin *plugin);
+ virtual ~ClearKeyCasSession();
+ CasPlugin* getPlugin() const { return mPlugin; }
+ status_t decryptPayload(
+ const AES_KEY& key, size_t length, size_t offset, char* buffer) const;
+
+ DISALLOW_EVIL_CONSTRUCTORS(ClearKeyCasSession);
+};
+
+// Process-wide singleton registry mapping CasSessionId -> session. All
+// operations take mSessionsLock, so the registry is safe to call from
+// multiple threads.
+class ClearKeySessionLibrary {
+public:
+ static ClearKeySessionLibrary* get();
+
+ // Creates a session for |plugin| and returns its id in |sessionId|.
+ status_t addSession(CasPlugin *plugin, CasSessionId *sessionId);
+
+ // Returns the session for |sessionId|, or NULL if unknown.
+ sp<ClearKeyCasSession> findSession(const CasSessionId& sessionId);
+
+ // Removes a single session by id.
+ void destroySession(const CasSessionId& sessionId);
+
+ // Removes all sessions belonging to |plugin|.
+ void destroyPlugin(CasPlugin *plugin);
+
+private:
+ static Mutex sSingletonLock; // guards sSingleton creation
+ static ClearKeySessionLibrary* sSingleton;
+
+ Mutex mSessionsLock; // guards mNextSessionId and mIDToSessionMap
+ uint32_t mNextSessionId;
+ KeyedVector<CasSessionId, sp<ClearKeyCasSession>> mIDToSessionMap;
+
+ ClearKeySessionLibrary();
+ DISALLOW_EVIL_CONSTRUCTORS(ClearKeySessionLibrary);
+};
+
+} // namespace clearkeycas
+} // namespace android
+
+#endif // CLEARKEY_SESSION_LIBRARY_H_
diff --git a/drm/mediacas/plugins/clearkey/JsonAssetLoader.cpp b/drm/mediacas/plugins/clearkey/JsonAssetLoader.cpp
new file mode 100644
index 0000000..9cd77e9
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/JsonAssetLoader.cpp
@@ -0,0 +1,234 @@
+
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+//#define LOG_NDEBUG 0
+#define LOG_TAG "JsonAssetLoader"
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/base64.h>
+#include <media/stagefright/MediaErrors.h>
+#include <utils/Log.h>
+
+#include "JsonAssetLoader.h"
+#include "protos/license_protos.pb.h"
+
+namespace android {
+namespace clearkeycas {
+
+// Field names expected in the JSON asset object (see findKey()).
+const String8 kIdTag("id");
+const String8 kNameTag("name");
+const String8 kLowerCaseOgranizationNameTag("lowercase_organization_name");
+const String8 kEncryptionKeyTag("encryption_key");
+const String8 kCasTypeTag("cas_type");
+// Appended to base64 input so its length is a multiple of 4.
+const String8 kBase64Padding("=");
+
+// NOTE(review): kKeyLength is not referenced anywhere in this file —
+// confirm whether it is still needed.
+const uint32_t kKeyLength = 16;
+
+// Nothing to set up or tear down; members are default-constructed vectors.
+JsonAssetLoader::JsonAssetLoader() {
+}
+
+JsonAssetLoader::~JsonAssetLoader() {
+}
+
+/*
+ * Extract a clear key asset from a JSON string.
+ *
+ * Returns OK if a clear key asset is extracted successfully,
+ * or ERROR_DRM_NO_LICENSE if the string doesn't contain a valid
+ * clear key asset.
+ */
+status_t JsonAssetLoader::extractAssetFromString(
+        const String8& jsonAssetString, Asset *asset) {
+    // Split the input into top-level JSON objects.
+    if (!parseJsonAssetString(jsonAssetString, &mJsonObjects)) {
+        return ERROR_DRM_NO_LICENSE;
+    }
+
+    if (mJsonObjects.size() < 1) {
+        return ERROR_DRM_NO_LICENSE;
+    }
+
+    // Tokenize the first object, then pull the required fields out of it.
+    // (Braces added here for consistency with the other early returns.)
+    if (!parseJsonObject(mJsonObjects[0], &mTokens)) {
+        return ERROR_DRM_NO_LICENSE;
+    }
+
+    if (!findKey(mJsonObjects[0], asset)) {
+        return ERROR_DRM_NO_LICENSE;
+    }
+    return OK;
+}
+
+//static
+// Base64-decodes |encodedText|, first right-padding it with '=' until its
+// length is a multiple of 4, since android::decodeBase64() requires
+// padding characters.
+sp<ABuffer> JsonAssetLoader::decodeBase64String(const String8& encodedText) {
+    String8 padded(encodedText);
+    while (padded.length() % 4 != 0) {
+        padded.append(kBase64Padding);
+    }
+    return decodeBase64(AString(padded.string()));
+}
+
+// Copies the id, name, lowercase_organization_name and cas_type fields
+// from |jsonObject| into |asset|. Returns false if any of those fields is
+// missing. Relies on mTokens having been filled by parseJsonObject().
+bool JsonAssetLoader::findKey(const String8& jsonObject, Asset *asset) {
+
+    String8 value;
+
+    if (jsonObject.find(kIdTag) < 0) {
+        return false;
+    }
+    findValue(kIdTag, &value);
+    ALOGV("found %s=%s", kIdTag.string(), value.string());
+    // The asset id is 64 bits wide; atoi() would silently truncate ids
+    // larger than INT_MAX, so parse with strtoull instead.
+    asset->set_id(strtoull(value.string(), NULL, 10));
+
+    if (jsonObject.find(kNameTag) < 0) {
+        return false;
+    }
+    findValue(kNameTag, &value);
+    ALOGV("found %s=%s", kNameTag.string(), value.string());
+    asset->set_name(value.string());
+
+    if (jsonObject.find(kLowerCaseOgranizationNameTag) < 0) {
+        return false;
+    }
+    findValue(kLowerCaseOgranizationNameTag, &value);
+    ALOGV("found %s=%s", kLowerCaseOgranizationNameTag.string(), value.string());
+    asset->set_lowercase_organization_name(value.string());
+
+    if (jsonObject.find(kCasTypeTag) < 0) {
+        return false;
+    }
+    findValue(kCasTypeTag, &value);
+    ALOGV("found %s=%s", kCasTypeTag.string(), value.string());
+    // Asset_CasType_CLEARKEY_CAS = 1
+    asset->set_cas_type((Asset_CasType)atoi(value.string()));
+
+    return true;
+}
+
+// Scans the parsed token stream for |key| and copies the token that
+// immediately follows it into |value|. |value| is cleared first and left
+// empty when the key is absent or is the final token.
+void JsonAssetLoader::findValue(const String8 &key, String8* value) {
+    value->clear();
+    for (Vector<String8>::const_iterator nextToken = mTokens.begin();
+            nextToken != mTokens.end(); ++nextToken) {
+        if (0 == (*nextToken).compare(key)) {
+            if (nextToken + 1 == mTokens.end())
+                break;
+            // Declared at first use: the original's function-scope pointer
+            // sat uninitialized on every non-matching path. The original's
+            // trailing nextToken++ before break was dead and is dropped.
+            const char* valueToken = (*(nextToken + 1)).string();
+            value->setTo(valueToken);
+            break;
+        }
+    }
+}
+
+/*
+ * Parses a JSON objects string and initializes a vector of tokens.
+ *
+ * @return Returns false for errors, true for success.
+ */
+bool JsonAssetLoader::parseJsonObject(const String8& jsonObject,
+        Vector<String8>* tokens) {
+    jsmn_parser parser;
+
+    // First pass: count tokens so the token vector can be sized.
+    jsmn_init(&parser);
+    int numTokens = jsmn_parse(&parser,
+        jsonObject.string(), jsonObject.size(), NULL, 0);
+    if (numTokens < 0) {
+        ALOGE("Parser returns error code=%d", numTokens);
+        return false;
+    }
+
+    // setCapacity() takes an element count; the original passed
+    // numTokens * sizeof(jsmntok_t), over-allocating by that factor.
+    mJsmnTokens.clear();
+    mJsmnTokens.setCapacity(numTokens);
+
+    // Second pass: collect the tokens.
+    jsmn_init(&parser);
+    int status = jsmn_parse(&parser, jsonObject.string(),
+        jsonObject.size(), mJsmnTokens.editArray(), numTokens);
+    if (status < 0) {
+        ALOGE("Parser returns error code=%d", status);
+        return false;
+    }
+
+    // Keep only string/primitive tokens (keys and scalar values).
+    tokens->clear();
+    String8 token;
+    const char *pjs;
+    ALOGV("numTokens: %d", numTokens);
+    for (int j = 0; j < numTokens; ++j) {
+        pjs = jsonObject.string() + mJsmnTokens[j].start;
+        if (mJsmnTokens[j].type == JSMN_STRING ||
+                mJsmnTokens[j].type == JSMN_PRIMITIVE) {
+            token.setTo(pjs, mJsmnTokens[j].end - mJsmnTokens[j].start);
+            tokens->add(token);
+            ALOGV("add token: %s", token.string());
+        }
+    }
+    return true;
+}
+
+/*
+ * Parses JSON asset string and initializes a vector of JSON objects.
+ *
+ * @return Returns false for errors, true for success.
+ */
+bool JsonAssetLoader::parseJsonAssetString(const String8& jsonAsset,
+        Vector<String8>* jsonObjects) {
+    // Reset the output so repeated calls do not accumulate stale objects
+    // from a previous parse (the original only ever appended).
+    jsonObjects->clear();
+
+    if (jsonAsset.isEmpty()) {
+        ALOGE("Empty JSON Web Key");
+        return false;
+    }
+
+    // The jsmn parser only supports unicode encoding.
+    jsmn_parser parser;
+
+    // First pass computes the number of tokens; a token marks the type and
+    // offset in the original string.
+    jsmn_init(&parser);
+    int numTokens = jsmn_parse(&parser,
+        jsonAsset.string(), jsonAsset.size(), NULL, 0);
+    if (numTokens < 0) {
+        ALOGE("Parser returns error code=%d", numTokens);
+        return false;
+    }
+
+    // setCapacity() takes an element count, not a byte count (the original
+    // multiplied by sizeof(jsmntok_t)); clear any tokens from earlier runs.
+    mJsmnTokens.clear();
+    mJsmnTokens.setCapacity(numTokens);
+
+    // Second pass fills in the token array.
+    jsmn_init(&parser);
+    int status = jsmn_parse(&parser, jsonAsset.string(),
+        jsonAsset.size(), mJsmnTokens.editArray(), numTokens);
+    if (status < 0) {
+        ALOGE("Parser returns error code=%d", status);
+        return false;
+    }
+
+    // Collect the top-level JSON objects as substrings.
+    String8 token;
+    const char *pjs;
+    for (int i = 0; i < numTokens; ++i) {
+        pjs = jsonAsset.string() + mJsmnTokens[i].start;
+        if (mJsmnTokens[i].type == JSMN_OBJECT) {
+            token.setTo(pjs, mJsmnTokens[i].end - mJsmnTokens[i].start);
+            jsonObjects->add(token);
+        }
+    }
+    return true;
+}
+
+} // namespace clearkeycas
+} // namespace android
diff --git a/drm/mediacas/plugins/clearkey/JsonAssetLoader.h b/drm/mediacas/plugins/clearkey/JsonAssetLoader.h
new file mode 100644
index 0000000..06f9389
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/JsonAssetLoader.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef JSON_ASSET_LOADER_H_
+#define JSON_ASSET_LOADER_H_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/StrongPointer.h>
+#include "protos/license_protos.pb.h"
+
+#include "jsmn.h"
+
+namespace android {
+struct ABuffer;
+
+namespace clearkeycas {
+
+// Parses a JSON string describing a clear key asset into an Asset proto.
+// Not thread-safe: parsing state is kept in member vectors.
+class JsonAssetLoader {
+ public:
+    JsonAssetLoader();
+    virtual ~JsonAssetLoader();
+
+    // Fills |asset| from |jsonAssetString|; returns OK or
+    // ERROR_DRM_NO_LICENSE on any parse failure.
+    status_t extractAssetFromString(
+            const String8& jsonAssetString, Asset *asset);
+
+ private:
+    Vector<jsmntok_t> mJsmnTokens; // raw jsmn tokens of the current parse
+    Vector<String8> mJsonObjects; // top-level objects found in the input
+    Vector<String8> mTokens; // string/primitive tokens of object [0]
+
+    // Decodes base64, adding '=' padding first if needed.
+    static sp<ABuffer> decodeBase64String(
+            const String8& encodedText);
+    bool findKey(const String8& jsonObject, Asset *asset);
+    void findValue(
+            const String8 &key, String8* value);
+    bool parseJsonObject(
+            const String8& jsonObject, Vector<String8>* tokens);
+    bool parseJsonAssetString(
+            const String8& jsonString, Vector<String8>* jsonObjects);
+
+    DISALLOW_EVIL_CONSTRUCTORS(JsonAssetLoader);
+};
+
+} // namespace clearkeycas
+} // namespace android
+
+#endif // JSON_ASSET_LOADER_H_
diff --git a/drm/mediacas/plugins/clearkey/KeyFetcher.h b/drm/mediacas/plugins/clearkey/KeyFetcher.h
new file mode 100644
index 0000000..83fe50a
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/KeyFetcher.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef KEY_FETCHER_H_
+#define KEY_FETCHER_H_
+
+#include <vector>
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <utils/Errors.h>
+
+using namespace std;
+
+namespace android {
+namespace clearkeycas {
+
+// Interface for classes which extract the content key from an Ecm.
+class KeyFetcher {
+public:
+ struct KeyInfo {
+ sp<ABuffer> key_bytes;
+ int key_id;
+ };
+
+ KeyFetcher() {}
+ virtual ~KeyFetcher() {}
+
+ // Initializes resources set in subclass-specific calls. This must be called
+ // before threads are started.
+ virtual status_t Init() = 0;
+
+ // Obtains content key(s) based on contents of |ecm|. |asset_id| is the
+ // internal id of the asset, |keys| is a vector containing instances of a
+ // class containing a content key and an id. |asset_id| and |keys| are
+ // owned by the caller and must be non-null.
+ virtual status_t ObtainKey(const sp<ABuffer>& ecm,
+ uint64_t* asset_id, vector<KeyInfo>* keys) = 0;
+};
+
+} // namespace clearkeycas
+} // namespace android
+
+#endif // KEY_FETCHER_H_
diff --git a/drm/mediacas/plugins/clearkey/LicenseFetcher.h b/drm/mediacas/plugins/clearkey/LicenseFetcher.h
new file mode 100644
index 0000000..2a33dd8
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/LicenseFetcher.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LICENSE_FETCHER_H_
+#define LICENSE_FETCHER_H_
+
+#include "protos/license_protos.pb.h"
+#include <media/stagefright/foundation/ABase.h>
+
+namespace android {
+namespace clearkeycas {
+
+// Interface for classes which request a license.
+// Interface for classes which request a license.
+class LicenseFetcher {
+public:
+ LicenseFetcher() {}
+ virtual ~LicenseFetcher() {}
+
+ // Initializes resources set in subclass-specific calls. This must be called
+ // before threads are started.
+ virtual status_t Init(const char *input) = 0;
+
+ // Fetches license based on |asset_id|. On return, |asset| contains the
+ // decrypted asset_key needed to decrypt content keys.
+ // |asset| must be non-null.
+ virtual status_t FetchLicense(
+ uint64_t asset_id, clearkeycas::Asset* asset) = 0;
+
+private:
+ DISALLOW_EVIL_CONSTRUCTORS(LicenseFetcher);
+};
+
+} // namespace clearkeycas
+} // namespace android
+
+#endif // LICENSE_FETCHER_H_
diff --git a/drm/mediacas/plugins/clearkey/ecm.cpp b/drm/mediacas/plugins/clearkey/ecm.cpp
new file mode 100644
index 0000000..9fde13a
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/ecm.cpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ecm"
+
+#include "ecm.h"
+#include "ecm_generator.h"
+#include "protos/license_protos.pb.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaErrors.h>
+#include <utils/Log.h>
+
+namespace android {
+namespace clearkeycas {
+
+// All fields start unset; Parse()/Decrypt() populate them.
+Ecm::Ecm()
+ : asset_id_(0),
+ asset_id_set_(false),
+ system_id_(0),
+ system_id_set_(false) {}
+
+Ecm::~Ecm() {}
+
+// Decodes the clear (unencrypted) fields of a serialized Ecm — asset id
+// and system id — and retains the raw buffer for a later Decrypt() call.
+status_t Ecm::Parse(const sp<ABuffer>& buffer_as_binary) {
+    if (buffer_as_binary->size() < kSizeBytes) {
+        ALOGE("Short Ecm buffer: expected %zu, received %zu.",
+                kSizeBytes, buffer_as_binary->size());
+        return BAD_VALUE;
+    }
+
+    Asset asset;
+    ecm_generator::DefaultEcmFields default_fields;
+    const status_t err = ecm_generator::DecodeECMClearFields(
+            buffer_as_binary, &asset, &default_fields);
+    if (err != OK) {
+        ALOGE("DecodeECMClearFields failed with status %d", err);
+        return err;
+    }
+    set_asset_id(asset.id());
+    set_system_id(default_fields.system_id);
+
+    // Keep the raw bytes around for a future DecryptEcm call.
+    set_buffer(buffer_as_binary);
+    return OK;
+}
+
+// Decrypts |buffer_as_binary| using the asset key carried in
+// |asset_from_emm|, then records the asset id, system id and content key.
+// Returns errors from ecm_generator::DecodeECM, or
+// CLEARKEY_STATUS_INVALID_PARAMETER on an Emm/Ecm asset-id mismatch.
+status_t Ecm::Decrypt(
+        const sp<ABuffer>& buffer_as_binary,
+        const Asset& asset_from_emm) {
+    // Invariant: asset has id. These are postconditions for Emm::Decrypt().
+    CHECK(asset_from_emm.has_id());
+
+    // DecodeEcm fills in |asset|.id() with the asset_id from the encoded Ecm.
+    Asset asset(asset_from_emm);
+    ecm_generator::DefaultEcmFields default_fields;
+    sp<ABuffer> content_key;
+    status_t status = ecm_generator::DecodeECM(
+            buffer_as_binary, &asset, &content_key, &default_fields);
+    if (status != OK) {
+        ALOGE("DecodeECM failed with status %d", status);
+        return status;
+    }
+    if (asset.id() != asset_from_emm.id()) {
+        // Cast explicitly: uint64_t is not necessarily unsigned long long,
+        // so passing it straight to %llu is not portable.
+        ALOGE("Asset_id from Emm (%llu) does not match asset_id from Ecm (%llu).",
+                (unsigned long long)asset_from_emm.id(),
+                (unsigned long long)asset.id());
+        return CLEARKEY_STATUS_INVALID_PARAMETER;
+    }
+    set_asset_id(asset.id());
+    set_system_id(default_fields.system_id);
+    set_content_key(content_key);
+    return status;
+}
+
+// Default-constructed descriptor has neither an id nor an Ecm set.
+EcmDescriptor::EcmDescriptor() : ecm_set_(false), id_(0), id_set_(false) {}
+
+// Fully-initialized descriptor wrapping |ecm| under crypto-period |id|.
+EcmDescriptor::EcmDescriptor(uint16_t id, const Ecm& ecm)
+: ecm_(ecm), ecm_set_(true), id_(id), id_set_(true) {}
+
+EcmDescriptor::~EcmDescriptor() {}
+
+// Unmarshalls a serialized EcmDescriptor: a 16-bit big-endian id followed
+// by an Ecm of Ecm::kSizeBytes bytes.
+status_t EcmDescriptor::Parse(const sp<ABuffer>& buffer_as_binary) {
+    if (buffer_as_binary->size() < kSizeBytes) {
+        ALOGE("Short EcmDescriptor buffer: expected %zu, received %zu.",
+                kSizeBytes, buffer_as_binary->size());
+        return BAD_VALUE;
+    }
+    // Read the two id bytes directly; the original wrapped them in a
+    // temporary heap-allocated ABuffer for no benefit.
+    const uint8_t *id_bytes = buffer_as_binary->data();
+    uint16_t id = (id_bytes[0] << 8) | id_bytes[1];
+    set_id(id);
+
+    // Unmarshall the contained Ecm.
+    sp<ABuffer> ecm_buffer = new ABuffer(
+            buffer_as_binary->data() + kIdSizeBytes, Ecm::kSizeBytes);
+    status_t status = mutable_ecm()->Parse(ecm_buffer);
+    if (status != OK) {
+        return status;
+    }
+    return OK;
+}
+
+// A new container holds no descriptors until Add()/Parse() populate it.
+EcmContainer::EcmContainer() : count_(0), count_set_(false) {}
+
+EcmContainer::~EcmContainer() {}
+
+// Appends |descriptor|, retaining at most the two most recent descriptors:
+// when already full, the older one is dropped and the new one takes slot 1.
+status_t EcmContainer::Add(const EcmDescriptor& descriptor) {
+    if (count_ == 0) {
+        descriptor_[0] = descriptor;
+        count_ = 1;
+    } else if (count_ == 1) {
+        descriptor_[1] = descriptor;
+        count_ = 2;
+    } else if (count_ == 2) {
+        // Shift down: keep the newer descriptor, append the incoming one.
+        descriptor_[0] = descriptor_[1];
+        descriptor_[1] = descriptor;
+    } else {
+        ALOGE("Bad state.");
+        return INVALID_OPERATION;
+    }
+    count_set_ = true;
+    return OK;
+}
+
+// Unmarshalls a serialized EcmContainer: a 16-bit big-endian descriptor
+// count followed by 1 or 2 serialized EcmDescriptors.
+status_t EcmContainer::Parse(const sp<ABuffer>& buffer_as_binary) {
+    // EcmContainer can contain 1 or 2 EcmDescriptors so this is a check for
+    // minimum size.
+    if (buffer_as_binary->size() < kMinimumSizeBytes) {
+        ALOGE("Short EcmContainer buffer: expected >= %zu, received %zu.",
+                kMinimumSizeBytes, buffer_as_binary->size());
+        return BAD_VALUE;
+    }
+
+    // Read the two count bytes directly; the temporary heap-allocated
+    // ABuffer the original created for them served no purpose.
+    const uint8_t *count_bytes = buffer_as_binary->data();
+    size_t count = (count_bytes[0] << 8) | count_bytes[1];
+    // Check that count is a legal value.
+    if (!CountLegal(count)) {
+        ALOGE("Invalid descriptor count: expected %zu <= count <= %zu, received %zu.",
+                kMinDescriptorCount, kMaxDescriptorCount, count);
+        return ERROR_OUT_OF_RANGE;
+    }
+    // If needed, check that buffer_as_binary can hold 2 EcmDescriptors.
+    if (count > kMinDescriptorCount) {
+        size_t expected_bytes =
+                kCountSizeBytes + (count * EcmDescriptor::kSizeBytes);
+        if (buffer_as_binary->size() < expected_bytes) {
+            ALOGE("Short EcmContainer buffer: expected %zu, received %zu.",
+                    expected_bytes, buffer_as_binary->size());
+            return BAD_VALUE;
+        }
+    }
+    set_count(count);
+    // Unmarshall the contained EcmDescriptors.
+    size_t offset = kCountSizeBytes;
+    for (size_t i = 0; i < count_; ++i) {
+        sp<ABuffer> descriptor_buffer = new ABuffer(
+                buffer_as_binary->data() + offset, EcmDescriptor::kSizeBytes);
+        status_t status = mutable_descriptor(i)->Parse(descriptor_buffer);
+        if (status != OK) {
+            return status;
+        }
+        offset += EcmDescriptor::kSizeBytes;
+    }
+    return OK;
+}
+
+} // namespace clearkeycas
+} // namespace android
diff --git a/drm/mediacas/plugins/clearkey/ecm.h b/drm/mediacas/plugins/clearkey/ecm.h
new file mode 100644
index 0000000..aef8afb
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/ecm.h
@@ -0,0 +1,343 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Data objects encapsulating the clear key Ecm (Entitlement Control
+// Message) and related container messages. Deserialization and decryption
+// are handled externally to reduce build-time dependencies.
+//
+// Simplified typical client-side use:
+// Asset asset; // from the AssetRegistry.
+// uint8[] ecm_buffer; // received over network, contains an EcmContainer.
+// EcmContainer ecm_container;
+// util::Status status = ecm_container.Parse(ecm_buffer);
+// status = ecm_container.descriptor(1).ecm().Decrypt(
+// ecm_container.descriptor(1).ecm().buffer(), asset_key);
+// string content_key;
+// if (ecm_container.descriptor(1).ecm().has_content_key()) {
+// content_key = ecm_container.descriptor(1).ecm().content_key();
+// }
+// // use |content_key| to decrypt content.
+//
+// Simplified typical server-side use:
+// EcmContainer container;
+// string encoded_ecm;
+// // Use the ecm_generator API to encode and encrypt an ECM from data fields.
+// util::Status status = ecm_generator::EncodeECM(..., &encoded_ecm);
+// // Use |encoded_ecm| to initialize the Ecm from this library.
+// Ecm ecm;
+// util::Status status = ecm.Parse(encoded_ecm);
+// EcmDescriptor descriptor(crypto_period_id, ecm);
+// status = container.Add(descriptor);
+// string serialized_container;
+// status = container.Marshall(&serialized_container);
+// // now |serialized_container| can be sent to the STB.
+//
+// Due to past overloading of the term "ECM" this library introduces some
+// new terminology.
+//
+// Ecm: the 32-byte message sent from the head end to a packager that contains
+// the asset_id, system_id, and content_key (clear).
+//
+// EcmDescriptor: contains an Ecm and an id (the crypto period id in the case
+// of the BroadcastEncryptor). It contains no encrypted fields.
+//
+// EcmContainer: sent by the server in the video stream using the ECM pid.
+// This contains 1 or 2 EcmDescriptors and a count. It contains no
+// encrypted fields.
+//
+// The first EcmContainer sent by the server has only one EcmDescriptor. After
+// the first crypto period change, an EcmContainer contains 2 EcmDescriptors.
+// One has an odd id and one has an even id. The decrypted content keys from the
+// Ecms in the EcmDescriptors are used by the Mpeg2 parser as odd and even
+// scrambling keys. As the crypto period changes, the oldest EcmDescriptor is
+// dropped from the EcmContainer and the new EcmDescriptor is added.
+//
+// These classes use a simplified protobuf model. For non-repeating fields,
+// - has_foo() indicates whether the field is populated.
+// - the accessor foo() returns either a value or a const reference.
+// - a mutator sets the value. Primitive types and strings use
+// set_foo(value) while for objects mutable_foo() returns a pointer.
+//
+// To prevent null references, objects (like the Asset contained in an Emm)
+// are allocated as members and can be accessed via foo() even if they have
+// not been populated. The caller must call has_foo() to make sure that the
+// object is valid. Calling mutable_foo() to obtain a pointer causes has_foo()
+// to return true.
+//
+// Repeated fields (like the EcmDescriptors contained in an EcmContainer) are
+// handled differently.
+// - foo_size() returns the number of instances.
+// - the accessor foo(index) returns either a value or a const reference to
+// the instance at index. It is illegal to call with |index| >= the value
+// returned by foo_size(). |index| is checked with CHECK.
+// - a mutator to change the value of the instance. Primitive types and
+// strings use set_foo(index, value) while for objects mutable_foo(index)
+// returns a pointer. It is illegal to call with |index| >= the value
+// returned by foo_size(). |index| is checked with CHECK.
+//
+// Accessing a repeated field with an invalid index causes CHECK to fail.
+// Be sure to call EcmContainer::descriptor_size() before calling descriptor()
+// or mutable_descriptor()!
+//
+#ifndef CLEAR_KEY_ECM_H_
+#define CLEAR_KEY_ECM_H_
+
+#include <stddef.h>
+#include <string>
+
+#include "protos/license_protos.pb.h"
+
+#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <utils/Errors.h>
+
+using namespace std;
+
+namespace android {
+namespace clearkeycas {
+
+// Entitlement Control Message. It contains clear fields. The asset_id
+// and system_id as well as the content_key are clear.
+//
+// This class is not thread-safe.
+class Ecm {
+public:
+ // Wire size of ECM.
+ static constexpr size_t kSizeBytes = 16 + 16; // clear fields + clear key
+
+ // Creates an empty ECM which must be initialized via Parse().
+ Ecm();
+
+ ~Ecm();
+
+ // Parses clear fields of Ecm serialized in |buffer_as_binary| and saves
+ // a copy of |buffer_as_binary| for a future DecryptEcm call.
+ // Returns:
+ // - BAD_VALUE if |buffer_as_binary| is too small.
+ // - CLEARKEY_STATUS_INVALIDASSETID via ecm_generator::DecodeEcmClearFields if
+ // asset_id is 0.
+ // - CLEARKEY_STATUS_INVALIDSYSTEMID via ecm_generator::DecodeEcmClearFields if
+ // system_id is 0.
+ // Postconditions:
+ // - |asset_id_| and |system_id_| are populated with non-zero values.
+ // - |buffer_| contains a copy of the serialized Ecm.
+ status_t Parse(const sp<ABuffer>& buffer_as_binary);
+
+ // Parses and decrypts Ecm serialized in |buffer_as_binary| using
+ // |asset_from_emm|.asset_key().encryption_key(). It is not necessary to call
+ // Parse() first.
+ // Returns BAD_VALUE if |buffer_as_binary| is too small.
+ // Returns CLEARKEY_STATUS_INVALIDASSETID via
+ // ecm_generator::DecodeEcmClearFields if asset_id is 0.
+ // Returns CLEARKEY_STATUS_INVALIDSYSTEMID via
+ // ecm_generator::DecodeEcmClearFields if system_id is 0.
+ // Returns CLEARKEY_STATUS_INVALID_PARAMETER if
+ // - asset_id in |asset_from_emm| does not match asset_id in serialized Ecm.
+ // Preconditions: |asset_from_emm| must contain asset_id and asset_key fields.
+ // Postconditions: asset_id() and system_id() are populated with non-zero
+ // values, content_key() is populated with the clear content key.
+ status_t Decrypt(const sp<ABuffer>& buffer_as_binary,
+ const Asset& asset_from_emm);
+
+ // |buffer_| is a serialized copy of the Ecm used for later decryption or
+ // for marshalling.
+ inline bool has_buffer() const { return buffer_ != NULL; }
+ const sp<ABuffer> buffer() const { return buffer_; }
+ inline void set_buffer(const sp<ABuffer>& buffer) {
+ buffer_ = ABuffer::CreateAsCopy(buffer->data(), buffer->size());
+ }
+
+ // |content_key| is the clear, encryption/decryption key generated by the server.
+ inline bool has_content_key() const { return content_key_ != NULL; }
+ inline void set_content_key(const sp<ABuffer>& value) {
+ content_key_ = ABuffer::CreateAsCopy(value->data(), value->size());
+ }
+ inline const sp<ABuffer> content_key() const { return content_key_; }
+
+ // |asset_id| from the server.
+ inline bool has_asset_id() const { return asset_id_set_; }
+ inline uint64_t asset_id() const { return asset_id_; }
+ inline void set_asset_id(uint64_t value) {
+ asset_id_ = value;
+ asset_id_set_ = true;
+ }
+
+ // |system_id| from the server.
+ inline bool has_system_id() const { return system_id_set_; }
+ inline uint32_t system_id() const { return system_id_; }
+ inline void set_system_id(uint32_t value) {
+ system_id_ = value;
+ system_id_set_ = true;
+ }
+
+private:
+ uint64_t asset_id_;
+ bool asset_id_set_;
+ sp<ABuffer> buffer_;
+ sp<ABuffer> content_key_;
+ uint32_t system_id_;
+ bool system_id_set_;
+};
+
+// Contains an Ecm and an Id.
+// This class is not thread-safe.
+class EcmDescriptor {
+public:
+ // Wire size of Id field.
+ static constexpr size_t kIdSizeBytes = sizeof(uint16_t);
+ // Wire size of EcmDescriptor.
+ static constexpr size_t kSizeBytes = Ecm::kSizeBytes + kIdSizeBytes;
+
+ // Client-side ctor. Populate from a buffer with Parse().
+ EcmDescriptor();
+
+ // Server-side ctor.
+ // Args:
+ // - |id| is the crypto period ID.
+// - |ecm| is an ECM which must have been initialized with Ecm::Parse().
+ EcmDescriptor(uint16_t id, const Ecm& ecm);
+
+ ~EcmDescriptor();
+
+ // Parses EcmDescriptor and its contained Ecm which are serialized in the
+ // binary string |buffer_as_binary|.
+ // Returns
+ // - BAD_VALUE if |buffer_as_binary| is too short to contain a
+ // serialized EcmDescriptor.
+ // - Errors returned by Ecm::Parse.
+ // Postconditions:
+ // - id() is populated. Note that 0 is a legal value.
+ // - the clear fields of the contained Ecm have been populated.
+ status_t Parse(const sp<ABuffer>& buffer_as_binary);
+
+ // |id| of the contained Ecm. Typically the crypto period id.
+ inline bool has_id() const { return id_set_; }
+ inline void set_id(uint16_t value) {
+ id_ = value;
+ id_set_ = true;
+ }
+ inline uint16_t id() const { return id_; }
+
+ // The contained |ecm|.
+ inline bool has_ecm() const { return ecm_set_; }
+ inline Ecm* mutable_ecm() {
+ ecm_set_ = true;
+ return &ecm_;
+ }
+ inline const Ecm& ecm() const { return ecm_; }
+
+private:
+ Ecm ecm_;
+ bool ecm_set_;
+ uint16_t id_;
+ bool id_set_;
+};
+
+// Contains a count and 1 or 2 EcmDescriptors. This is included in the video
+// stream by the sender in the ECM pid.
+// This class is not thread-safe.
+class EcmContainer {
+public:
+ // Wire size of the count field.
+ static constexpr size_t kCountSizeBytes = sizeof(uint16_t);
+ // Minimum wire size assuming one EcmDescriptor.
+ static constexpr size_t kMinimumSizeBytes =
+ EcmDescriptor::kSizeBytes + kCountSizeBytes;
+ static constexpr size_t kMinDescriptorCount = 1;
+ static constexpr size_t kMaxDescriptorCount = 2;
+
+ // Creates an empty EcmContainer which must be populated via Parse()
+ // (client-side) or Add() (server-side).
+ EcmContainer();
+
+ ~EcmContainer();
+
+ // Adds an EcmDescriptor for server-side applications.
+ // If |count_| is 2, |descriptor| replaces the oldest EcmDescriptor.
+ //
+ // Returns:
+ // - INTERNAL if the EcmContainer is in a bad state (count != 0, 1, or 2).
+ // Postconditions:
+ // - count() is within bounds (1 or 2).
+ status_t Add(const EcmDescriptor& descriptor);
+
+ // Parses EcmContainer and its contained EcmDescriptors which are serialized
+ // in |buffer_as_binary|.
+ // Returns
+ // - BAD_VALUE if |buffer_as_binary| is too short to contain a
+ // serialized EcmDescriptor.
+ // - ERROR_OUT_OF_RANGE if the count contained in the serialized EcmContainer
+ // is not 1 or 2.
+ // - Errors returned by EcmDescriptor::Parse.
+ // Postconditions:
+ // - count() is within bounds (1 or 2) and.
+ // - contained EcmDescriptor(s) parsed and populated.
+ status_t Parse(const sp<ABuffer>& buffer_as_binary);
+
+ inline bool has_count() const { return count_set_; }
+ // Sets the |count| of contained EcmDescriptors. Illegal values are silently
+ // ignored.
+ inline void set_count(size_t count) {
+ if (!CountLegal(count)) return;
+ count_ = count;
+ count_set_ = true;
+ }
+ // Number of contained EcmDescriptors. Only 1 and 2 are legal values.
+ inline size_t count() const { return count_; }
+
+ // Returns the number of allowable descriptors. This is redundant but is
+ // provided for protobuf compatibility.
+ inline size_t descriptor_size() const { return count_; }
+
+ // Returns a pointer to the EcmDescriptor at |index| for valid index values,
+ // otherwise behavior is undefined (the CHECK is currently disabled). Always call descriptor_size() first!
+ inline EcmDescriptor* mutable_descriptor(size_t index) {
+ //CHECK(IndexValid(index));
+ return &descriptor_[index];
+ }
+
+ // Returns a reference to the EcmDescriptor at |index| for valid index
+ // values; the CHECK is currently disabled, so invalid indexes go unchecked. Call descriptor_size() first!
+ inline const EcmDescriptor& descriptor(size_t index) const {
+ //CHECK(IndexValid(index));
+ return descriptor_[index];
+ }
+
+private:
+ // Count value must be 1 or 2.
+ inline bool CountLegal(size_t count) const {
+ return count <= kMaxDescriptorCount && count >= kMinDescriptorCount;
+ }
+ // Index must be 0 or 1.
+ inline bool IndexLegal(size_t index) const {
+ return index < kMaxDescriptorCount;
+ }
+ // |index| is valid for this object: it is legal and < count_.
+ inline bool IndexValid(size_t index) const {
+ if (!IndexLegal(index)) return false;
+ return index < count_;
+ }
+ size_t count_;
+ bool count_set_;
+ EcmDescriptor descriptor_[kMaxDescriptorCount];
+
+ DISALLOW_EVIL_CONSTRUCTORS(EcmContainer);
+};
+
+} // namespace clearkeycas
+} // namespace android
+
+#endif // CLEAR_KEY_ECM_H_
diff --git a/drm/mediacas/plugins/clearkey/ecm_generator.cpp b/drm/mediacas/plugins/clearkey/ecm_generator.cpp
new file mode 100644
index 0000000..7d29659
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/ecm_generator.cpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ecm_generator"
+#include "ecm_generator.h"
+
+#include <string.h>
+#include <algorithm>
+#include <endian.h>
+
+#include "protos/license_protos.pb.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaErrors.h>
+#include <openssl/aes.h>
+#include <utils/Log.h>
+
+namespace android {
+namespace clearkeycas {
+
+// These constants are internal to this module.
+const uint16_t kEcmClearFieldsSize = 16;
+const uint32_t kContentKeyByteSize = 16;
+const uint16_t kTotalEcmSize =
+ kEcmClearFieldsSize + kContentKeyByteSize; // clear fields + clear key
+
+const uint32_t kKeyLength = 16;
+
+#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32_t *>(_p))
+
+static uint32_t Load32(const void *p) {
+ return ntohl(UNALIGNED_LOAD32(p));
+}
+
+static uint32_t LoadNext32(const uint8_t** pptr) {
+ CHECK(pptr);
+ CHECK(*pptr);
+ const uint8_t* p = *pptr;
+ *pptr += sizeof(uint32_t);
+ return Load32(p);
+}
+
+namespace ecm_generator {
+
+status_t DecodeECM(const sp<ABuffer>& ecm, Asset* asset,
+ sp<ABuffer> *content_key, DefaultEcmFields* default_fields) {
+ CHECK(asset);
+ CHECK(content_key);
+ CHECK(default_fields);
+
+ status_t status = DecodeECMClearFields(ecm, asset, default_fields);
+ if (status != OK) {
+ return status;
+ }
+
+ const uint8_t* ptr = ecm->data() + kEcmClearFieldsSize;
+ *content_key = new ABuffer(kContentKeyByteSize);
+ memcpy((*content_key)->data(), ptr, kContentKeyByteSize);
+
+ return OK;
+}
+
+status_t DecodeECMClearFields(const sp<ABuffer>& ecm, Asset* asset,
+ DefaultEcmFields* default_fields) {
+ CHECK(asset);
+ CHECK(default_fields);
+
+ if (ecm->size() < kTotalEcmSize) {
+ ALOGE("Short ECM: expected_length=%d, actual_length=%zu",
+ kTotalEcmSize, ecm->size());
+ return BAD_VALUE;
+ }
+ const uint8_t* ptr = ecm->data();
+ default_fields->old_version = LoadNext32(&ptr);
+ default_fields->clear_lead = LoadNext32(&ptr);
+ default_fields->system_id = LoadNext32(&ptr);
+ // The real ecm version is hidden in the system id.
+ default_fields->ecm_version = (default_fields->system_id >> 24) & 0xFF;
+ default_fields->system_id &= 0x00FFFFFF;
+ if (default_fields->system_id == 0) {
+ ALOGE("Ecm has invalid system_id 0");
+ return CLEARKEY_STATUS_INVALIDSYSTEMID;
+ }
+ asset->set_id(LoadNext32(&ptr));
+ if (asset->id() == 0) {
+ ALOGE("Ecm has invalid asset_id 0");
+ return CLEARKEY_STATUS_INVALIDASSETID;
+ }
+ return OK;
+}
+
+} // namespace ecm_generator
+} // namespace clearkeycas
+} // namespace android
diff --git a/drm/mediacas/plugins/clearkey/ecm_generator.h b/drm/mediacas/plugins/clearkey/ecm_generator.h
new file mode 100644
index 0000000..2ef06c4
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/ecm_generator.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEAR_KEY_ECM_GENERATOR_H_
+#define CLEAR_KEY_ECM_GENERATOR_H_
+
+#include <string>
+
+#include "protos/license_protos.pb.h"
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/MediaErrors.h>
+
+using namespace std;
+
+namespace android {
+namespace clearkeycas {
+enum {
+ CLEARKEY_STATUS_BASE = ERROR_DRM_VENDOR_MAX,
+ CLEARKEY_STATUS_INVALIDASSETID = CLEARKEY_STATUS_BASE - 1,
+ CLEARKEY_STATUS_INVALIDSYSTEMID = CLEARKEY_STATUS_BASE - 2,
+ CLEARKEY_STATUS_INVALID_PARAMETER = CLEARKEY_STATUS_BASE - 3,
+};
+class Organization;
+
+namespace ecm_generator {
+
+// Layout of the ECM
+// ECM
+// 0 - 3 : Old ECM version (deprecated)
+// 4 - 7 : Clear lead (milliseconds)
+// 8 : ECM Version
+// 9 - 11 : System ID
+// 12 - 15 : Asset ID
+// 16 - 31 : Content Key (clear)
+//
+// The clear asset ID (bytes 12-15) is compared to the encrypted asset ID
+// (bytes 48-51) as a consistency check.
+
+struct DefaultEcmFields {
+ uint32_t old_version;
+ uint32_t clear_lead;
+ uint32_t ecm_version;
+ uint32_t system_id;
+};
+
+// Decodes a clear key ecm.
+// The following fields are decoded from the clear fields portion of the ecm:
+// asset->id
+// default_fields->old_version
+// default_fields->clear_lead
+// default_fields->system_id
+// default_fields->ecm_version
+//
+// The following fields are decoded from the content key portion of the ecm:
+// content_key
+//
+// |asset|, |content_key|, |default_fields| are owned by caller and must not
+// be NULL.
+// Returns failure via ecm_generator::DecodeECMClearFields.
+//
+// Example usage:
+// Asset asset;
+// string content_key;
+// DefaultEcmFields default_fields;
+// // Get a clear key |ecm|.
+// status_t status = ecm_generator::DecodeECM(ecm, &asset, &content_key, &default_fields);
+status_t DecodeECM(const sp<ABuffer>& ecm, Asset* asset,
+ sp<ABuffer> *content_key, DefaultEcmFields* default_fields);
+
+// Decodes the following fields from the clear fields portion of the ecm:
+// asset->id
+// default_fields->old_version
+// default_fields->clear_lead
+// default_fields->system_id
+// default_fields->ecm_version
+//
+// offset, asset and default_fields are owned by caller and must not be NULL.
+// offset is updated to show the number of bytes consumed.
+// Returns:
+// - BAD_VALUE on short ECM, or
+// - CLEARKEY_STATUS_INVALIDASSETID via ecm_generator::DecodeEcmClearFields if
+// asset_id is 0, or
+// - CLEARKEY_STATUS_INVALIDSYSTEMID via ecm_generator::DecodeEcmClearFields if
+// system_id is 0.
+//
+// Example usage:
+// Asset asset;
+// DefaultEcmFields default_fields;
+// // Get a clear key ecm.
+// status_t status = ecm_generator::DecodeECMClearFields(ecm, &asset, &default_fields);
+status_t DecodeECMClearFields(const sp<ABuffer>& ecm, Asset* asset,
+ DefaultEcmFields* default_fields);
+
+} // namespace ecm_generator
+} // namespace clearkeycas
+} // namespace android
+#endif // CLEAR_KEY_ECM_GENERATOR_H_
diff --git a/drm/mediacas/plugins/clearkey/protos/license_protos.proto b/drm/mediacas/plugins/clearkey/protos/license_protos.proto
new file mode 100644
index 0000000..397145d
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/protos/license_protos.proto
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto2";
+
+package android.clearkeycas;
+
+option java_package = "com.google.video.clearkey.protos";
+
+// The Asset is the data describing licensing requirements and policy for a
+// customer's video asset.
+//
+// The asset_id must not be set on creation. It is only used for assets of
+// CasType: CLEARKEY_CAS.
+//
+message Asset {
+ // Indicates the type of digital rights management scheme used.
+ // CLEARKEY_CAS: Clearkey Media CAS.
+ enum CasType {
+ UNKNOWN = 0;
+ CLEARKEY_CAS = 1;
+ }
+
+ // Must be unset on creation. Required for mutate operations on CLEARKEY_CAS assets.
+ optional uint64 id = 1;
+
+ // Organization-specified name of the asset. Required. Must not be empty.
+ // 'bytes' instead of 'string' due to UTF-8 validation in the latter.
+ optional bytes name = 2;
+
+ // The lowercase_organization_name is required. It's a foreign key to the
+ // Organization table and part of the primary key for the Asset table.
+ optional string lowercase_organization_name = 3;
+
+ // The policy_name is required. It's a foreign key to the policy table.
+ optional string policy_name = 4; // Name of the Policy to apply to this asset.
+
+ // Key information for decrypting content. Not used for CLEARKEY_CAS.
+ optional AssetKey asset_key = 5;
+
+ optional CasType cas_type = 6 [default = UNKNOWN];
+}
+
+// AssetKey defines a key that can be used to decrypt the license.
+// Note: In the previous implementation, the schema accommodated multiple
+// asset keys per asset. This is not supported in this implementation.
+message AssetKey {
+ optional bytes encryption_key = 1; // 256-byte key for the asset.
+}
diff --git a/drm/mediacas/plugins/clearkey/tests/Android.mk b/drm/mediacas/plugins/clearkey/tests/Android.mk
new file mode 100644
index 0000000..cbf7be7
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/tests/Android.mk
@@ -0,0 +1,44 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+ ClearKeyFetcherTest.cpp
+
+LOCAL_MODULE := ClearKeyFetcherTest
+
+# LOCAL_LDFLAGS is needed here for the test to use the plugin, because
+# the plugin is not in standard library search path. Without this .so
+# loading fails at run-time (linking is okay).
+LOCAL_LDFLAGS := \
+ -Wl,--rpath,\$${ORIGIN}/../../../system/lib/mediacas -Wl,--enable-new-dtags
+
+LOCAL_SHARED_LIBRARIES := \
+ libutils libclearkeycasplugin libstagefright_foundation libprotobuf-cpp-lite liblog
+
+LOCAL_C_INCLUDES += \
+ $(TOP)/frameworks/av/drm/mediacas/plugins/clearkey \
+ $(TOP)/frameworks/av/include \
+ $(TOP)/frameworks/native/include/media \
+
+LOCAL_MODULE_TAGS := tests
+
+include $(BUILD_NATIVE_TEST)
+
+
+
diff --git a/drm/mediacas/plugins/clearkey/tests/ClearKeyFetcherTest.cpp b/drm/mediacas/plugins/clearkey/tests/ClearKeyFetcherTest.cpp
new file mode 100644
index 0000000..ace086a
--- /dev/null
+++ b/drm/mediacas/plugins/clearkey/tests/ClearKeyFetcherTest.cpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ClearKeyFetcherTest"
+#include <utils/Log.h>
+#include <gtest/gtest.h>
+#include <stddef.h>
+#include <algorithm>
+#include <string>
+
+#include "ClearKeyFetcher.h"
+#include "ClearKeyLicenseFetcher.h"
+#include "protos/license_protos.pb.h"
+
+namespace android {
+namespace clearkeycas {
+
+const char *kTestAssetInJson =
+ "{ "
+ " \"id\": 21140844, "
+ " \"name\": \"Test Title\", "
+ " \"lowercase_organization_name\": \"Android\", "
+ " \"asset_key\": { "
+ " \"encryption_key\": \"nezAr3CHFrmBR9R8Tedotw==\" "
+ " }, "
+ " \"cas_type\": 1, "
+ " \"track_types\": [ ] "
+ "} " ;
+
+const uint8_t kTestEcmContainer[] = {
+ 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x27, 0x10, 0x02, 0x00, 0x01, 0x77,
+ 0x01, 0x42, 0x95, 0x6c, 0x0e, 0xe3, 0x91, 0xbc,
+ 0xfd, 0x05, 0xb1, 0x60, 0x4f, 0x17, 0x82, 0xa4,
+ 0x86, 0x9b, 0x23, 0x56, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x01, 0x00, 0x00, 0x27, 0x10, 0x02, 0x00,
+ 0x01, 0x77, 0x01, 0x42, 0x95, 0x6c, 0xd7, 0x43,
+ 0x62, 0xf8, 0x1c, 0x62, 0x19, 0x05, 0xc7, 0x3a,
+ 0x42, 0xcd, 0xfd, 0xd9, 0x13, 0x48,
+};
+
+const uint8_t kTestContentKey0[] = {
+ 0x0e, 0xe3, 0x91, 0xbc, 0xfd, 0x05, 0xb1, 0x60,
+ 0x4f, 0x17, 0x82, 0xa4, 0x86, 0x9b, 0x23, 0x56};
+
+const uint8_t kTestContentKey1[] = {
+ 0xd7, 0x43, 0x62, 0xf8, 0x1c, 0x62, 0x19, 0x05,
+ 0xc7, 0x3a, 0x42, 0xcd, 0xfd, 0xd9, 0x13, 0x48};
+
+constexpr uint32_t kTestEcmCount = 2;
+
+class ClearKeyFetcherTest : public testing::Test {
+protected:
+ virtual void SetUp();
+
+protected:
+ std::unique_ptr<ClearKeyLicenseFetcher> license_fetcher_;
+ sp<ABuffer> ecm_;
+ sp<ABuffer> content_key_[kTestEcmCount];
+};
+
+void ClearKeyFetcherTest::SetUp() {
+ license_fetcher_.reset(new ClearKeyLicenseFetcher());
+ EXPECT_EQ(OK, license_fetcher_->Init(kTestAssetInJson));
+ ecm_ = new ABuffer((void*) (kTestEcmContainer), sizeof(kTestEcmContainer));
+ content_key_[0] = new ABuffer(
+ (void*)kTestContentKey0, sizeof(kTestContentKey0));
+ content_key_[1] = new ABuffer(
+ (void*)kTestContentKey1, sizeof(kTestContentKey1));
+}
+
+TEST_F(ClearKeyFetcherTest, Ctor) {
+ ClearKeyFetcher fetcher(std::move(license_fetcher_));
+}
+
+TEST_F(ClearKeyFetcherTest, Success) {
+ ClearKeyFetcher fetcher(std::move(license_fetcher_));
+ EXPECT_EQ(OK, fetcher.Init());
+ uint64_t asset_id;
+ std::vector<KeyFetcher::KeyInfo> keys;
+ EXPECT_EQ(OK, fetcher.ObtainKey(ecm_, &asset_id, &keys));
+ EXPECT_EQ(2, keys.size());
+ EXPECT_EQ(0, keys[0].key_id);
+ EXPECT_EQ(content_key_[0]->size(), keys[0].key_bytes->size());
+ EXPECT_EQ(0, memcmp(content_key_[0]->data(),
+ keys[0].key_bytes->data(), content_key_[0]->size()));
+ EXPECT_EQ(1, keys[1].key_id);
+ EXPECT_EQ(content_key_[1]->size(), keys[1].key_bytes->size());
+ EXPECT_EQ(0, memcmp(content_key_[1]->data(),
+ keys[1].key_bytes->data(), content_key_[1]->size()));
+}
+
+} // namespace clearkeycas
+} // namespace android
diff --git a/drm/mediacas/plugins/mock/Android.mk b/drm/mediacas/plugins/mock/Android.mk
new file mode 100644
index 0000000..a97fac6
--- /dev/null
+++ b/drm/mediacas/plugins/mock/Android.mk
@@ -0,0 +1,37 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+ MockCasPlugin.cpp \
+ MockSessionLibrary.cpp \
+
+LOCAL_MODULE := libmockcasplugin
+
+LOCAL_PROPRIETARY_MODULE := true
+LOCAL_MODULE_RELATIVE_PATH := mediacas
+
+LOCAL_SHARED_LIBRARIES := \
+ libutils liblog
+
+LOCAL_C_INCLUDES += \
+ $(TOP)/frameworks/av/include \
+ $(TOP)/frameworks/native/include/media \
+
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/drm/mediacas/plugins/mock/MockCasPlugin.cpp b/drm/mediacas/plugins/mock/MockCasPlugin.cpp
new file mode 100644
index 0000000..18cd9a4
--- /dev/null
+++ b/drm/mediacas/plugins/mock/MockCasPlugin.cpp
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "MockCasPlugin"
+
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/MediaErrors.h>
+#include <utils/Log.h>
+
+#include "MockCasPlugin.h"
+#include "MockSessionLibrary.h"
+
+android::CasFactory* createCasFactory() {
+ return new android::MockCasFactory();
+}
+
+android::DescramblerFactory* createDescramblerFactory() {
+ return new android::MockDescramblerFactory();
+}
+
+namespace android {
+
+static const int32_t sMockId = 0xFFFF;
+
+bool MockCasFactory::isSystemIdSupported(int32_t CA_system_id) const {
+ return CA_system_id == sMockId;
+}
+
+status_t MockCasFactory::queryPlugins(
+ std::vector<CasPluginDescriptor> *descriptors) const {
+ descriptors->clear();
+ descriptors->push_back({sMockId, String8("MockCAS")});
+ return OK;
+}
+
+status_t MockCasFactory::createPlugin(
+ int32_t CA_system_id,
+ uint64_t /*appData*/, // unused by the mock: no app-private state is kept
+ CasPluginCallback /*callback*/, // unused: mock never emits plugin events
+ CasPlugin **plugin) {
+ if (!isSystemIdSupported(CA_system_id)) {
+ return BAD_VALUE;
+ }
+
+ *plugin = new MockCasPlugin(); // caller takes ownership of the new plugin
+ return OK;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+bool MockDescramblerFactory::isSystemIdSupported(int32_t CA_system_id) const {
+ return CA_system_id == sMockId;
+}
+
+status_t MockDescramblerFactory::createPlugin(
+ int32_t CA_system_id, DescramblerPlugin** plugin) {
+ if (!isSystemIdSupported(CA_system_id)) {
+ return BAD_VALUE;
+ }
+
+ *plugin = new MockDescramblerPlugin();
+ return OK;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+static String8 arrayToString(const std::vector<uint8_t> &array) {
+ String8 result;
+ for (size_t i = 0; i < array.size(); i++) {
+ result.appendFormat("%02x ", array[i]);
+ }
+ if (result.isEmpty()) {
+ result.append("(null)");
+ }
+ return result;
+}
+
+MockCasPlugin::MockCasPlugin() {
+ ALOGV("CTOR");
+}
+
+MockCasPlugin::~MockCasPlugin() {
+ ALOGV("DTOR");
+ MockSessionLibrary::get()->destroyPlugin(this);
+}
+
+status_t MockCasPlugin::setPrivateData(const CasData & /*data*/) { // data ignored by mock
+ ALOGV("setPrivateData");
+ return OK;
+}
+
+status_t MockCasPlugin::openSession(CasSessionId* sessionId) {
+ ALOGV("openSession");
+ return MockSessionLibrary::get()->addSession(this, sessionId);
+}
+
+status_t MockCasPlugin::closeSession(const CasSessionId &sessionId) {
+ ALOGV("closeSession: sessionId=%s", arrayToString(sessionId).string());
+ Mutex::Autolock lock(mLock);
+
+ sp<MockCasSession> session =
+ MockSessionLibrary::get()->findSession(sessionId);
+ if (session == NULL) {
+ return BAD_VALUE;
+ }
+
+ MockSessionLibrary::get()->destroySession(sessionId);
+ return OK;
+}
+
+status_t MockCasPlugin::setSessionPrivateData(
+ const CasSessionId &sessionId, const CasData &data) {
+ ALOGV("setSessionPrivateData: sessionId=%s",
+ arrayToString(sessionId).string());
+ Mutex::Autolock lock(mLock);
+
+ sp<MockCasSession> session =
+ MockSessionLibrary::get()->findSession(sessionId);
+ if (session == NULL) {
+ return BAD_VALUE;
+ }
+ return OK;
+}
+
+status_t MockCasPlugin::processEcm(
+ const CasSessionId &sessionId, const CasEcm& ecm) {
+ ALOGV("processEcm: sessionId=%s", arrayToString(sessionId).string());
+ Mutex::Autolock lock(mLock);
+
+ sp<MockCasSession> session =
+ MockSessionLibrary::get()->findSession(sessionId);
+ if (session == NULL) {
+ return BAD_VALUE;
+ }
+ ALOGV("ECM: size=%zu", ecm.size()); // %zu: ecm.size() returns size_t
+ ALOGV("ECM: data=%s", arrayToString(ecm).string());
+
+ return OK;
+}
+
+status_t MockCasPlugin::processEmm(const CasEmm& emm) {
+ ALOGV("processEmm");
+ Mutex::Autolock lock(mLock);
+
+ ALOGV("EMM: size=%zu", emm.size()); // %zu: emm.size() returns size_t
+ ALOGV("EMM: data=%s", arrayToString(emm).string());
+
+ return OK;
+}
+
+status_t MockCasPlugin::sendEvent(
+ int32_t event, int32_t /*arg*/, const CasData & /*eventData*/) { // int32_t arg: match header decl
+ ALOGV("sendEvent: event=%d", event);
+ Mutex::Autolock lock(mLock);
+
+ return OK;
+}
+
+status_t MockCasPlugin::provision(const String8 &str) {
+ ALOGV("provision: provisionString=%s", str.string());
+ Mutex::Autolock lock(mLock);
+
+ return OK;
+}
+
+status_t MockCasPlugin::refreshEntitlements(
+ int32_t /*refreshType*/, const CasData &refreshData) { // refreshType ignored by mock
+ ALOGV("refreshEntitlements: refreshData=%s", arrayToString(refreshData).string());
+ Mutex::Autolock lock(mLock);
+
+ return OK;
+}
+
+/////////////////////////////////////////////////////////////////
+bool MockDescramblerPlugin::requiresSecureDecoderComponent(
+ const char *mime) const {
+ ALOGV("MockDescramblerPlugin::requiresSecureDecoderComponent"
+ "(mime=%s)", mime);
+ return false;
+}
+
+status_t MockDescramblerPlugin::setMediaCasSession(
+ const CasSessionId &sessionId) {
+ ALOGV("MockDescramblerPlugin::setMediaCasSession");
+ sp<MockCasSession> session =
+ MockSessionLibrary::get()->findSession(sessionId);
+
+ if (session == NULL) {
+ ALOGE("MockDescramblerPlugin: session not found");
+ return ERROR_DRM_SESSION_NOT_OPENED;
+ }
+
+ return OK;
+}
+
+ssize_t MockDescramblerPlugin::descramble(
+ bool secure,
+ ScramblingControl scramblingControl,
+ size_t numSubSamples,
+ const SubSample *subSamples,
+ const void *srcPtr,
+ int32_t srcOffset,
+ void *dstPtr,
+ int32_t dstOffset,
+ AString * /*errorDetailMsg*/) { // unused: mock never reports an error
+ ALOGV("MockDescramblerPlugin::descramble(secure=%d, sctrl=%d,"
+ "subSamples=%s, srcPtr=%p, dstPtr=%p, srcOffset=%d, dstOffset=%d)",
+ (int)secure, (int)scramblingControl,
+ subSamplesToString(subSamples, numSubSamples).string(),
+ srcPtr, dstPtr, srcOffset, dstOffset);
+
+ return 0; // mock descrambles nothing; 0 bytes produced
+}
+
+// Conversion utilities
+String8 MockDescramblerPlugin::arrayToString(
+ uint8_t const *array, size_t len) const
+{
+ String8 result("{ ");
+ for (size_t i = 0; i < len; i++) {
+ result.appendFormat("0x%02x ", array[i]);
+ }
+ result += "}";
+ return result;
+}
+
+String8 MockDescramblerPlugin::subSamplesToString(
+ SubSample const *subSamples, size_t numSubSamples) const
+{
+ String8 result;
+ for (size_t i = 0; i < numSubSamples; i++) {
+ result.appendFormat("[%zu] {clear:%u, encrypted:%u} ", i,
+ subSamples[i].mNumBytesOfClearData,
+ subSamples[i].mNumBytesOfEncryptedData);
+ }
+ return result;
+}
+
+} // namespace android
+
diff --git a/drm/mediacas/plugins/mock/MockCasPlugin.h b/drm/mediacas/plugins/mock/MockCasPlugin.h
new file mode 100644
index 0000000..9632492
--- /dev/null
+++ b/drm/mediacas/plugins/mock/MockCasPlugin.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MOCK_CAS_PLUGIN_H_
+#define MOCK_CAS_PLUGIN_H_
+
+#include <media/cas/CasAPI.h>
+#include <media/cas/DescramblerAPI.h>
+#include <utils/Mutex.h>
+
+extern "C" {
+ android::CasFactory *createCasFactory();
+ android::DescramblerFactory *createDescramblerFactory();
+}
+
+namespace android {
+
+class MockCasFactory : public CasFactory {
+public:
+ MockCasFactory() {}
+ virtual ~MockCasFactory() {}
+
+ virtual bool isSystemIdSupported(
+ int32_t CA_system_id) const override;
+ virtual status_t queryPlugins(
+ std::vector<CasPluginDescriptor> *descriptors) const override;
+ virtual status_t createPlugin(
+ int32_t CA_system_id,
+ uint64_t appData,
+ CasPluginCallback callback,
+ CasPlugin **plugin) override;
+};
+
+class MockDescramblerFactory : public DescramblerFactory {
+public:
+ MockDescramblerFactory() {}
+ virtual ~MockDescramblerFactory() {}
+
+ virtual bool isSystemIdSupported(
+ int32_t CA_system_id) const override;
+ virtual status_t createPlugin(
+ int32_t CA_system_id, DescramblerPlugin **plugin) override;
+};
+
+class MockCasPlugin : public CasPlugin {
+public:
+ MockCasPlugin();
+ virtual ~MockCasPlugin();
+
+ virtual status_t setPrivateData(
+ const CasData &data) override;
+
+ virtual status_t openSession(CasSessionId *sessionId) override;
+
+ virtual status_t closeSession(
+ const CasSessionId &sessionId) override;
+
+ virtual status_t setSessionPrivateData(
+ const CasSessionId &sessionId,
+ const CasData &data) override;
+
+ virtual status_t processEcm(
+ const CasSessionId &sessionId, const CasEcm &ecm) override;
+
+ virtual status_t processEmm(const CasEmm &emm) override;
+
+ virtual status_t sendEvent(
+ int32_t event, int32_t arg, const CasData &eventData) override;
+
+ virtual status_t provision(const String8 &str) override;
+
+ virtual status_t refreshEntitlements(
+ int32_t refreshType, const CasData &refreshData) override;
+
+private:
+
+ Mutex mLock;
+};
+
+class MockDescramblerPlugin : public DescramblerPlugin {
+public:
+ MockDescramblerPlugin() {}
+ virtual ~MockDescramblerPlugin() {};
+
+ virtual bool requiresSecureDecoderComponent(
+ const char *mime) const override;
+
+ virtual status_t setMediaCasSession(
+ const CasSessionId &sessionId) override;
+
+ virtual ssize_t descramble(
+ bool secure,
+ ScramblingControl scramblingControl,
+ size_t numSubSamples,
+ const SubSample *subSamples,
+ const void *srcPtr,
+ int32_t srcOffset,
+ void *dstPtr,
+ int32_t dstOffset,
+ AString *errorDetailMsg) override;
+
+private:
+ String8 subSamplesToString(
+ SubSample const *subSamples,
+ size_t numSubSamples) const;
+ String8 arrayToString(uint8_t const *array, size_t len) const;
+};
+} // namespace android
+
+#endif // MOCK_CAS_PLUGIN_H_
diff --git a/drm/mediacas/plugins/mock/MockSessionLibrary.cpp b/drm/mediacas/plugins/mock/MockSessionLibrary.cpp
new file mode 100644
index 0000000..a7ee9a0
--- /dev/null
+++ b/drm/mediacas/plugins/mock/MockSessionLibrary.cpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MockSessionLibrary"
+
+#include <utils/Log.h>
+#include <utils/String8.h>
+#include "MockSessionLibrary.h"
+
+namespace android {
+
+Mutex MockSessionLibrary::sSingletonLock;
+MockSessionLibrary* MockSessionLibrary::sSingleton = NULL;
+
+MockSessionLibrary* MockSessionLibrary::get() {
+ Mutex::Autolock lock(sSingletonLock);
+
+ if (sSingleton == NULL) {
+ ALOGD("Instantiating Session Library Singleton.");
+ sSingleton = new MockSessionLibrary();
+ }
+
+ return sSingleton;
+}
+
+MockSessionLibrary::MockSessionLibrary() : mNextSessionId(1) {}
+
+status_t MockSessionLibrary::addSession(
+ CasPlugin *plugin, CasSessionId *sessionId) {
+ Mutex::Autolock lock(mSessionsLock);
+
+ sp<MockCasSession> session = new MockCasSession(plugin);
+
+ uint8_t *byteArray = (uint8_t *) &mNextSessionId;
+ sessionId->push_back(byteArray[3]);
+ sessionId->push_back(byteArray[2]);
+ sessionId->push_back(byteArray[1]);
+ sessionId->push_back(byteArray[0]);
+ mNextSessionId++;
+
+ mIDToSessionMap.add(*sessionId, session);
+ return OK;
+}
+
+sp<MockCasSession> MockSessionLibrary::findSession(
+ const CasSessionId& sessionId) {
+ Mutex::Autolock lock(mSessionsLock);
+
+ ssize_t index = mIDToSessionMap.indexOfKey(sessionId);
+ if (index < 0) {
+ return NULL;
+ }
+ return mIDToSessionMap.valueFor(sessionId);
+}
+
+void MockSessionLibrary::destroySession(const CasSessionId& sessionId) {
+ Mutex::Autolock lock(mSessionsLock);
+
+ ssize_t index = mIDToSessionMap.indexOfKey(sessionId);
+ if (index < 0) {
+ return;
+ }
+
+ sp<MockCasSession> session = mIDToSessionMap.valueAt(index);
+ mIDToSessionMap.removeItemsAt(index);
+}
+
+void MockSessionLibrary::destroyPlugin(CasPlugin *plugin) {
+ Mutex::Autolock lock(mSessionsLock);
+
+ for (ssize_t index = mIDToSessionMap.size() - 1; index >= 0; index--) {
+ sp<MockCasSession> session = mIDToSessionMap.valueAt(index);
+ if (session->getPlugin() == plugin) {
+ mIDToSessionMap.removeItemsAt(index);
+ }
+ }
+}
+
+} // namespace android
diff --git a/drm/mediacas/plugins/mock/MockSessionLibrary.h b/drm/mediacas/plugins/mock/MockSessionLibrary.h
new file mode 100644
index 0000000..0b30f4c
--- /dev/null
+++ b/drm/mediacas/plugins/mock/MockSessionLibrary.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MOCK_CAS_SESSION_LIBRARY_H_
+#define MOCK_CAS_SESSION_LIBRARY_H_
+
+#include <media/cas/CasAPI.h>
+#include <media/stagefright/foundation/ABase.h>
+#include <utils/KeyedVector.h>
+#include <utils/Mutex.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+class MockCasSession : public RefBase {
+public:
+ explicit MockCasSession(CasPlugin *plugin) : mPlugin(plugin) {}
+ virtual ~MockCasSession() {}
+
+private:
+ friend class MockSessionLibrary;
+
+ CasPlugin* mPlugin;
+
+ CasPlugin* getPlugin() const { return mPlugin; }
+
+ DISALLOW_EVIL_CONSTRUCTORS(MockCasSession);
+};
+
+class MockSessionLibrary {
+public:
+ static MockSessionLibrary* get();
+
+ status_t addSession(CasPlugin *plugin, CasSessionId *sessionId);
+
+ sp<MockCasSession> findSession(const CasSessionId& sessionId);
+
+ void destroySession(const CasSessionId& sessionId);
+
+ void destroyPlugin(CasPlugin *plugin);
+
+private:
+ static Mutex sSingletonLock;
+ static MockSessionLibrary* sSingleton;
+
+ Mutex mSessionsLock;
+ uint32_t mNextSessionId;
+ KeyedVector<CasSessionId, sp<MockCasSession> > mIDToSessionMap;
+
+ MockSessionLibrary();
+ DISALLOW_EVIL_CONSTRUCTORS(MockSessionLibrary);
+};
+} // namespace android
+
+#endif // MOCK_CAS_SESSION_LIBRARY_H_
diff --git a/drm/mediadrm/plugins/clearkey/DrmFactory.cpp b/drm/mediadrm/plugins/clearkey/DrmFactory.cpp
index d27956c..c83321b 100644
--- a/drm/mediadrm/plugins/clearkey/DrmFactory.cpp
+++ b/drm/mediadrm/plugins/clearkey/DrmFactory.cpp
@@ -44,7 +44,8 @@
}
android::status_t DrmFactory::createDrmPlugin(
- const uint8_t uuid[16], android::DrmPlugin** plugin) {
+ const uint8_t uuid[16],
+ android::DrmPlugin** plugin) {
if (!isCryptoSchemeSupported(uuid)) {
*plugin = NULL;
return android::BAD_VALUE;
diff --git a/drm/mediadrm/plugins/clearkey/DrmFactory.h b/drm/mediadrm/plugins/clearkey/DrmFactory.h
index 87db982..0bc0843 100644
--- a/drm/mediadrm/plugins/clearkey/DrmFactory.h
+++ b/drm/mediadrm/plugins/clearkey/DrmFactory.h
@@ -35,7 +35,8 @@
virtual bool isContentTypeSupported(const android::String8 &mimeType);
virtual android::status_t createDrmPlugin(
- const uint8_t uuid[16], android::DrmPlugin** plugin);
+ const uint8_t uuid[16],
+ android::DrmPlugin** plugin);
private:
DISALLOW_EVIL_CONSTRUCTORS(DrmFactory);
diff --git a/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
index 8356bcc..5fdac5c 100644
--- a/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
@@ -37,10 +37,14 @@
status_t DrmPlugin::closeSession(const Vector<uint8_t>& sessionId) {
sp<Session> session = mSessionLibrary->findSession(sessionId);
+ if (sessionId.size() == 0) {
+ return android::BAD_VALUE;
+ }
if (session.get()) {
mSessionLibrary->destroySession(session);
+ return android::OK;
}
- return android::OK;
+ return android::ERROR_DRM_SESSION_NOT_OPENED;
}
status_t DrmPlugin::getKeyRequest(
@@ -53,6 +57,9 @@
String8& defaultUrl,
DrmPlugin::KeyRequestType *keyRequestType) {
UNUSED(optionalParameters);
+ if (scope.size() == 0) {
+ return android::BAD_VALUE;
+ }
if (keyType != kKeyType_Streaming) {
return android::ERROR_DRM_CANNOT_HANDLE;
}
@@ -69,6 +76,9 @@
const Vector<uint8_t>& scope,
const Vector<uint8_t>& response,
Vector<uint8_t>& keySetId) {
+ if (scope.size() == 0 || response.size() == 0) {
+ return android::BAD_VALUE;
+ }
sp<Session> session = mSessionLibrary->findSession(scope);
if (!session.get()) {
return android::ERROR_DRM_SESSION_NOT_OPENED;
diff --git a/drm/mediadrm/plugins/clearkey/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/DrmPlugin.h
index c4d934e..58421b9 100644
--- a/drm/mediadrm/plugins/clearkey/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/DrmPlugin.h
@@ -63,22 +63,28 @@
Vector<uint8_t>& keySetId);
virtual status_t removeKeys(const Vector<uint8_t>& sessionId) {
- UNUSED(sessionId);
+ if (sessionId.size() == 0) {
+ return android::BAD_VALUE;
+ }
+
return android::ERROR_DRM_CANNOT_HANDLE;
}
virtual status_t restoreKeys(
const Vector<uint8_t>& sessionId,
const Vector<uint8_t>& keySetId) {
- UNUSED(sessionId);
- UNUSED(keySetId);
+ if (sessionId.size() == 0 || keySetId.size() == 0) {
+ return android::BAD_VALUE;
+ }
return android::ERROR_DRM_CANNOT_HANDLE;
}
virtual status_t queryKeyStatus(
const Vector<uint8_t>& sessionId,
KeyedVector<String8, String8>& infoMap) const {
- UNUSED(sessionId);
+ if (sessionId.size() == 0) {
+ return android::BAD_VALUE;
+ }
UNUSED(infoMap);
return android::ERROR_DRM_CANNOT_HANDLE;
}
@@ -99,9 +105,12 @@
const Vector<uint8_t>& response,
Vector<uint8_t>& certificate,
Vector<uint8_t>& wrappedKey) {
- UNUSED(response);
UNUSED(certificate);
UNUSED(wrappedKey);
+ if (response.size() == 0) {
+ // empty response
+ return android::BAD_VALUE;
+ }
return android::ERROR_DRM_CANNOT_HANDLE;
}
@@ -111,13 +120,18 @@
}
virtual status_t getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop) {
- UNUSED(ssid);
+ if (ssid.size() == 0) {
+ return android::BAD_VALUE;
+ }
+
UNUSED(secureStop);
return android::ERROR_DRM_CANNOT_HANDLE;
}
virtual status_t releaseSecureStops(const Vector<uint8_t>& ssRelease) {
- UNUSED(ssRelease);
+ if (ssRelease.size() == 0) {
+ return android::BAD_VALUE;
+ }
return android::ERROR_DRM_CANNOT_HANDLE;
}
@@ -151,15 +165,17 @@
virtual status_t setCipherAlgorithm(
const Vector<uint8_t>& sessionId, const String8& algorithm) {
- UNUSED(sessionId);
- UNUSED(algorithm);
+ if (sessionId.size() == 0 || algorithm.size() == 0) {
+ return android::BAD_VALUE;
+ }
return android::ERROR_DRM_CANNOT_HANDLE;
}
virtual status_t setMacAlgorithm(
const Vector<uint8_t>& sessionId, const String8& algorithm) {
- UNUSED(sessionId);
- UNUSED(algorithm);
+ if (sessionId.size() == 0 || algorithm.size() == 0) {
+ return android::BAD_VALUE;
+ }
return android::ERROR_DRM_CANNOT_HANDLE;
}
@@ -169,10 +185,10 @@
const Vector<uint8_t>& input,
const Vector<uint8_t>& iv,
Vector<uint8_t>& output) {
- UNUSED(sessionId);
- UNUSED(keyId);
- UNUSED(input);
- UNUSED(iv);
+ if (sessionId.size() == 0 || keyId.size() == 0 ||
+ input.size() == 0 || iv.size() == 0) {
+ return android::BAD_VALUE;
+ }
UNUSED(output);
return android::ERROR_DRM_CANNOT_HANDLE;
}
@@ -183,10 +199,10 @@
const Vector<uint8_t>& input,
const Vector<uint8_t>& iv,
Vector<uint8_t>& output) {
- UNUSED(sessionId);
- UNUSED(keyId);
- UNUSED(input);
- UNUSED(iv);
+ if (sessionId.size() == 0 || keyId.size() == 0 ||
+ input.size() == 0 || iv.size() == 0) {
+ return android::BAD_VALUE;
+ }
UNUSED(output);
return android::ERROR_DRM_CANNOT_HANDLE;
}
@@ -196,9 +212,10 @@
const Vector<uint8_t>& keyId,
const Vector<uint8_t>& message,
Vector<uint8_t>& signature) {
- UNUSED(sessionId);
- UNUSED(keyId);
- UNUSED(message);
+ if (sessionId.size() == 0 || keyId.size() == 0 ||
+ message.size() == 0) {
+ return android::BAD_VALUE;
+ }
UNUSED(signature);
return android::ERROR_DRM_CANNOT_HANDLE;
}
@@ -208,10 +225,10 @@
const Vector<uint8_t>& keyId,
const Vector<uint8_t>& message,
const Vector<uint8_t>& signature, bool& match) {
- UNUSED(sessionId);
- UNUSED(keyId);
- UNUSED(message);
- UNUSED(signature);
+ if (sessionId.size() == 0 || keyId.size() == 0 ||
+ message.size() == 0 || signature.size() == 0) {
+ return android::BAD_VALUE;
+ }
UNUSED(match);
return android::ERROR_DRM_CANNOT_HANDLE;
}
@@ -222,10 +239,10 @@
const Vector<uint8_t>& message,
const Vector<uint8_t>& wrappedKey,
Vector<uint8_t>& signature) {
- UNUSED(sessionId);
- UNUSED(algorithm);
- UNUSED(message);
- UNUSED(wrappedKey);
+ if (sessionId.size() == 0 || algorithm.size() == 0 ||
+ message.size() == 0 || wrappedKey.size() == 0) {
+ return android::BAD_VALUE;
+ }
UNUSED(signature);
return android::ERROR_DRM_CANNOT_HANDLE;
}
diff --git a/drm/mediadrm/plugins/clearkey/InitDataParser.cpp b/drm/mediadrm/plugins/clearkey/InitDataParser.cpp
index 0216b8d..6a4f8d5 100644
--- a/drm/mediadrm/plugins/clearkey/InitDataParser.cpp
+++ b/drm/mediadrm/plugins/clearkey/InitDataParser.cpp
@@ -114,7 +114,7 @@
memcpy(&keyIdCount, &initData[readPosition], sizeof(keyIdCount));
keyIdCount = ntohl(keyIdCount);
readPosition += sizeof(keyIdCount);
- if (readPosition + (keyIdCount * kKeyIdSize) !=
+ if (readPosition + ((uint64_t)keyIdCount * kKeyIdSize) !=
initData.size() - sizeof(uint32_t)) {
return android::ERROR_DRM_CANNOT_HANDLE;
}
diff --git a/drm/mediadrm/plugins/clearkey/SessionLibrary.cpp b/drm/mediadrm/plugins/clearkey/SessionLibrary.cpp
index 46d7f77..0419f97 100644
--- a/drm/mediadrm/plugins/clearkey/SessionLibrary.cpp
+++ b/drm/mediadrm/plugins/clearkey/SessionLibrary.cpp
@@ -43,7 +43,7 @@
return sSingleton;
}
-const sp<Session>& SessionLibrary::createSession() {
+sp<Session> SessionLibrary::createSession() {
Mutex::Autolock lock(mSessionsLock);
String8 sessionIdString = String8::format("%u", mNextSessionId);
@@ -57,9 +57,12 @@
return mSessions.valueFor(sessionId);
}
-const sp<Session>& SessionLibrary::findSession(
+sp<Session> SessionLibrary::findSession(
const Vector<uint8_t>& sessionId) {
Mutex::Autolock lock(mSessionsLock);
+ if (mSessions.indexOfKey(sessionId) < 0) {
+ return sp<Session>(NULL);
+ }
return mSessions.valueFor(sessionId);
}
diff --git a/drm/mediadrm/plugins/clearkey/SessionLibrary.h b/drm/mediadrm/plugins/clearkey/SessionLibrary.h
index 199ad64..6236fff 100644
--- a/drm/mediadrm/plugins/clearkey/SessionLibrary.h
+++ b/drm/mediadrm/plugins/clearkey/SessionLibrary.h
@@ -31,9 +31,9 @@
public:
static SessionLibrary* get();
- const android::sp<Session>& createSession();
+ android::sp<Session> createSession();
- const android::sp<Session>& findSession(
+ android::sp<Session> findSession(
const android::Vector<uint8_t>& sessionId);
void destroySession(const android::sp<Session>& session);
@@ -48,7 +48,7 @@
android::Mutex mSessionsLock;
uint32_t mNextSessionId;
- android::DefaultKeyedVector<android::Vector<uint8_t>, android::sp<Session> >
+ android::KeyedVector<android::Vector<uint8_t>, android::sp<Session> >
mSessions;
};
diff --git a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp
index cb199e5..3b4145f 100644
--- a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp
+++ b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp
@@ -56,7 +56,8 @@
return true;
}
- status_t MockDrmFactory::createDrmPlugin(const uint8_t /* uuid */[16], DrmPlugin **plugin)
+ status_t MockDrmFactory::createDrmPlugin(const uint8_t /* uuid */[16],
+ DrmPlugin **plugin)
{
*plugin = new MockDrmPlugin();
return OK;
diff --git a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h
index 9012d2d..4178c03 100644
--- a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h
+++ b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h
@@ -33,7 +33,8 @@
bool isCryptoSchemeSupported(const uint8_t uuid[16]);
bool isContentTypeSupported(const String8 &mimeType);
- status_t createDrmPlugin(const uint8_t uuid[16], DrmPlugin **plugin);
+ status_t createDrmPlugin(const uint8_t uuid[16],
+ DrmPlugin **plugin);
};
class MockCryptoFactory : public CryptoFactory {
diff --git a/include/camera b/include/camera
index 67a148a..00848e3 120000
--- a/include/camera
+++ b/include/camera
@@ -1 +1 @@
-../camera/include/camera
\ No newline at end of file
+../camera/include/camera/
\ No newline at end of file
diff --git a/include/cpustats b/include/cpustats
new file mode 120000
index 0000000..4a02d41
--- /dev/null
+++ b/include/cpustats
@@ -0,0 +1 @@
+../media/libcpustats/include/cpustats/
\ No newline at end of file
diff --git a/include/drm/drm_framework_common.h b/include/drm/drm_framework_common.h
index 0750406..d75f71c 100644
--- a/include/drm/drm_framework_common.h
+++ b/include/drm/drm_framework_common.h
@@ -234,10 +234,6 @@
* POSIX based Decrypt API set for container based DRM
*/
static const int CONTAINER_BASED = 0x02;
- /**
- * Decrypt API for Widevine streams
- */
- static const int WV_BASED = 0x3;
};
/**
diff --git a/include/media/AVSyncSettings.h b/include/media/AVSyncSettings.h
new file mode 120000
index 0000000..bbe211f
--- /dev/null
+++ b/include/media/AVSyncSettings.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/AVSyncSettings.h
\ No newline at end of file
diff --git a/include/media/AudioBufferProvider.h b/include/media/AudioBufferProvider.h
new file mode 120000
index 0000000..c4d6e79
--- /dev/null
+++ b/include/media/AudioBufferProvider.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/AudioBufferProvider.h
\ No newline at end of file
diff --git a/include/media/AudioEffect.h b/include/media/AudioEffect.h
deleted file mode 100644
index 7f6ccac..0000000
--- a/include/media/AudioEffect.h
+++ /dev/null
@@ -1,488 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_AUDIOEFFECT_H
-#define ANDROID_AUDIOEFFECT_H
-
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <media/IAudioFlinger.h>
-#include <media/IAudioPolicyService.h>
-#include <media/IEffect.h>
-#include <media/IEffectClient.h>
-#include <hardware/audio_effect.h>
-#include <media/AudioSystem.h>
-
-#include <utils/RefBase.h>
-#include <utils/Errors.h>
-#include <binder/IInterface.h>
-
-
-namespace android {
-
-// ----------------------------------------------------------------------------
-
-struct effect_param_cblk_t;
-
-// ----------------------------------------------------------------------------
-
-class AudioEffect : public RefBase
-{
-public:
-
- /*
- * Static methods for effects enumeration.
- */
-
- /*
- * Returns the number of effects available. This method together
- * with queryEffect() is used to enumerate all effects:
- * The enumeration sequence is:
- * queryNumberEffects(&num_effects);
- * for (i = 0; i < num_effects; i++)
- * queryEffect(i,...);
- *
- * Parameters:
- * numEffects: address where the number of effects should be returned.
- *
- * Returned status (from utils/Errors.h) can be:
- * NO_ERROR successful operation.
- * PERMISSION_DENIED could not get AudioFlinger interface
- * NO_INIT effect library failed to initialize
- * BAD_VALUE invalid numEffects pointer
- *
- * Returned value
- * *numEffects: updated with number of effects available
- */
- static status_t queryNumberEffects(uint32_t *numEffects);
-
- /*
- * Returns an effect descriptor during effect
- * enumeration.
- *
- * Parameters:
- * index: index of the queried effect.
- * descriptor: address where the effect descriptor should be returned.
- *
- * Returned status (from utils/Errors.h) can be:
- * NO_ERROR successful operation.
- * PERMISSION_DENIED could not get AudioFlinger interface
- * NO_INIT effect library failed to initialize
- * BAD_VALUE invalid descriptor pointer or index
- * INVALID_OPERATION effect list has changed since last execution of queryNumberEffects()
- *
- * Returned value
- * *descriptor: updated with effect descriptor
- */
- static status_t queryEffect(uint32_t index, effect_descriptor_t *descriptor);
-
-
- /*
- * Returns the descriptor for the specified effect uuid.
- *
- * Parameters:
- * uuid: pointer to effect uuid.
- * descriptor: address where the effect descriptor should be returned.
- *
- * Returned status (from utils/Errors.h) can be:
- * NO_ERROR successful operation.
- * PERMISSION_DENIED could not get AudioFlinger interface
- * NO_INIT effect library failed to initialize
- * BAD_VALUE invalid uuid or descriptor pointers
- * NAME_NOT_FOUND no effect with this uuid found
- *
- * Returned value
- * *descriptor updated with effect descriptor
- */
- static status_t getEffectDescriptor(const effect_uuid_t *uuid,
- effect_descriptor_t *descriptor) /*const*/;
-
-
- /*
- * Returns a list of descriptors corresponding to the pre processings enabled by default
- * on an AudioRecord with the supplied audio session ID.
- *
- * Parameters:
- * audioSession: audio session ID.
- * descriptors: address where the effect descriptors should be returned.
- * count: as input, the maximum number of descriptor than should be returned
- * as output, the number of descriptor returned if status is NO_ERROR or the actual
- * number of enabled pre processings if status is NO_MEMORY
- *
- * Returned status (from utils/Errors.h) can be:
- * NO_ERROR successful operation.
- * NO_MEMORY the number of descriptor to return is more than the maximum number
- * indicated by count.
- * PERMISSION_DENIED could not get AudioFlinger interface
- * NO_INIT effect library failed to initialize
- * BAD_VALUE invalid audio session or descriptor pointers
- *
- * Returned value
- * *descriptor updated with descriptors of pre processings enabled by default
- * *count number of descriptors returned if returned status is NO_ERROR.
- * total number of pre processing enabled by default if returned status is
- * NO_MEMORY. This happens if the count passed as input is less than the number
- * of descriptors to return.
- * *count is limited to kMaxPreProcessing on return.
- */
- static status_t queryDefaultPreProcessing(audio_session_t audioSession,
- effect_descriptor_t *descriptors,
- uint32_t *count);
-
- /*
- * Events used by callback function (effect_callback_t).
- */
- enum event_type {
- EVENT_CONTROL_STATUS_CHANGED = 0,
- EVENT_ENABLE_STATUS_CHANGED = 1,
- EVENT_PARAMETER_CHANGED = 2,
- EVENT_ERROR = 3
- };
-
- /* Callback function notifying client application of a change in effect engine state or
- * configuration.
- * An effect engine can be shared by several applications but only one has the control
- * of the engine activity and configuration at a time.
- * The EVENT_CONTROL_STATUS_CHANGED event is received when an application loses or
- * retrieves the control of the effect engine. Loss of control happens
- * if another application requests the use of the engine by creating an AudioEffect for
- * the same effect type but with a higher priority. Control is returned when the
- * application having the control deletes its AudioEffect object.
- * The EVENT_ENABLE_STATUS_CHANGED event is received by all applications not having the
- * control of the effect engine when the effect is enabled or disabled.
- * The EVENT_PARAMETER_CHANGED event is received by all applications not having the
- * control of the effect engine when an effect parameter is changed.
- * The EVENT_ERROR event is received when the media server process dies.
- *
- * Parameters:
- *
- * event: type of event notified (see enum AudioEffect::event_type).
- * user: Pointer to context for use by the callback receiver.
- * info: Pointer to optional parameter according to event type:
- * - EVENT_CONTROL_STATUS_CHANGED: boolean indicating if control is granted (true)
- * or stolen (false).
- * - EVENT_ENABLE_STATUS_CHANGED: boolean indicating if effect is now enabled (true)
- * or disabled (false).
- * - EVENT_PARAMETER_CHANGED: pointer to a effect_param_t structure.
- * - EVENT_ERROR: status_t indicating the error (DEAD_OBJECT when media server dies).
- */
-
- typedef void (*effect_callback_t)(int32_t event, void* user, void *info);
-
-
- /* Constructor.
- * AudioEffect is the base class for creating and controlling an effect engine from
- * the application process. Creating an AudioEffect object will create the effect engine
- * in the AudioFlinger if no engine of the specified type exists. If one exists, this engine
- * will be used. The application creating the AudioEffect object (or a derived class like
- * Reverb for instance) will either receive control of the effect engine or not, depending
- * on the priority parameter. If priority is higher than the priority used by the current
- * effect engine owner, the control will be transfered to the new application. Otherwise
- * control will remain to the previous application. In this case, the new application will be
- * notified of changes in effect engine state or control ownership by the effect callback.
- * After creating the AudioEffect, the application must call the initCheck() method and
- * check the creation status before trying to control the effect engine (see initCheck()).
- * If the effect is to be applied to an AudioTrack or MediaPlayer only the application
- * must specify the audio session ID corresponding to this player.
- */
-
- /* Simple Constructor.
- *
- * Parameters:
- *
- * opPackageName: The package name used for app op checks.
- */
- AudioEffect(const String16& opPackageName);
-
-
- /* Constructor.
- *
- * Parameters:
- *
- * type: type of effect created: can be null if uuid is specified. This corresponds to
- * the OpenSL ES interface implemented by this effect.
- * opPackageName: The package name used for app op checks.
- * uuid: Uuid of effect created: can be null if type is specified. This uuid corresponds to
- * a particular implementation of an effect type.
- * priority: requested priority for effect control: the priority level corresponds to the
- * value of priority parameter: negative values indicate lower priorities, positive values
- * higher priorities, 0 being the normal priority.
- * cbf: optional callback function (see effect_callback_t)
- * user: pointer to context for use by the callback receiver.
- * sessionID: audio session this effect is associated to.
- * If equal to AUDIO_SESSION_OUTPUT_MIX, the effect will be global to
- * the output mix. Otherwise, the effect will be applied to all players
- * (AudioTrack or MediaPLayer) within the same audio session.
- * io: HAL audio output or input stream to which this effect must be attached. Leave at 0 for
- * automatic output selection by AudioFlinger.
- */
-
- AudioEffect(const effect_uuid_t *type,
- const String16& opPackageName,
- const effect_uuid_t *uuid = NULL,
- int32_t priority = 0,
- effect_callback_t cbf = NULL,
- void* user = NULL,
- audio_session_t sessionId = AUDIO_SESSION_OUTPUT_MIX,
- audio_io_handle_t io = AUDIO_IO_HANDLE_NONE
- );
-
- /* Constructor.
- * Same as above but with type and uuid specified by character strings
- */
- AudioEffect(const char *typeStr,
- const String16& opPackageName,
- const char *uuidStr = NULL,
- int32_t priority = 0,
- effect_callback_t cbf = NULL,
- void* user = NULL,
- audio_session_t sessionId = AUDIO_SESSION_OUTPUT_MIX,
- audio_io_handle_t io = AUDIO_IO_HANDLE_NONE
- );
-
- /* Terminates the AudioEffect and unregisters it from AudioFlinger.
- * The effect engine is also destroyed if this AudioEffect was the last controlling
- * the engine.
- */
- ~AudioEffect();
-
- /* Initialize an uninitialized AudioEffect.
- * Returned status (from utils/Errors.h) can be:
- * - NO_ERROR or ALREADY_EXISTS: successful initialization
- * - INVALID_OPERATION: AudioEffect is already initialized
- * - BAD_VALUE: invalid parameter
- * - NO_INIT: audio flinger or audio hardware not initialized
- * */
- status_t set(const effect_uuid_t *type,
- const effect_uuid_t *uuid = NULL,
- int32_t priority = 0,
- effect_callback_t cbf = NULL,
- void* user = NULL,
- audio_session_t sessionId = AUDIO_SESSION_OUTPUT_MIX,
- audio_io_handle_t io = AUDIO_IO_HANDLE_NONE
- );
-
- /* Result of constructing the AudioEffect. This must be checked
- * before using any AudioEffect API.
- * initCheck() can return:
- * - NO_ERROR: the effect engine is successfully created and the application has control.
- * - ALREADY_EXISTS: the effect engine is successfully created but the application does not
- * have control.
- * - NO_INIT: the effect creation failed.
- *
- */
- status_t initCheck() const;
-
-
- /* Returns the unique effect Id for the controlled effect engine. This ID is unique
- * system wide and is used for instance in the case of auxiliary effects to attach
- * the effect to an AudioTrack or MediaPlayer.
- *
- */
- int32_t id() const { return mId; }
-
- /* Returns a descriptor for the effect (see effect_descriptor_t in audio_effect.h).
- */
- effect_descriptor_t descriptor() const;
-
- /* Returns effect control priority of this AudioEffect object.
- */
- int32_t priority() const { return mPriority; }
-
-
- /* Enables or disables the effect engine.
- *
- * Parameters:
- * enabled: requested enable state.
- *
- * Returned status (from utils/Errors.h) can be:
- * - NO_ERROR: successful operation
- * - INVALID_OPERATION: the application does not have control of the effect engine or the
- * effect is already in the requested state.
- */
- virtual status_t setEnabled(bool enabled);
- bool getEnabled() const;
-
- /* Sets a parameter value.
- *
- * Parameters:
- * param: pointer to effect_param_t structure containing the parameter
- * and its value (See audio_effect.h).
- * Returned status (from utils/Errors.h) can be:
- * - NO_ERROR: successful operation.
- * - INVALID_OPERATION: the application does not have control of the effect engine.
- * - BAD_VALUE: invalid parameter identifier or value.
- * - DEAD_OBJECT: the effect engine has been deleted.
- */
- virtual status_t setParameter(effect_param_t *param);
-
- /* Prepare a new parameter value that will be set by next call to
- * setParameterCommit(). This method can be used to set multiple parameters
- * in a synchronous manner or to avoid multiple binder calls for each
- * parameter.
- *
- * Parameters:
- * param: pointer to effect_param_t structure containing the parameter
- * and its value (See audio_effect.h).
- *
- * Returned status (from utils/Errors.h) can be:
- * - NO_ERROR: successful operation.
- * - INVALID_OPERATION: the application does not have control of the effect engine.
- * - NO_MEMORY: no more space available in shared memory used for deferred parameter
- * setting.
- */
- virtual status_t setParameterDeferred(effect_param_t *param);
-
- /* Commit all parameter values previously prepared by setParameterDeferred().
- *
- * Parameters:
- * none
- *
- * Returned status (from utils/Errors.h) can be:
- * - NO_ERROR: successful operation.
- * - INVALID_OPERATION: No new parameter values ready for commit.
- * - BAD_VALUE: invalid parameter identifier or value: there is no indication
- * as to which of the parameters caused this error.
- * - DEAD_OBJECT: the effect engine has been deleted.
- */
- virtual status_t setParameterCommit();
-
- /* Gets a parameter value.
- *
- * Parameters:
- * param: pointer to effect_param_t structure containing the parameter
- * and the returned value (See audio_effect.h).
- *
- * Returned status (from utils/Errors.h) can be:
- * - NO_ERROR: successful operation.
- * - INVALID_OPERATION: the AudioEffect was not successfully initialized.
- * - BAD_VALUE: invalid parameter identifier.
- * - DEAD_OBJECT: the effect engine has been deleted.
- */
- virtual status_t getParameter(effect_param_t *param);
-
- /* Sends a command and receives a response to/from effect engine.
- * See audio_effect.h for details on effect command() function, valid command codes
- * and formats.
- */
- virtual status_t command(uint32_t cmdCode,
- uint32_t cmdSize,
- void *cmdData,
- uint32_t *replySize,
- void *replyData);
-
-
- /*
- * Utility functions.
- */
-
- /* Converts the string passed as first argument to the effect_uuid_t
- * pointed to by second argument
- */
- static status_t stringToGuid(const char *str, effect_uuid_t *guid);
- /* Converts the effect_uuid_t pointed to by first argument to the
- * string passed as second argument
- */
- static status_t guidToString(const effect_uuid_t *guid, char *str, size_t maxLen);
-
- // kMaxPreProcessing is a reasonable value for the maximum number of preprocessing effects
- // that can be applied simultaneously.
- static const uint32_t kMaxPreProcessing = 10;
-
-protected:
- bool mEnabled; // enable state
- audio_session_t mSessionId; // audio session ID
- int32_t mPriority; // priority for effect control
- status_t mStatus; // effect status
- effect_callback_t mCbf; // callback function for status, control and
- // parameter changes notifications
- void* mUserData; // client context for callback function
- effect_descriptor_t mDescriptor; // effect descriptor
- int32_t mId; // system wide unique effect engine instance ID
- Mutex mLock; // Mutex for mEnabled access
-
- String16 mOpPackageName; // The package name used for app op checks.
-
- // IEffectClient
- virtual void controlStatusChanged(bool controlGranted);
- virtual void enableStatusChanged(bool enabled);
- virtual void commandExecuted(uint32_t cmdCode,
- uint32_t cmdSize,
- void *pCmdData,
- uint32_t replySize,
- void *pReplyData);
-
-private:
-
- // Implements the IEffectClient interface
- class EffectClient :
- public android::BnEffectClient, public android::IBinder::DeathRecipient
- {
- public:
-
- EffectClient(AudioEffect *effect) : mEffect(effect){}
-
- // IEffectClient
- virtual void controlStatusChanged(bool controlGranted) {
- sp<AudioEffect> effect = mEffect.promote();
- if (effect != 0) {
- effect->controlStatusChanged(controlGranted);
- }
- }
- virtual void enableStatusChanged(bool enabled) {
- sp<AudioEffect> effect = mEffect.promote();
- if (effect != 0) {
- effect->enableStatusChanged(enabled);
- }
- }
- virtual void commandExecuted(uint32_t cmdCode,
- uint32_t cmdSize,
- void *pCmdData,
- uint32_t replySize,
- void *pReplyData) {
- sp<AudioEffect> effect = mEffect.promote();
- if (effect != 0) {
- effect->commandExecuted(
- cmdCode, cmdSize, pCmdData, replySize, pReplyData);
- }
- }
-
- // IBinder::DeathRecipient
- virtual void binderDied(const wp<IBinder>& /*who*/) {
- sp<AudioEffect> effect = mEffect.promote();
- if (effect != 0) {
- effect->binderDied();
- }
- }
-
- private:
- wp<AudioEffect> mEffect;
- };
-
- void binderDied();
-
- sp<IEffect> mIEffect; // IEffect binder interface
- sp<EffectClient> mIEffectClient; // IEffectClient implementation
- sp<IMemory> mCblkMemory; // shared memory for deferred parameter setting
- effect_param_cblk_t* mCblk; // control block for deferred parameter setting
- pid_t mClientPid;
-};
-
-
-}; // namespace android
-
-#endif // ANDROID_AUDIOEFFECT_H
diff --git a/include/media/AudioEffect.h b/include/media/AudioEffect.h
new file mode 120000
index 0000000..bf52955
--- /dev/null
+++ b/include/media/AudioEffect.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/AudioEffect.h
\ No newline at end of file
diff --git a/include/media/AudioIoDescriptor.h b/include/media/AudioIoDescriptor.h
new file mode 120000
index 0000000..68f54c9
--- /dev/null
+++ b/include/media/AudioIoDescriptor.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/AudioIoDescriptor.h
\ No newline at end of file
diff --git a/include/media/AudioMixer.h b/include/media/AudioMixer.h
new file mode 120000
index 0000000..de839c6
--- /dev/null
+++ b/include/media/AudioMixer.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/AudioMixer.h
\ No newline at end of file
diff --git a/include/media/AudioParameter.h b/include/media/AudioParameter.h
deleted file mode 100644
index 891bc4b..0000000
--- a/include/media/AudioParameter.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (C) 2008-2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_AUDIOPARAMETER_H_
-#define ANDROID_AUDIOPARAMETER_H_
-
-#include <utils/Errors.h>
-#include <utils/KeyedVector.h>
-#include <utils/String8.h>
-
-namespace android {
-
-class AudioParameter {
-
-public:
- AudioParameter() {}
- AudioParameter(const String8& keyValuePairs);
- virtual ~AudioParameter();
-
- // reserved parameter keys for changing standard parameters with setParameters() function.
- // Using these keys is mandatory for AudioFlinger to properly monitor audio output/input
- // configuration changes and act accordingly.
- // keyRouting: to change audio routing, value is an int in audio_devices_t
- // keySamplingRate: to change sampling rate routing, value is an int
- // keyFormat: to change audio format, value is an int in audio_format_t
- // keyChannels: to change audio channel configuration, value is an int in audio_channels_t
- // keyFrameCount: to change audio output frame count, value is an int
- // keyInputSource: to change audio input source, value is an int in audio_source_t
- // (defined in media/mediarecorder.h)
- // keyScreenState: either "on" or "off"
- static const char * const keyRouting;
- static const char * const keySamplingRate;
- static const char * const keyFormat;
- static const char * const keyChannels;
- static const char * const keyFrameCount;
- static const char * const keyInputSource;
- static const char * const keyScreenState;
-
- String8 toString();
-
- status_t add(const String8& key, const String8& value);
- status_t addInt(const String8& key, const int value);
- status_t addFloat(const String8& key, const float value);
-
- status_t remove(const String8& key);
-
- status_t get(const String8& key, String8& value);
- status_t getInt(const String8& key, int& value);
- status_t getFloat(const String8& key, float& value);
- status_t getAt(size_t index, String8& key, String8& value);
-
- size_t size() { return mParameters.size(); }
-
-private:
- String8 mKeyValuePairs;
- KeyedVector <String8, String8> mParameters;
-};
-
-}; // namespace android
-
-#endif /*ANDROID_AUDIOPARAMETER_H_*/
diff --git a/include/media/AudioParameter.h b/include/media/AudioParameter.h
new file mode 120000
index 0000000..a5889e5
--- /dev/null
+++ b/include/media/AudioParameter.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/AudioParameter.h
\ No newline at end of file
diff --git a/include/media/AudioPolicy.h b/include/media/AudioPolicy.h
new file mode 120000
index 0000000..dd4cd53
--- /dev/null
+++ b/include/media/AudioPolicy.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/AudioPolicy.h
\ No newline at end of file
diff --git a/include/media/AudioPolicyHelper.h b/include/media/AudioPolicyHelper.h
deleted file mode 100644
index d39aa20..0000000
--- a/include/media/AudioPolicyHelper.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef AUDIO_POLICY_HELPER_H_
-#define AUDIO_POLICY_HELPER_H_
-
-#include <system/audio.h>
-
-static inline
-audio_stream_type_t audio_attributes_to_stream_type(const audio_attributes_t *attr)
-{
- // flags to stream type mapping
- if ((attr->flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
- return AUDIO_STREAM_ENFORCED_AUDIBLE;
- }
- if ((attr->flags & AUDIO_FLAG_SCO) == AUDIO_FLAG_SCO) {
- return AUDIO_STREAM_BLUETOOTH_SCO;
- }
-
- // usage to stream type mapping
- switch (attr->usage) {
- case AUDIO_USAGE_MEDIA:
- case AUDIO_USAGE_GAME:
- case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
- case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
- return AUDIO_STREAM_MUSIC;
- case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
- return AUDIO_STREAM_SYSTEM;
- case AUDIO_USAGE_VOICE_COMMUNICATION:
- return AUDIO_STREAM_VOICE_CALL;
-
- case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
- return AUDIO_STREAM_DTMF;
-
- case AUDIO_USAGE_ALARM:
- return AUDIO_STREAM_ALARM;
- case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
- return AUDIO_STREAM_RING;
-
- case AUDIO_USAGE_NOTIFICATION:
- case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
- case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
- case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
- case AUDIO_USAGE_NOTIFICATION_EVENT:
- return AUDIO_STREAM_NOTIFICATION;
-
- case AUDIO_USAGE_UNKNOWN:
- default:
- return AUDIO_STREAM_MUSIC;
- }
-}
-
-static inline
-void stream_type_to_audio_attributes(audio_stream_type_t streamType,
- audio_attributes_t *attr) {
- memset(attr, 0, sizeof(audio_attributes_t));
-
- switch (streamType) {
- case AUDIO_STREAM_DEFAULT:
- case AUDIO_STREAM_MUSIC:
- attr->content_type = AUDIO_CONTENT_TYPE_MUSIC;
- attr->usage = AUDIO_USAGE_MEDIA;
- break;
- case AUDIO_STREAM_VOICE_CALL:
- attr->content_type = AUDIO_CONTENT_TYPE_SPEECH;
- attr->usage = AUDIO_USAGE_VOICE_COMMUNICATION;
- break;
- case AUDIO_STREAM_ENFORCED_AUDIBLE:
- attr->flags |= AUDIO_FLAG_AUDIBILITY_ENFORCED;
- // intended fall through, attributes in common with STREAM_SYSTEM
- case AUDIO_STREAM_SYSTEM:
- attr->content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
- attr->usage = AUDIO_USAGE_ASSISTANCE_SONIFICATION;
- break;
- case AUDIO_STREAM_RING:
- attr->content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
- attr->usage = AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
- break;
- case AUDIO_STREAM_ALARM:
- attr->content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
- attr->usage = AUDIO_USAGE_ALARM;
- break;
- case AUDIO_STREAM_NOTIFICATION:
- attr->content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
- attr->usage = AUDIO_USAGE_NOTIFICATION;
- break;
- case AUDIO_STREAM_BLUETOOTH_SCO:
- attr->content_type = AUDIO_CONTENT_TYPE_SPEECH;
- attr->usage = AUDIO_USAGE_VOICE_COMMUNICATION;
- attr->flags |= AUDIO_FLAG_SCO;
- break;
- case AUDIO_STREAM_DTMF:
- attr->content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
- attr->usage = AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
- break;
- case AUDIO_STREAM_TTS:
- attr->content_type = AUDIO_CONTENT_TYPE_SPEECH;
- attr->usage = AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
- break;
- default:
- ALOGE("invalid stream type %d when converting to attributes", streamType);
- }
-}
-
-#endif //AUDIO_POLICY_HELPER_H_
diff --git a/include/media/AudioPolicyHelper.h b/include/media/AudioPolicyHelper.h
new file mode 120000
index 0000000..558657e
--- /dev/null
+++ b/include/media/AudioPolicyHelper.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/AudioPolicyHelper.h
\ No newline at end of file
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
deleted file mode 100644
index 63076e9..0000000
--- a/include/media/AudioRecord.h
+++ /dev/null
@@ -1,657 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_AUDIORECORD_H
-#define ANDROID_AUDIORECORD_H
-
-#include <cutils/sched_policy.h>
-#include <media/AudioSystem.h>
-#include <media/AudioTimestamp.h>
-#include <media/IAudioRecord.h>
-#include <media/Modulo.h>
-#include <utils/threads.h>
-
-namespace android {
-
-// ----------------------------------------------------------------------------
-
-struct audio_track_cblk_t;
-class AudioRecordClientProxy;
-
-// ----------------------------------------------------------------------------
-
-class AudioRecord : public RefBase
-{
-public:
-
- /* Events used by AudioRecord callback function (callback_t).
- * Keep in sync with frameworks/base/media/java/android/media/AudioRecord.java NATIVE_EVENT_*.
- */
- enum event_type {
- EVENT_MORE_DATA = 0, // Request to read available data from buffer.
- // If this event is delivered but the callback handler
- // does not want to read the available data, the handler must
- // explicitly ignore the event by setting frameCount to zero.
- EVENT_OVERRUN = 1, // Buffer overrun occurred.
- EVENT_MARKER = 2, // Record head is at the specified marker position
- // (See setMarkerPosition()).
- EVENT_NEW_POS = 3, // Record head is at a new position
- // (See setPositionUpdatePeriod()).
- EVENT_NEW_IAUDIORECORD = 4, // IAudioRecord was re-created, either due to re-routing and
- // voluntary invalidation by mediaserver, or mediaserver crash.
- };
-
- /* Client should declare a Buffer and pass address to obtainBuffer()
- * and releaseBuffer(). See also callback_t for EVENT_MORE_DATA.
- */
-
- class Buffer
- {
- public:
- // FIXME use m prefix
- size_t frameCount; // number of sample frames corresponding to size;
- // on input to obtainBuffer() it is the number of frames desired
- // on output from obtainBuffer() it is the number of available
- // frames to be read
- // on input to releaseBuffer() it is currently ignored
-
- size_t size; // input/output in bytes == frameCount * frameSize
- // on input to obtainBuffer() it is ignored
- // on output from obtainBuffer() it is the number of available
- // bytes to be read, which is frameCount * frameSize
- // on input to releaseBuffer() it is the number of bytes to
- // release
- // FIXME This is redundant with respect to frameCount. Consider
- // removing size and making frameCount the primary field.
-
- union {
- void* raw;
- short* i16; // signed 16-bit
- int8_t* i8; // unsigned 8-bit, offset by 0x80
- // input to obtainBuffer(): unused, output: pointer to buffer
- };
- };
-
- /* As a convenience, if a callback is supplied, a handler thread
- * is automatically created with the appropriate priority. This thread
- * invokes the callback when a new buffer becomes available or various conditions occur.
- * Parameters:
- *
- * event: type of event notified (see enum AudioRecord::event_type).
- * user: Pointer to context for use by the callback receiver.
- * info: Pointer to optional parameter according to event type:
- * - EVENT_MORE_DATA: pointer to AudioRecord::Buffer struct. The callback must not read
- * more bytes than indicated by 'size' field and update 'size' if
- * fewer bytes are consumed.
- * - EVENT_OVERRUN: unused.
- * - EVENT_MARKER: pointer to const uint32_t containing the marker position in frames.
- * - EVENT_NEW_POS: pointer to const uint32_t containing the new position in frames.
- * - EVENT_NEW_IAUDIORECORD: unused.
- */
-
- typedef void (*callback_t)(int event, void* user, void *info);
-
- /* Returns the minimum frame count required for the successful creation of
- * an AudioRecord object.
- * Returned status (from utils/Errors.h) can be:
- * - NO_ERROR: successful operation
- * - NO_INIT: audio server or audio hardware not initialized
- * - BAD_VALUE: unsupported configuration
- * frameCount is guaranteed to be non-zero if status is NO_ERROR,
- * and is undefined otherwise.
- * FIXME This API assumes a route, and so should be deprecated.
- */
-
- static status_t getMinFrameCount(size_t* frameCount,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask);
-
- /* How data is transferred from AudioRecord
- */
- enum transfer_type {
- TRANSFER_DEFAULT, // not specified explicitly; determine from the other parameters
- TRANSFER_CALLBACK, // callback EVENT_MORE_DATA
- TRANSFER_OBTAIN, // call obtainBuffer() and releaseBuffer()
- TRANSFER_SYNC, // synchronous read()
- };
-
- /* Constructs an uninitialized AudioRecord. No connection with
- * AudioFlinger takes place. Use set() after this.
- *
- * Parameters:
- *
- * opPackageName: The package name used for app ops.
- */
- AudioRecord(const String16& opPackageName);
-
- /* Creates an AudioRecord object and registers it with AudioFlinger.
- * Once created, the track needs to be started before it can be used.
- * Unspecified values are set to appropriate default values.
- *
- * Parameters:
- *
- * inputSource: Select the audio input to record from (e.g. AUDIO_SOURCE_DEFAULT).
- * sampleRate: Data sink sampling rate in Hz. Zero means to use the source sample rate.
- * format: Audio format (e.g AUDIO_FORMAT_PCM_16_BIT for signed
- * 16 bits per sample).
- * channelMask: Channel mask, such that audio_is_input_channel(channelMask) is true.
- * opPackageName: The package name used for app ops.
- * frameCount: Minimum size of track PCM buffer in frames. This defines the
- * application's contribution to the
- * latency of the track. The actual size selected by the AudioRecord could
- * be larger if the requested size is not compatible with current audio HAL
- * latency. Zero means to use a default value.
- * cbf: Callback function. If not null, this function is called periodically
- * to consume new data in TRANSFER_CALLBACK mode
- * and inform of marker, position updates, etc.
- * user: Context for use by the callback receiver.
- * notificationFrames: The callback function is called each time notificationFrames PCM
- * frames are ready in record track output buffer.
- * sessionId: Not yet supported.
- * transferType: How data is transferred from AudioRecord.
- * flags: See comments on audio_input_flags_t in <system/audio.h>
- * pAttributes: If not NULL, supersedes inputSource for use case selection.
- * threadCanCallJava: Not present in parameter list, and so is fixed at false.
- */
-
- AudioRecord(audio_source_t inputSource,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- const String16& opPackageName,
- size_t frameCount = 0,
- callback_t cbf = NULL,
- void* user = NULL,
- uint32_t notificationFrames = 0,
- audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
- transfer_type transferType = TRANSFER_DEFAULT,
- audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
- int uid = -1,
- pid_t pid = -1,
- const audio_attributes_t* pAttributes = NULL);
-
- /* Terminates the AudioRecord and unregisters it from AudioFlinger.
- * Also destroys all resources associated with the AudioRecord.
- */
-protected:
- virtual ~AudioRecord();
-public:
-
- /* Initialize an AudioRecord that was created using the AudioRecord() constructor.
- * Don't call set() more than once, or after an AudioRecord() constructor that takes parameters.
- * set() is not multi-thread safe.
- * Returned status (from utils/Errors.h) can be:
- * - NO_ERROR: successful intialization
- * - INVALID_OPERATION: AudioRecord is already initialized or record device is already in use
- * - BAD_VALUE: invalid parameter (channelMask, format, sampleRate...)
- * - NO_INIT: audio server or audio hardware not initialized
- * - PERMISSION_DENIED: recording is not allowed for the requesting process
- * If status is not equal to NO_ERROR, don't call any other APIs on this AudioRecord.
- *
- * Parameters not listed in the AudioRecord constructors above:
- *
- * threadCanCallJava: Whether callbacks are made from an attached thread and thus can call JNI.
- */
- status_t set(audio_source_t inputSource,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- size_t frameCount = 0,
- callback_t cbf = NULL,
- void* user = NULL,
- uint32_t notificationFrames = 0,
- bool threadCanCallJava = false,
- audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
- transfer_type transferType = TRANSFER_DEFAULT,
- audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
- int uid = -1,
- pid_t pid = -1,
- const audio_attributes_t* pAttributes = NULL);
-
- /* Result of constructing the AudioRecord. This must be checked for successful initialization
- * before using any AudioRecord API (except for set()), because using
- * an uninitialized AudioRecord produces undefined results.
- * See set() method above for possible return codes.
- */
- status_t initCheck() const { return mStatus; }
-
- /* Returns this track's estimated latency in milliseconds.
- * This includes the latency due to AudioRecord buffer size, resampling if applicable,
- * and audio hardware driver.
- */
- uint32_t latency() const { return mLatency; }
-
- /* getters, see constructor and set() */
-
- audio_format_t format() const { return mFormat; }
- uint32_t channelCount() const { return mChannelCount; }
- size_t frameCount() const { return mFrameCount; }
- size_t frameSize() const { return mFrameSize; }
- audio_source_t inputSource() const { return mAttributes.source; }
-
- /* After it's created the track is not active. Call start() to
- * make it active. If set, the callback will start being called.
- * If event is not AudioSystem::SYNC_EVENT_NONE, the capture start will be delayed until
- * the specified event occurs on the specified trigger session.
- */
- status_t start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE,
- audio_session_t triggerSession = AUDIO_SESSION_NONE);
-
- /* Stop a track. The callback will cease being called. Note that obtainBuffer() still
- * works and will drain buffers until the pool is exhausted, and then will return WOULD_BLOCK.
- */
- void stop();
- bool stopped() const;
-
- /* Return the sink sample rate for this record track in Hz.
- * If specified as zero in constructor or set(), this will be the source sample rate.
- * Unlike AudioTrack, the sample rate is const after initialization, so doesn't need a lock.
- */
- uint32_t getSampleRate() const { return mSampleRate; }
-
- /* Sets marker position. When record reaches the number of frames specified,
- * a callback with event type EVENT_MARKER is called. Calling setMarkerPosition
- * with marker == 0 cancels marker notification callback.
- * To set a marker at a position which would compute as 0,
- * a workaround is to set the marker at a nearby position such as ~0 or 1.
- * If the AudioRecord has been opened with no callback function associated,
- * the operation will fail.
- *
- * Parameters:
- *
- * marker: marker position expressed in wrapping (overflow) frame units,
- * like the return value of getPosition().
- *
- * Returned status (from utils/Errors.h) can be:
- * - NO_ERROR: successful operation
- * - INVALID_OPERATION: the AudioRecord has no callback installed.
- */
- status_t setMarkerPosition(uint32_t marker);
- status_t getMarkerPosition(uint32_t *marker) const;
-
- /* Sets position update period. Every time the number of frames specified has been recorded,
- * a callback with event type EVENT_NEW_POS is called.
- * Calling setPositionUpdatePeriod with updatePeriod == 0 cancels new position notification
- * callback.
- * If the AudioRecord has been opened with no callback function associated,
- * the operation will fail.
- * Extremely small values may be rounded up to a value the implementation can support.
- *
- * Parameters:
- *
- * updatePeriod: position update notification period expressed in frames.
- *
- * Returned status (from utils/Errors.h) can be:
- * - NO_ERROR: successful operation
- * - INVALID_OPERATION: the AudioRecord has no callback installed.
- */
- status_t setPositionUpdatePeriod(uint32_t updatePeriod);
- status_t getPositionUpdatePeriod(uint32_t *updatePeriod) const;
-
- /* Return the total number of frames recorded since recording started.
- * The counter will wrap (overflow) periodically, e.g. every ~27 hours at 44.1 kHz.
- * It is reset to zero by stop().
- *
- * Parameters:
- *
- * position: Address where to return record head position.
- *
- * Returned status (from utils/Errors.h) can be:
- * - NO_ERROR: successful operation
- * - BAD_VALUE: position is NULL
- */
- status_t getPosition(uint32_t *position) const;
-
- /* Return the record timestamp.
- *
- * Parameters:
- * timestamp: A pointer to the timestamp to be filled.
- *
- * Returned status (from utils/Errors.h) can be:
- * - NO_ERROR: successful operation
- * - BAD_VALUE: timestamp is NULL
- */
- status_t getTimestamp(ExtendedTimestamp *timestamp);
-
- /* Returns a handle on the audio input used by this AudioRecord.
- *
- * Parameters:
- * none.
- *
- * Returned value:
- * handle on audio hardware input
- */
-// FIXME The only known public caller is frameworks/opt/net/voip/src/jni/rtp/AudioGroup.cpp
- audio_io_handle_t getInput() const __attribute__((__deprecated__))
- { return getInputPrivate(); }
-private:
- audio_io_handle_t getInputPrivate() const;
-public:
-
- /* Returns the audio session ID associated with this AudioRecord.
- *
- * Parameters:
- * none.
- *
- * Returned value:
- * AudioRecord session ID.
- *
- * No lock needed because session ID doesn't change after first set().
- */
- audio_session_t getSessionId() const { return mSessionId; }
-
- /* Public API for TRANSFER_OBTAIN mode.
- * Obtains a buffer of up to "audioBuffer->frameCount" full frames.
- * After draining these frames of data, the caller should release them with releaseBuffer().
- * If the track buffer is not empty, obtainBuffer() returns as many contiguous
- * full frames as are available immediately.
- *
- * If nonContig is non-NULL, it is an output parameter that will be set to the number of
- * additional non-contiguous frames that are predicted to be available immediately,
- * if the client were to release the first frames and then call obtainBuffer() again.
- * This value is only a prediction, and needs to be confirmed.
- * It will be set to zero for an error return.
- *
- * If the track buffer is empty and track is stopped, obtainBuffer() returns WOULD_BLOCK
- * regardless of the value of waitCount.
- * If the track buffer is empty and track is not stopped, obtainBuffer() blocks with a
- * maximum timeout based on waitCount; see chart below.
- * Buffers will be returned until the pool
- * is exhausted, at which point obtainBuffer() will either block
- * or return WOULD_BLOCK depending on the value of the "waitCount"
- * parameter.
- *
- * Interpretation of waitCount:
- * +n limits wait time to n * WAIT_PERIOD_MS,
- * -1 causes an (almost) infinite wait time,
- * 0 non-blocking.
- *
- * Buffer fields
- * On entry:
- * frameCount number of frames requested
- * size ignored
- * raw ignored
- * After error return:
- * frameCount 0
- * size 0
- * raw undefined
- * After successful return:
- * frameCount actual number of frames available, <= number requested
- * size actual number of bytes available
- * raw pointer to the buffer
- */
-
- status_t obtainBuffer(Buffer* audioBuffer, int32_t waitCount,
- size_t *nonContig = NULL);
-
- // Explicit Routing
- /**
- * TODO Document this method.
- */
- status_t setInputDevice(audio_port_handle_t deviceId);
-
- /**
- * TODO Document this method.
- */
- audio_port_handle_t getInputDevice();
-
- /* Returns the ID of the audio device actually used by the input to which this AudioRecord
- * is attached.
- * A value of AUDIO_PORT_HANDLE_NONE indicates the AudioRecord is not attached to any input.
- *
- * Parameters:
- * none.
- */
- audio_port_handle_t getRoutedDeviceId();
-
- /* Add an AudioDeviceCallback. The caller will be notified when the audio device
- * to which this AudioRecord is routed is updated.
- * Replaces any previously installed callback.
- * Parameters:
- * callback: The callback interface
- * Returns NO_ERROR if successful.
- * INVALID_OPERATION if the same callback is already installed.
- * NO_INIT or PREMISSION_DENIED if AudioFlinger service is not reachable
- * BAD_VALUE if the callback is NULL
- */
- status_t addAudioDeviceCallback(
- const sp<AudioSystem::AudioDeviceCallback>& callback);
-
- /* remove an AudioDeviceCallback.
- * Parameters:
- * callback: The callback interface
- * Returns NO_ERROR if successful.
- * INVALID_OPERATION if the callback is not installed
- * BAD_VALUE if the callback is NULL
- */
- status_t removeAudioDeviceCallback(
- const sp<AudioSystem::AudioDeviceCallback>& callback);
-
-private:
- /* If nonContig is non-NULL, it is an output parameter that will be set to the number of
- * additional non-contiguous frames that are predicted to be available immediately,
- * if the client were to release the first frames and then call obtainBuffer() again.
- * This value is only a prediction, and needs to be confirmed.
- * It will be set to zero for an error return.
- * FIXME We could pass an array of Buffers instead of only one Buffer to obtainBuffer(),
- * in case the requested amount of frames is in two or more non-contiguous regions.
- * FIXME requested and elapsed are both relative times. Consider changing to absolute time.
- */
- status_t obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
- struct timespec *elapsed = NULL, size_t *nonContig = NULL);
-public:
-
- /* Public API for TRANSFER_OBTAIN mode.
- * Release an emptied buffer of "audioBuffer->frameCount" frames for AudioFlinger to re-fill.
- *
- * Buffer fields:
- * frameCount currently ignored but recommend to set to actual number of frames consumed
- * size actual number of bytes consumed, must be multiple of frameSize
- * raw ignored
- */
- void releaseBuffer(const Buffer* audioBuffer);
-
- /* As a convenience we provide a read() interface to the audio buffer.
- * Input parameter 'size' is in byte units.
- * This is implemented on top of obtainBuffer/releaseBuffer. For best
- * performance use callbacks. Returns actual number of bytes read >= 0,
- * or one of the following negative status codes:
- * INVALID_OPERATION AudioRecord is configured for streaming mode
- * BAD_VALUE size is invalid
- * WOULD_BLOCK when obtainBuffer() returns same, or
- * AudioRecord was stopped during the read
- * or any other error code returned by IAudioRecord::start() or restoreRecord_l().
- * Default behavior is to only return when all data has been transferred. Set 'blocking' to
- * false for the method to return immediately without waiting to try multiple times to read
- * the full content of the buffer.
- */
- ssize_t read(void* buffer, size_t size, bool blocking = true);
-
- /* Return the number of input frames lost in the audio driver since the last call of this
- * function. Audio driver is expected to reset the value to 0 and restart counting upon
- * returning the current value by this function call. Such loss typically occurs when the
- * user space process is blocked longer than the capacity of audio driver buffers.
- * Units: the number of input audio frames.
- * FIXME The side-effect of resetting the counter may be incompatible with multi-client.
- * Consider making it more like AudioTrack::getUnderrunFrames which doesn't have side effects.
- */
- uint32_t getInputFramesLost() const;
-
- /* Get the flags */
- audio_input_flags_t getFlags() const { AutoMutex _l(mLock); return mFlags; }
-
-private:
- /* copying audio record objects is not allowed */
- AudioRecord(const AudioRecord& other);
- AudioRecord& operator = (const AudioRecord& other);
-
- /* a small internal class to handle the callback */
- class AudioRecordThread : public Thread
- {
- public:
- AudioRecordThread(AudioRecord& receiver, bool bCanCallJava = false);
-
- // Do not call Thread::requestExitAndWait() without first calling requestExit().
- // Thread::requestExitAndWait() is not virtual, and the implementation doesn't do enough.
- virtual void requestExit();
-
- void pause(); // suspend thread from execution at next loop boundary
- void resume(); // allow thread to execute, if not requested to exit
- void wake(); // wake to handle changed notification conditions.
-
- private:
- void pauseInternal(nsecs_t ns = 0LL);
- // like pause(), but only used internally within thread
-
- friend class AudioRecord;
- virtual bool threadLoop();
- AudioRecord& mReceiver;
- virtual ~AudioRecordThread();
- Mutex mMyLock; // Thread::mLock is private
- Condition mMyCond; // Thread::mThreadExitedCondition is private
- bool mPaused; // whether thread is requested to pause at next loop entry
- bool mPausedInt; // whether thread internally requests pause
- nsecs_t mPausedNs; // if mPausedInt then associated timeout, otherwise ignored
- bool mIgnoreNextPausedInt; // skip any internal pause and go immediately
- // to processAudioBuffer() as state may have changed
- // since pause time calculated.
- };
-
- // body of AudioRecordThread::threadLoop()
- // returns the maximum amount of time before we would like to run again, where:
- // 0 immediately
- // > 0 no later than this many nanoseconds from now
- // NS_WHENEVER still active but no particular deadline
- // NS_INACTIVE inactive so don't run again until re-started
- // NS_NEVER never again
- static const nsecs_t NS_WHENEVER = -1, NS_INACTIVE = -2, NS_NEVER = -3;
- nsecs_t processAudioBuffer();
-
- // caller must hold lock on mLock for all _l methods
-
- status_t openRecord_l(const Modulo<uint32_t> &epoch, const String16& opPackageName);
-
- // FIXME enum is faster than strcmp() for parameter 'from'
- status_t restoreRecord_l(const char *from);
-
- sp<AudioRecordThread> mAudioRecordThread;
- mutable Mutex mLock;
-
- // Current client state: false = stopped, true = active. Protected by mLock. If more states
- // are added, consider changing this to enum State { ... } mState as in AudioTrack.
- bool mActive;
-
- // for client callback handler
- callback_t mCbf; // callback handler for events, or NULL
- void* mUserData;
-
- // for notification APIs
- uint32_t mNotificationFramesReq; // requested number of frames between each
- // notification callback
- // as specified in constructor or set()
- uint32_t mNotificationFramesAct; // actual number of frames between each
- // notification callback
- bool mRefreshRemaining; // processAudioBuffer() should refresh
- // mRemainingFrames and mRetryOnPartialBuffer
-
- // These are private to processAudioBuffer(), and are not protected by a lock
- uint32_t mRemainingFrames; // number of frames to request in obtainBuffer()
- bool mRetryOnPartialBuffer; // sleep and retry after partial obtainBuffer()
- uint32_t mObservedSequence; // last observed value of mSequence
-
- Modulo<uint32_t> mMarkerPosition; // in wrapping (overflow) frame units
- bool mMarkerReached;
- Modulo<uint32_t> mNewPosition; // in frames
- uint32_t mUpdatePeriod; // in frames, zero means no EVENT_NEW_POS
-
- status_t mStatus;
-
- String16 mOpPackageName; // The package name used for app ops.
-
- size_t mFrameCount; // corresponds to current IAudioRecord, value is
- // reported back by AudioFlinger to the client
- size_t mReqFrameCount; // frame count to request the first or next time
- // a new IAudioRecord is needed, non-decreasing
-
- int64_t mFramesRead; // total frames read. reset to zero after
- // the start() following stop(). It is not
- // changed after restoring the track.
- int64_t mFramesReadServerOffset; // An offset to server frames read due to
- // restoring AudioRecord, or stop/start.
- // constant after constructor or set()
- uint32_t mSampleRate;
- audio_format_t mFormat;
- uint32_t mChannelCount;
- size_t mFrameSize; // app-level frame size == AudioFlinger frame size
- uint32_t mLatency; // in ms
- audio_channel_mask_t mChannelMask;
-
- audio_input_flags_t mFlags; // same as mOrigFlags, except for bits that may
- // be denied by client or server, such as
- // AUDIO_INPUT_FLAG_FAST. mLock must be
- // held to read or write those bits reliably.
- audio_input_flags_t mOrigFlags; // as specified in constructor or set(), const
-
- audio_session_t mSessionId;
- transfer_type mTransfer;
-
- // Next 5 fields may be changed if IAudioRecord is re-created, but always != 0
- // provided the initial set() was successful
- sp<IAudioRecord> mAudioRecord;
- sp<IMemory> mCblkMemory;
- audio_track_cblk_t* mCblk; // re-load after mLock.unlock()
- sp<IMemory> mBufferMemory;
- audio_io_handle_t mInput; // returned by AudioSystem::getInput()
-
- int mPreviousPriority; // before start()
- SchedPolicy mPreviousSchedulingGroup;
- bool mAwaitBoost; // thread should wait for priority boost before running
-
- // The proxy should only be referenced while a lock is held because the proxy isn't
- // multi-thread safe.
- // An exception is that a blocking ClientProxy::obtainBuffer() may be called without a lock,
- // provided that the caller also holds an extra reference to the proxy and shared memory to keep
- // them around in case they are replaced during the obtainBuffer().
- sp<AudioRecordClientProxy> mProxy;
-
- bool mInOverrun; // whether recorder is currently in overrun state
-
-private:
- class DeathNotifier : public IBinder::DeathRecipient {
- public:
- DeathNotifier(AudioRecord* audioRecord) : mAudioRecord(audioRecord) { }
- protected:
- virtual void binderDied(const wp<IBinder>& who);
- private:
- const wp<AudioRecord> mAudioRecord;
- };
-
- sp<DeathNotifier> mDeathNotifier;
- uint32_t mSequence; // incremented for each new IAudioRecord attempt
- int mClientUid;
- pid_t mClientPid;
- audio_attributes_t mAttributes;
-
- // For Device Selection API
- // a value of AUDIO_PORT_HANDLE_NONE indicated default (AudioPolicyManager) routing.
- audio_port_handle_t mSelectedDeviceId;
- sp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
-};
-
-}; // namespace android
-
-#endif // ANDROID_AUDIORECORD_H
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
new file mode 120000
index 0000000..7939dd3
--- /dev/null
+++ b/include/media/AudioRecord.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/AudioRecord.h
\ No newline at end of file
diff --git a/include/media/AudioResampler.h b/include/media/AudioResampler.h
new file mode 120000
index 0000000..50e12f4
--- /dev/null
+++ b/include/media/AudioResampler.h
@@ -0,0 +1 @@
+../../media/libaudioprocessing/include/AudioResampler.h
\ No newline at end of file
diff --git a/include/media/AudioResamplerPublic.h b/include/media/AudioResamplerPublic.h
new file mode 120000
index 0000000..309c23d
--- /dev/null
+++ b/include/media/AudioResamplerPublic.h
@@ -0,0 +1 @@
+../../media/libaudioprocessing/include/AudioResamplerPublic.h
\ No newline at end of file
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
deleted file mode 100644
index d67ad44..0000000
--- a/include/media/AudioSystem.h
+++ /dev/null
@@ -1,476 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_AUDIOSYSTEM_H_
-#define ANDROID_AUDIOSYSTEM_H_
-
-#include <hardware/audio_effect.h>
-#include <media/AudioPolicy.h>
-#include <media/AudioIoDescriptor.h>
-#include <media/IAudioFlingerClient.h>
-#include <media/IAudioPolicyServiceClient.h>
-#include <system/audio.h>
-#include <system/audio_policy.h>
-#include <utils/Errors.h>
-#include <utils/Mutex.h>
-
-namespace android {
-
-typedef void (*audio_error_callback)(status_t err);
-typedef void (*dynamic_policy_callback)(int event, String8 regId, int val);
-typedef void (*record_config_callback)(int event, audio_session_t session, int source,
- const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig,
- audio_patch_handle_t patchHandle);
-
-class IAudioFlinger;
-class IAudioPolicyService;
-class String8;
-
-class AudioSystem
-{
-public:
-
- // FIXME Declare in binder opcode order, similarly to IAudioFlinger.h and IAudioFlinger.cpp
-
- /* These are static methods to control the system-wide AudioFlinger
- * only privileged processes can have access to them
- */
-
- // mute/unmute microphone
- static status_t muteMicrophone(bool state);
- static status_t isMicrophoneMuted(bool *state);
-
- // set/get master volume
- static status_t setMasterVolume(float value);
- static status_t getMasterVolume(float* volume);
-
- // mute/unmute audio outputs
- static status_t setMasterMute(bool mute);
- static status_t getMasterMute(bool* mute);
-
- // set/get stream volume on specified output
- static status_t setStreamVolume(audio_stream_type_t stream, float value,
- audio_io_handle_t output);
- static status_t getStreamVolume(audio_stream_type_t stream, float* volume,
- audio_io_handle_t output);
-
- // mute/unmute stream
- static status_t setStreamMute(audio_stream_type_t stream, bool mute);
- static status_t getStreamMute(audio_stream_type_t stream, bool* mute);
-
- // set audio mode in audio hardware
- static status_t setMode(audio_mode_t mode);
-
- // returns true in *state if tracks are active on the specified stream or have been active
- // in the past inPastMs milliseconds
- static status_t isStreamActive(audio_stream_type_t stream, bool *state, uint32_t inPastMs);
- // returns true in *state if tracks are active for what qualifies as remote playback
- // on the specified stream or have been active in the past inPastMs milliseconds. Remote
- // playback isn't mutually exclusive with local playback.
- static status_t isStreamActiveRemotely(audio_stream_type_t stream, bool *state,
- uint32_t inPastMs);
- // returns true in *state if a recorder is currently recording with the specified source
- static status_t isSourceActive(audio_source_t source, bool *state);
-
- // set/get audio hardware parameters. The function accepts a list of parameters
- // key value pairs in the form: key1=value1;key2=value2;...
- // Some keys are reserved for standard parameters (See AudioParameter class).
- // The versions with audio_io_handle_t are intended for internal media framework use only.
- static status_t setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs);
- static String8 getParameters(audio_io_handle_t ioHandle, const String8& keys);
- // The versions without audio_io_handle_t are intended for JNI.
- static status_t setParameters(const String8& keyValuePairs);
- static String8 getParameters(const String8& keys);
-
- static void setErrorCallback(audio_error_callback cb);
- static void setDynPolicyCallback(dynamic_policy_callback cb);
- static void setRecordConfigCallback(record_config_callback);
-
- // helper function to obtain AudioFlinger service handle
- static const sp<IAudioFlinger> get_audio_flinger();
-
- static float linearToLog(int volume);
- static int logToLinear(float volume);
-
- // Returned samplingRate and frameCount output values are guaranteed
- // to be non-zero if status == NO_ERROR
- // FIXME This API assumes a route, and so should be deprecated.
- static status_t getOutputSamplingRate(uint32_t* samplingRate,
- audio_stream_type_t stream);
- // FIXME This API assumes a route, and so should be deprecated.
- static status_t getOutputFrameCount(size_t* frameCount,
- audio_stream_type_t stream);
- // FIXME This API assumes a route, and so should be deprecated.
- static status_t getOutputLatency(uint32_t* latency,
- audio_stream_type_t stream);
- // returns the audio HAL sample rate
- static status_t getSamplingRate(audio_io_handle_t ioHandle,
- uint32_t* samplingRate);
- // For output threads with a fast mixer, returns the number of frames per normal mixer buffer.
- // For output threads without a fast mixer, or for input, this is same as getFrameCountHAL().
- static status_t getFrameCount(audio_io_handle_t ioHandle,
- size_t* frameCount);
- // returns the audio output latency in ms. Corresponds to
- // audio_stream_out->get_latency()
- static status_t getLatency(audio_io_handle_t output,
- uint32_t* latency);
-
- // return status NO_ERROR implies *buffSize > 0
- // FIXME This API assumes a route, and so should deprecated.
- static status_t getInputBufferSize(uint32_t sampleRate, audio_format_t format,
- audio_channel_mask_t channelMask, size_t* buffSize);
-
- static status_t setVoiceVolume(float volume);
-
- // return the number of audio frames written by AudioFlinger to audio HAL and
- // audio dsp to DAC since the specified output has exited standby.
- // returned status (from utils/Errors.h) can be:
- // - NO_ERROR: successful operation, halFrames and dspFrames point to valid data
- // - INVALID_OPERATION: Not supported on current hardware platform
- // - BAD_VALUE: invalid parameter
- // NOTE: this feature is not supported on all hardware platforms and it is
- // necessary to check returned status before using the returned values.
- static status_t getRenderPosition(audio_io_handle_t output,
- uint32_t *halFrames,
- uint32_t *dspFrames);
-
- // return the number of input frames lost by HAL implementation, or 0 if the handle is invalid
- static uint32_t getInputFramesLost(audio_io_handle_t ioHandle);
-
- // Allocate a new unique ID for use as an audio session ID or I/O handle.
- // If unable to contact AudioFlinger, returns AUDIO_UNIQUE_ID_ALLOCATE instead.
- // FIXME If AudioFlinger were to ever exhaust the unique ID namespace,
- // this method could fail by returning either a reserved ID like AUDIO_UNIQUE_ID_ALLOCATE
- // or an unspecified existing unique ID.
- static audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t use);
-
- static void acquireAudioSessionId(audio_session_t audioSession, pid_t pid);
- static void releaseAudioSessionId(audio_session_t audioSession, pid_t pid);
-
- // Get the HW synchronization source used for an audio session.
- // Return a valid source or AUDIO_HW_SYNC_INVALID if an error occurs
- // or no HW sync source is used.
- static audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId);
-
- // Indicate JAVA services are ready (scheduling, power management ...)
- static status_t systemReady();
-
- // Returns the number of frames per audio HAL buffer.
- // Corresponds to audio_stream->get_buffer_size()/audio_stream_in_frame_size() for input.
- // See also getFrameCount().
- static status_t getFrameCountHAL(audio_io_handle_t ioHandle,
- size_t* frameCount);
-
- // Events used to synchronize actions between audio sessions.
- // For instance SYNC_EVENT_PRESENTATION_COMPLETE can be used to delay recording start until
- // playback is complete on another audio session.
- // See definitions in MediaSyncEvent.java
- enum sync_event_t {
- SYNC_EVENT_SAME = -1, // used internally to indicate restart with same event
- SYNC_EVENT_NONE = 0,
- SYNC_EVENT_PRESENTATION_COMPLETE,
-
- //
- // Define new events here: SYNC_EVENT_START, SYNC_EVENT_STOP, SYNC_EVENT_TIME ...
- //
- SYNC_EVENT_CNT,
- };
-
- // Timeout for synchronous record start. Prevents from blocking the record thread forever
- // if the trigger event is not fired.
- static const uint32_t kSyncRecordStartTimeOutMs = 30000;
-
- //
- // IAudioPolicyService interface (see AudioPolicyInterface for method descriptions)
- //
- static status_t setDeviceConnectionState(audio_devices_t device, audio_policy_dev_state_t state,
- const char *device_address, const char *device_name);
- static audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
- const char *device_address);
- static status_t handleDeviceConfigChange(audio_devices_t device,
- const char *device_address,
- const char *device_name);
- static status_t setPhoneState(audio_mode_t state);
- static status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
- static audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage);
-
- // Client must successfully hand off the handle reference to AudioFlinger via createTrack(),
- // or release it with releaseOutput().
- static audio_io_handle_t getOutput(audio_stream_type_t stream,
- uint32_t samplingRate = 0,
- audio_format_t format = AUDIO_FORMAT_DEFAULT,
- audio_channel_mask_t channelMask = AUDIO_CHANNEL_OUT_STEREO,
- audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
- const audio_offload_info_t *offloadInfo = NULL);
- static status_t getOutputForAttr(const audio_attributes_t *attr,
- audio_io_handle_t *output,
- audio_session_t session,
- audio_stream_type_t *stream,
- uid_t uid,
- uint32_t samplingRate = 0,
- audio_format_t format = AUDIO_FORMAT_DEFAULT,
- audio_channel_mask_t channelMask = AUDIO_CHANNEL_OUT_STEREO,
- audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
- audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
- const audio_offload_info_t *offloadInfo = NULL);
- static status_t startOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session);
- static status_t stopOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session);
- static void releaseOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session);
-
- // Client must successfully hand off the handle reference to AudioFlinger via openRecord(),
- // or release it with releaseInput().
- static status_t getInputForAttr(const audio_attributes_t *attr,
- audio_io_handle_t *input,
- audio_session_t session,
- pid_t pid,
- uid_t uid,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- audio_input_flags_t flags,
- audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
-
- static status_t startInput(audio_io_handle_t input,
- audio_session_t session);
- static status_t stopInput(audio_io_handle_t input,
- audio_session_t session);
- static void releaseInput(audio_io_handle_t input,
- audio_session_t session);
- static status_t initStreamVolume(audio_stream_type_t stream,
- int indexMin,
- int indexMax);
- static status_t setStreamVolumeIndex(audio_stream_type_t stream,
- int index,
- audio_devices_t device);
- static status_t getStreamVolumeIndex(audio_stream_type_t stream,
- int *index,
- audio_devices_t device);
-
- static uint32_t getStrategyForStream(audio_stream_type_t stream);
- static audio_devices_t getDevicesForStream(audio_stream_type_t stream);
-
- static audio_io_handle_t getOutputForEffect(const effect_descriptor_t *desc);
- static status_t registerEffect(const effect_descriptor_t *desc,
- audio_io_handle_t io,
- uint32_t strategy,
- audio_session_t session,
- int id);
- static status_t unregisterEffect(int id);
- static status_t setEffectEnabled(int id, bool enabled);
-
- // clear stream to output mapping cache (gStreamOutputMap)
- // and output configuration cache (gOutputs)
- static void clearAudioConfigCache();
-
- static const sp<IAudioPolicyService> get_audio_policy_service();
-
- // helpers for android.media.AudioManager.getProperty(), see description there for meaning
- static uint32_t getPrimaryOutputSamplingRate();
- static size_t getPrimaryOutputFrameCount();
-
- static status_t setLowRamDevice(bool isLowRamDevice);
-
- // Check if hw offload is possible for given format, stream type, sample rate,
- // bit rate, duration, video and streaming or offload property is enabled
- static bool isOffloadSupported(const audio_offload_info_t& info);
-
- // check presence of audio flinger service.
- // returns NO_ERROR if binding to service succeeds, DEAD_OBJECT otherwise
- static status_t checkAudioFlinger();
-
- /* List available audio ports and their attributes */
- static status_t listAudioPorts(audio_port_role_t role,
- audio_port_type_t type,
- unsigned int *num_ports,
- struct audio_port *ports,
- unsigned int *generation);
-
- /* Get attributes for a given audio port */
- static status_t getAudioPort(struct audio_port *port);
-
- /* Create an audio patch between several source and sink ports */
- static status_t createAudioPatch(const struct audio_patch *patch,
- audio_patch_handle_t *handle);
-
- /* Release an audio patch */
- static status_t releaseAudioPatch(audio_patch_handle_t handle);
-
- /* List existing audio patches */
- static status_t listAudioPatches(unsigned int *num_patches,
- struct audio_patch *patches,
- unsigned int *generation);
- /* Set audio port configuration */
- static status_t setAudioPortConfig(const struct audio_port_config *config);
-
-
- static status_t acquireSoundTriggerSession(audio_session_t *session,
- audio_io_handle_t *ioHandle,
- audio_devices_t *device);
- static status_t releaseSoundTriggerSession(audio_session_t session);
-
- static audio_mode_t getPhoneState();
-
- static status_t registerPolicyMixes(const Vector<AudioMix>& mixes, bool registration);
-
- static status_t startAudioSource(const struct audio_port_config *source,
- const audio_attributes_t *attributes,
- audio_io_handle_t *handle);
- static status_t stopAudioSource(audio_io_handle_t handle);
-
- static status_t setMasterMono(bool mono);
- static status_t getMasterMono(bool *mono);
-
- // ----------------------------------------------------------------------------
-
- class AudioPortCallback : public RefBase
- {
- public:
-
- AudioPortCallback() {}
- virtual ~AudioPortCallback() {}
-
- virtual void onAudioPortListUpdate() = 0;
- virtual void onAudioPatchListUpdate() = 0;
- virtual void onServiceDied() = 0;
-
- };
-
- static status_t addAudioPortCallback(const sp<AudioPortCallback>& callback);
- static status_t removeAudioPortCallback(const sp<AudioPortCallback>& callback);
-
- class AudioDeviceCallback : public RefBase
- {
- public:
-
- AudioDeviceCallback() {}
- virtual ~AudioDeviceCallback() {}
-
- virtual void onAudioDeviceUpdate(audio_io_handle_t audioIo,
- audio_port_handle_t deviceId) = 0;
- };
-
- static status_t addAudioDeviceCallback(const sp<AudioDeviceCallback>& callback,
- audio_io_handle_t audioIo);
- static status_t removeAudioDeviceCallback(const sp<AudioDeviceCallback>& callback,
- audio_io_handle_t audioIo);
-
- static audio_port_handle_t getDeviceIdForIo(audio_io_handle_t audioIo);
-
-private:
-
- class AudioFlingerClient: public IBinder::DeathRecipient, public BnAudioFlingerClient
- {
- public:
- AudioFlingerClient() :
- mInBuffSize(0), mInSamplingRate(0),
- mInFormat(AUDIO_FORMAT_DEFAULT), mInChannelMask(AUDIO_CHANNEL_NONE) {
- }
-
- void clearIoCache();
- status_t getInputBufferSize(uint32_t sampleRate, audio_format_t format,
- audio_channel_mask_t channelMask, size_t* buffSize);
- sp<AudioIoDescriptor> getIoDescriptor(audio_io_handle_t ioHandle);
-
- // DeathRecipient
- virtual void binderDied(const wp<IBinder>& who);
-
- // IAudioFlingerClient
-
- // indicate a change in the configuration of an output or input: keeps the cached
- // values for output/input parameters up-to-date in client process
- virtual void ioConfigChanged(audio_io_config_event event,
- const sp<AudioIoDescriptor>& ioDesc);
-
-
- status_t addAudioDeviceCallback(const sp<AudioDeviceCallback>& callback,
- audio_io_handle_t audioIo);
- status_t removeAudioDeviceCallback(const sp<AudioDeviceCallback>& callback,
- audio_io_handle_t audioIo);
-
- audio_port_handle_t getDeviceIdForIo(audio_io_handle_t audioIo);
-
- private:
- Mutex mLock;
- DefaultKeyedVector<audio_io_handle_t, sp<AudioIoDescriptor> > mIoDescriptors;
- DefaultKeyedVector<audio_io_handle_t, Vector < sp<AudioDeviceCallback> > >
- mAudioDeviceCallbacks;
- // cached values for recording getInputBufferSize() queries
- size_t mInBuffSize; // zero indicates cache is invalid
- uint32_t mInSamplingRate;
- audio_format_t mInFormat;
- audio_channel_mask_t mInChannelMask;
- sp<AudioIoDescriptor> getIoDescriptor_l(audio_io_handle_t ioHandle);
- };
-
- class AudioPolicyServiceClient: public IBinder::DeathRecipient,
- public BnAudioPolicyServiceClient
- {
- public:
- AudioPolicyServiceClient() {
- }
-
- int addAudioPortCallback(const sp<AudioPortCallback>& callback);
- int removeAudioPortCallback(const sp<AudioPortCallback>& callback);
-
- // DeathRecipient
- virtual void binderDied(const wp<IBinder>& who);
-
- // IAudioPolicyServiceClient
- virtual void onAudioPortListUpdate();
- virtual void onAudioPatchListUpdate();
- virtual void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state);
- virtual void onRecordingConfigurationUpdate(int event, audio_session_t session,
- audio_source_t source, const audio_config_base_t *clientConfig,
- const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle);
-
- private:
- Mutex mLock;
- Vector <sp <AudioPortCallback> > mAudioPortCallbacks;
- };
-
- static const sp<AudioFlingerClient> getAudioFlingerClient();
- static sp<AudioIoDescriptor> getIoDescriptor(audio_io_handle_t ioHandle);
-
- static sp<AudioFlingerClient> gAudioFlingerClient;
- static sp<AudioPolicyServiceClient> gAudioPolicyServiceClient;
- friend class AudioFlingerClient;
- friend class AudioPolicyServiceClient;
-
- static Mutex gLock; // protects gAudioFlinger and gAudioErrorCallback,
- static Mutex gLockAPS; // protects gAudioPolicyService and gAudioPolicyServiceClient
- static sp<IAudioFlinger> gAudioFlinger;
- static audio_error_callback gAudioErrorCallback;
- static dynamic_policy_callback gDynPolicyCallback;
- static record_config_callback gRecordConfigCallback;
-
- static size_t gInBuffSize;
- // previous parameters for recording buffer size queries
- static uint32_t gPrevInSamplingRate;
- static audio_format_t gPrevInFormat;
- static audio_channel_mask_t gPrevInChannelMask;
-
- static sp<IAudioPolicyService> gAudioPolicyService;
-};
-
-}; // namespace android
-
-#endif /*ANDROID_AUDIOSYSTEM_H_*/
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
new file mode 120000
index 0000000..9fad2b7
--- /dev/null
+++ b/include/media/AudioSystem.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/AudioSystem.h
\ No newline at end of file
diff --git a/include/media/AudioTimestamp.h b/include/media/AudioTimestamp.h
new file mode 120000
index 0000000..b6b9278
--- /dev/null
+++ b/include/media/AudioTimestamp.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/AudioTimestamp.h
\ No newline at end of file
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
deleted file mode 100644
index 88c4e61..0000000
--- a/include/media/AudioTrack.h
+++ /dev/null
@@ -1,1112 +0,0 @@
-/*
- * Copyright (C) 2007 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_AUDIOTRACK_H
-#define ANDROID_AUDIOTRACK_H
-
-#include <cutils/sched_policy.h>
-#include <media/AudioSystem.h>
-#include <media/AudioTimestamp.h>
-#include <media/IAudioTrack.h>
-#include <media/AudioResamplerPublic.h>
-#include <media/Modulo.h>
-#include <utils/threads.h>
-
-namespace android {
-
-// ----------------------------------------------------------------------------
-
-struct audio_track_cblk_t;
-class AudioTrackClientProxy;
-class StaticAudioTrackClientProxy;
-
-// ----------------------------------------------------------------------------
-
-class AudioTrack : public RefBase
-{
-public:
-
- /* Events used by AudioTrack callback function (callback_t).
- * Keep in sync with frameworks/base/media/java/android/media/AudioTrack.java NATIVE_EVENT_*.
- */
- enum event_type {
- EVENT_MORE_DATA = 0, // Request to write more data to buffer.
- // This event only occurs for TRANSFER_CALLBACK.
- // If this event is delivered but the callback handler
- // does not want to write more data, the handler must
- // ignore the event by setting frameCount to zero.
- // This might occur, for example, if the application is
- // waiting for source data or is at the end of stream.
- //
- // For data filling, it is preferred that the callback
- // does not block and instead returns a short count on
- // the amount of data actually delivered
- // (or 0, if no data is currently available).
- EVENT_UNDERRUN = 1, // Buffer underrun occurred. This will not occur for
- // static tracks.
- EVENT_LOOP_END = 2, // Sample loop end was reached; playback restarted from
- // loop start if loop count was not 0 for a static track.
- EVENT_MARKER = 3, // Playback head is at the specified marker position
- // (See setMarkerPosition()).
- EVENT_NEW_POS = 4, // Playback head is at a new position
- // (See setPositionUpdatePeriod()).
- EVENT_BUFFER_END = 5, // Playback has completed for a static track.
- EVENT_NEW_IAUDIOTRACK = 6, // IAudioTrack was re-created, either due to re-routing and
- // voluntary invalidation by mediaserver, or mediaserver crash.
- EVENT_STREAM_END = 7, // Sent after all the buffers queued in AF and HW are played
- // back (after stop is called) for an offloaded track.
-#if 0 // FIXME not yet implemented
- EVENT_NEW_TIMESTAMP = 8, // Delivered periodically and when there's a significant change
- // in the mapping from frame position to presentation time.
- // See AudioTimestamp for the information included with event.
-#endif
- };
-
- /* Client should declare a Buffer and pass the address to obtainBuffer()
- * and releaseBuffer(). See also callback_t for EVENT_MORE_DATA.
- */
-
- class Buffer
- {
- public:
- // FIXME use m prefix
- size_t frameCount; // number of sample frames corresponding to size;
- // on input to obtainBuffer() it is the number of frames desired,
- // on output from obtainBuffer() it is the number of available
- // [empty slots for] frames to be filled
- // on input to releaseBuffer() it is currently ignored
-
- size_t size; // input/output in bytes == frameCount * frameSize
- // on input to obtainBuffer() it is ignored
- // on output from obtainBuffer() it is the number of available
- // [empty slots for] bytes to be filled,
- // which is frameCount * frameSize
- // on input to releaseBuffer() it is the number of bytes to
- // release
- // FIXME This is redundant with respect to frameCount. Consider
- // removing size and making frameCount the primary field.
-
- union {
- void* raw;
- short* i16; // signed 16-bit
- int8_t* i8; // unsigned 8-bit, offset by 0x80
- }; // input to obtainBuffer(): unused, output: pointer to buffer
- };
-
- /* As a convenience, if a callback is supplied, a handler thread
- * is automatically created with the appropriate priority. This thread
- * invokes the callback when a new buffer becomes available or various conditions occur.
- * Parameters:
- *
- * event: type of event notified (see enum AudioTrack::event_type).
- * user: Pointer to context for use by the callback receiver.
- * info: Pointer to optional parameter according to event type:
- * - EVENT_MORE_DATA: pointer to AudioTrack::Buffer struct. The callback must not write
- * more bytes than indicated by 'size' field and update 'size' if fewer bytes are
- * written.
- * - EVENT_UNDERRUN: unused.
- * - EVENT_LOOP_END: pointer to an int indicating the number of loops remaining.
- * - EVENT_MARKER: pointer to const uint32_t containing the marker position in frames.
- * - EVENT_NEW_POS: pointer to const uint32_t containing the new position in frames.
- * - EVENT_BUFFER_END: unused.
- * - EVENT_NEW_IAUDIOTRACK: unused.
- * - EVENT_STREAM_END: unused.
- * - EVENT_NEW_TIMESTAMP: pointer to const AudioTimestamp.
- */
-
- typedef void (*callback_t)(int event, void* user, void *info);
-
- /* Returns the minimum frame count required for the successful creation of
- * an AudioTrack object.
- * Returned status (from utils/Errors.h) can be:
- * - NO_ERROR: successful operation
- * - NO_INIT: audio server or audio hardware not initialized
- * - BAD_VALUE: unsupported configuration
- * frameCount is guaranteed to be non-zero if status is NO_ERROR,
- * and is undefined otherwise.
- * FIXME This API assumes a route, and so should be deprecated.
- */
-
- static status_t getMinFrameCount(size_t* frameCount,
- audio_stream_type_t streamType,
- uint32_t sampleRate);
-
- /* How data is transferred to AudioTrack
- */
- enum transfer_type {
- TRANSFER_DEFAULT, // not specified explicitly; determine from the other parameters
- TRANSFER_CALLBACK, // callback EVENT_MORE_DATA
- TRANSFER_OBTAIN, // call obtainBuffer() and releaseBuffer()
- TRANSFER_SYNC, // synchronous write()
- TRANSFER_SHARED, // shared memory
- };
-
- /* Constructs an uninitialized AudioTrack. No connection with
- * AudioFlinger takes place. Use set() after this.
- */
- AudioTrack();
-
- /* Creates an AudioTrack object and registers it with AudioFlinger.
- * Once created, the track needs to be started before it can be used.
- * Unspecified values are set to appropriate default values.
- *
- * Parameters:
- *
- * streamType: Select the type of audio stream this track is attached to
- * (e.g. AUDIO_STREAM_MUSIC).
- * sampleRate: Data source sampling rate in Hz. Zero means to use the sink sample rate.
- * A non-zero value must be specified if AUDIO_OUTPUT_FLAG_DIRECT is set.
- * 0 will not work with current policy implementation for direct output
- * selection where an exact match is needed for sampling rate.
- * format: Audio format. For mixed tracks, any PCM format supported by server is OK.
- * For direct and offloaded tracks, the possible format(s) depends on the
- * output sink.
- * channelMask: Channel mask, such that audio_is_output_channel(channelMask) is true.
- * frameCount: Minimum size of track PCM buffer in frames. This defines the
- * application's contribution to the
- * latency of the track. The actual size selected by the AudioTrack could be
- * larger if the requested size is not compatible with current audio HAL
- * configuration. Zero means to use a default value.
- * flags: See comments on audio_output_flags_t in <system/audio.h>.
- * cbf: Callback function. If not null, this function is called periodically
- * to provide new data in TRANSFER_CALLBACK mode
- * and inform of marker, position updates, etc.
- * user: Context for use by the callback receiver.
- * notificationFrames: The callback function is called each time notificationFrames PCM
- * frames have been consumed from track input buffer by server.
- * Zero means to use a default value, which is typically:
- * - fast tracks: HAL buffer size, even if track frameCount is larger
- * - normal tracks: 1/2 of track frameCount
- * A positive value means that many frames at initial source sample rate.
- * A negative value for this parameter specifies the negative of the
- * requested number of notifications (sub-buffers) in the entire buffer.
- * For fast tracks, the FastMixer will process one sub-buffer at a time.
- * The size of each sub-buffer is determined by the HAL.
- * To get "double buffering", for example, one should pass -2.
- * The minimum number of sub-buffers is 1 (expressed as -1),
- * and the maximum number of sub-buffers is 8 (expressed as -8).
- * Negative is only permitted for fast tracks, and if frameCount is zero.
- * TODO It is ugly to overload a parameter in this way depending on
- * whether it is positive, negative, or zero. Consider splitting apart.
- * sessionId: Specific session ID, or zero to use default.
- * transferType: How data is transferred to AudioTrack.
- * offloadInfo: If not NULL, provides offload parameters for
- * AudioSystem::getOutputForAttr().
- * uid: User ID of the app which initially requested this AudioTrack
- * for power management tracking, or -1 for current user ID.
- * pid: Process ID of the app which initially requested this AudioTrack
- * for power management tracking, or -1 for current process ID.
- * pAttributes: If not NULL, supersedes streamType for use case selection.
- * doNotReconnect: If set to true, AudioTrack won't automatically recreate the IAudioTrack
- binder to AudioFlinger.
- It will return an error instead. The application will recreate
- the track based on offloading or different channel configuration, etc.
- * maxRequiredSpeed: For PCM tracks, this creates an appropriate buffer size that will allow
- * maxRequiredSpeed playback. Values less than 1.0f and greater than
- * AUDIO_TIMESTRETCH_SPEED_MAX will be clamped. For non-PCM tracks
- * and direct or offloaded tracks, this parameter is ignored.
- * threadCanCallJava: Not present in parameter list, and so is fixed at false.
- */
-
- AudioTrack( audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- size_t frameCount = 0,
- audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
- callback_t cbf = NULL,
- void* user = NULL,
- int32_t notificationFrames = 0,
- audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
- transfer_type transferType = TRANSFER_DEFAULT,
- const audio_offload_info_t *offloadInfo = NULL,
- int uid = -1,
- pid_t pid = -1,
- const audio_attributes_t* pAttributes = NULL,
- bool doNotReconnect = false,
- float maxRequiredSpeed = 1.0f);
-
- /* Creates an audio track and registers it with AudioFlinger.
- * With this constructor, the track is configured for static buffer mode.
- * Data to be rendered is passed in a shared memory buffer
- * identified by the argument sharedBuffer, which should be non-0.
- * If sharedBuffer is zero, this constructor is equivalent to the previous constructor
- * but without the ability to specify a non-zero value for the frameCount parameter.
- * The memory should be initialized to the desired data before calling start().
- * The write() method is not supported in this case.
- * It is recommended to pass a callback function to be notified of playback end by an
- * EVENT_UNDERRUN event.
- */
-
- AudioTrack( audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- const sp<IMemory>& sharedBuffer,
- audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
- callback_t cbf = NULL,
- void* user = NULL,
- int32_t notificationFrames = 0,
- audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
- transfer_type transferType = TRANSFER_DEFAULT,
- const audio_offload_info_t *offloadInfo = NULL,
- int uid = -1,
- pid_t pid = -1,
- const audio_attributes_t* pAttributes = NULL,
- bool doNotReconnect = false,
- float maxRequiredSpeed = 1.0f);
-
- /* Terminates the AudioTrack and unregisters it from AudioFlinger.
- * Also destroys all resources associated with the AudioTrack.
- */
-protected:
- virtual ~AudioTrack();
-public:
-
- /* Initialize an AudioTrack that was created using the AudioTrack() constructor.
- * Don't call set() more than once, or after the AudioTrack() constructors that take parameters.
- * set() is not multi-thread safe.
- * Returned status (from utils/Errors.h) can be:
- * - NO_ERROR: successful initialization
- * - INVALID_OPERATION: AudioTrack is already initialized
- * - BAD_VALUE: invalid parameter (channelMask, format, sampleRate...)
- * - NO_INIT: audio server or audio hardware not initialized
- * If status is not equal to NO_ERROR, don't call any other APIs on this AudioTrack.
- * If sharedBuffer is non-0, the frameCount parameter is ignored and
- * replaced by the shared buffer's total allocated size in frame units.
- *
- * Parameters not listed in the AudioTrack constructors above:
- *
- * threadCanCallJava: Whether callbacks are made from an attached thread and thus can call JNI.
- *
- * Internal state post condition:
- * (mStreamType == AUDIO_STREAM_DEFAULT) implies this AudioTrack has valid attributes
- */
- status_t set(audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- size_t frameCount = 0,
- audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
- callback_t cbf = NULL,
- void* user = NULL,
- int32_t notificationFrames = 0,
- const sp<IMemory>& sharedBuffer = 0,
- bool threadCanCallJava = false,
- audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
- transfer_type transferType = TRANSFER_DEFAULT,
- const audio_offload_info_t *offloadInfo = NULL,
- int uid = -1,
- pid_t pid = -1,
- const audio_attributes_t* pAttributes = NULL,
- bool doNotReconnect = false,
- float maxRequiredSpeed = 1.0f);
-
- /* Result of constructing the AudioTrack. This must be checked for successful initialization
- * before using any AudioTrack API (except for set()), because using
- * an uninitialized AudioTrack produces undefined results.
- * See set() method above for possible return codes.
- */
- status_t initCheck() const { return mStatus; }
-
- /* Returns this track's estimated latency in milliseconds.
- * This includes the latency due to AudioTrack buffer size, AudioMixer (if any)
- * and audio hardware driver.
- */
- uint32_t latency() const { return mLatency; }
-
- /* Returns the number of application-level buffer underruns
- * since the AudioTrack was created.
- */
- uint32_t getUnderrunCount() const;
-
- /* getters, see constructors and set() */
-
- audio_stream_type_t streamType() const;
- audio_format_t format() const { return mFormat; }
-
- /* Return frame size in bytes, which for linear PCM is
- * channelCount * (bit depth per channel / 8).
- * channelCount is determined from channelMask, and bit depth comes from format.
- * For non-linear formats, the frame size is typically 1 byte.
- */
- size_t frameSize() const { return mFrameSize; }
-
- uint32_t channelCount() const { return mChannelCount; }
- size_t frameCount() const { return mFrameCount; }
-
- // TODO consider notificationFrames() if needed
-
- /* Return effective size of audio buffer that an application writes to
- * or a negative error if the track is uninitialized.
- */
- ssize_t getBufferSizeInFrames();
-
- /* Returns the buffer duration in microseconds at current playback rate.
- */
- status_t getBufferDurationInUs(int64_t *duration);
-
- /* Set the effective size of audio buffer that an application writes to.
- * This is used to determine the amount of available room in the buffer,
- * which determines when a write will block.
- * This allows an application to raise and lower the audio latency.
- * The requested size may be adjusted so that it is
- * greater or equal to the absolute minimum and
- * less than or equal to the getBufferCapacityInFrames().
- * It may also be adjusted slightly for internal reasons.
- *
- * Return the final size or a negative error if the track is unitialized
- * or does not support variable sizes.
- */
- ssize_t setBufferSizeInFrames(size_t size);
-
- /* Return the static buffer specified in constructor or set(), or 0 for streaming mode */
- sp<IMemory> sharedBuffer() const { return mSharedBuffer; }
-
- /* After it's created the track is not active. Call start() to
- * make it active. If set, the callback will start being called.
- * If the track was previously paused, volume is ramped up over the first mix buffer.
- */
- status_t start();
-
- /* Stop a track.
- * In static buffer mode, the track is stopped immediately.
- * In streaming mode, the callback will cease being called. Note that obtainBuffer() still
- * works and will fill up buffers until the pool is exhausted, and then will return WOULD_BLOCK.
- * In streaming mode the stop does not occur immediately: any data remaining in the buffer
- * is first drained, mixed, and output, and only then is the track marked as stopped.
- */
- void stop();
- bool stopped() const;
-
- /* Flush a stopped or paused track. All previously buffered data is discarded immediately.
- * This has the effect of draining the buffers without mixing or output.
- * Flush is intended for streaming mode, for example before switching to non-contiguous content.
- * This function is a no-op if the track is not stopped or paused, or uses a static buffer.
- */
- void flush();
-
- /* Pause a track. After pause, the callback will cease being called and
- * obtainBuffer returns WOULD_BLOCK. Note that obtainBuffer() still works
- * and will fill up buffers until the pool is exhausted.
- * Volume is ramped down over the next mix buffer following the pause request,
- * and then the track is marked as paused. It can be resumed with ramp up by start().
- */
- void pause();
-
- /* Set volume for this track, mostly used for games' sound effects
- * left and right volumes. Levels must be >= 0.0 and <= 1.0.
- * This is the older API. New applications should use setVolume(float) when possible.
- */
- status_t setVolume(float left, float right);
-
- /* Set volume for all channels. This is the preferred API for new applications,
- * especially for multi-channel content.
- */
- status_t setVolume(float volume);
-
- /* Set the send level for this track. An auxiliary effect should be attached
- * to the track with attachEffect(). Level must be >= 0.0 and <= 1.0.
- */
- status_t setAuxEffectSendLevel(float level);
- void getAuxEffectSendLevel(float* level) const;
-
- /* Set source sample rate for this track in Hz, mostly used for games' sound effects.
- * Zero is not permitted.
- */
- status_t setSampleRate(uint32_t sampleRate);
-
- /* Return current source sample rate in Hz.
- * If specified as zero in constructor or set(), this will be the sink sample rate.
- */
- uint32_t getSampleRate() const;
-
- /* Return the original source sample rate in Hz. This corresponds to the sample rate
- * if playback rate had normal speed and pitch.
- */
- uint32_t getOriginalSampleRate() const;
-
- /* Set source playback rate for timestretch
- * 1.0 is normal speed: < 1.0 is slower, > 1.0 is faster
- * 1.0 is normal pitch: < 1.0 is lower pitch, > 1.0 is higher pitch
- *
- * AUDIO_TIMESTRETCH_SPEED_MIN <= speed <= AUDIO_TIMESTRETCH_SPEED_MAX
- * AUDIO_TIMESTRETCH_PITCH_MIN <= pitch <= AUDIO_TIMESTRETCH_PITCH_MAX
- *
- * Speed increases the playback rate of media, but does not alter pitch.
- * Pitch increases the "tonal frequency" of media, but does not affect the playback rate.
- */
- status_t setPlaybackRate(const AudioPlaybackRate &playbackRate);
-
- /* Return current playback rate */
- const AudioPlaybackRate& getPlaybackRate() const;
-
- /* Enables looping and sets the start and end points of looping.
- * Only supported for static buffer mode.
- *
- * Parameters:
- *
- * loopStart: loop start in frames relative to start of buffer.
- * loopEnd: loop end in frames relative to start of buffer.
- * loopCount: number of loops to execute. Calling setLoop() with loopCount == 0 cancels any
- * pending or active loop. loopCount == -1 means infinite looping.
- *
- * For proper operation the following condition must be respected:
- * loopCount != 0 implies 0 <= loopStart < loopEnd <= frameCount().
- *
- * If the loop period (loopEnd - loopStart) is too small for the implementation to support,
- * setLoop() will return BAD_VALUE. loopCount must be >= -1.
- *
- */
- status_t setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount);
-
- /* Sets marker position. When playback reaches the number of frames specified, a callback with
- * event type EVENT_MARKER is called. Calling setMarkerPosition with marker == 0 cancels marker
- * notification callback. To set a marker at a position which would compute as 0,
- * a workaround is to set the marker at a nearby position such as ~0 or 1.
- * If the AudioTrack has been opened with no callback function associated, the operation will
- * fail.
- *
- * Parameters:
- *
- * marker: marker position expressed in wrapping (overflow) frame units,
- * like the return value of getPosition().
- *
- * Returned status (from utils/Errors.h) can be:
- * - NO_ERROR: successful operation
- * - INVALID_OPERATION: the AudioTrack has no callback installed.
- */
- status_t setMarkerPosition(uint32_t marker);
- status_t getMarkerPosition(uint32_t *marker) const;
-
- /* Sets position update period. Every time the number of frames specified has been played,
- * a callback with event type EVENT_NEW_POS is called.
- * Calling setPositionUpdatePeriod with updatePeriod == 0 cancels new position notification
- * callback.
- * If the AudioTrack has been opened with no callback function associated, the operation will
- * fail.
- * Extremely small values may be rounded up to a value the implementation can support.
- *
- * Parameters:
- *
- * updatePeriod: position update notification period expressed in frames.
- *
- * Returned status (from utils/Errors.h) can be:
- * - NO_ERROR: successful operation
- * - INVALID_OPERATION: the AudioTrack has no callback installed.
- */
- status_t setPositionUpdatePeriod(uint32_t updatePeriod);
- status_t getPositionUpdatePeriod(uint32_t *updatePeriod) const;
-
- /* Sets playback head position.
- * Only supported for static buffer mode.
- *
- * Parameters:
- *
- * position: New playback head position in frames relative to start of buffer.
- * 0 <= position <= frameCount(). Note that end of buffer is permitted,
- * but will result in an immediate underrun if started.
- *
- * Returned status (from utils/Errors.h) can be:
- * - NO_ERROR: successful operation
- * - INVALID_OPERATION: the AudioTrack is not stopped or paused, or is streaming mode.
- * - BAD_VALUE: The specified position is beyond the number of frames present in AudioTrack
- * buffer
- */
- status_t setPosition(uint32_t position);
-
- /* Return the total number of frames played since playback start.
- * The counter will wrap (overflow) periodically, e.g. every ~27 hours at 44.1 kHz.
- * It is reset to zero by flush(), reload(), and stop().
- *
- * Parameters:
- *
- * position: Address where to return play head position.
- *
- * Returned status (from utils/Errors.h) can be:
- * - NO_ERROR: successful operation
- * - BAD_VALUE: position is NULL
- */
- status_t getPosition(uint32_t *position);
-
- /* For static buffer mode only, this returns the current playback position in frames
- * relative to start of buffer. It is analogous to the position units used by
- * setLoop() and setPosition(). After underrun, the position will be at end of buffer.
- */
- status_t getBufferPosition(uint32_t *position);
-
- /* Forces AudioTrack buffer full condition. When playing a static buffer, this method avoids
- * rewriting the buffer before restarting playback after a stop.
- * This method must be called with the AudioTrack in paused or stopped state.
- * Not allowed in streaming mode.
- *
- * Returned status (from utils/Errors.h) can be:
- * - NO_ERROR: successful operation
- * - INVALID_OPERATION: the AudioTrack is not stopped or paused, or is streaming mode.
- */
- status_t reload();
-
- /* Returns a handle on the audio output used by this AudioTrack.
- *
- * Parameters:
- * none.
- *
- * Returned value:
- * handle on audio hardware output, or AUDIO_IO_HANDLE_NONE if the
- * track needed to be re-created but that failed
- */
-private:
- audio_io_handle_t getOutput() const;
-public:
-
- /* Selects the audio device to use for output of this AudioTrack. A value of
- * AUDIO_PORT_HANDLE_NONE indicates default (AudioPolicyManager) routing.
- *
- * Parameters:
- * The device ID of the selected device (as returned by the AudioDevicesManager API).
- *
- * Returned value:
- * - NO_ERROR: successful operation
- * TODO: what else can happen here?
- */
- status_t setOutputDevice(audio_port_handle_t deviceId);
-
- /* Returns the ID of the audio device selected for this AudioTrack.
- * A value of AUDIO_PORT_HANDLE_NONE indicates default (AudioPolicyManager) routing.
- *
- * Parameters:
- * none.
- */
- audio_port_handle_t getOutputDevice();
-
- /* Returns the ID of the audio device actually used by the output to which this AudioTrack is
- * attached.
- * A value of AUDIO_PORT_HANDLE_NONE indicates the audio track is not attached to any output.
- *
- * Parameters:
- * none.
- */
- audio_port_handle_t getRoutedDeviceId();
-
- /* Returns the unique session ID associated with this track.
- *
- * Parameters:
- * none.
- *
- * Returned value:
- * AudioTrack session ID.
- */
- audio_session_t getSessionId() const { return mSessionId; }
-
- /* Attach track auxiliary output to specified effect. Use effectId = 0
- * to detach track from effect.
- *
- * Parameters:
- *
- * effectId: effectId obtained from AudioEffect::id().
- *
- * Returned status (from utils/Errors.h) can be:
- * - NO_ERROR: successful operation
- * - INVALID_OPERATION: the effect is not an auxiliary effect.
- * - BAD_VALUE: The specified effect ID is invalid
- */
- status_t attachAuxEffect(int effectId);
-
- /* Public API for TRANSFER_OBTAIN mode.
- * Obtains a buffer of up to "audioBuffer->frameCount" empty slots for frames.
- * After filling these slots with data, the caller should release them with releaseBuffer().
- * If the track buffer is not full, obtainBuffer() returns as many contiguous
- * [empty slots for] frames as are available immediately.
- *
- * If nonContig is non-NULL, it is an output parameter that will be set to the number of
- * additional non-contiguous frames that are predicted to be available immediately,
- * if the client were to release the first frames and then call obtainBuffer() again.
- * This value is only a prediction, and needs to be confirmed.
- * It will be set to zero for an error return.
- *
- * If the track buffer is full and track is stopped, obtainBuffer() returns WOULD_BLOCK
- * regardless of the value of waitCount.
- * If the track buffer is full and track is not stopped, obtainBuffer() blocks with a
- * maximum timeout based on waitCount; see chart below.
- * Buffers will be returned until the pool
- * is exhausted, at which point obtainBuffer() will either block
- * or return WOULD_BLOCK depending on the value of the "waitCount"
- * parameter.
- *
- * Interpretation of waitCount:
- * +n limits wait time to n * WAIT_PERIOD_MS,
- * -1 causes an (almost) infinite wait time,
- * 0 non-blocking.
- *
- * Buffer fields
- * On entry:
- * frameCount number of [empty slots for] frames requested
- * size ignored
- * raw ignored
- * After error return:
- * frameCount 0
- * size 0
- * raw undefined
- * After successful return:
- * frameCount actual number of [empty slots for] frames available, <= number requested
- * size actual number of bytes available
- * raw pointer to the buffer
- */
- status_t obtainBuffer(Buffer* audioBuffer, int32_t waitCount,
- size_t *nonContig = NULL);
-
-private:
- /* If nonContig is non-NULL, it is an output parameter that will be set to the number of
- * additional non-contiguous frames that are predicted to be available immediately,
- * if the client were to release the first frames and then call obtainBuffer() again.
- * This value is only a prediction, and needs to be confirmed.
- * It will be set to zero for an error return.
- * FIXME We could pass an array of Buffers instead of only one Buffer to obtainBuffer(),
- * in case the requested amount of frames is in two or more non-contiguous regions.
- * FIXME requested and elapsed are both relative times. Consider changing to absolute time.
- */
- status_t obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
- struct timespec *elapsed = NULL, size_t *nonContig = NULL);
-public:
-
- /* Public API for TRANSFER_OBTAIN mode.
- * Release a filled buffer of frames for AudioFlinger to process.
- *
- * Buffer fields:
- * frameCount currently ignored but recommend to set to actual number of frames filled
- * size actual number of bytes filled, must be multiple of frameSize
- * raw ignored
- */
- void releaseBuffer(const Buffer* audioBuffer);
-
- /* As a convenience we provide a write() interface to the audio buffer.
- * Input parameter 'size' is in byte units.
- * This is implemented on top of obtainBuffer/releaseBuffer. For best
- * performance use callbacks. Returns actual number of bytes written >= 0,
- * or one of the following negative status codes:
- * INVALID_OPERATION AudioTrack is configured for static buffer or streaming mode
- * BAD_VALUE size is invalid
- * WOULD_BLOCK when obtainBuffer() returns same, or
- * AudioTrack was stopped during the write
- * DEAD_OBJECT when AudioFlinger dies or the output device changes and
- * the track cannot be automatically restored.
- * The application needs to recreate the AudioTrack
- * because the audio device changed or AudioFlinger died.
- * This typically occurs for direct or offload tracks
- * or if mDoNotReconnect is true.
- * or any other error code returned by IAudioTrack::start() or restoreTrack_l().
- * Default behavior is to only return when all data has been transferred. Set 'blocking' to
- * false for the method to return immediately without waiting to try multiple times to write
- * the full content of the buffer.
- */
- ssize_t write(const void* buffer, size_t size, bool blocking = true);
-
- /*
- * Dumps the state of an audio track.
- * Not a general-purpose API; intended only for use by media player service to dump its tracks.
- */
- status_t dump(int fd, const Vector<String16>& args) const;
-
- /*
- * Return the total number of frames which AudioFlinger desired but were unavailable,
- * and thus which resulted in an underrun. Reset to zero by stop().
- */
- uint32_t getUnderrunFrames() const;
-
- /* Get the flags */
- audio_output_flags_t getFlags() const { AutoMutex _l(mLock); return mFlags; }
-
- /* Set parameters - only possible when using direct output */
- status_t setParameters(const String8& keyValuePairs);
-
- /* Get parameters */
- String8 getParameters(const String8& keys);
-
- /* Poll for a timestamp on demand.
- * Use if EVENT_NEW_TIMESTAMP is not delivered often enough for your needs,
- * or if you need to get the most recent timestamp outside of the event callback handler.
- * Caution: calling this method too often may be inefficient;
- * if you need a high resolution mapping between frame position and presentation time,
- * consider implementing that at application level, based on the low resolution timestamps.
- * Returns NO_ERROR if timestamp is valid.
- * WOULD_BLOCK if called in STOPPED or FLUSHED state, or if called immediately after
- * start/ACTIVE, when the number of frames consumed is less than the
- * overall hardware latency to physical output. In WOULD_BLOCK cases,
- * one might poll again, or use getPosition(), or use 0 position and
- * current time for the timestamp.
- * DEAD_OBJECT if AudioFlinger dies or the output device changes and
- * the track cannot be automatically restored.
- * The application needs to recreate the AudioTrack
- * because the audio device changed or AudioFlinger died.
- * This typically occurs for direct or offload tracks
- * or if mDoNotReconnect is true.
- * INVALID_OPERATION wrong state, or some other error.
- *
- * The timestamp parameter is undefined on return, if status is not NO_ERROR.
- */
- status_t getTimestamp(AudioTimestamp& timestamp);
-
- /* Return the extended timestamp, with additional timebase info and improved drain behavior.
- *
- * This is similar to the AudioTrack.java API:
- * getTimestamp(@NonNull AudioTimestamp timestamp, @AudioTimestamp.Timebase int timebase)
- *
- * Some differences between this method and the getTimestamp(AudioTimestamp& timestamp) method
- *
- * 1. stop() by itself does not reset the frame position.
- * A following start() resets the frame position to 0.
- * 2. flush() by itself does not reset the frame position.
- * The frame position advances by the number of frames flushed,
- * when the first frame after flush reaches the audio sink.
- * 3. BOOTTIME clock offsets are provided to help synchronize with
- * non-audio streams, e.g. sensor data.
- * 4. Position is returned with 64 bits of resolution.
- *
- * Parameters:
- * timestamp: A pointer to the caller allocated ExtendedTimestamp.
- *
- * Returns NO_ERROR on success; timestamp is filled with valid data.
- * BAD_VALUE if timestamp is NULL.
- * WOULD_BLOCK if called immediately after start() when the number
- * of frames consumed is less than the
- * overall hardware latency to physical output. In WOULD_BLOCK cases,
- * one might poll again, or use getPosition(), or use 0 position and
- * current time for the timestamp.
- * If WOULD_BLOCK is returned, the timestamp is still
- * modified with the LOCATION_CLIENT portion filled.
- * DEAD_OBJECT if AudioFlinger dies or the output device changes and
- * the track cannot be automatically restored.
- * The application needs to recreate the AudioTrack
- * because the audio device changed or AudioFlinger died.
- * This typically occurs for direct or offloaded tracks
- * or if mDoNotReconnect is true.
- * INVALID_OPERATION if called on a offloaded or direct track.
- * Use getTimestamp(AudioTimestamp& timestamp) instead.
- */
- status_t getTimestamp(ExtendedTimestamp *timestamp);
-private:
- status_t getTimestamp_l(ExtendedTimestamp *timestamp);
-public:
-
- /* Add an AudioDeviceCallback. The caller will be notified when the audio device to which this
- * AudioTrack is routed is updated.
- * Replaces any previously installed callback.
- * Parameters:
- * callback: The callback interface
- * Returns NO_ERROR if successful.
- * INVALID_OPERATION if the same callback is already installed.
- * NO_INIT or PREMISSION_DENIED if AudioFlinger service is not reachable
- * BAD_VALUE if the callback is NULL
- */
- status_t addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback);
-
- /* remove an AudioDeviceCallback.
- * Parameters:
- * callback: The callback interface
- * Returns NO_ERROR if successful.
- * INVALID_OPERATION if the callback is not installed
- * BAD_VALUE if the callback is NULL
- */
- status_t removeAudioDeviceCallback(
- const sp<AudioSystem::AudioDeviceCallback>& callback);
-
- /* Obtain the pending duration in milliseconds for playback of pure PCM
- * (mixable without embedded timing) data remaining in AudioTrack.
- *
- * This is used to estimate the drain time for the client-server buffer
- * so the choice of ExtendedTimestamp::LOCATION_SERVER is default.
- * One may optionally request to find the duration to play through the HAL
- * by specifying a location ExtendedTimestamp::LOCATION_KERNEL; however,
- * INVALID_OPERATION may be returned if the kernel location is unavailable.
- *
- * Returns NO_ERROR if successful.
- * INVALID_OPERATION if ExtendedTimestamp::LOCATION_KERNEL cannot be obtained
- * or the AudioTrack does not contain pure PCM data.
- * BAD_VALUE if msec is nullptr or location is invalid.
- */
- status_t pendingDuration(int32_t *msec,
- ExtendedTimestamp::Location location = ExtendedTimestamp::LOCATION_SERVER);
-
-protected:
- /* copying audio tracks is not allowed */
- AudioTrack(const AudioTrack& other);
- AudioTrack& operator = (const AudioTrack& other);
-
- /* a small internal class to handle the callback */
- class AudioTrackThread : public Thread
- {
- public:
- AudioTrackThread(AudioTrack& receiver, bool bCanCallJava = false);
-
- // Do not call Thread::requestExitAndWait() without first calling requestExit().
- // Thread::requestExitAndWait() is not virtual, and the implementation doesn't do enough.
- virtual void requestExit();
-
- void pause(); // suspend thread from execution at next loop boundary
- void resume(); // allow thread to execute, if not requested to exit
- void wake(); // wake to handle changed notification conditions.
-
- private:
- void pauseInternal(nsecs_t ns = 0LL);
- // like pause(), but only used internally within thread
-
- friend class AudioTrack;
- virtual bool threadLoop();
- AudioTrack& mReceiver;
- virtual ~AudioTrackThread();
- Mutex mMyLock; // Thread::mLock is private
- Condition mMyCond; // Thread::mThreadExitedCondition is private
- bool mPaused; // whether thread is requested to pause at next loop entry
- bool mPausedInt; // whether thread internally requests pause
- nsecs_t mPausedNs; // if mPausedInt then associated timeout, otherwise ignored
- bool mIgnoreNextPausedInt; // skip any internal pause and go immediately
- // to processAudioBuffer() as state may have changed
- // since pause time calculated.
- };
-
- // body of AudioTrackThread::threadLoop()
- // returns the maximum amount of time before we would like to run again, where:
- // 0 immediately
- // > 0 no later than this many nanoseconds from now
- // NS_WHENEVER still active but no particular deadline
- // NS_INACTIVE inactive so don't run again until re-started
- // NS_NEVER never again
- static const nsecs_t NS_WHENEVER = -1, NS_INACTIVE = -2, NS_NEVER = -3;
- nsecs_t processAudioBuffer();
-
- // caller must hold lock on mLock for all _l methods
-
- status_t createTrack_l();
-
- // can only be called when mState != STATE_ACTIVE
- void flush_l();
-
- void setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount);
-
- // FIXME enum is faster than strcmp() for parameter 'from'
- status_t restoreTrack_l(const char *from);
-
- uint32_t getUnderrunCount_l() const;
-
- bool isOffloaded() const;
- bool isDirect() const;
- bool isOffloadedOrDirect() const;
-
- bool isOffloaded_l() const
- { return (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0; }
-
- bool isOffloadedOrDirect_l() const
- { return (mFlags & (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD|
- AUDIO_OUTPUT_FLAG_DIRECT)) != 0; }
-
- bool isDirect_l() const
- { return (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0; }
-
- // pure pcm data is mixable (which excludes HW_AV_SYNC, with embedded timing)
- bool isPurePcmData_l() const
- { return audio_is_linear_pcm(mFormat)
- && (mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) == 0; }
-
- // increment mPosition by the delta of mServer, and return new value of mPosition
- Modulo<uint32_t> updateAndGetPosition_l();
-
- // check sample rate and speed is compatible with AudioTrack
- bool isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed) const;
-
- void restartIfDisabled();
-
- // Next 4 fields may be changed if IAudioTrack is re-created, but always != 0
- sp<IAudioTrack> mAudioTrack;
- sp<IMemory> mCblkMemory;
- audio_track_cblk_t* mCblk; // re-load after mLock.unlock()
- audio_io_handle_t mOutput; // returned by AudioSystem::getOutput()
-
- sp<AudioTrackThread> mAudioTrackThread;
- bool mThreadCanCallJava;
-
- float mVolume[2];
- float mSendLevel;
- mutable uint32_t mSampleRate; // mutable because getSampleRate() can update it
- uint32_t mOriginalSampleRate;
- AudioPlaybackRate mPlaybackRate;
- float mMaxRequiredSpeed; // use PCM buffer size to allow this speed
-
- // Corresponds to current IAudioTrack, value is reported back by AudioFlinger to the client.
- // This allocated buffer size is maintained by the proxy.
- size_t mFrameCount; // maximum size of buffer
-
- size_t mReqFrameCount; // frame count to request the first or next time
- // a new IAudioTrack is needed, non-decreasing
-
- // The following AudioFlinger server-side values are cached in createAudioTrack_l().
- // These values can be used for informational purposes until the track is invalidated,
- // whereupon restoreTrack_l() calls createTrack_l() to update the values.
- uint32_t mAfLatency; // AudioFlinger latency in ms
- size_t mAfFrameCount; // AudioFlinger frame count
- uint32_t mAfSampleRate; // AudioFlinger sample rate
-
- // constant after constructor or set()
- audio_format_t mFormat; // as requested by client, not forced to 16-bit
- audio_stream_type_t mStreamType; // mStreamType == AUDIO_STREAM_DEFAULT implies
- // this AudioTrack has valid attributes
- uint32_t mChannelCount;
- audio_channel_mask_t mChannelMask;
- sp<IMemory> mSharedBuffer;
- transfer_type mTransfer;
- audio_offload_info_t mOffloadInfoCopy;
- const audio_offload_info_t* mOffloadInfo;
- audio_attributes_t mAttributes;
-
- size_t mFrameSize; // frame size in bytes
-
- status_t mStatus;
-
- // can change dynamically when IAudioTrack invalidated
- uint32_t mLatency; // in ms
-
- // Indicates the current track state. Protected by mLock.
- enum State {
- STATE_ACTIVE,
- STATE_STOPPED,
- STATE_PAUSED,
- STATE_PAUSED_STOPPING,
- STATE_FLUSHED,
- STATE_STOPPING,
- } mState;
-
- // for client callback handler
- callback_t mCbf; // callback handler for events, or NULL
- void* mUserData;
-
- // for notification APIs
-
- // next 2 fields are const after constructor or set()
- uint32_t mNotificationFramesReq; // requested number of frames between each
- // notification callback,
- // at initial source sample rate
- uint32_t mNotificationsPerBufferReq;
- // requested number of notifications per buffer,
- // currently only used for fast tracks with
- // default track buffer size
-
- uint32_t mNotificationFramesAct; // actual number of frames between each
- // notification callback,
- // at initial source sample rate
- bool mRefreshRemaining; // processAudioBuffer() should refresh
- // mRemainingFrames and mRetryOnPartialBuffer
-
- // used for static track cbf and restoration
- int32_t mLoopCount; // last setLoop loopCount; zero means disabled
- uint32_t mLoopStart; // last setLoop loopStart
- uint32_t mLoopEnd; // last setLoop loopEnd
- int32_t mLoopCountNotified; // the last loopCount notified by callback.
- // mLoopCountNotified counts down, matching
- // the remaining loop count for static track
- // playback.
-
- // These are private to processAudioBuffer(), and are not protected by a lock
- uint32_t mRemainingFrames; // number of frames to request in obtainBuffer()
- bool mRetryOnPartialBuffer; // sleep and retry after partial obtainBuffer()
- uint32_t mObservedSequence; // last observed value of mSequence
-
- Modulo<uint32_t> mMarkerPosition; // in wrapping (overflow) frame units
- bool mMarkerReached;
- Modulo<uint32_t> mNewPosition; // in frames
- uint32_t mUpdatePeriod; // in frames, zero means no EVENT_NEW_POS
-
- Modulo<uint32_t> mServer; // in frames, last known mProxy->getPosition()
- // which is count of frames consumed by server,
- // reset by new IAudioTrack,
- // whether it is reset by stop() is TBD
- Modulo<uint32_t> mPosition; // in frames, like mServer except continues
- // monotonically after new IAudioTrack,
- // and could be easily widened to uint64_t
- Modulo<uint32_t> mReleased; // count of frames released to server
- // but not necessarily consumed by server,
- // reset by stop() but continues monotonically
- // after new IAudioTrack to restore mPosition,
- // and could be easily widened to uint64_t
- int64_t mStartUs; // the start time after flush or stop.
- // only used for offloaded and direct tracks.
-
- bool mPreviousTimestampValid;// true if mPreviousTimestamp is valid
- bool mTimestampStartupGlitchReported; // reduce log spam
- bool mRetrogradeMotionReported; // reduce log spam
- AudioTimestamp mPreviousTimestamp; // used to detect retrograde motion
- ExtendedTimestamp::Location mPreviousLocation; // location used for previous timestamp
-
- uint32_t mUnderrunCountOffset; // updated when restoring tracks
-
- int64_t mFramesWritten; // total frames written. reset to zero after
- // the start() following stop(). It is not
- // changed after restoring the track or
- // after flush.
- int64_t mFramesWrittenServerOffset; // An offset to server frames due to
- // restoring AudioTrack, or stop/start.
-
- audio_output_flags_t mFlags; // same as mOrigFlags, except for bits that may
- // be denied by client or server, such as
- // AUDIO_OUTPUT_FLAG_FAST. mLock must be
- // held to read or write those bits reliably.
- audio_output_flags_t mOrigFlags; // as specified in constructor or set(), const
-
- bool mDoNotReconnect;
-
- audio_session_t mSessionId;
- int mAuxEffectId;
-
- mutable Mutex mLock;
-
- int mPreviousPriority; // before start()
- SchedPolicy mPreviousSchedulingGroup;
- bool mAwaitBoost; // thread should wait for priority boost before running
-
- // The proxy should only be referenced while a lock is held because the proxy isn't
- // multi-thread safe, especially the SingleStateQueue part of the proxy.
- // An exception is that a blocking ClientProxy::obtainBuffer() may be called without a lock,
- // provided that the caller also holds an extra reference to the proxy and shared memory to keep
- // them around in case they are replaced during the obtainBuffer().
- sp<StaticAudioTrackClientProxy> mStaticProxy; // for type safety only
- sp<AudioTrackClientProxy> mProxy; // primary owner of the memory
-
- bool mInUnderrun; // whether track is currently in underrun state
- uint32_t mPausedPosition;
-
- // For Device Selection API
- // a value of AUDIO_PORT_HANDLE_NONE indicated default (AudioPolicyManager) routing.
- audio_port_handle_t mSelectedDeviceId;
-
-private:
- class DeathNotifier : public IBinder::DeathRecipient {
- public:
- DeathNotifier(AudioTrack* audioTrack) : mAudioTrack(audioTrack) { }
- protected:
- virtual void binderDied(const wp<IBinder>& who);
- private:
- const wp<AudioTrack> mAudioTrack;
- };
-
- sp<DeathNotifier> mDeathNotifier;
- uint32_t mSequence; // incremented for each new IAudioTrack attempt
- int mClientUid;
- pid_t mClientPid;
-
- sp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
-};
-
-}; // namespace android
-
-#endif // ANDROID_AUDIOTRACK_H
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
new file mode 120000
index 0000000..303bfcd
--- /dev/null
+++ b/include/media/AudioTrack.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/AudioTrack.h
\ No newline at end of file
diff --git a/include/media/BufferProviders.h b/include/media/BufferProviders.h
new file mode 120000
index 0000000..779bb15
--- /dev/null
+++ b/include/media/BufferProviders.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/BufferProviders.h
\ No newline at end of file
diff --git a/include/media/BufferingSettings.h b/include/media/BufferingSettings.h
new file mode 120000
index 0000000..409203f
--- /dev/null
+++ b/include/media/BufferingSettings.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/BufferingSettings.h
\ No newline at end of file
diff --git a/include/media/CasImpl.h b/include/media/CasImpl.h
new file mode 100644
index 0000000..726f1ce
--- /dev/null
+++ b/include/media/CasImpl.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CAS_IMPL_H_
+#define CAS_IMPL_H_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <android/media/BnCas.h>
+
+namespace android {
+namespace media {
+class ICasListener;
+}
+using namespace media;
+using namespace MediaCas;
+using binder::Status;
+struct CasPlugin;
+class SharedLibrary;
+
+class CasImpl : public BnCas {
+public:
+ CasImpl(const sp<ICasListener> &listener);
+ virtual ~CasImpl();
+
+ static void OnEvent(
+ void *appData,
+ int32_t event,
+ int32_t arg,
+ uint8_t *data,
+ size_t size);
+
+ void init(const sp<SharedLibrary>& library, CasPlugin *plugin);
+ void onEvent(
+ int32_t event,
+ int32_t arg,
+ uint8_t *data,
+ size_t size);
+
+ // ICas inherits
+
+ virtual Status setPrivateData(
+ const CasData& pvtData) override;
+
+ virtual Status openSession(CasSessionId* _aidl_return) override;
+
+ virtual Status closeSession(const CasSessionId& sessionId) override;
+
+ virtual Status setSessionPrivateData(
+ const CasSessionId& sessionId,
+ const CasData& pvtData) override;
+
+ virtual Status processEcm(
+ const CasSessionId& sessionId, const ParcelableCasData& ecm) override;
+
+ virtual Status processEmm(const ParcelableCasData& emm) override;
+
+ virtual Status sendEvent(
+ int32_t event, int32_t arg, const ::std::unique_ptr<CasData> &eventData) override;
+
+ virtual Status provision(const String16& provisionString) override;
+
+ virtual Status refreshEntitlements(
+ int32_t refreshType, const ::std::unique_ptr<CasData> &refreshData) override;
+
+ virtual Status release() override;
+
+private:
+ struct PluginHolder;
+ sp<SharedLibrary> mLibrary;
+ sp<PluginHolder> mPluginHolder;
+ sp<ICasListener> mListener;
+
+ DISALLOW_EVIL_CONSTRUCTORS(CasImpl);
+};
+
+} // namespace android
+
+#endif // CAS_IMPL_H_
diff --git a/include/media/CharacterEncodingDetector.h b/include/media/CharacterEncodingDetector.h
new file mode 120000
index 0000000..2b28387
--- /dev/null
+++ b/include/media/CharacterEncodingDetector.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/CharacterEncodingDetector.h
\ No newline at end of file
diff --git a/include/media/Crypto.h b/include/media/Crypto.h
deleted file mode 100644
index 7d181d3..0000000
--- a/include/media/Crypto.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef CRYPTO_H_
-
-#define CRYPTO_H_
-
-#include <media/ICrypto.h>
-#include <utils/threads.h>
-#include <utils/KeyedVector.h>
-
-#include "SharedLibrary.h"
-
-namespace android {
-
-struct CryptoFactory;
-struct CryptoPlugin;
-
-struct Crypto : public BnCrypto {
- Crypto();
- virtual ~Crypto();
-
- virtual status_t initCheck() const;
-
- virtual bool isCryptoSchemeSupported(const uint8_t uuid[16]);
-
- virtual status_t createPlugin(
- const uint8_t uuid[16], const void *data, size_t size);
-
- virtual status_t destroyPlugin();
-
- virtual bool requiresSecureDecoderComponent(
- const char *mime) const;
-
- virtual void notifyResolution(uint32_t width, uint32_t height);
-
- virtual status_t setMediaDrmSession(const Vector<uint8_t> &sessionId);
-
- virtual ssize_t decrypt(
- DestinationType dstType,
- const uint8_t key[16],
- const uint8_t iv[16],
- CryptoPlugin::Mode mode,
- const CryptoPlugin::Pattern &pattern,
- const sp<IMemory> &sharedBuffer, size_t offset,
- const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
- void *dstPtr,
- AString *errorDetailMsg);
-
-private:
- mutable Mutex mLock;
-
- status_t mInitCheck;
- sp<SharedLibrary> mLibrary;
- CryptoFactory *mFactory;
- CryptoPlugin *mPlugin;
-
- static KeyedVector<Vector<uint8_t>, String8> mUUIDToLibraryPathMap;
- static KeyedVector<String8, wp<SharedLibrary> > mLibraryPathToOpenLibraryMap;
- static Mutex mMapLock;
-
- void findFactoryForScheme(const uint8_t uuid[16]);
- bool loadLibraryForScheme(const String8 &path, const uint8_t uuid[16]);
- void closeFactory();
-
- DISALLOW_EVIL_CONSTRUCTORS(Crypto);
-};
-
-} // namespace android
-
-#endif // CRYPTO_H_
diff --git a/include/media/Crypto.h b/include/media/Crypto.h
new file mode 120000
index 0000000..9af6495
--- /dev/null
+++ b/include/media/Crypto.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/Crypto.h
\ No newline at end of file
diff --git a/include/media/CryptoHal.h b/include/media/CryptoHal.h
new file mode 120000
index 0000000..92f3137
--- /dev/null
+++ b/include/media/CryptoHal.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/CryptoHal.h
\ No newline at end of file
diff --git a/include/media/DescramblerImpl.h b/include/media/DescramblerImpl.h
new file mode 100644
index 0000000..9f212ac
--- /dev/null
+++ b/include/media/DescramblerImpl.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DESCRAMBLER_IMPL_H_
+#define DESCRAMBLER_IMPL_H_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <android/media/BnDescrambler.h>
+
+namespace android {
+using namespace media;
+using namespace MediaDescrambler;
+using binder::Status;
+struct DescramblerPlugin;
+class SharedLibrary;
+
+class DescramblerImpl : public BnDescrambler {
+public:
+ DescramblerImpl(const sp<SharedLibrary>& library, DescramblerPlugin *plugin);
+ virtual ~DescramblerImpl();
+
+ virtual Status setMediaCasSession(
+ const CasSessionId& sessionId) override;
+
+ virtual Status requiresSecureDecoderComponent(
+ const String16& mime, bool *result) override;
+
+ virtual Status descramble(
+ const DescrambleInfo& descrambleInfo, int32_t *result) override;
+
+ virtual Status release() override;
+
+private:
+ sp<SharedLibrary> mLibrary;
+ DescramblerPlugin *mPlugin;
+
+ DISALLOW_EVIL_CONSTRUCTORS(DescramblerImpl);
+};
+
+} // namespace android
+
+#endif // DESCRAMBLER_IMPL_H_
diff --git a/include/media/Drm.h b/include/media/Drm.h
deleted file mode 100644
index d40019b..0000000
--- a/include/media/Drm.h
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef DRM_H_
-
-#define DRM_H_
-
-#include "SharedLibrary.h"
-
-#include <media/IDrm.h>
-#include <media/IDrmClient.h>
-#include <utils/threads.h>
-
-namespace android {
-
-class DrmFactory;
-class DrmPlugin;
-struct DrmSessionClientInterface;
-
-struct Drm : public BnDrm,
- public IBinder::DeathRecipient,
- public DrmPluginListener {
- Drm();
- virtual ~Drm();
-
- virtual status_t initCheck() const;
-
- virtual bool isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType);
-
- virtual status_t createPlugin(const uint8_t uuid[16]);
-
- virtual status_t destroyPlugin();
-
- virtual status_t openSession(Vector<uint8_t> &sessionId);
-
- virtual status_t closeSession(Vector<uint8_t> const &sessionId);
-
- virtual status_t
- getKeyRequest(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &initData,
- String8 const &mimeType, DrmPlugin::KeyType keyType,
- KeyedVector<String8, String8> const &optionalParameters,
- Vector<uint8_t> &request, String8 &defaultUrl,
- DrmPlugin::KeyRequestType *keyRequestType);
-
- virtual status_t provideKeyResponse(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &response,
- Vector<uint8_t> &keySetId);
-
- virtual status_t removeKeys(Vector<uint8_t> const &keySetId);
-
- virtual status_t restoreKeys(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keySetId);
-
- virtual status_t queryKeyStatus(Vector<uint8_t> const &sessionId,
- KeyedVector<String8, String8> &infoMap) const;
-
- virtual status_t getProvisionRequest(String8 const &certType,
- String8 const &certAuthority,
- Vector<uint8_t> &request,
- String8 &defaulUrl);
-
- virtual status_t provideProvisionResponse(Vector<uint8_t> const &response,
- Vector<uint8_t> &certificate,
- Vector<uint8_t> &wrappedKey);
-
- virtual status_t getSecureStops(List<Vector<uint8_t> > &secureStops);
- virtual status_t getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop);
-
- virtual status_t releaseSecureStops(Vector<uint8_t> const &ssRelease);
- virtual status_t releaseAllSecureStops();
-
- virtual status_t getPropertyString(String8 const &name, String8 &value ) const;
- virtual status_t getPropertyByteArray(String8 const &name,
- Vector<uint8_t> &value ) const;
- virtual status_t setPropertyString(String8 const &name, String8 const &value ) const;
- virtual status_t setPropertyByteArray(String8 const &name,
- Vector<uint8_t> const &value ) const;
-
- virtual status_t setCipherAlgorithm(Vector<uint8_t> const &sessionId,
- String8 const &algorithm);
-
- virtual status_t setMacAlgorithm(Vector<uint8_t> const &sessionId,
- String8 const &algorithm);
-
- virtual status_t encrypt(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &input,
- Vector<uint8_t> const &iv,
- Vector<uint8_t> &output);
-
- virtual status_t decrypt(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &input,
- Vector<uint8_t> const &iv,
- Vector<uint8_t> &output);
-
- virtual status_t sign(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &message,
- Vector<uint8_t> &signature);
-
- virtual status_t verify(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &message,
- Vector<uint8_t> const &signature,
- bool &match);
-
- virtual status_t signRSA(Vector<uint8_t> const &sessionId,
- String8 const &algorithm,
- Vector<uint8_t> const &message,
- Vector<uint8_t> const &wrappedKey,
- Vector<uint8_t> &signature);
-
- virtual status_t setListener(const sp<IDrmClient>& listener);
-
- virtual void sendEvent(DrmPlugin::EventType eventType, int extra,
- Vector<uint8_t> const *sessionId,
- Vector<uint8_t> const *data);
-
- virtual void sendExpirationUpdate(Vector<uint8_t> const *sessionId,
- int64_t expiryTimeInMS);
-
- virtual void sendKeysChange(Vector<uint8_t> const *sessionId,
- Vector<DrmPlugin::KeyStatus> const *keyStatusList,
- bool hasNewUsableKey);
-
- virtual void binderDied(const wp<IBinder> &the_late_who);
-
-private:
- static Mutex mLock;
-
- status_t mInitCheck;
-
- sp<DrmSessionClientInterface> mDrmSessionClient;
-
- sp<IDrmClient> mListener;
- mutable Mutex mEventLock;
- mutable Mutex mNotifyLock;
-
- sp<SharedLibrary> mLibrary;
- DrmFactory *mFactory;
- DrmPlugin *mPlugin;
-
- static KeyedVector<Vector<uint8_t>, String8> mUUIDToLibraryPathMap;
- static KeyedVector<String8, wp<SharedLibrary> > mLibraryPathToOpenLibraryMap;
- static Mutex mMapLock;
-
- void findFactoryForScheme(const uint8_t uuid[16]);
- bool loadLibraryForScheme(const String8 &path, const uint8_t uuid[16]);
- void closeFactory();
- void writeByteArray(Parcel &obj, Vector<uint8_t> const *array);
-
- DISALLOW_EVIL_CONSTRUCTORS(Drm);
-};
-
-} // namespace android
-
-#endif // CRYPTO_H_
diff --git a/include/media/Drm.h b/include/media/Drm.h
new file mode 120000
index 0000000..ac60003
--- /dev/null
+++ b/include/media/Drm.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/Drm.h
\ No newline at end of file
diff --git a/include/media/DrmHal.h b/include/media/DrmHal.h
new file mode 120000
index 0000000..17bb667
--- /dev/null
+++ b/include/media/DrmHal.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/DrmHal.h
\ No newline at end of file
diff --git a/include/media/DrmPluginPath.h b/include/media/DrmPluginPath.h
new file mode 120000
index 0000000..9e05194
--- /dev/null
+++ b/include/media/DrmPluginPath.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/DrmPluginPath.h
\ No newline at end of file
diff --git a/include/media/DrmSessionClientInterface.h b/include/media/DrmSessionClientInterface.h
new file mode 120000
index 0000000..f4e3211
--- /dev/null
+++ b/include/media/DrmSessionClientInterface.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/DrmSessionClientInterface.h
\ No newline at end of file
diff --git a/include/media/DrmSessionManager.h b/include/media/DrmSessionManager.h
new file mode 120000
index 0000000..f0a47bf
--- /dev/null
+++ b/include/media/DrmSessionManager.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/DrmSessionManager.h
\ No newline at end of file
diff --git a/include/media/ExtendedAudioBufferProvider.h b/include/media/ExtendedAudioBufferProvider.h
new file mode 120000
index 0000000..d653cc3
--- /dev/null
+++ b/include/media/ExtendedAudioBufferProvider.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/ExtendedAudioBufferProvider.h
\ No newline at end of file
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
deleted file mode 100644
index 096f7ef..0000000
--- a/include/media/IAudioFlinger.h
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * Copyright (C) 2007 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_IAUDIOFLINGER_H
-#define ANDROID_IAUDIOFLINGER_H
-
-#include <stdint.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <utils/RefBase.h>
-#include <utils/Errors.h>
-#include <binder/IInterface.h>
-#include <media/IAudioTrack.h>
-#include <media/IAudioRecord.h>
-#include <media/IAudioFlingerClient.h>
-#include <system/audio.h>
-#include <system/audio_policy.h>
-#include <hardware/audio_policy.h>
-#include <hardware/audio_effect.h>
-#include <media/IEffect.h>
-#include <media/IEffectClient.h>
-#include <utils/String8.h>
-
-namespace android {
-
-// ----------------------------------------------------------------------------
-
-class IAudioFlinger : public IInterface
-{
-public:
- DECLARE_META_INTERFACE(AudioFlinger);
-
-
- // invariant on exit for all APIs that return an sp<>:
- // (return value != 0) == (*status == NO_ERROR)
-
- /* create an audio track and registers it with AudioFlinger.
- * return null if the track cannot be created.
- */
- virtual sp<IAudioTrack> createTrack(
- audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- size_t *pFrameCount,
- audio_output_flags_t *flags,
- const sp<IMemory>& sharedBuffer,
- // On successful return, AudioFlinger takes over the handle
- // reference and will release it when the track is destroyed.
- // However on failure, the client is responsible for release.
- audio_io_handle_t output,
- pid_t pid,
- pid_t tid, // -1 means unused, otherwise must be valid non-0
- audio_session_t *sessionId,
- int clientUid,
- status_t *status) = 0;
-
- virtual sp<IAudioRecord> openRecord(
- // On successful return, AudioFlinger takes over the handle
- // reference and will release it when the track is destroyed.
- // However on failure, the client is responsible for release.
- audio_io_handle_t input,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- const String16& callingPackage,
- size_t *pFrameCount,
- audio_input_flags_t *flags,
- pid_t pid,
- pid_t tid, // -1 means unused, otherwise must be valid non-0
- int clientUid,
- audio_session_t *sessionId,
- size_t *notificationFrames,
- sp<IMemory>& cblk,
- sp<IMemory>& buffers, // return value 0 means it follows cblk
- status_t *status) = 0;
-
- // FIXME Surprisingly, format/latency don't work for input handles
-
- /* query the audio hardware state. This state never changes,
- * and therefore can be cached.
- */
- virtual uint32_t sampleRate(audio_io_handle_t ioHandle) const = 0;
-
- // reserved; formerly channelCount()
-
- virtual audio_format_t format(audio_io_handle_t output) const = 0;
- virtual size_t frameCount(audio_io_handle_t ioHandle) const = 0;
-
- // return estimated latency in milliseconds
- virtual uint32_t latency(audio_io_handle_t output) const = 0;
-
- /* set/get the audio hardware state. This will probably be used by
- * the preference panel, mostly.
- */
- virtual status_t setMasterVolume(float value) = 0;
- virtual status_t setMasterMute(bool muted) = 0;
-
- virtual float masterVolume() const = 0;
- virtual bool masterMute() const = 0;
-
- /* set/get stream type state. This will probably be used by
- * the preference panel, mostly.
- */
- virtual status_t setStreamVolume(audio_stream_type_t stream, float value,
- audio_io_handle_t output) = 0;
- virtual status_t setStreamMute(audio_stream_type_t stream, bool muted) = 0;
-
- virtual float streamVolume(audio_stream_type_t stream,
- audio_io_handle_t output) const = 0;
- virtual bool streamMute(audio_stream_type_t stream) const = 0;
-
- // set audio mode
- virtual status_t setMode(audio_mode_t mode) = 0;
-
- // mic mute/state
- virtual status_t setMicMute(bool state) = 0;
- virtual bool getMicMute() const = 0;
-
- virtual status_t setParameters(audio_io_handle_t ioHandle,
- const String8& keyValuePairs) = 0;
- virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys)
- const = 0;
-
- // Register an object to receive audio input/output change and track notifications.
- // For a given calling pid, AudioFlinger disregards any registrations after the first.
- // Thus the IAudioFlingerClient must be a singleton per process.
- virtual void registerClient(const sp<IAudioFlingerClient>& client) = 0;
-
- // retrieve the audio recording buffer size
- // FIXME This API assumes a route, and so should be deprecated.
- virtual size_t getInputBufferSize(uint32_t sampleRate, audio_format_t format,
- audio_channel_mask_t channelMask) const = 0;
-
- virtual status_t openOutput(audio_module_handle_t module,
- audio_io_handle_t *output,
- audio_config_t *config,
- audio_devices_t *devices,
- const String8& address,
- uint32_t *latencyMs,
- audio_output_flags_t flags) = 0;
- virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
- audio_io_handle_t output2) = 0;
- virtual status_t closeOutput(audio_io_handle_t output) = 0;
- virtual status_t suspendOutput(audio_io_handle_t output) = 0;
- virtual status_t restoreOutput(audio_io_handle_t output) = 0;
-
- virtual status_t openInput(audio_module_handle_t module,
- audio_io_handle_t *input,
- audio_config_t *config,
- audio_devices_t *device,
- const String8& address,
- audio_source_t source,
- audio_input_flags_t flags) = 0;
- virtual status_t closeInput(audio_io_handle_t input) = 0;
-
- virtual status_t invalidateStream(audio_stream_type_t stream) = 0;
-
- virtual status_t setVoiceVolume(float volume) = 0;
-
- virtual status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames,
- audio_io_handle_t output) const = 0;
-
- virtual uint32_t getInputFramesLost(audio_io_handle_t ioHandle) const = 0;
-
- virtual audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t use) = 0;
-
- virtual void acquireAudioSessionId(audio_session_t audioSession, pid_t pid) = 0;
- virtual void releaseAudioSessionId(audio_session_t audioSession, pid_t pid) = 0;
-
- virtual status_t queryNumberEffects(uint32_t *numEffects) const = 0;
-
- virtual status_t queryEffect(uint32_t index, effect_descriptor_t *pDescriptor) const = 0;
-
- virtual status_t getEffectDescriptor(const effect_uuid_t *pEffectUUID,
- effect_descriptor_t *pDescriptor) const = 0;
-
- virtual sp<IEffect> createEffect(
- effect_descriptor_t *pDesc,
- const sp<IEffectClient>& client,
- int32_t priority,
- // AudioFlinger doesn't take over handle reference from client
- audio_io_handle_t output,
- audio_session_t sessionId,
- const String16& callingPackage,
- status_t *status,
- int *id,
- int *enabled) = 0;
-
- virtual status_t moveEffects(audio_session_t session, audio_io_handle_t srcOutput,
- audio_io_handle_t dstOutput) = 0;
-
- virtual audio_module_handle_t loadHwModule(const char *name) = 0;
-
- // helpers for android.media.AudioManager.getProperty(), see description there for meaning
- // FIXME move these APIs to AudioPolicy to permit a more accurate implementation
- // that looks on primary device for a stream with fast flag, primary flag, or first one.
- virtual uint32_t getPrimaryOutputSamplingRate() = 0;
- virtual size_t getPrimaryOutputFrameCount() = 0;
-
- // Intended for AudioService to inform AudioFlinger of device's low RAM attribute,
- // and should be called at most once. For a definition of what "low RAM" means, see
- // android.app.ActivityManager.isLowRamDevice().
- virtual status_t setLowRamDevice(bool isLowRamDevice) = 0;
-
- /* List available audio ports and their attributes */
- virtual status_t listAudioPorts(unsigned int *num_ports,
- struct audio_port *ports) = 0;
-
- /* Get attributes for a given audio port */
- virtual status_t getAudioPort(struct audio_port *port) = 0;
-
- /* Create an audio patch between several source and sink ports */
- virtual status_t createAudioPatch(const struct audio_patch *patch,
- audio_patch_handle_t *handle) = 0;
-
- /* Release an audio patch */
- virtual status_t releaseAudioPatch(audio_patch_handle_t handle) = 0;
-
- /* List existing audio patches */
- virtual status_t listAudioPatches(unsigned int *num_patches,
- struct audio_patch *patches) = 0;
- /* Set audio port configuration */
- virtual status_t setAudioPortConfig(const struct audio_port_config *config) = 0;
-
- /* Get the HW synchronization source used for an audio session */
- virtual audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId) = 0;
-
- /* Indicate JAVA services are ready (scheduling, power management ...) */
- virtual status_t systemReady() = 0;
-
- // Returns the number of frames per audio HAL buffer.
- virtual size_t frameCountHAL(audio_io_handle_t ioHandle) const = 0;
-};
-
-
-// ----------------------------------------------------------------------------
-
-class BnAudioFlinger : public BnInterface<IAudioFlinger>
-{
-public:
- virtual status_t onTransact( uint32_t code,
- const Parcel& data,
- Parcel* reply,
- uint32_t flags = 0);
-};
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
-
-#endif // ANDROID_IAUDIOFLINGER_H
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
new file mode 120000
index 0000000..ef6f5be
--- /dev/null
+++ b/include/media/IAudioFlinger.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/IAudioFlinger.h
\ No newline at end of file
diff --git a/include/media/IAudioFlingerClient.h b/include/media/IAudioFlingerClient.h
new file mode 120000
index 0000000..dc481e8
--- /dev/null
+++ b/include/media/IAudioFlingerClient.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/IAudioFlingerClient.h
\ No newline at end of file
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
deleted file mode 100644
index f9dcbea..0000000
--- a/include/media/IAudioPolicyService.h
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_IAUDIOPOLICYSERVICE_H
-#define ANDROID_IAUDIOPOLICYSERVICE_H
-
-#include <stdint.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <utils/RefBase.h>
-#include <utils/Errors.h>
-#include <binder/IInterface.h>
-#include <media/AudioSystem.h>
-#include <media/AudioPolicy.h>
-#include <media/IAudioPolicyServiceClient.h>
-
-#include <system/audio_policy.h>
-
-namespace android {
-
-// ----------------------------------------------------------------------------
-
-class IAudioPolicyService : public IInterface
-{
-public:
- DECLARE_META_INTERFACE(AudioPolicyService);
-
- //
- // IAudioPolicyService interface (see AudioPolicyInterface for method descriptions)
- //
- virtual status_t setDeviceConnectionState(audio_devices_t device,
- audio_policy_dev_state_t state,
- const char *device_address,
- const char *device_name) = 0;
- virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
- const char *device_address) = 0;
- virtual status_t handleDeviceConfigChange(audio_devices_t device,
- const char *device_address,
- const char *device_name) = 0;
- virtual status_t setPhoneState(audio_mode_t state) = 0;
- virtual status_t setForceUse(audio_policy_force_use_t usage,
- audio_policy_forced_cfg_t config) = 0;
- virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) = 0;
- virtual audio_io_handle_t getOutput(audio_stream_type_t stream,
- uint32_t samplingRate = 0,
- audio_format_t format = AUDIO_FORMAT_DEFAULT,
- audio_channel_mask_t channelMask = 0,
- audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
- const audio_offload_info_t *offloadInfo = NULL) = 0;
- virtual status_t getOutputForAttr(const audio_attributes_t *attr,
- audio_io_handle_t *output,
- audio_session_t session,
- audio_stream_type_t *stream,
- uid_t uid,
- uint32_t samplingRate = 0,
- audio_format_t format = AUDIO_FORMAT_DEFAULT,
- audio_channel_mask_t channelMask = 0,
- audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
- audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
- const audio_offload_info_t *offloadInfo = NULL) = 0;
- virtual status_t startOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session) = 0;
- virtual status_t stopOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session) = 0;
- virtual void releaseOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session) = 0;
- virtual status_t getInputForAttr(const audio_attributes_t *attr,
- audio_io_handle_t *input,
- audio_session_t session,
- pid_t pid,
- uid_t uid,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- audio_input_flags_t flags,
- audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE) = 0;
- virtual status_t startInput(audio_io_handle_t input,
- audio_session_t session) = 0;
- virtual status_t stopInput(audio_io_handle_t input,
- audio_session_t session) = 0;
- virtual void releaseInput(audio_io_handle_t input,
- audio_session_t session) = 0;
- virtual status_t initStreamVolume(audio_stream_type_t stream,
- int indexMin,
- int indexMax) = 0;
- virtual status_t setStreamVolumeIndex(audio_stream_type_t stream,
- int index,
- audio_devices_t device) = 0;
- virtual status_t getStreamVolumeIndex(audio_stream_type_t stream,
- int *index,
- audio_devices_t device) = 0;
- virtual uint32_t getStrategyForStream(audio_stream_type_t stream) = 0;
- virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream) = 0;
- virtual audio_io_handle_t getOutputForEffect(const effect_descriptor_t *desc) = 0;
- virtual status_t registerEffect(const effect_descriptor_t *desc,
- audio_io_handle_t io,
- uint32_t strategy,
- audio_session_t session,
- int id) = 0;
- virtual status_t unregisterEffect(int id) = 0;
- virtual status_t setEffectEnabled(int id, bool enabled) = 0;
- virtual bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const = 0;
- virtual bool isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs = 0)
- const = 0;
- virtual bool isSourceActive(audio_source_t source) const = 0;
- virtual status_t queryDefaultPreProcessing(audio_session_t audioSession,
- effect_descriptor_t *descriptors,
- uint32_t *count) = 0;
- // Check if offload is possible for given format, stream type, sample rate,
- // bit rate, duration, video and streaming or offload property is enabled
- virtual bool isOffloadSupported(const audio_offload_info_t& info) = 0;
-
- /* List available audio ports and their attributes */
- virtual status_t listAudioPorts(audio_port_role_t role,
- audio_port_type_t type,
- unsigned int *num_ports,
- struct audio_port *ports,
- unsigned int *generation) = 0;
-
- /* Get attributes for a given audio port */
- virtual status_t getAudioPort(struct audio_port *port) = 0;
-
- /* Create an audio patch between several source and sink ports */
- virtual status_t createAudioPatch(const struct audio_patch *patch,
- audio_patch_handle_t *handle) = 0;
-
- /* Release an audio patch */
- virtual status_t releaseAudioPatch(audio_patch_handle_t handle) = 0;
-
- /* List existing audio patches */
- virtual status_t listAudioPatches(unsigned int *num_patches,
- struct audio_patch *patches,
- unsigned int *generation) = 0;
- /* Set audio port configuration */
- virtual status_t setAudioPortConfig(const struct audio_port_config *config) = 0;
-
- virtual void registerClient(const sp<IAudioPolicyServiceClient>& client) = 0;
-
- virtual void setAudioPortCallbacksEnabled(bool enabled) = 0;
-
- virtual status_t acquireSoundTriggerSession(audio_session_t *session,
- audio_io_handle_t *ioHandle,
- audio_devices_t *device) = 0;
-
- virtual status_t releaseSoundTriggerSession(audio_session_t session) = 0;
-
- virtual audio_mode_t getPhoneState() = 0;
-
- virtual status_t registerPolicyMixes(const Vector<AudioMix>& mixes, bool registration) = 0;
-
- virtual status_t startAudioSource(const struct audio_port_config *source,
- const audio_attributes_t *attributes,
- audio_io_handle_t *handle) = 0;
- virtual status_t stopAudioSource(audio_io_handle_t handle) = 0;
-
- virtual status_t setMasterMono(bool mono) = 0;
- virtual status_t getMasterMono(bool *mono) = 0;
-};
-
-
-// ----------------------------------------------------------------------------
-
-class BnAudioPolicyService : public BnInterface<IAudioPolicyService>
-{
-public:
- virtual status_t onTransact( uint32_t code,
- const Parcel& data,
- Parcel* reply,
- uint32_t flags = 0);
-};
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
-
-#endif // ANDROID_IAUDIOPOLICYSERVICE_H
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
new file mode 120000
index 0000000..08101fc
--- /dev/null
+++ b/include/media/IAudioPolicyService.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/IAudioPolicyService.h
\ No newline at end of file
diff --git a/include/media/IAudioPolicyServiceClient.h b/include/media/IAudioPolicyServiceClient.h
new file mode 120000
index 0000000..0d4b3e7
--- /dev/null
+++ b/include/media/IAudioPolicyServiceClient.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/IAudioPolicyServiceClient.h
\ No newline at end of file
diff --git a/include/media/IAudioRecord.h b/include/media/IAudioRecord.h
new file mode 120000
index 0000000..7fbf8f2
--- /dev/null
+++ b/include/media/IAudioRecord.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/IAudioRecord.h
\ No newline at end of file
diff --git a/include/media/IAudioTrack.h b/include/media/IAudioTrack.h
deleted file mode 100644
index a31cec6..0000000
--- a/include/media/IAudioTrack.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (C) 2007 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_IAUDIOTRACK_H
-#define ANDROID_IAUDIOTRACK_H
-
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <utils/RefBase.h>
-#include <utils/Errors.h>
-#include <binder/IInterface.h>
-#include <binder/IMemory.h>
-#include <utils/String8.h>
-#include <media/AudioTimestamp.h>
-
-namespace android {
-
-// ----------------------------------------------------------------------------
-
-class IAudioTrack : public IInterface
-{
-public:
- DECLARE_META_INTERFACE(AudioTrack);
-
- /* Get this track's control block */
- virtual sp<IMemory> getCblk() const = 0;
-
- /* After it's created the track is not active. Call start() to
- * make it active.
- */
- virtual status_t start() = 0;
-
- /* Stop a track. If set, the callback will cease being called and
- * obtainBuffer will return an error. Buffers that are already released
- * will continue to be processed, unless/until flush() is called.
- */
- virtual void stop() = 0;
-
- /* Flush a stopped or paused track. All pending/released buffers are discarded.
- * This function has no effect if the track is not stopped or paused.
- */
- virtual void flush() = 0;
-
- /* Pause a track. If set, the callback will cease being called and
- * obtainBuffer will return an error. Buffers that are already released
- * will continue to be processed, unless/until flush() is called.
- */
- virtual void pause() = 0;
-
- /* Attach track auxiliary output to specified effect. Use effectId = 0
- * to detach track from effect.
- */
- virtual status_t attachAuxEffect(int effectId) = 0;
-
- /* Send parameters to the audio hardware */
- virtual status_t setParameters(const String8& keyValuePairs) = 0;
-
- /* Return NO_ERROR if timestamp is valid. timestamp is undefined otherwise. */
- virtual status_t getTimestamp(AudioTimestamp& timestamp) = 0;
-
- /* Signal the playback thread for a change in control block */
- virtual void signal() = 0;
-};
-
-// ----------------------------------------------------------------------------
-
-class BnAudioTrack : public BnInterface<IAudioTrack>
-{
-public:
- virtual status_t onTransact( uint32_t code,
- const Parcel& data,
- Parcel* reply,
- uint32_t flags = 0);
-};
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
-
-#endif // ANDROID_IAUDIOTRACK_H
diff --git a/include/media/IAudioTrack.h b/include/media/IAudioTrack.h
new file mode 120000
index 0000000..7bab1fd
--- /dev/null
+++ b/include/media/IAudioTrack.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/IAudioTrack.h
\ No newline at end of file
diff --git a/include/media/ICrypto.h b/include/media/ICrypto.h
deleted file mode 100644
index a4bfaf8..0000000
--- a/include/media/ICrypto.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <binder/IInterface.h>
-#include <media/stagefright/foundation/ABase.h>
-#include <media/hardware/CryptoAPI.h>
-
-#ifndef ANDROID_ICRYPTO_H_
-
-#define ANDROID_ICRYPTO_H_
-
-namespace android {
-
-struct AString;
-class IMemory;
-
-struct ICrypto : public IInterface {
- DECLARE_META_INTERFACE(Crypto);
-
- virtual status_t initCheck() const = 0;
-
- virtual bool isCryptoSchemeSupported(const uint8_t uuid[16]) = 0;
-
- virtual status_t createPlugin(
- const uint8_t uuid[16], const void *data, size_t size) = 0;
-
- virtual status_t destroyPlugin() = 0;
-
- virtual bool requiresSecureDecoderComponent(
- const char *mime) const = 0;
-
- virtual void notifyResolution(uint32_t width, uint32_t height) = 0;
-
- virtual status_t setMediaDrmSession(const Vector<uint8_t> &sessionId) = 0;
-
- enum DestinationType {
- kDestinationTypeVmPointer, // non-secure
- kDestinationTypeOpaqueHandle, // secure
- kDestinationTypeNativeHandle // secure
- };
-
- virtual ssize_t decrypt(
- DestinationType dstType,
- const uint8_t key[16],
- const uint8_t iv[16],
- CryptoPlugin::Mode mode,
- const CryptoPlugin::Pattern &pattern,
- const sp<IMemory> &sharedBuffer, size_t offset,
- const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
- void *dstPtr,
- AString *errorDetailMsg) = 0;
-
-private:
- DISALLOW_EVIL_CONSTRUCTORS(ICrypto);
-};
-
-struct BnCrypto : public BnInterface<ICrypto> {
- virtual status_t onTransact(
- uint32_t code, const Parcel &data, Parcel *reply,
- uint32_t flags = 0);
-private:
- void readVector(const Parcel &data, Vector<uint8_t> &vector) const;
- void writeVector(Parcel *reply, Vector<uint8_t> const &vector) const;
-};
-
-} // namespace android
-
-#endif // ANDROID_ICRYPTO_H_
diff --git a/include/media/ICrypto.h b/include/media/ICrypto.h
new file mode 120000
index 0000000..b250e07
--- /dev/null
+++ b/include/media/ICrypto.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/ICrypto.h
\ No newline at end of file
diff --git a/include/media/IDataSource.h b/include/media/IDataSource.h
new file mode 120000
index 0000000..41cdd8b
--- /dev/null
+++ b/include/media/IDataSource.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/IDataSource.h
\ No newline at end of file
diff --git a/include/media/IDrm.h b/include/media/IDrm.h
deleted file mode 100644
index fd51fd0..0000000
--- a/include/media/IDrm.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <binder/IInterface.h>
-#include <media/stagefright/foundation/ABase.h>
-#include <media/drm/DrmAPI.h>
-#include <media/IDrmClient.h>
-
-#ifndef ANDROID_IDRM_H_
-
-#define ANDROID_IDRM_H_
-
-namespace android {
-
-struct AString;
-
-struct IDrm : public IInterface {
- DECLARE_META_INTERFACE(Drm);
-
- virtual status_t initCheck() const = 0;
-
- virtual bool isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType) = 0;
-
- virtual status_t createPlugin(const uint8_t uuid[16]) = 0;
-
- virtual status_t destroyPlugin() = 0;
-
- virtual status_t openSession(Vector<uint8_t> &sessionId) = 0;
-
- virtual status_t closeSession(Vector<uint8_t> const &sessionId) = 0;
-
- virtual status_t
- getKeyRequest(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &initData,
- String8 const &mimeType, DrmPlugin::KeyType keyType,
- KeyedVector<String8, String8> const &optionalParameters,
- Vector<uint8_t> &request, String8 &defaultUrl,
- DrmPlugin::KeyRequestType *keyRequestType) = 0;
-
- virtual status_t provideKeyResponse(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &response,
- Vector<uint8_t> &keySetId) = 0;
-
- virtual status_t removeKeys(Vector<uint8_t> const &keySetId) = 0;
-
- virtual status_t restoreKeys(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keySetId) = 0;
-
- virtual status_t queryKeyStatus(Vector<uint8_t> const &sessionId,
- KeyedVector<String8, String8> &infoMap) const = 0;
-
- virtual status_t getProvisionRequest(String8 const &certType,
- String8 const &certAuthority,
- Vector<uint8_t> &request,
- String8 &defaulUrl) = 0;
-
- virtual status_t provideProvisionResponse(Vector<uint8_t> const &response,
- Vector<uint8_t> &certificate,
- Vector<uint8_t> &wrappedKey) = 0;
-
- virtual status_t getSecureStops(List<Vector<uint8_t> > &secureStops) = 0;
- virtual status_t getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop) = 0;
-
- virtual status_t releaseSecureStops(Vector<uint8_t> const &ssRelease) = 0;
- virtual status_t releaseAllSecureStops() = 0;
-
- virtual status_t getPropertyString(String8 const &name, String8 &value) const = 0;
- virtual status_t getPropertyByteArray(String8 const &name,
- Vector<uint8_t> &value) const = 0;
- virtual status_t setPropertyString(String8 const &name,
- String8 const &value ) const = 0;
- virtual status_t setPropertyByteArray(String8 const &name,
- Vector<uint8_t> const &value) const = 0;
-
- virtual status_t setCipherAlgorithm(Vector<uint8_t> const &sessionId,
- String8 const &algorithm) = 0;
-
- virtual status_t setMacAlgorithm(Vector<uint8_t> const &sessionId,
- String8 const &algorithm) = 0;
-
- virtual status_t encrypt(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &input,
- Vector<uint8_t> const &iv,
- Vector<uint8_t> &output) = 0;
-
- virtual status_t decrypt(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &input,
- Vector<uint8_t> const &iv,
- Vector<uint8_t> &output) = 0;
-
- virtual status_t sign(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &message,
- Vector<uint8_t> &signature) = 0;
-
- virtual status_t verify(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &message,
- Vector<uint8_t> const &signature,
- bool &match) = 0;
-
- virtual status_t signRSA(Vector<uint8_t> const &sessionId,
- String8 const &algorithm,
- Vector<uint8_t> const &message,
- Vector<uint8_t> const &wrappedKey,
- Vector<uint8_t> &signature) = 0;
-
- virtual status_t setListener(const sp<IDrmClient>& listener) = 0;
-
-private:
- DISALLOW_EVIL_CONSTRUCTORS(IDrm);
-};
-
-struct BnDrm : public BnInterface<IDrm> {
- virtual status_t onTransact(
- uint32_t code, const Parcel &data, Parcel *reply,
- uint32_t flags = 0);
-private:
- void readVector(const Parcel &data, Vector<uint8_t> &vector) const;
- void writeVector(Parcel *reply, Vector<uint8_t> const &vector) const;
-};
-
-} // namespace android
-
-#endif // ANDROID_IDRM_H_
diff --git a/include/media/IDrm.h b/include/media/IDrm.h
new file mode 120000
index 0000000..841bb1b
--- /dev/null
+++ b/include/media/IDrm.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/IDrm.h
\ No newline at end of file
diff --git a/include/media/IDrmClient.h b/include/media/IDrmClient.h
new file mode 120000
index 0000000..10aa5c0
--- /dev/null
+++ b/include/media/IDrmClient.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/IDrmClient.h
\ No newline at end of file
diff --git a/include/media/IEffect.h b/include/media/IEffect.h
new file mode 120000
index 0000000..2fb8bfb
--- /dev/null
+++ b/include/media/IEffect.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/IEffect.h
\ No newline at end of file
diff --git a/include/media/IEffectClient.h b/include/media/IEffectClient.h
new file mode 120000
index 0000000..b4e39cf
--- /dev/null
+++ b/include/media/IEffectClient.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/IEffectClient.h
\ No newline at end of file
diff --git a/include/media/IHDCP.h b/include/media/IHDCP.h
new file mode 120000
index 0000000..9d4568e
--- /dev/null
+++ b/include/media/IHDCP.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/IHDCP.h
\ No newline at end of file
diff --git a/include/media/IMediaAnalyticsService.h b/include/media/IMediaAnalyticsService.h
new file mode 120000
index 0000000..a596d60
--- /dev/null
+++ b/include/media/IMediaAnalyticsService.h
@@ -0,0 +1 @@
+../../media/libmediametrics/include/IMediaAnalyticsService.h
\ No newline at end of file
diff --git a/include/media/IMediaCodecList.h b/include/media/IMediaCodecList.h
new file mode 120000
index 0000000..2186312
--- /dev/null
+++ b/include/media/IMediaCodecList.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/IMediaCodecList.h
\ No newline at end of file
diff --git a/include/media/IMediaCodecService.h b/include/media/IMediaCodecService.h
deleted file mode 100644
index 984a0fd..0000000
--- a/include/media/IMediaCodecService.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_IMEDIACODECSERVICE_H
-#define ANDROID_IMEDIACODECSERVICE_H
-
-#include <binder/IInterface.h>
-#include <binder/IMemory.h>
-#include <binder/Parcel.h>
-#include <media/IDataSource.h>
-#include <include/OMX.h>
-
-namespace android {
-
-class IMediaCodecService: public IInterface
-{
-public:
- DECLARE_META_INTERFACE(MediaCodecService);
-
- virtual sp<IOMX> getOMX() = 0;
-};
-
-class BnMediaCodecService: public BnInterface<IMediaCodecService>
-{
-public:
- virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply,
- uint32_t flags = 0);
-};
-
-} // namespace android
-
-#endif // ANDROID_IMEDIACODECSERVICE_H
diff --git a/include/media/IMediaCodecService.h b/include/media/IMediaCodecService.h
new file mode 120000
index 0000000..37f6822
--- /dev/null
+++ b/include/media/IMediaCodecService.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/IMediaCodecService.h
\ No newline at end of file
diff --git a/include/media/IMediaDeathNotifier.h b/include/media/IMediaDeathNotifier.h
new file mode 120000
index 0000000..ce3b8f0
--- /dev/null
+++ b/include/media/IMediaDeathNotifier.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/IMediaDeathNotifier.h
\ No newline at end of file
diff --git a/include/media/IMediaDrmService.h b/include/media/IMediaDrmService.h
new file mode 120000
index 0000000..f3c260f
--- /dev/null
+++ b/include/media/IMediaDrmService.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/IMediaDrmService.h
\ No newline at end of file
diff --git a/include/media/IMediaExtractor.h b/include/media/IMediaExtractor.h
deleted file mode 100644
index 34b15e9..0000000
--- a/include/media/IMediaExtractor.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef IMEDIA_EXTRACTOR_BASE_H_
-
-#define IMEDIA_EXTRACTOR_BASE_H_
-
-#include <media/IMediaSource.h>
-#include <media/stagefright/DataSource.h>
-
-namespace android {
-
-class MetaData;
-
-class IMediaExtractor : public IInterface {
-public:
- DECLARE_META_INTERFACE(MediaExtractor);
-
- virtual size_t countTracks() = 0;
- virtual sp<IMediaSource> getTrack(size_t index) = 0;
-
- enum GetTrackMetaDataFlags {
- kIncludeExtensiveMetaData = 1
- };
- virtual sp<MetaData> getTrackMetaData(
- size_t index, uint32_t flags = 0) = 0;
-
- // Return container specific meta-data. The default implementation
- // returns an empty metadata object.
- virtual sp<MetaData> getMetaData() = 0;
-
- enum Flags {
- CAN_SEEK_BACKWARD = 1, // the "seek 10secs back button"
- CAN_SEEK_FORWARD = 2, // the "seek 10secs forward button"
- CAN_PAUSE = 4,
- CAN_SEEK = 8, // the "seek bar"
- };
-
- // If subclasses do _not_ override this, the default is
- // CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK | CAN_PAUSE
- virtual uint32_t flags() const = 0;
-
- // for DRM
- virtual void setDrmFlag(bool flag) = 0;
- virtual bool getDrmFlag() = 0;
- virtual char* getDrmTrackInfo(size_t trackID, int *len) = 0;
- virtual void setUID(uid_t uid) = 0;
-
- virtual const char * name() = 0;
-};
-
-
-class BnMediaExtractor: public BnInterface<IMediaExtractor>
-{
-public:
- virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply,
- uint32_t flags = 0);
-};
-
-void registerMediaExtractor(
- const sp<IMediaExtractor> &extractor,
- const sp<DataSource> &source,
- const char *mime);
-
-void registerMediaSource(
- const sp<IMediaExtractor> &extractor,
- const sp<IMediaSource> &source);
-
-status_t dumpExtractors(int fd, const Vector<String16>& args);
-
-
-} // namespace android
-
-#endif // IMEDIA_EXTRACTOR_BASE_H_
diff --git a/include/media/IMediaExtractor.h b/include/media/IMediaExtractor.h
new file mode 120000
index 0000000..8708c8c
--- /dev/null
+++ b/include/media/IMediaExtractor.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/IMediaExtractor.h
\ No newline at end of file
diff --git a/include/media/IMediaExtractorService.h b/include/media/IMediaExtractorService.h
deleted file mode 100644
index 4d7b317..0000000
--- a/include/media/IMediaExtractorService.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_IMEDIAEXTRACTORSERVICE_H
-#define ANDROID_IMEDIAEXTRACTORSERVICE_H
-
-#include <binder/IInterface.h>
-#include <binder/IMemory.h>
-#include <binder/Parcel.h>
-#include <media/IDataSource.h>
-#include <media/IMediaExtractor.h>
-
-namespace android {
-
-class IMediaExtractorService: public IInterface
-{
-public:
- DECLARE_META_INTERFACE(MediaExtractorService);
-
- virtual sp<IMediaExtractor> makeExtractor(const sp<IDataSource> &source, const char *mime) = 0;
-
-};
-
-class BnMediaExtractorService: public BnInterface<IMediaExtractorService>
-{
-public:
- virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply,
- uint32_t flags = 0);
-};
-
-} // namespace android
-
-#endif // ANDROID_IMEDIAEXTRACTORSERVICE_H
diff --git a/include/media/IMediaExtractorService.h b/include/media/IMediaExtractorService.h
new file mode 120000
index 0000000..3ee9f1e
--- /dev/null
+++ b/include/media/IMediaExtractorService.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/IMediaExtractorService.h
\ No newline at end of file
diff --git a/include/media/IMediaHTTPConnection.h b/include/media/IMediaHTTPConnection.h
new file mode 120000
index 0000000..0970c15
--- /dev/null
+++ b/include/media/IMediaHTTPConnection.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/IMediaHTTPConnection.h
\ No newline at end of file
diff --git a/include/media/IMediaHTTPService.h b/include/media/IMediaHTTPService.h
new file mode 120000
index 0000000..b90c34f
--- /dev/null
+++ b/include/media/IMediaHTTPService.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/IMediaHTTPService.h
\ No newline at end of file
diff --git a/include/media/IMediaLogService.h b/include/media/IMediaLogService.h
deleted file mode 100644
index 1f5777e..0000000
--- a/include/media/IMediaLogService.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_IMEDIALOGSERVICE_H
-#define ANDROID_IMEDIALOGSERVICE_H
-
-#include <binder/IInterface.h>
-#include <binder/IMemory.h>
-#include <binder/Parcel.h>
-
-namespace android {
-
-class IMediaLogService: public IInterface
-{
-public:
- DECLARE_META_INTERFACE(MediaLogService);
-
- virtual void registerWriter(const sp<IMemory>& shared, size_t size, const char *name) = 0;
- virtual void unregisterWriter(const sp<IMemory>& shared) = 0;
-
-};
-
-class BnMediaLogService: public BnInterface<IMediaLogService>
-{
-public:
- virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply,
- uint32_t flags = 0);
-};
-
-} // namespace android
-
-#endif // ANDROID_IMEDIALOGSERVICE_H
diff --git a/include/media/IMediaLogService.h b/include/media/IMediaLogService.h
new file mode 120000
index 0000000..245a29d
--- /dev/null
+++ b/include/media/IMediaLogService.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/IMediaLogService.h
\ No newline at end of file
diff --git a/include/media/IMediaMetadataRetriever.h b/include/media/IMediaMetadataRetriever.h
new file mode 120000
index 0000000..959df1a
--- /dev/null
+++ b/include/media/IMediaMetadataRetriever.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/IMediaMetadataRetriever.h
\ No newline at end of file
diff --git a/include/media/IMediaPlayer.h b/include/media/IMediaPlayer.h
deleted file mode 100644
index 0fd8933..0000000
--- a/include/media/IMediaPlayer.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_IMEDIAPLAYER_H
-#define ANDROID_IMEDIAPLAYER_H
-
-#include <utils/RefBase.h>
-#include <binder/IInterface.h>
-#include <binder/Parcel.h>
-#include <utils/KeyedVector.h>
-#include <system/audio.h>
-
-// Fwd decl to make sure everyone agrees that the scope of struct sockaddr_in is
-// global, and not in android::
-struct sockaddr_in;
-
-namespace android {
-
-class Parcel;
-class Surface;
-class IDataSource;
-struct IStreamSource;
-class IGraphicBufferProducer;
-struct IMediaHTTPService;
-struct AudioPlaybackRate;
-struct AVSyncSettings;
-
-class IMediaPlayer: public IInterface
-{
-public:
- DECLARE_META_INTERFACE(MediaPlayer);
-
- virtual void disconnect() = 0;
-
- virtual status_t setDataSource(
- const sp<IMediaHTTPService> &httpService,
- const char *url,
- const KeyedVector<String8, String8>* headers) = 0;
-
- virtual status_t setDataSource(int fd, int64_t offset, int64_t length) = 0;
- virtual status_t setDataSource(const sp<IStreamSource>& source) = 0;
- virtual status_t setDataSource(const sp<IDataSource>& source) = 0;
- virtual status_t setVideoSurfaceTexture(
- const sp<IGraphicBufferProducer>& bufferProducer) = 0;
- virtual status_t prepareAsync() = 0;
- virtual status_t start() = 0;
- virtual status_t stop() = 0;
- virtual status_t pause() = 0;
- virtual status_t isPlaying(bool* state) = 0;
- virtual status_t setPlaybackSettings(const AudioPlaybackRate& rate) = 0;
- virtual status_t getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */) = 0;
- virtual status_t setSyncSettings(const AVSyncSettings& sync, float videoFpsHint) = 0;
- virtual status_t getSyncSettings(AVSyncSettings* sync /* nonnull */,
- float* videoFps /* nonnull */) = 0;
- virtual status_t seekTo(int msec) = 0;
- virtual status_t getCurrentPosition(int* msec) = 0;
- virtual status_t getDuration(int* msec) = 0;
- virtual status_t reset() = 0;
- virtual status_t setAudioStreamType(audio_stream_type_t type) = 0;
- virtual status_t setLooping(int loop) = 0;
- virtual status_t setVolume(float leftVolume, float rightVolume) = 0;
- virtual status_t setAuxEffectSendLevel(float level) = 0;
- virtual status_t attachAuxEffect(int effectId) = 0;
- virtual status_t setParameter(int key, const Parcel& request) = 0;
- virtual status_t getParameter(int key, Parcel* reply) = 0;
- virtual status_t setRetransmitEndpoint(const struct sockaddr_in* endpoint) = 0;
- virtual status_t getRetransmitEndpoint(struct sockaddr_in* endpoint) = 0;
- virtual status_t setNextPlayer(const sp<IMediaPlayer>& next) = 0;
-
- // Invoke a generic method on the player by using opaque parcels
- // for the request and reply.
- // @param request Parcel that must start with the media player
- // interface token.
- // @param[out] reply Parcel to hold the reply data. Cannot be null.
- // @return OK if the invocation was made successfully.
- virtual status_t invoke(const Parcel& request, Parcel *reply) = 0;
-
- // Set a new metadata filter.
- // @param filter A set of allow and drop rules serialized in a Parcel.
- // @return OK if the invocation was made successfully.
- virtual status_t setMetadataFilter(const Parcel& filter) = 0;
-
- // Retrieve a set of metadata.
- // @param update_only Include only the metadata that have changed
- // since the last invocation of getMetadata.
- // The set is built using the unfiltered
- // notifications the native player sent to the
- // MediaPlayerService during that period of
- // time. If false, all the metadatas are considered.
- // @param apply_filter If true, once the metadata set has been built based
- // on the value update_only, the current filter is
- // applied.
- // @param[out] metadata On exit contains a set (possibly empty) of metadata.
- // Valid only if the call returned OK.
- // @return OK if the invocation was made successfully.
- virtual status_t getMetadata(bool update_only,
- bool apply_filter,
- Parcel *metadata) = 0;
-};
-
-// ----------------------------------------------------------------------------
-
-class BnMediaPlayer: public BnInterface<IMediaPlayer>
-{
-public:
- virtual status_t onTransact( uint32_t code,
- const Parcel& data,
- Parcel* reply,
- uint32_t flags = 0);
-};
-
-}; // namespace android
-
-#endif // ANDROID_IMEDIAPLAYER_H
diff --git a/include/media/IMediaPlayer.h b/include/media/IMediaPlayer.h
new file mode 120000
index 0000000..9414d37
--- /dev/null
+++ b/include/media/IMediaPlayer.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/IMediaPlayer.h
\ No newline at end of file
diff --git a/include/media/IMediaPlayerClient.h b/include/media/IMediaPlayerClient.h
new file mode 120000
index 0000000..b6547ce
--- /dev/null
+++ b/include/media/IMediaPlayerClient.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/IMediaPlayerClient.h
\ No newline at end of file
diff --git a/include/media/IMediaPlayerService.h b/include/media/IMediaPlayerService.h
new file mode 120000
index 0000000..89c96cd
--- /dev/null
+++ b/include/media/IMediaPlayerService.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/IMediaPlayerService.h
\ No newline at end of file
diff --git a/include/media/IMediaRecorder.h b/include/media/IMediaRecorder.h
deleted file mode 100644
index 68a65f0..0000000
--- a/include/media/IMediaRecorder.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- **
- ** Copyright 2008, The Android Open Source Project
- **
- ** Licensed under the Apache License, Version 2.0 (the "License");
- ** you may not use this file except in compliance with the License.
- ** You may obtain a copy of the License at
- **
- ** http://www.apache.org/licenses/LICENSE-2.0
- **
- ** Unless required by applicable law or agreed to in writing, software
- ** distributed under the License is distributed on an "AS IS" BASIS,
- ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- ** See the License for the specific language governing permissions and
- ** limitations under the License.
- */
-
-#ifndef ANDROID_IMEDIARECORDER_H
-#define ANDROID_IMEDIARECORDER_H
-
-#include <binder/IInterface.h>
-
-namespace android {
-
-class Surface;
-namespace hardware {
-class ICamera;
-}
-class ICameraRecordingProxy;
-class IMediaRecorderClient;
-class IGraphicBufferConsumer;
-class IGraphicBufferProducer;
-
-class IMediaRecorder: public IInterface
-{
-public:
- DECLARE_META_INTERFACE(MediaRecorder);
-
- virtual status_t setCamera(const sp<hardware::ICamera>& camera,
- const sp<ICameraRecordingProxy>& proxy) = 0;
- virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface) = 0;
- virtual status_t setVideoSource(int vs) = 0;
- virtual status_t setAudioSource(int as) = 0;
- virtual status_t setOutputFormat(int of) = 0;
- virtual status_t setVideoEncoder(int ve) = 0;
- virtual status_t setAudioEncoder(int ae) = 0;
- virtual status_t setOutputFile(int fd, int64_t offset, int64_t length) = 0;
- virtual status_t setVideoSize(int width, int height) = 0;
- virtual status_t setVideoFrameRate(int frames_per_second) = 0;
- virtual status_t setParameters(const String8& params) = 0;
- virtual status_t setListener(const sp<IMediaRecorderClient>& listener) = 0;
- virtual status_t setClientName(const String16& clientName) = 0;
- virtual status_t prepare() = 0;
- virtual status_t getMaxAmplitude(int* max) = 0;
- virtual status_t start() = 0;
- virtual status_t stop() = 0;
- virtual status_t reset() = 0;
- virtual status_t pause() = 0;
- virtual status_t resume() = 0;
- virtual status_t init() = 0;
- virtual status_t close() = 0;
- virtual status_t release() = 0;
- virtual status_t setInputSurface(const sp<IGraphicBufferConsumer>& surface) = 0;
- virtual sp<IGraphicBufferProducer> querySurfaceMediaSource() = 0;
-};
-
-// ----------------------------------------------------------------------------
-
-class BnMediaRecorder: public BnInterface<IMediaRecorder>
-{
-public:
- virtual status_t onTransact( uint32_t code,
- const Parcel& data,
- Parcel* reply,
- uint32_t flags = 0);
-};
-
-}; // namespace android
-
-#endif // ANDROID_IMEDIARECORDER_H
diff --git a/include/media/IMediaRecorder.h b/include/media/IMediaRecorder.h
new file mode 120000
index 0000000..57d192c
--- /dev/null
+++ b/include/media/IMediaRecorder.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/IMediaRecorder.h
\ No newline at end of file
diff --git a/include/media/IMediaRecorderClient.h b/include/media/IMediaRecorderClient.h
new file mode 120000
index 0000000..89f4359
--- /dev/null
+++ b/include/media/IMediaRecorderClient.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/IMediaRecorderClient.h
\ No newline at end of file
diff --git a/include/media/IMediaSource.h b/include/media/IMediaSource.h
new file mode 120000
index 0000000..1330ad3
--- /dev/null
+++ b/include/media/IMediaSource.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/IMediaSource.h
\ No newline at end of file
diff --git a/include/media/IOMX.h b/include/media/IOMX.h
deleted file mode 100644
index ffa6d6d..0000000
--- a/include/media/IOMX.h
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_IOMX_H_
-
-#define ANDROID_IOMX_H_
-
-#include <binder/IInterface.h>
-#include <gui/IGraphicBufferProducer.h>
-#include <gui/IGraphicBufferConsumer.h>
-#include <ui/GraphicBuffer.h>
-#include <utils/List.h>
-#include <utils/String8.h>
-
-#include <list>
-
-#include <media/hardware/MetadataBufferType.h>
-
-#include <OMX_Core.h>
-#include <OMX_Video.h>
-
-namespace android {
-
-class IMemory;
-class IOMXObserver;
-class IOMXRenderer;
-class NativeHandle;
-class Surface;
-
-class IOMX : public IInterface {
-public:
- DECLARE_META_INTERFACE(OMX);
-
- typedef uint32_t buffer_id;
- typedef uint32_t node_id;
-
- // Given a node_id and the calling process' pid, returns true iff
- // the implementation of the OMX interface lives in the same
- // process.
- virtual bool livesLocally(node_id node, pid_t pid) = 0;
-
- struct ComponentInfo {
- String8 mName;
- List<String8> mRoles;
- };
- virtual status_t listNodes(List<ComponentInfo> *list) = 0;
-
- virtual status_t allocateNode(
- const char *name, const sp<IOMXObserver> &observer,
- sp<IBinder> *nodeBinder,
- node_id *node) = 0;
-
- virtual status_t freeNode(node_id node) = 0;
-
- virtual status_t sendCommand(
- node_id node, OMX_COMMANDTYPE cmd, OMX_S32 param) = 0;
-
- virtual status_t getParameter(
- node_id node, OMX_INDEXTYPE index,
- void *params, size_t size) = 0;
-
- virtual status_t setParameter(
- node_id node, OMX_INDEXTYPE index,
- const void *params, size_t size) = 0;
-
- virtual status_t getConfig(
- node_id node, OMX_INDEXTYPE index,
- void *params, size_t size) = 0;
-
- virtual status_t setConfig(
- node_id node, OMX_INDEXTYPE index,
- const void *params, size_t size) = 0;
-
- virtual status_t getState(
- node_id node, OMX_STATETYPE* state) = 0;
-
- // This will set *type to previous metadata buffer type on OMX error (not on binder error), and
- // new metadata buffer type on success.
- virtual status_t storeMetaDataInBuffers(
- node_id node, OMX_U32 port_index, OMX_BOOL enable, MetadataBufferType *type = NULL) = 0;
-
- virtual status_t prepareForAdaptivePlayback(
- node_id node, OMX_U32 portIndex, OMX_BOOL enable,
- OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight) = 0;
-
- virtual status_t configureVideoTunnelMode(
- node_id node, OMX_U32 portIndex, OMX_BOOL tunneled,
- OMX_U32 audioHwSync, native_handle_t **sidebandHandle) = 0;
-
- virtual status_t enableNativeBuffers(
- node_id node, OMX_U32 port_index, OMX_BOOL graphic, OMX_BOOL enable) = 0;
-
- virtual status_t getGraphicBufferUsage(
- node_id node, OMX_U32 port_index, OMX_U32* usage) = 0;
-
- // Use |params| as an OMX buffer, but limit the size of the OMX buffer to |allottedSize|.
- virtual status_t useBuffer(
- node_id node, OMX_U32 port_index, const sp<IMemory> ¶ms,
- buffer_id *buffer, OMX_U32 allottedSize) = 0;
-
- virtual status_t useGraphicBuffer(
- node_id node, OMX_U32 port_index,
- const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer) = 0;
-
- virtual status_t updateGraphicBufferInMeta(
- node_id node, OMX_U32 port_index,
- const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer) = 0;
-
- virtual status_t updateNativeHandleInMeta(
- node_id node, OMX_U32 port_index,
- const sp<NativeHandle> &nativeHandle, buffer_id buffer) = 0;
-
- // This will set *type to resulting metadata buffer type on OMX error (not on binder error) as
- // well as on success.
- virtual status_t createInputSurface(
- node_id node, OMX_U32 port_index, android_dataspace dataSpace,
- sp<IGraphicBufferProducer> *bufferProducer,
- MetadataBufferType *type = NULL) = 0;
-
- virtual status_t createPersistentInputSurface(
- sp<IGraphicBufferProducer> *bufferProducer,
- sp<IGraphicBufferConsumer> *bufferConsumer) = 0;
-
- // This will set *type to resulting metadata buffer type on OMX error (not on binder error) as
- // well as on success.
- virtual status_t setInputSurface(
- node_id node, OMX_U32 port_index,
- const sp<IGraphicBufferConsumer> &bufferConsumer,
- MetadataBufferType *type) = 0;
-
- virtual status_t signalEndOfInputStream(node_id node) = 0;
-
- // Allocate an opaque buffer as a native handle. If component supports returning native
- // handles, those are returned in *native_handle. Otherwise, the allocated buffer is
- // returned in *buffer_data. This clearly only makes sense if the caller lives in the
- // same process as the callee, i.e. is the media_server, as the returned "buffer_data"
- // pointer is just that, a pointer into local address space.
- virtual status_t allocateSecureBuffer(
- node_id node, OMX_U32 port_index, size_t size,
- buffer_id *buffer, void **buffer_data, sp<NativeHandle> *native_handle) = 0;
-
- // Allocate an OMX buffer of size |allotedSize|. Use |params| as the backup buffer, which
- // may be larger.
- virtual status_t allocateBufferWithBackup(
- node_id node, OMX_U32 port_index, const sp<IMemory> ¶ms,
- buffer_id *buffer, OMX_U32 allottedSize) = 0;
-
- virtual status_t freeBuffer(
- node_id node, OMX_U32 port_index, buffer_id buffer) = 0;
-
- enum {
- kFenceTimeoutMs = 1000
- };
- // Calls OMX_FillBuffer on buffer, and passes |fenceFd| to component if it supports
- // fences. Otherwise, it waits on |fenceFd| before calling OMX_FillBuffer.
- // Takes ownership of |fenceFd| even if this call fails.
- virtual status_t fillBuffer(node_id node, buffer_id buffer, int fenceFd = -1) = 0;
-
- // Calls OMX_EmptyBuffer on buffer (after updating buffer header with |range_offset|,
- // |range_length|, |flags| and |timestamp|). Passes |fenceFd| to component if it
- // supports fences. Otherwise, it waits on |fenceFd| before calling OMX_EmptyBuffer.
- // Takes ownership of |fenceFd| even if this call fails.
- virtual status_t emptyBuffer(
- node_id node,
- buffer_id buffer,
- OMX_U32 range_offset, OMX_U32 range_length,
- OMX_U32 flags, OMX_TICKS timestamp, int fenceFd = -1) = 0;
-
- virtual status_t getExtensionIndex(
- node_id node,
- const char *parameter_name,
- OMX_INDEXTYPE *index) = 0;
-
- enum InternalOptionType {
- INTERNAL_OPTION_SUSPEND, // data is a bool
- INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY, // data is an int64_t
- INTERNAL_OPTION_MAX_TIMESTAMP_GAP, // data is int64_t
- INTERNAL_OPTION_MAX_FPS, // data is float
- INTERNAL_OPTION_START_TIME, // data is an int64_t
- INTERNAL_OPTION_TIME_LAPSE, // data is an int64_t[2]
- INTERNAL_OPTION_COLOR_ASPECTS, // data is ColorAspects
- INTERNAL_OPTION_TIME_OFFSET, // data is an int64_t
- };
- virtual status_t setInternalOption(
- node_id node,
- OMX_U32 port_index,
- InternalOptionType type,
- const void *data,
- size_t size) = 0;
-};
-
-struct omx_message {
- enum {
- EVENT,
- EMPTY_BUFFER_DONE,
- FILL_BUFFER_DONE,
- FRAME_RENDERED,
- } type;
-
- IOMX::node_id node;
- int fenceFd; // used for EMPTY_BUFFER_DONE and FILL_BUFFER_DONE; client must close this
-
- union {
- // if type == EVENT
- struct {
- OMX_EVENTTYPE event;
- OMX_U32 data1;
- OMX_U32 data2;
- } event_data;
-
- // if type == EMPTY_BUFFER_DONE
- struct {
- IOMX::buffer_id buffer;
- } buffer_data;
-
- // if type == FILL_BUFFER_DONE
- struct {
- IOMX::buffer_id buffer;
- OMX_U32 range_offset;
- OMX_U32 range_length;
- OMX_U32 flags;
- OMX_TICKS timestamp;
- } extended_buffer_data;
-
- // if type == FRAME_RENDERED
- struct {
- OMX_TICKS timestamp;
- OMX_S64 nanoTime;
- } render_data;
- } u;
-};
-
-class IOMXObserver : public IInterface {
-public:
- DECLARE_META_INTERFACE(OMXObserver);
-
- // Handle (list of) messages.
- virtual void onMessages(const std::list<omx_message> &messages) = 0;
-};
-
-////////////////////////////////////////////////////////////////////////////////
-
-class BnOMX : public BnInterface<IOMX> {
-public:
- virtual status_t onTransact(
- uint32_t code, const Parcel &data, Parcel *reply,
- uint32_t flags = 0);
-
-protected:
- // check if the codec is secure.
- virtual bool isSecure(IOMX::node_id /*node*/) {
- return false;
- }
-};
-
-class BnOMXObserver : public BnInterface<IOMXObserver> {
-public:
- virtual status_t onTransact(
- uint32_t code, const Parcel &data, Parcel *reply,
- uint32_t flags = 0);
-};
-
-struct CodecProfileLevel {
- OMX_U32 mProfile;
- OMX_U32 mLevel;
-};
-
-inline static const char *asString(MetadataBufferType i, const char *def = "??") {
- using namespace android;
- switch (i) {
- case kMetadataBufferTypeCameraSource: return "CameraSource";
- case kMetadataBufferTypeGrallocSource: return "GrallocSource";
- case kMetadataBufferTypeANWBuffer: return "ANWBuffer";
- case kMetadataBufferTypeNativeHandleSource: return "NativeHandleSource";
- case kMetadataBufferTypeInvalid: return "Invalid";
- default: return def;
- }
-}
-
-} // namespace android
-
-#endif // ANDROID_IOMX_H_
diff --git a/include/media/IOMX.h b/include/media/IOMX.h
new file mode 120000
index 0000000..6d5b375
--- /dev/null
+++ b/include/media/IOMX.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/IOMX.h
\ No newline at end of file
diff --git a/include/media/IRemoteDisplay.h b/include/media/IRemoteDisplay.h
new file mode 120000
index 0000000..4b0cf10
--- /dev/null
+++ b/include/media/IRemoteDisplay.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/IRemoteDisplay.h
\ No newline at end of file
diff --git a/include/media/IRemoteDisplayClient.h b/include/media/IRemoteDisplayClient.h
new file mode 120000
index 0000000..f29a2ee
--- /dev/null
+++ b/include/media/IRemoteDisplayClient.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/IRemoteDisplayClient.h
\ No newline at end of file
diff --git a/include/media/IResourceManagerClient.h b/include/media/IResourceManagerClient.h
new file mode 120000
index 0000000..100af9b
--- /dev/null
+++ b/include/media/IResourceManagerClient.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/IResourceManagerClient.h
\ No newline at end of file
diff --git a/include/media/IResourceManagerService.h b/include/media/IResourceManagerService.h
new file mode 120000
index 0000000..9b389c6
--- /dev/null
+++ b/include/media/IResourceManagerService.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/IResourceManagerService.h
\ No newline at end of file
diff --git a/include/media/IStreamSource.h b/include/media/IStreamSource.h
new file mode 120000
index 0000000..4943af9
--- /dev/null
+++ b/include/media/IStreamSource.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/IStreamSource.h
\ No newline at end of file
diff --git a/include/media/Interpolator.h b/include/media/Interpolator.h
new file mode 100644
index 0000000..703cf77
--- /dev/null
+++ b/include/media/Interpolator.h
@@ -0,0 +1,342 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_INTERPOLATOR_H
+#define ANDROID_INTERPOLATOR_H
+
+#include <map>
+#include <sstream>
+#include <unordered_map>
+
+#include <binder/Parcel.h>
+#include <utils/RefBase.h>
+
+#pragma push_macro("LOG_TAG")
+#undef LOG_TAG
+#define LOG_TAG "Interpolator"
+
+namespace android {
+
+/*
+ * A general purpose spline interpolator class which takes a set of points
+ * and performs interpolation. This is used for the VolumeShaper class.
+ */
+
+template <typename S, typename T>
+class Interpolator : public std::map<S, T> {
+public:
+ // Polynomial spline interpolators
+ // Extend only at the end of enum, as this must match order in VolumeShapers.java.
+ enum InterpolatorType : int32_t {
+ INTERPOLATOR_TYPE_STEP, // Not continuous
+ INTERPOLATOR_TYPE_LINEAR, // C0
+ INTERPOLATOR_TYPE_CUBIC, // C1
+ INTERPOLATOR_TYPE_CUBIC_MONOTONIC, // C1 (to provide locally monotonic curves)
+ // INTERPOLATOR_TYPE_CUBIC_C2, // TODO - requires global computation / cache
+ };
+
+ explicit Interpolator(
+ InterpolatorType interpolatorType = INTERPOLATOR_TYPE_LINEAR,
+ bool cache = true)
+ : mCache(cache)
+ , mFirstSlope(0)
+ , mLastSlope(0) {
+ setInterpolatorType(interpolatorType);
+ }
+
+ std::pair<S, T> first() const {
+ return *this->begin();
+ }
+
+ std::pair<S, T> last() const {
+ return *this->rbegin();
+ }
+
+ // find the corresponding Y point from a X point.
+ T findY(S x) { // logically const, but modifies cache
+ auto high = this->lower_bound(x);
+ // greater than last point
+ if (high == this->end()) {
+ return this->rbegin()->second;
+ }
+ // at or before first point
+ if (high == this->begin()) {
+ return high->second;
+ }
+ // go lower.
+ auto low = high;
+ --low;
+
+ // now that we have two adjacent points:
+ switch (mInterpolatorType) {
+ case INTERPOLATOR_TYPE_STEP:
+ return high->first == x ? high->second : low->second;
+ case INTERPOLATOR_TYPE_LINEAR:
+ return ((high->first - x) * low->second + (x - low->first) * high->second)
+ / (high->first - low->first);
+ case INTERPOLATOR_TYPE_CUBIC:
+ case INTERPOLATOR_TYPE_CUBIC_MONOTONIC:
+ default: {
+ // See https://en.wikipedia.org/wiki/Cubic_Hermite_spline
+
+ const S interval = high->first - low->first;
+
+ // check to see if we've cached the polynomial coefficients
+ if (mMemo.count(low->first) != 0) {
+ const S t = (x - low->first) / interval;
+ const S t2 = t * t;
+ const auto &memo = mMemo[low->first];
+ return low->second + std::get<0>(memo) * t
+ + (std::get<1>(memo) + std::get<2>(memo) * t) * t2;
+ }
+
+ // find the neighboring points (low2 < low < high < high2)
+ auto low2 = this->end();
+ if (low != this->begin()) {
+ low2 = low;
+ --low2; // decrementing this->begin() is undefined
+ }
+ auto high2 = high;
+ ++high2;
+
+ // you could have catmullRom with monotonic or
+ // non catmullRom (finite difference) with regular cubic;
+ // the choices here minimize computation.
+ bool monotonic, catmullRom;
+ if (mInterpolatorType == INTERPOLATOR_TYPE_CUBIC_MONOTONIC) {
+ monotonic = true;
+ catmullRom = false;
+ } else {
+ monotonic = false;
+ catmullRom = true;
+ }
+
+ // secants are only needed for finite difference splines or
+ // monotonic computation.
+ // we use lazy computation here - if we precompute in
+ // a single pass, duplicate secant computations may be avoided.
+ S sec, sec0, sec1;
+ if (!catmullRom || monotonic) {
+ sec = (high->second - low->second) / interval;
+ sec0 = low2 != this->end()
+ ? (low->second - low2->second) / (low->first - low2->first)
+ : mFirstSlope;
+ sec1 = high2 != this->end()
+ ? (high2->second - high->second) / (high2->first - high->first)
+ : mLastSlope;
+ }
+
+ // compute the tangent slopes at the control points
+ S m0, m1;
+ if (catmullRom) {
+ // Catmull-Rom spline
+ m0 = low2 != this->end()
+ ? (high->second - low2->second) / (high->first - low2->first)
+ : mFirstSlope;
+
+ m1 = high2 != this->end()
+ ? (high2->second - low->second) / (high2->first - low->first)
+ : mLastSlope;
+ } else {
+ // finite difference spline
+ m0 = (sec0 + sec) * 0.5f;
+ m1 = (sec1 + sec) * 0.5f;
+ }
+
+ if (monotonic) {
+ // https://en.wikipedia.org/wiki/Monotone_cubic_interpolation
+ // A sufficient condition for Fritsch–Carlson monotonicity is constraining
+ // (1) the normalized slopes to be within the circle of radius 3, or
+ // (2) the normalized slopes to be within the square of radius 3.
+ // Condition (2) is more generous and easier to compute.
+ const S maxSlope = 3 * sec;
+ m0 = constrainSlope(m0, maxSlope);
+ m1 = constrainSlope(m1, maxSlope);
+
+ m0 = constrainSlope(m0, 3 * sec0);
+ m1 = constrainSlope(m1, 3 * sec1);
+ }
+
+ const S t = (x - low->first) / interval;
+ const S t2 = t * t;
+ if (mCache) {
+ // convert to cubic polynomial coefficients and compute
+ m0 *= interval;
+ m1 *= interval;
+ const T dy = high->second - low->second;
+ const S c0 = low->second;
+ const S c1 = m0;
+ const S c2 = 3 * dy - 2 * m0 - m1;
+ const S c3 = m0 + m1 - 2 * dy;
+ mMemo[low->first] = std::make_tuple(c1, c2, c3);
+ return c0 + c1 * t + (c2 + c3 * t) * t2;
+ } else {
+ // classic Hermite interpolation
+ const S t3 = t2 * t;
+ const S h00 = 2 * t3 - 3 * t2 + 1;
+ const S h10 = t3 - 2 * t2 + t ;
+ const S h01 = -2 * t3 + 3 * t2 ;
+ const S h11 = t3 - t2 ;
+ return h00 * low->second + (h10 * m0 + h11 * m1) * interval + h01 * high->second;
+ }
+ } // default
+ }
+ }
+
+ InterpolatorType getInterpolatorType() const {
+ return mInterpolatorType;
+ }
+
+ status_t setInterpolatorType(InterpolatorType interpolatorType) {
+ switch (interpolatorType) {
+ case INTERPOLATOR_TYPE_STEP: // Not continuous
+ case INTERPOLATOR_TYPE_LINEAR: // C0
+ case INTERPOLATOR_TYPE_CUBIC: // C1
+ case INTERPOLATOR_TYPE_CUBIC_MONOTONIC: // C1 + other constraints
+ // case INTERPOLATOR_TYPE_CUBIC_C2:
+ mInterpolatorType = interpolatorType;
+ return NO_ERROR;
+ default:
+ ALOGE("invalid interpolatorType: %d", interpolatorType);
+ return BAD_VALUE;
+ }
+ }
+
+ T getFirstSlope() const {
+ return mFirstSlope;
+ }
+
+ void setFirstSlope(T slope) {
+ mFirstSlope = slope;
+ }
+
+ T getLastSlope() const {
+ return mLastSlope;
+ }
+
+ void setLastSlope(T slope) {
+ mLastSlope = slope;
+ }
+
+ void clearCache() {
+ mMemo.clear();
+ }
+
+ status_t writeToParcel(Parcel *parcel) const {
+ if (parcel == nullptr) {
+ return BAD_VALUE;
+ }
+ status_t res = parcel->writeInt32(mInterpolatorType)
+ ?: parcel->writeFloat(mFirstSlope)
+ ?: parcel->writeFloat(mLastSlope)
+ ?: parcel->writeUint32((uint32_t)this->size()); // silent truncation
+ if (res != NO_ERROR) {
+ return res;
+ }
+ for (const auto &pt : *this) {
+ res = parcel->writeFloat(pt.first)
+ ?: parcel->writeFloat(pt.second);
+ if (res != NO_ERROR) {
+ return res;
+ }
+ }
+ return NO_ERROR;
+ }
+
+ status_t readFromParcel(const Parcel &parcel) {
+ this->clear();
+ int32_t type;
+ uint32_t size;
+ status_t res = parcel.readInt32(&type)
+ ?: parcel.readFloat(&mFirstSlope)
+ ?: parcel.readFloat(&mLastSlope)
+ ?: parcel.readUint32(&size)
+ ?: setInterpolatorType((InterpolatorType)type);
+ if (res != NO_ERROR) {
+ return res;
+ }
+ // Note: We don't need to check size is within some bounds as
+ // the Parcel read will fail if size is incorrectly specified too large.
+ float lastx;
+ for (uint32_t i = 0; i < size; ++i) {
+ float x, y;
+ res = parcel.readFloat(&x)
+ ?: parcel.readFloat(&y);
+ if (res != NO_ERROR) {
+ return res;
+ }
+ if ((i > 0 && !(x > lastx)) /* handle nan */
+ || y != y /* handle nan */) {
+ // This is a std::map object which imposes sorted order
+ // automatically on emplace.
+ // Nevertheless for reading from a Parcel,
+ // we require that the points be specified monotonic in x.
+ return BAD_VALUE;
+ }
+ this->emplace(x, y);
+ lastx = x;
+ }
+ return NO_ERROR;
+ }
+
+ std::string toString() const {
+ std::stringstream ss;
+ ss << "Interpolator{mInterpolatorType=" << static_cast<int32_t>(mInterpolatorType);
+ ss << ", mFirstSlope=" << mFirstSlope;
+ ss << ", mLastSlope=" << mLastSlope;
+ ss << ", {";
+ bool first = true;
+ for (const auto &pt : *this) {
+ if (first) {
+ first = false;
+ ss << "{";
+ } else {
+ ss << ", {";
+ }
+ ss << pt.first << ", " << pt.second << "}";
+ }
+ ss << "}}";
+ return ss.str();
+ }
+
+private:
+ static S constrainSlope(S slope, S maxSlope) {
+ if (maxSlope > 0) {
+ slope = std::min(slope, maxSlope);
+ slope = std::max(slope, S(0)); // not globally monotonic
+ } else {
+ slope = std::max(slope, maxSlope);
+ slope = std::min(slope, S(0)); // not globally monotonic
+ }
+ return slope;
+ }
+
+ InterpolatorType mInterpolatorType;
+ bool mCache; // whether we cache spline coefficient computation
+
+ // for cubic interpolation, the boundary conditions in slope.
+ S mFirstSlope;
+ S mLastSlope;
+
+ // spline cubic polynomial coefficient cache
+ std::unordered_map<S, std::tuple<S /* c1 */, S /* c2 */, S /* c3 */>> mMemo;
+}; // Interpolator
+
+} // namespace android
+
+#pragma pop_macro("LOG_TAG")
+
+#endif // ANDROID_INTERPOLATOR_H
diff --git a/include/media/JetPlayer.h b/include/media/JetPlayer.h
new file mode 120000
index 0000000..5483fda
--- /dev/null
+++ b/include/media/JetPlayer.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/JetPlayer.h
\ No newline at end of file
diff --git a/include/media/LinearMap.h b/include/media/LinearMap.h
new file mode 120000
index 0000000..30d4ca8
--- /dev/null
+++ b/include/media/LinearMap.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/LinearMap.h
\ No newline at end of file
diff --git a/include/media/MediaAnalyticsItem.h b/include/media/MediaAnalyticsItem.h
new file mode 120000
index 0000000..e8124e0
--- /dev/null
+++ b/include/media/MediaAnalyticsItem.h
@@ -0,0 +1 @@
+../../media/libmediametrics/include/MediaAnalyticsItem.h
\ No newline at end of file
diff --git a/include/media/MediaCasDefs.h b/include/media/MediaCasDefs.h
new file mode 100644
index 0000000..8c5a967
--- /dev/null
+++ b/include/media/MediaCasDefs.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_CAS_DEFS_H_
+#define MEDIA_CAS_DEFS_H_
+
+#include <binder/Parcel.h>
+#include <media/cas/CasAPI.h>
+#include <media/cas/DescramblerAPI.h>
+#include <media/stagefright/foundation/ABase.h>
+
+namespace android {
+class IMemory;
+namespace media {
+
+namespace MediaCas {
+class ParcelableCasData : public CasData,
+ public Parcelable {
+public:
+ ParcelableCasData() {}
+ ParcelableCasData(const uint8_t *data, size_t size) :
+ CasData(data, data + size) {}
+ virtual ~ParcelableCasData() {}
+ status_t readFromParcel(const Parcel* parcel) override;
+ status_t writeToParcel(Parcel* parcel) const override;
+
+private:
+ DISALLOW_EVIL_CONSTRUCTORS(ParcelableCasData);
+};
+
+class ParcelableCasPluginDescriptor : public Parcelable {
+public:
+ ParcelableCasPluginDescriptor(int32_t CA_system_id, const char *name)
+ : mCASystemId(CA_system_id), mName(name) {}
+
+ ParcelableCasPluginDescriptor() : mCASystemId(0) {}
+
+ ParcelableCasPluginDescriptor(ParcelableCasPluginDescriptor&& desc) = default;
+
+ virtual ~ParcelableCasPluginDescriptor() {}
+
+ status_t readFromParcel(const Parcel* parcel) override;
+ status_t writeToParcel(Parcel* parcel) const override;
+
+private:
+ int32_t mCASystemId;
+ String16 mName;
+ DISALLOW_EVIL_CONSTRUCTORS(ParcelableCasPluginDescriptor);
+};
+}
+
+namespace MediaDescrambler {
+class DescrambleInfo : public Parcelable {
+public:
+ enum DestinationType {
+ kDestinationTypeVmPointer, // non-secure
+ kDestinationTypeNativeHandle // secure
+ };
+
+ DestinationType dstType;
+ DescramblerPlugin::ScramblingControl scramblingControl;
+ size_t numSubSamples;
+ DescramblerPlugin::SubSample *subSamples;
+ sp<IMemory> srcMem;
+ int32_t srcOffset;
+ void *dstPtr;
+ int32_t dstOffset;
+
+ DescrambleInfo();
+ virtual ~DescrambleInfo();
+ status_t readFromParcel(const Parcel* parcel) override;
+ status_t writeToParcel(Parcel* parcel) const override;
+
+private:
+
+ DISALLOW_EVIL_CONSTRUCTORS(DescrambleInfo);
+};
+}
+
+} // namespace media
+} // namespace android
+
+
+#endif // MEDIA_CAS_DEFS_H_
diff --git a/include/media/MediaCodecBuffer.h b/include/media/MediaCodecBuffer.h
new file mode 120000
index 0000000..8c9aa76
--- /dev/null
+++ b/include/media/MediaCodecBuffer.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/MediaCodecBuffer.h
\ No newline at end of file
diff --git a/include/media/MediaCodecInfo.h b/include/media/MediaCodecInfo.h
new file mode 120000
index 0000000..ff44ce4
--- /dev/null
+++ b/include/media/MediaCodecInfo.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/MediaCodecInfo.h
\ No newline at end of file
diff --git a/include/media/MediaDefs.h b/include/media/MediaDefs.h
deleted file mode 100644
index 5f2a32d..0000000
--- a/include/media/MediaDefs.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_DEFS_H_
-
-#define MEDIA_DEFS_H_
-
-namespace android {
-
-extern const char *MEDIA_MIMETYPE_IMAGE_JPEG;
-
-extern const char *MEDIA_MIMETYPE_VIDEO_VP8;
-extern const char *MEDIA_MIMETYPE_VIDEO_VP9;
-extern const char *MEDIA_MIMETYPE_VIDEO_AVC;
-extern const char *MEDIA_MIMETYPE_VIDEO_HEVC;
-extern const char *MEDIA_MIMETYPE_VIDEO_MPEG4;
-extern const char *MEDIA_MIMETYPE_VIDEO_H263;
-extern const char *MEDIA_MIMETYPE_VIDEO_MPEG2;
-extern const char *MEDIA_MIMETYPE_VIDEO_RAW;
-extern const char *MEDIA_MIMETYPE_VIDEO_DOLBY_VISION;
-
-extern const char *MEDIA_MIMETYPE_AUDIO_AMR_NB;
-extern const char *MEDIA_MIMETYPE_AUDIO_AMR_WB;
-extern const char *MEDIA_MIMETYPE_AUDIO_MPEG; // layer III
-extern const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I;
-extern const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II;
-extern const char *MEDIA_MIMETYPE_AUDIO_MIDI;
-extern const char *MEDIA_MIMETYPE_AUDIO_AAC;
-extern const char *MEDIA_MIMETYPE_AUDIO_QCELP;
-extern const char *MEDIA_MIMETYPE_AUDIO_VORBIS;
-extern const char *MEDIA_MIMETYPE_AUDIO_OPUS;
-extern const char *MEDIA_MIMETYPE_AUDIO_G711_ALAW;
-extern const char *MEDIA_MIMETYPE_AUDIO_G711_MLAW;
-extern const char *MEDIA_MIMETYPE_AUDIO_RAW;
-extern const char *MEDIA_MIMETYPE_AUDIO_FLAC;
-extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS;
-extern const char *MEDIA_MIMETYPE_AUDIO_MSGSM;
-extern const char *MEDIA_MIMETYPE_AUDIO_AC3;
-extern const char *MEDIA_MIMETYPE_AUDIO_EAC3;
-
-extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4;
-extern const char *MEDIA_MIMETYPE_CONTAINER_WAV;
-extern const char *MEDIA_MIMETYPE_CONTAINER_OGG;
-extern const char *MEDIA_MIMETYPE_CONTAINER_MATROSKA;
-extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2TS;
-extern const char *MEDIA_MIMETYPE_CONTAINER_AVI;
-extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2PS;
-
-extern const char *MEDIA_MIMETYPE_CONTAINER_WVM;
-
-extern const char *MEDIA_MIMETYPE_TEXT_3GPP;
-extern const char *MEDIA_MIMETYPE_TEXT_SUBRIP;
-extern const char *MEDIA_MIMETYPE_TEXT_VTT;
-extern const char *MEDIA_MIMETYPE_TEXT_CEA_608;
-extern const char *MEDIA_MIMETYPE_TEXT_CEA_708;
-extern const char *MEDIA_MIMETYPE_DATA_TIMED_ID3;
-
-// These are values exported to JAVA API that need to be in sync with
-// frameworks/base/media/java/android/media/AudioFormat.java. Unfortunately,
-// they are not defined in frameworks/av, so defining them here.
-enum AudioEncoding {
- kAudioEncodingPcm16bit = 2,
- kAudioEncodingPcm8bit = 3,
- kAudioEncodingPcmFloat = 4,
-};
-
-} // namespace android
-
-#endif // MEDIA_DEFS_H_
diff --git a/include/media/MediaDefs.h b/include/media/MediaDefs.h
new file mode 120000
index 0000000..9850603
--- /dev/null
+++ b/include/media/MediaDefs.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/MediaDefs.h
\ No newline at end of file
diff --git a/include/media/MediaMetadataRetrieverInterface.h b/include/media/MediaMetadataRetrieverInterface.h
new file mode 120000
index 0000000..1c53511
--- /dev/null
+++ b/include/media/MediaMetadataRetrieverInterface.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/MediaMetadataRetrieverInterface.h
\ No newline at end of file
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
deleted file mode 100644
index 4977efd..0000000
--- a/include/media/MediaPlayerInterface.h
+++ /dev/null
@@ -1,306 +0,0 @@
-/*
- * Copyright (C) 2007 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_MEDIAPLAYERINTERFACE_H
-#define ANDROID_MEDIAPLAYERINTERFACE_H
-
-#ifdef __cplusplus
-
-#include <sys/types.h>
-#include <utils/Errors.h>
-#include <utils/KeyedVector.h>
-#include <utils/String8.h>
-#include <utils/RefBase.h>
-
-#include <media/mediaplayer.h>
-#include <media/AudioResamplerPublic.h>
-#include <media/AudioSystem.h>
-#include <media/AudioTimestamp.h>
-#include <media/AVSyncSettings.h>
-#include <media/Metadata.h>
-
-// Fwd decl to make sure everyone agrees that the scope of struct sockaddr_in is
-// global, and not in android::
-struct sockaddr_in;
-
-namespace android {
-
-class DataSource;
-class Parcel;
-class Surface;
-class IGraphicBufferProducer;
-
-template<typename T> class SortedVector;
-
-enum player_type {
- STAGEFRIGHT_PLAYER = 3,
- NU_PLAYER = 4,
- // Test players are available only in the 'test' and 'eng' builds.
- // The shared library with the test player is passed passed as an
- // argument to the 'test:' url in the setDataSource call.
- TEST_PLAYER = 5,
-};
-
-
-#define DEFAULT_AUDIOSINK_BUFFERCOUNT 4
-#define DEFAULT_AUDIOSINK_BUFFERSIZE 1200
-#define DEFAULT_AUDIOSINK_SAMPLERATE 44100
-
-// when the channel mask isn't known, use the channel count to derive a mask in AudioSink::open()
-#define CHANNEL_MASK_USE_CHANNEL_ORDER 0
-
-// duration below which we do not allow deep audio buffering
-#define AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US 5000000
-
-// callback mechanism for passing messages to MediaPlayer object
-typedef void (*notify_callback_f)(void* cookie,
- int msg, int ext1, int ext2, const Parcel *obj);
-
-// abstract base class - use MediaPlayerInterface
-class MediaPlayerBase : public RefBase
-{
-public:
- // AudioSink: abstraction layer for audio output
- class AudioSink : public RefBase {
- public:
- enum cb_event_t {
- CB_EVENT_FILL_BUFFER, // Request to write more data to buffer.
- CB_EVENT_STREAM_END, // Sent after all the buffers queued in AF and HW are played
- // back (after stop is called)
- CB_EVENT_TEAR_DOWN // The AudioTrack was invalidated due to use case change:
- // Need to re-evaluate offloading options
- };
-
- // Callback returns the number of bytes actually written to the buffer.
- typedef size_t (*AudioCallback)(
- AudioSink *audioSink, void *buffer, size_t size, void *cookie,
- cb_event_t event);
-
- virtual ~AudioSink() {}
- virtual bool ready() const = 0; // audio output is open and ready
- virtual ssize_t bufferSize() const = 0;
- virtual ssize_t frameCount() const = 0;
- virtual ssize_t channelCount() const = 0;
- virtual ssize_t frameSize() const = 0;
- virtual uint32_t latency() const = 0;
- virtual float msecsPerFrame() const = 0;
- virtual status_t getPosition(uint32_t *position) const = 0;
- virtual status_t getTimestamp(AudioTimestamp &ts) const = 0;
- virtual int64_t getPlayedOutDurationUs(int64_t nowUs) const = 0;
- virtual status_t getFramesWritten(uint32_t *frameswritten) const = 0;
- virtual audio_session_t getSessionId() const = 0;
- virtual audio_stream_type_t getAudioStreamType() const = 0;
- virtual uint32_t getSampleRate() const = 0;
- virtual int64_t getBufferDurationInUs() const = 0;
-
- // If no callback is specified, use the "write" API below to submit
- // audio data.
- virtual status_t open(
- uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
- audio_format_t format=AUDIO_FORMAT_PCM_16_BIT,
- int bufferCount=DEFAULT_AUDIOSINK_BUFFERCOUNT,
- AudioCallback cb = NULL,
- void *cookie = NULL,
- audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
- const audio_offload_info_t *offloadInfo = NULL,
- bool doNotReconnect = false,
- uint32_t suggestedFrameCount = 0) = 0;
-
- virtual status_t start() = 0;
-
- /* Input parameter |size| is in byte units stored in |buffer|.
- * Data is copied over and actual number of bytes written (>= 0)
- * is returned, or no data is copied and a negative status code
- * is returned (even when |blocking| is true).
- * When |blocking| is false, AudioSink will immediately return after
- * part of or full |buffer| is copied over.
- * When |blocking| is true, AudioSink will wait to copy the entire
- * buffer, unless an error occurs or the copy operation is
- * prematurely stopped.
- */
- virtual ssize_t write(const void* buffer, size_t size, bool blocking = true) = 0;
-
- virtual void stop() = 0;
- virtual void flush() = 0;
- virtual void pause() = 0;
- virtual void close() = 0;
-
- virtual status_t setPlaybackRate(const AudioPlaybackRate& rate) = 0;
- virtual status_t getPlaybackRate(AudioPlaybackRate* rate /* nonnull */) = 0;
- virtual bool needsTrailingPadding() { return true; }
-
- virtual status_t setParameters(const String8& /* keyValuePairs */) { return NO_ERROR; }
- virtual String8 getParameters(const String8& /* keys */) { return String8::empty(); }
- };
-
- MediaPlayerBase() : mCookie(0), mNotify(0) {}
- virtual ~MediaPlayerBase() {}
- virtual status_t initCheck() = 0;
- virtual bool hardwareOutput() = 0;
-
- virtual status_t setUID(uid_t /* uid */) {
- return INVALID_OPERATION;
- }
-
- virtual status_t setDataSource(
- const sp<IMediaHTTPService> &httpService,
- const char *url,
- const KeyedVector<String8, String8> *headers = NULL) = 0;
-
- virtual status_t setDataSource(int fd, int64_t offset, int64_t length) = 0;
-
- virtual status_t setDataSource(const sp<IStreamSource>& /* source */) {
- return INVALID_OPERATION;
- }
-
- virtual status_t setDataSource(const sp<DataSource>& /* source */) {
- return INVALID_OPERATION;
- }
-
- // pass the buffered IGraphicBufferProducer to the media player service
- virtual status_t setVideoSurfaceTexture(
- const sp<IGraphicBufferProducer>& bufferProducer) = 0;
-
- virtual status_t prepare() = 0;
- virtual status_t prepareAsync() = 0;
- virtual status_t start() = 0;
- virtual status_t stop() = 0;
- virtual status_t pause() = 0;
- virtual bool isPlaying() = 0;
- virtual status_t setPlaybackSettings(const AudioPlaybackRate& rate) {
- // by default, players only support setting rate to the default
- if (!isAudioPlaybackRateEqual(rate, AUDIO_PLAYBACK_RATE_DEFAULT)) {
- return BAD_VALUE;
- }
- return OK;
- }
- virtual status_t getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */) {
- *rate = AUDIO_PLAYBACK_RATE_DEFAULT;
- return OK;
- }
- virtual status_t setSyncSettings(const AVSyncSettings& sync, float /* videoFps */) {
- // By default, players only support setting sync source to default; all other sync
- // settings are ignored. There is no requirement for getters to return set values.
- if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
- return BAD_VALUE;
- }
- return OK;
- }
- virtual status_t getSyncSettings(
- AVSyncSettings* sync /* nonnull */, float* videoFps /* nonnull */) {
- *sync = AVSyncSettings();
- *videoFps = -1.f;
- return OK;
- }
- virtual status_t seekTo(int msec) = 0;
- virtual status_t getCurrentPosition(int *msec) = 0;
- virtual status_t getDuration(int *msec) = 0;
- virtual status_t reset() = 0;
- virtual status_t setLooping(int loop) = 0;
- virtual player_type playerType() = 0;
- virtual status_t setParameter(int key, const Parcel &request) = 0;
- virtual status_t getParameter(int key, Parcel *reply) = 0;
-
- // default no-op implementation of optional extensions
- virtual status_t setRetransmitEndpoint(const struct sockaddr_in* /* endpoint */) {
- return INVALID_OPERATION;
- }
- virtual status_t getRetransmitEndpoint(struct sockaddr_in* /* endpoint */) {
- return INVALID_OPERATION;
- }
- virtual status_t setNextPlayer(const sp<MediaPlayerBase>& /* next */) {
- return OK;
- }
-
- // Invoke a generic method on the player by using opaque parcels
- // for the request and reply.
- //
- // @param request Parcel that is positioned at the start of the
- // data sent by the java layer.
- // @param[out] reply Parcel to hold the reply data. Cannot be null.
- // @return OK if the call was successful.
- virtual status_t invoke(const Parcel& request, Parcel *reply) = 0;
-
- // The Client in the MetadataPlayerService calls this method on
- // the native player to retrieve all or a subset of metadata.
- //
- // @param ids SortedList of metadata ID to be fetch. If empty, all
- // the known metadata should be returned.
- // @param[inout] records Parcel where the player appends its metadata.
- // @return OK if the call was successful.
- virtual status_t getMetadata(const media::Metadata::Filter& /* ids */,
- Parcel* /* records */) {
- return INVALID_OPERATION;
- };
-
- void setNotifyCallback(
- void* cookie, notify_callback_f notifyFunc) {
- Mutex::Autolock autoLock(mNotifyLock);
- mCookie = cookie; mNotify = notifyFunc;
- }
-
- void sendEvent(int msg, int ext1=0, int ext2=0,
- const Parcel *obj=NULL) {
- notify_callback_f notifyCB;
- void* cookie;
- {
- Mutex::Autolock autoLock(mNotifyLock);
- notifyCB = mNotify;
- cookie = mCookie;
- }
-
- if (notifyCB) notifyCB(cookie, msg, ext1, ext2, obj);
- }
-
- virtual status_t dump(int /* fd */, const Vector<String16>& /* args */) const {
- return INVALID_OPERATION;
- }
-
-private:
- friend class MediaPlayerService;
-
- Mutex mNotifyLock;
- void* mCookie;
- notify_callback_f mNotify;
-};
-
-// Implement this class for media players that use the AudioFlinger software mixer
-class MediaPlayerInterface : public MediaPlayerBase
-{
-public:
- virtual ~MediaPlayerInterface() { }
- virtual bool hardwareOutput() { return false; }
- virtual void setAudioSink(const sp<AudioSink>& audioSink) { mAudioSink = audioSink; }
-protected:
- sp<AudioSink> mAudioSink;
-};
-
-// Implement this class for media players that output audio directly to hardware
-class MediaPlayerHWInterface : public MediaPlayerBase
-{
-public:
- virtual ~MediaPlayerHWInterface() {}
- virtual bool hardwareOutput() { return true; }
- virtual status_t setVolume(float leftVolume, float rightVolume) = 0;
- virtual status_t setAudioStreamType(audio_stream_type_t streamType) = 0;
-};
-
-}; // namespace android
-
-#endif // __cplusplus
-
-
-#endif // ANDROID_MEDIAPLAYERINTERFACE_H
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
new file mode 120000
index 0000000..9b2e1c7
--- /dev/null
+++ b/include/media/MediaPlayerInterface.h
@@ -0,0 +1 @@
+../../media/libmediaplayerservice/include/MediaPlayerInterface.h
\ No newline at end of file
diff --git a/include/media/MediaProfiles.h b/include/media/MediaProfiles.h
deleted file mode 100644
index e02918f..0000000
--- a/include/media/MediaProfiles.h
+++ /dev/null
@@ -1,452 +0,0 @@
-/*
- **
- ** Copyright 2010, The Android Open Source Project.
- **
- ** Licensed under the Apache License, Version 2.0 (the "License");
- ** you may not use this file except in compliance with the License.
- ** You may obtain a copy of the License at
- **
- ** http://www.apache.org/licenses/LICENSE-2.0
- **
- ** Unless required by applicable law or agreed to in writing, software
- ** distributed under the License is distributed on an "AS IS" BASIS,
- ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- ** See the License for the specific language governing permissions and
- ** limitations under the License.
- */
-
-#ifndef ANDROID_MEDIAPROFILES_H
-#define ANDROID_MEDIAPROFILES_H
-
-#include <utils/threads.h>
-#include <media/mediarecorder.h>
-
-namespace android {
-
-enum camcorder_quality {
- CAMCORDER_QUALITY_LIST_START = 0,
- CAMCORDER_QUALITY_LOW = 0,
- CAMCORDER_QUALITY_HIGH = 1,
- CAMCORDER_QUALITY_QCIF = 2,
- CAMCORDER_QUALITY_CIF = 3,
- CAMCORDER_QUALITY_480P = 4,
- CAMCORDER_QUALITY_720P = 5,
- CAMCORDER_QUALITY_1080P = 6,
- CAMCORDER_QUALITY_QVGA = 7,
- CAMCORDER_QUALITY_2160P = 8,
- CAMCORDER_QUALITY_LIST_END = 8,
-
- CAMCORDER_QUALITY_TIME_LAPSE_LIST_START = 1000,
- CAMCORDER_QUALITY_TIME_LAPSE_LOW = 1000,
- CAMCORDER_QUALITY_TIME_LAPSE_HIGH = 1001,
- CAMCORDER_QUALITY_TIME_LAPSE_QCIF = 1002,
- CAMCORDER_QUALITY_TIME_LAPSE_CIF = 1003,
- CAMCORDER_QUALITY_TIME_LAPSE_480P = 1004,
- CAMCORDER_QUALITY_TIME_LAPSE_720P = 1005,
- CAMCORDER_QUALITY_TIME_LAPSE_1080P = 1006,
- CAMCORDER_QUALITY_TIME_LAPSE_QVGA = 1007,
- CAMCORDER_QUALITY_TIME_LAPSE_2160P = 1008,
- CAMCORDER_QUALITY_TIME_LAPSE_LIST_END = 1008,
-
- CAMCORDER_QUALITY_HIGH_SPEED_LIST_START = 2000,
- CAMCORDER_QUALITY_HIGH_SPEED_LOW = 2000,
- CAMCORDER_QUALITY_HIGH_SPEED_HIGH = 2001,
- CAMCORDER_QUALITY_HIGH_SPEED_480P = 2002,
- CAMCORDER_QUALITY_HIGH_SPEED_720P = 2003,
- CAMCORDER_QUALITY_HIGH_SPEED_1080P = 2004,
- CAMCORDER_QUALITY_HIGH_SPEED_2160P = 2005,
- CAMCORDER_QUALITY_HIGH_SPEED_LIST_END = 2005,
-};
-
-enum video_decoder {
- VIDEO_DECODER_WMV,
-};
-
-enum audio_decoder {
- AUDIO_DECODER_WMA,
-};
-
-
-class MediaProfiles
-{
-public:
-
- /**
- * Returns the singleton instance for subsequence queries.
- * or NULL if error.
- */
- static MediaProfiles* getInstance();
-
- /**
- * Returns the value for the given param name for the given camera at
- * the given quality level, or -1 if error.
- *
- * Supported param name are:
- * duration - the recording duration.
- * file.format - output file format. see mediarecorder.h for details
- * vid.codec - video encoder. see mediarecorder.h for details.
- * aud.codec - audio encoder. see mediarecorder.h for details.
- * vid.width - video frame width
- * vid.height - video frame height
- * vid.fps - video frame rate
- * vid.bps - video bit rate
- * aud.bps - audio bit rate
- * aud.hz - audio sample rate
- * aud.ch - number of audio channels
- */
- int getCamcorderProfileParamByName(const char *name, int cameraId,
- camcorder_quality quality) const;
-
- /**
- * Returns true if a profile for the given camera at the given quality exists,
- * or false if not.
- */
- bool hasCamcorderProfile(int cameraId, camcorder_quality quality) const;
-
- /**
- * Returns the output file formats supported.
- */
- Vector<output_format> getOutputFileFormats() const;
-
- /**
- * Returns the video encoders supported.
- */
- Vector<video_encoder> getVideoEncoders() const;
-
- /**
- * Returns the value for the given param name for the given video encoder
- * returned from getVideoEncoderByIndex or -1 if error.
- *
- * Supported param name are:
- * enc.vid.width.min - min video frame width
- * enc.vid.width.max - max video frame width
- * enc.vid.height.min - min video frame height
- * enc.vid.height.max - max video frame height
- * enc.vid.bps.min - min bit rate in bits per second
- * enc.vid.bps.max - max bit rate in bits per second
- * enc.vid.fps.min - min frame rate in frames per second
- * enc.vid.fps.max - max frame rate in frames per second
- */
- int getVideoEncoderParamByName(const char *name, video_encoder codec) const;
-
- /**
- * Returns the audio encoders supported.
- */
- Vector<audio_encoder> getAudioEncoders() const;
-
- /**
- * Returns the value for the given param name for the given audio encoder
- * returned from getAudioEncoderByIndex or -1 if error.
- *
- * Supported param name are:
- * enc.aud.ch.min - min number of channels
- * enc.aud.ch.max - max number of channels
- * enc.aud.bps.min - min bit rate in bits per second
- * enc.aud.bps.max - max bit rate in bits per second
- * enc.aud.hz.min - min sample rate in samples per second
- * enc.aud.hz.max - max sample rate in samples per second
- */
- int getAudioEncoderParamByName(const char *name, audio_encoder codec) const;
-
- /**
- * Returns the video decoders supported.
- */
- Vector<video_decoder> getVideoDecoders() const;
-
- /**
- * Returns the audio decoders supported.
- */
- Vector<audio_decoder> getAudioDecoders() const;
-
- /**
- * Returns the number of image encoding quality levels supported.
- */
- Vector<int> getImageEncodingQualityLevels(int cameraId) const;
-
- /**
- * Returns the start time offset (in ms) for the given camera Id.
- * If the given camera Id does not exist, -1 will be returned.
- */
- int getStartTimeOffsetMs(int cameraId) const;
-
-private:
- enum {
- // Camcorder profiles (high/low) and timelapse profiles (high/low)
- kNumRequiredProfiles = 4,
- };
-
- MediaProfiles& operator=(const MediaProfiles&); // Don't call me
- MediaProfiles(const MediaProfiles&); // Don't call me
- MediaProfiles() {} // Dummy default constructor
- ~MediaProfiles(); // Don't delete me
-
- struct VideoCodec {
- VideoCodec(video_encoder codec, int bitRate, int frameWidth, int frameHeight, int frameRate)
- : mCodec(codec),
- mBitRate(bitRate),
- mFrameWidth(frameWidth),
- mFrameHeight(frameHeight),
- mFrameRate(frameRate) {}
-
- VideoCodec(const VideoCodec& copy) {
- mCodec = copy.mCodec;
- mBitRate = copy.mBitRate;
- mFrameWidth = copy.mFrameWidth;
- mFrameHeight = copy.mFrameHeight;
- mFrameRate = copy.mFrameRate;
- }
-
- ~VideoCodec() {}
-
- video_encoder mCodec;
- int mBitRate;
- int mFrameWidth;
- int mFrameHeight;
- int mFrameRate;
- };
-
- struct AudioCodec {
- AudioCodec(audio_encoder codec, int bitRate, int sampleRate, int channels)
- : mCodec(codec),
- mBitRate(bitRate),
- mSampleRate(sampleRate),
- mChannels(channels) {}
-
- AudioCodec(const AudioCodec& copy) {
- mCodec = copy.mCodec;
- mBitRate = copy.mBitRate;
- mSampleRate = copy.mSampleRate;
- mChannels = copy.mChannels;
- }
-
- ~AudioCodec() {}
-
- audio_encoder mCodec;
- int mBitRate;
- int mSampleRate;
- int mChannels;
- };
-
- struct CamcorderProfile {
- CamcorderProfile()
- : mCameraId(0),
- mFileFormat(OUTPUT_FORMAT_THREE_GPP),
- mQuality(CAMCORDER_QUALITY_HIGH),
- mDuration(0),
- mVideoCodec(0),
- mAudioCodec(0) {}
-
- CamcorderProfile(const CamcorderProfile& copy) {
- mCameraId = copy.mCameraId;
- mFileFormat = copy.mFileFormat;
- mQuality = copy.mQuality;
- mDuration = copy.mDuration;
- mVideoCodec = new VideoCodec(*copy.mVideoCodec);
- mAudioCodec = new AudioCodec(*copy.mAudioCodec);
- }
-
- ~CamcorderProfile() {
- delete mVideoCodec;
- delete mAudioCodec;
- }
-
- int mCameraId;
- output_format mFileFormat;
- camcorder_quality mQuality;
- int mDuration;
- VideoCodec *mVideoCodec;
- AudioCodec *mAudioCodec;
- };
-
- struct VideoEncoderCap {
- // Ugly constructor
- VideoEncoderCap(video_encoder codec,
- int minBitRate, int maxBitRate,
- int minFrameWidth, int maxFrameWidth,
- int minFrameHeight, int maxFrameHeight,
- int minFrameRate, int maxFrameRate)
- : mCodec(codec),
- mMinBitRate(minBitRate), mMaxBitRate(maxBitRate),
- mMinFrameWidth(minFrameWidth), mMaxFrameWidth(maxFrameWidth),
- mMinFrameHeight(minFrameHeight), mMaxFrameHeight(maxFrameHeight),
- mMinFrameRate(minFrameRate), mMaxFrameRate(maxFrameRate) {}
-
- ~VideoEncoderCap() {}
-
- video_encoder mCodec;
- int mMinBitRate, mMaxBitRate;
- int mMinFrameWidth, mMaxFrameWidth;
- int mMinFrameHeight, mMaxFrameHeight;
- int mMinFrameRate, mMaxFrameRate;
- };
-
- struct AudioEncoderCap {
- // Ugly constructor
- AudioEncoderCap(audio_encoder codec,
- int minBitRate, int maxBitRate,
- int minSampleRate, int maxSampleRate,
- int minChannels, int maxChannels)
- : mCodec(codec),
- mMinBitRate(minBitRate), mMaxBitRate(maxBitRate),
- mMinSampleRate(minSampleRate), mMaxSampleRate(maxSampleRate),
- mMinChannels(minChannels), mMaxChannels(maxChannels) {}
-
- ~AudioEncoderCap() {}
-
- audio_encoder mCodec;
- int mMinBitRate, mMaxBitRate;
- int mMinSampleRate, mMaxSampleRate;
- int mMinChannels, mMaxChannels;
- };
-
- struct VideoDecoderCap {
- VideoDecoderCap(video_decoder codec): mCodec(codec) {}
- ~VideoDecoderCap() {}
-
- video_decoder mCodec;
- };
-
- struct AudioDecoderCap {
- AudioDecoderCap(audio_decoder codec): mCodec(codec) {}
- ~AudioDecoderCap() {}
-
- audio_decoder mCodec;
- };
-
- struct NameToTagMap {
- const char* name;
- int tag;
- };
-
- struct ImageEncodingQualityLevels {
- int mCameraId;
- Vector<int> mLevels;
- };
-
- int getCamcorderProfileIndex(int cameraId, camcorder_quality quality) const;
- void initRequiredProfileRefs(const Vector<int>& cameraIds);
- int getRequiredProfileRefIndex(int cameraId);
-
- // Debug
- static void logVideoCodec(const VideoCodec& codec);
- static void logAudioCodec(const AudioCodec& codec);
- static void logVideoEncoderCap(const VideoEncoderCap& cap);
- static void logAudioEncoderCap(const AudioEncoderCap& cap);
- static void logVideoDecoderCap(const VideoDecoderCap& cap);
- static void logAudioDecoderCap(const AudioDecoderCap& cap);
-
- // If the xml configuration file does exist, use the settings
- // from the xml
- static MediaProfiles* createInstanceFromXmlFile(const char *xml);
- static output_format createEncoderOutputFileFormat(const char **atts);
- static VideoCodec* createVideoCodec(const char **atts, MediaProfiles *profiles);
- static AudioCodec* createAudioCodec(const char **atts, MediaProfiles *profiles);
- static AudioDecoderCap* createAudioDecoderCap(const char **atts);
- static VideoDecoderCap* createVideoDecoderCap(const char **atts);
- static VideoEncoderCap* createVideoEncoderCap(const char **atts);
- static AudioEncoderCap* createAudioEncoderCap(const char **atts);
-
- static CamcorderProfile* createCamcorderProfile(
- int cameraId, const char **atts, Vector<int>& cameraIds);
-
- static int getCameraId(const char **atts);
-
- void addStartTimeOffset(int cameraId, const char **atts);
-
- ImageEncodingQualityLevels* findImageEncodingQualityLevels(int cameraId) const;
- void addImageEncodingQualityLevel(int cameraId, const char** atts);
-
- // Customized element tag handler for parsing the xml configuration file.
- static void startElementHandler(void *userData, const char *name, const char **atts);
-
- // If the xml configuration file does not exist, use hard-coded values
- static MediaProfiles* createDefaultInstance();
-
- static CamcorderProfile *createDefaultCamcorderQcifProfile(camcorder_quality quality);
- static CamcorderProfile *createDefaultCamcorderCifProfile(camcorder_quality quality);
- static void createDefaultCamcorderLowProfiles(
- MediaProfiles::CamcorderProfile **lowProfile,
- MediaProfiles::CamcorderProfile **lowSpecificProfile);
- static void createDefaultCamcorderHighProfiles(
- MediaProfiles::CamcorderProfile **highProfile,
- MediaProfiles::CamcorderProfile **highSpecificProfile);
-
- static CamcorderProfile *createDefaultCamcorderTimeLapseQcifProfile(camcorder_quality quality);
- static CamcorderProfile *createDefaultCamcorderTimeLapse480pProfile(camcorder_quality quality);
- static void createDefaultCamcorderTimeLapseLowProfiles(
- MediaProfiles::CamcorderProfile **lowTimeLapseProfile,
- MediaProfiles::CamcorderProfile **lowSpecificTimeLapseProfile);
- static void createDefaultCamcorderTimeLapseHighProfiles(
- MediaProfiles::CamcorderProfile **highTimeLapseProfile,
- MediaProfiles::CamcorderProfile **highSpecificTimeLapseProfile);
-
- static void createDefaultCamcorderProfiles(MediaProfiles *profiles);
- static void createDefaultVideoEncoders(MediaProfiles *profiles);
- static void createDefaultAudioEncoders(MediaProfiles *profiles);
- static void createDefaultVideoDecoders(MediaProfiles *profiles);
- static void createDefaultAudioDecoders(MediaProfiles *profiles);
- static void createDefaultEncoderOutputFileFormats(MediaProfiles *profiles);
- static void createDefaultImageEncodingQualityLevels(MediaProfiles *profiles);
- static void createDefaultImageDecodingMaxMemory(MediaProfiles *profiles);
-
- static VideoEncoderCap* createDefaultH263VideoEncoderCap();
- static VideoEncoderCap* createDefaultM4vVideoEncoderCap();
- static AudioEncoderCap* createDefaultAmrNBEncoderCap();
-
- static int findTagForName(const NameToTagMap *map, size_t nMappings, const char *name);
-
- /**
- * Check on existing profiles with the following criteria:
- * 1. Low quality profile must have the lowest video
- * resolution product (width x height)
- * 2. High quality profile must have the highest video
- * resolution product (width x height)
- *
- * and add required low/high quality camcorder/timelapse
- * profiles if they are not found. This allows to remove
- * duplicate profile definitions in the media_profiles.xml
- * file.
- */
- void checkAndAddRequiredProfilesIfNecessary();
-
-
- // Mappings from name (for instance, codec name) to enum value
- static const NameToTagMap sVideoEncoderNameMap[];
- static const NameToTagMap sAudioEncoderNameMap[];
- static const NameToTagMap sFileFormatMap[];
- static const NameToTagMap sVideoDecoderNameMap[];
- static const NameToTagMap sAudioDecoderNameMap[];
- static const NameToTagMap sCamcorderQualityNameMap[];
-
- static bool sIsInitialized;
- static MediaProfiles *sInstance;
- static Mutex sLock;
- int mCurrentCameraId;
-
- Vector<CamcorderProfile*> mCamcorderProfiles;
- Vector<AudioEncoderCap*> mAudioEncoders;
- Vector<VideoEncoderCap*> mVideoEncoders;
- Vector<AudioDecoderCap*> mAudioDecoders;
- Vector<VideoDecoderCap*> mVideoDecoders;
- Vector<output_format> mEncoderOutputFileFormats;
- Vector<ImageEncodingQualityLevels *> mImageEncodingQualityLevels;
- KeyedVector<int, int> mStartTimeOffsets;
-
- typedef struct {
- bool mHasRefProfile; // Refers to an existing profile
- int mRefProfileIndex; // Reference profile index
- int mResolutionProduct; // width x height
- } RequiredProfileRefInfo; // Required low and high profiles
-
- typedef struct {
- RequiredProfileRefInfo mRefs[kNumRequiredProfiles];
- int mCameraId;
- } RequiredProfiles;
-
- RequiredProfiles *mRequiredProfileRefs;
- Vector<int> mCameraIds;
-};
-
-}; // namespace android
-
-#endif // ANDROID_MEDIAPROFILES_H
diff --git a/include/media/MediaProfiles.h b/include/media/MediaProfiles.h
new file mode 120000
index 0000000..651c6e6
--- /dev/null
+++ b/include/media/MediaProfiles.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/MediaProfiles.h
\ No newline at end of file
diff --git a/include/media/MediaRecorderBase.h b/include/media/MediaRecorderBase.h
deleted file mode 100644
index 42151ea..0000000
--- a/include/media/MediaRecorderBase.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_RECORDER_BASE_H_
-
-#define MEDIA_RECORDER_BASE_H_
-
-#include <media/mediarecorder.h>
-
-#include <system/audio.h>
-
-namespace android {
-
-class ICameraRecordingProxy;
-class Surface;
-class IGraphicBufferConsumer;
-class IGraphicBufferProducer;
-
-struct MediaRecorderBase {
- MediaRecorderBase(const String16 &opPackageName)
- : mOpPackageName(opPackageName) {}
- virtual ~MediaRecorderBase() {}
-
- virtual status_t init() = 0;
- virtual status_t setAudioSource(audio_source_t as) = 0;
- virtual status_t setVideoSource(video_source vs) = 0;
- virtual status_t setOutputFormat(output_format of) = 0;
- virtual status_t setAudioEncoder(audio_encoder ae) = 0;
- virtual status_t setVideoEncoder(video_encoder ve) = 0;
- virtual status_t setVideoSize(int width, int height) = 0;
- virtual status_t setVideoFrameRate(int frames_per_second) = 0;
- virtual status_t setCamera(const sp<hardware::ICamera>& camera,
- const sp<ICameraRecordingProxy>& proxy) = 0;
- virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface) = 0;
- virtual status_t setOutputFile(int fd, int64_t offset, int64_t length) = 0;
- virtual status_t setOutputFileAuxiliary(int /*fd*/) {return INVALID_OPERATION;}
- virtual status_t setParameters(const String8& params) = 0;
- virtual status_t setListener(const sp<IMediaRecorderClient>& listener) = 0;
- virtual status_t setClientName(const String16& clientName) = 0;
- virtual status_t prepare() = 0;
- virtual status_t start() = 0;
- virtual status_t stop() = 0;
- virtual status_t pause() = 0;
- virtual status_t resume() = 0;
- virtual status_t close() = 0;
- virtual status_t reset() = 0;
- virtual status_t getMaxAmplitude(int *max) = 0;
- virtual status_t dump(int fd, const Vector<String16>& args) const = 0;
- virtual status_t setInputSurface(const sp<IGraphicBufferConsumer>& surface) = 0;
- virtual sp<IGraphicBufferProducer> querySurfaceMediaSource() const = 0;
-
-
-protected:
- String16 mOpPackageName;
-
-private:
- MediaRecorderBase(const MediaRecorderBase &);
- MediaRecorderBase &operator=(const MediaRecorderBase &);
-};
-
-} // namespace android
-
-#endif // MEDIA_RECORDER_BASE_H_
diff --git a/include/media/MediaRecorderBase.h b/include/media/MediaRecorderBase.h
new file mode 120000
index 0000000..e40f992
--- /dev/null
+++ b/include/media/MediaRecorderBase.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/MediaRecorderBase.h
\ No newline at end of file
diff --git a/include/media/MediaResource.h b/include/media/MediaResource.h
new file mode 120000
index 0000000..91346aa
--- /dev/null
+++ b/include/media/MediaResource.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/MediaResource.h
\ No newline at end of file
diff --git a/include/media/MediaResourcePolicy.h b/include/media/MediaResourcePolicy.h
new file mode 120000
index 0000000..5d165ee
--- /dev/null
+++ b/include/media/MediaResourcePolicy.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/MediaResourcePolicy.h
\ No newline at end of file
diff --git a/include/media/MemoryLeakTrackUtil.h b/include/media/MemoryLeakTrackUtil.h
new file mode 120000
index 0000000..504173e
--- /dev/null
+++ b/include/media/MemoryLeakTrackUtil.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/MemoryLeakTrackUtil.h
\ No newline at end of file
diff --git a/include/media/Metadata.h b/include/media/Metadata.h
new file mode 120000
index 0000000..e421168
--- /dev/null
+++ b/include/media/Metadata.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/Metadata.h
\ No newline at end of file
diff --git a/include/media/MidiDeviceInfo.h b/include/media/MidiDeviceInfo.h
new file mode 120000
index 0000000..95da7cf
--- /dev/null
+++ b/include/media/MidiDeviceInfo.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/MidiDeviceInfo.h
\ No newline at end of file
diff --git a/include/media/MidiIoWrapper.h b/include/media/MidiIoWrapper.h
new file mode 120000
index 0000000..786ec3d
--- /dev/null
+++ b/include/media/MidiIoWrapper.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/MidiIoWrapper.h
\ No newline at end of file
diff --git a/include/media/MmapStreamCallback.h b/include/media/MmapStreamCallback.h
new file mode 100644
index 0000000..8098e79
--- /dev/null
+++ b/include/media/MmapStreamCallback.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_MMAP_STREAM_CALLBACK_H
+#define ANDROID_AUDIO_MMAP_STREAM_CALLBACK_H
+
+#include <system/audio.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+
+class MmapStreamCallback : public virtual RefBase {
+ public:
+
+ /**
+ * The mmap stream should be torn down because conditions that permitted its creation with
+ * the requested parameters have changed and do not allow it to operate with the requested
+ * constraints any more.
+ */
+ virtual void onTearDown() = 0;
+
+ /**
+ * The volume to be applied to the use case specified when opening the stream has changed
+ * \param[in] channels a channel mask containing all channels the volume should be applied to.
+ * \param[in] values the volume values to be applied to each channel. The size of the vector
+ * should correspond to the channel count retrieved with
+ * audio_channel_count_from_in_mask() or audio_channel_count_from_out_mask()
+ */
+ virtual void onVolumeChanged(audio_channel_mask_t channels, Vector<float> values) = 0;
+
+ /**
+ * The device the stream is routed to/from has changed
+ * \param[in] onRoutingChanged the unique device ID of the new device.
+ */
+ virtual void onRoutingChanged(audio_port_handle_t deviceId) = 0;
+
+ protected:
+ MmapStreamCallback() {}
+ virtual ~MmapStreamCallback() {}
+};
+
+
+} // namespace android
+
+#endif // ANDROID_AUDIO_MMAP_STREAM_CALLBACK_H
diff --git a/include/media/MmapStreamInterface.h b/include/media/MmapStreamInterface.h
new file mode 100644
index 0000000..7dbc19e
--- /dev/null
+++ b/include/media/MmapStreamInterface.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_MMAP_STREAM_INTERFACE_H
+#define ANDROID_AUDIO_MMAP_STREAM_INTERFACE_H
+
+#include <system/audio.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+class MmapStreamCallback;
+
+class MmapStreamInterface : public virtual RefBase
+{
+ public:
+
+ /**
+ * Values for direction argument passed to openMmapStream()
+ */
+ typedef enum {
+ DIRECTION_OUTPUT = 0, /**< open a playback mmap stream */
+ DIRECTION_INPUT, /**< open a capture mmap stream */
+ } stream_direction_t;
+
+ class Client {
+ public:
+ uid_t clientUid;
+ pid_t clientPid;
+ String16 packageName;
+ };
+ /**
+ * Open a playback or capture stream in MMAP mode at the audio HAL.
+ *
+ * \note This method is implemented by AudioFlinger
+ *
+ * \param[in] direction open a playback or capture stream.
+ * \param[in] attr audio attributes defining the main use case for this stream
+ * \param[in,out] config audio parameters (sampling rate, format ...) for the stream.
+ * Requested parameters as input,
+ * Actual parameters as output
+ * \param[in] client a Client struct describing the first client using this stream.
+ * \param[in,out] deviceId audio device the stream should preferably be routed to/from
+ * Requested as input,
+ * Actual as output
+ * \param[in] callback the MmapStreamCallback interface used by AudioFlinger to notify
+ * condition changes affecting the stream operation
+ * \param[out] interface the MmapStreamInterface interface controlling the created stream
+ * \return OK if the stream was successfully created.
+ * NO_INIT if AudioFlinger is not properly initialized
+ * BAD_VALUE if the stream cannot be opened because of invalid arguments
+ * INVALID_OPERATION if the stream cannot be opened because of platform limitations
+ */
+ static status_t openMmapStream(stream_direction_t direction,
+ const audio_attributes_t *attr,
+ audio_config_base_t *config,
+ const Client& client,
+ audio_port_handle_t *deviceId,
+ const sp<MmapStreamCallback>& callback,
+ sp<MmapStreamInterface>& interface);
+
+ /**
+ * Retrieve information on the mmap buffer used for audio samples transfer.
+ * Must be called before any other method after opening the stream or entering standby.
+ *
+ * \param[in] minSizeFrames minimum buffer size requested. The actual buffer
+ * size returned in struct audio_mmap_buffer_info can be larger.
+ * \param[out] info address at which the mmap buffer information should be returned.
+ *
+ * \return OK if the buffer was allocated.
+ * NO_INIT in case of initialization error
+ * BAD_VALUE if the requested buffer size is too large
+ * INVALID_OPERATION if called out of sequence (e.g. buffer already allocated)
+ */
+ virtual status_t createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info) = 0;
+
+ /**
+ * Read current read/write position in the mmap buffer with associated time stamp.
+ *
+ * \param[out] position address at which the mmap read/write position should be returned.
+ *
+ * \return OK if the position is successfully returned.
+ * NO_INIT in case of initialization error
+ * NOT_ENOUGH_DATA if the position cannot be retrieved
+ * INVALID_OPERATION if called before createMmapBuffer()
+ */
+ virtual status_t getMmapPosition(struct audio_mmap_position *position) = 0;
+
+ /**
+ * Start a stream operating in mmap mode.
+ * createMmapBuffer() must be called before calling start()
+ *
+ * \param[in] client a Client struct describing the client starting on this stream.
+ * \param[out] handle unique handle for this instance. Used with stop().
+ * \return OK in case of success.
+ * NO_INIT in case of initialization error
+ * INVALID_OPERATION if called out of sequence
+ */
+ virtual status_t start(const Client& client, audio_port_handle_t *handle) = 0;
+
+ /**
+ * Stop a stream operating in mmap mode.
+ * Must be called after start()
+ *
+ * \param[in] handle unique handle allocated by start().
+ * \return OK in case of success.
+ * NO_INIT in case of initialization error
+ * INVALID_OPERATION if called out of sequence
+ */
+ virtual status_t stop(audio_port_handle_t handle) = 0;
+
+ /**
+ * Put a stream operating in mmap mode into standby.
+ * Must be called after createMmapBuffer(). Cannot be called if any client is active.
+ * It is recommended to place a mmap stream into standby as often as possible when no client is
+ * active to save power.
+ *
+ * \return OK in case of success.
+ * NO_INIT in case of initialization error
+ * INVALID_OPERATION if called out of sequence
+ */
+ virtual status_t standby() = 0;
+
+ protected:
+ // Subclasses can not be constructed directly by clients.
+ MmapStreamInterface() {}
+
+ // The destructor automatically closes the stream.
+ virtual ~MmapStreamInterface() {}
+};
+
+} // namespace android
+
+#endif // ANDROID_AUDIO_MMAP_STREAM_INTERFACE_H
diff --git a/include/media/Modulo.h b/include/media/Modulo.h
new file mode 120000
index 0000000..989c4cb
--- /dev/null
+++ b/include/media/Modulo.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/Modulo.h
\ No newline at end of file
diff --git a/include/media/OMXBuffer.h b/include/media/OMXBuffer.h
new file mode 120000
index 0000000..00db207
--- /dev/null
+++ b/include/media/OMXBuffer.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/OMXBuffer.h
\ No newline at end of file
diff --git a/include/media/OMXFenceParcelable.h b/include/media/OMXFenceParcelable.h
new file mode 120000
index 0000000..c4c1b0a
--- /dev/null
+++ b/include/media/OMXFenceParcelable.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/OMXFenceParcelable.h
\ No newline at end of file
diff --git a/include/media/PluginLoader.h b/include/media/PluginLoader.h
new file mode 120000
index 0000000..9101735
--- /dev/null
+++ b/include/media/PluginLoader.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/PluginLoader.h
\ No newline at end of file
diff --git a/include/media/RecordBufferConverter.h b/include/media/RecordBufferConverter.h
new file mode 120000
index 0000000..2d7bc0c
--- /dev/null
+++ b/include/media/RecordBufferConverter.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/RecordBufferConverter.h
\ No newline at end of file
diff --git a/include/media/RingBuffer.h b/include/media/RingBuffer.h
new file mode 120000
index 0000000..9af28d5
--- /dev/null
+++ b/include/media/RingBuffer.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/RingBuffer.h
\ No newline at end of file
diff --git a/include/media/SharedLibrary.h b/include/media/SharedLibrary.h
new file mode 120000
index 0000000..9f8f5a4
--- /dev/null
+++ b/include/media/SharedLibrary.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/SharedLibrary.h
\ No newline at end of file
diff --git a/include/media/SingleStateQueue.h b/include/media/SingleStateQueue.h
new file mode 120000
index 0000000..619f6ee
--- /dev/null
+++ b/include/media/SingleStateQueue.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/SingleStateQueue.h
\ No newline at end of file
diff --git a/include/media/StringArray.h b/include/media/StringArray.h
new file mode 120000
index 0000000..616ce6c
--- /dev/null
+++ b/include/media/StringArray.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/StringArray.h
\ No newline at end of file
diff --git a/include/media/ToneGenerator.h b/include/media/ToneGenerator.h
deleted file mode 100644
index 9fd5f61..0000000
--- a/include/media/ToneGenerator.h
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_TONEGENERATOR_H_
-#define ANDROID_TONEGENERATOR_H_
-
-#include <media/AudioSystem.h>
-#include <media/AudioTrack.h>
-#include <utils/Compat.h>
-#include <utils/KeyedVector.h>
-#include <utils/RefBase.h>
-#include <utils/threads.h>
-
-namespace android {
-
-class ToneGenerator {
-public:
-
- // List of all available tones
- // This enum must be kept consistant with constants in ToneGenerator JAVA class
- enum tone_type {
- // DTMF tones ITU-T Recommendation Q.23
- TONE_DTMF_0 = 0, // 0 key: 1336Hz, 941Hz
- TONE_DTMF_1, // 1 key: 1209Hz, 697Hz
- TONE_DTMF_2, // 2 key: 1336Hz, 697Hz
- TONE_DTMF_3, // 3 key: 1477Hz, 697Hz
- TONE_DTMF_4, // 4 key: 1209Hz, 770Hz
- TONE_DTMF_5, // 5 key: 1336Hz, 770Hz
- TONE_DTMF_6, // 6 key: 1477Hz, 770Hz
- TONE_DTMF_7, // 7 key: 1209Hz, 852Hz
- TONE_DTMF_8, // 8 key: 1336Hz, 852Hz
- TONE_DTMF_9, // 9 key: 1477Hz, 852Hz
- TONE_DTMF_S, // * key: 1209Hz, 941Hz
- TONE_DTMF_P, // # key: 1477Hz, 941Hz
- TONE_DTMF_A, // A key: 1633Hz, 697Hz
- TONE_DTMF_B, // B key: 1633Hz, 770Hz
- TONE_DTMF_C, // C key: 1633Hz, 852Hz
- TONE_DTMF_D, // D key: 1633Hz, 941Hz
- // Call supervisory tones: 3GPP TS 22.001 (CEPT)
- TONE_SUP_DIAL, // Dial tone: CEPT: 425Hz, continuous
- FIRST_SUP_TONE = TONE_SUP_DIAL,
- TONE_SUP_BUSY, // Busy tone, CEPT: 425Hz, 500ms ON, 500ms OFF...
- TONE_SUP_CONGESTION, // Congestion tone CEPT, JAPAN: 425Hz, 200ms ON, 200ms OFF...
- TONE_SUP_RADIO_ACK, // Radio path acknowlegment, CEPT, ANSI: 425Hz, 200ms ON
- TONE_SUP_RADIO_NOTAVAIL, // Radio path not available: 425Hz, 200ms ON, 200 OFF 3 bursts
- TONE_SUP_ERROR, // Error/Special info: 950Hz+1400Hz+1800Hz, 330ms ON, 1s OFF...
- TONE_SUP_CALL_WAITING, // Call Waiting CEPT,JAPAN: 425Hz, 200ms ON, 600ms OFF, 200ms ON, 3s OFF...
- TONE_SUP_RINGTONE, // Ring Tone CEPT, JAPAN: 425Hz, 1s ON, 4s OFF...
- LAST_SUP_TONE = TONE_SUP_RINGTONE,
- // Proprietary tones: 3GPP TS 31.111
- TONE_PROP_BEEP, // General beep: 400Hz+1200Hz, 35ms ON
- TONE_PROP_ACK, // Positive Acknowlgement: 1200Hz, 100ms ON, 100ms OFF 2 bursts
- TONE_PROP_NACK, // Negative Acknowlgement: 300Hz+400Hz+500Hz, 400ms ON
- TONE_PROP_PROMPT, // Prompt tone: 400Hz+1200Hz, 200ms ON
- TONE_PROP_BEEP2, // General double beep: 400Hz+1200Hz, 35ms ON, 200ms OFF, 35ms on
- // Additional call supervisory tones: specified by IS-95 only
- TONE_SUP_INTERCEPT, // Intercept tone: alternating 440 Hz and 620 Hz tones, each on for 250 ms.
- TONE_SUP_INTERCEPT_ABBREV, // Abbreviated intercept: intercept tone limited to 4 seconds
- TONE_SUP_CONGESTION_ABBREV, // Abbreviated congestion: congestion tone limited to 4 seconds
- TONE_SUP_CONFIRM, // Confirm tone: a 350 Hz tone added to a 440 Hz tone repeated 3 times in a 100 ms on, 100 ms off cycle.
- TONE_SUP_PIP, // Pip tone: four bursts of 480 Hz tone (0.1 s on, 0.1 s off).
-
- // CDMA Tones
- TONE_CDMA_DIAL_TONE_LITE,
- TONE_CDMA_NETWORK_USA_RINGBACK,
- TONE_CDMA_INTERCEPT,
- TONE_CDMA_ABBR_INTERCEPT,
- TONE_CDMA_REORDER,
- TONE_CDMA_ABBR_REORDER,
- TONE_CDMA_NETWORK_BUSY,
- TONE_CDMA_CONFIRM,
- TONE_CDMA_ANSWER,
- TONE_CDMA_NETWORK_CALLWAITING,
- TONE_CDMA_PIP,
-
- // ISDN
- TONE_CDMA_CALL_SIGNAL_ISDN_NORMAL, // ISDN Alert Normal
- TONE_CDMA_CALL_SIGNAL_ISDN_INTERGROUP, // ISDN Intergroup
- TONE_CDMA_CALL_SIGNAL_ISDN_SP_PRI, // ISDN SP PRI
- TONE_CDMA_CALL_SIGNAL_ISDN_PAT3, // ISDN Alert PAT3
- TONE_CDMA_CALL_SIGNAL_ISDN_PING_RING, // ISDN Alert PING RING
- TONE_CDMA_CALL_SIGNAL_ISDN_PAT5, // ISDN Alert PAT5
- TONE_CDMA_CALL_SIGNAL_ISDN_PAT6, // ISDN Alert PAT6
- TONE_CDMA_CALL_SIGNAL_ISDN_PAT7, // ISDN Alert PAT7
- // ISDN end
-
- // IS54
- TONE_CDMA_HIGH_L, // IS54 High Pitch Long
- TONE_CDMA_MED_L, // IS54 Med Pitch Long
- TONE_CDMA_LOW_L, // IS54 Low Pitch Long
- TONE_CDMA_HIGH_SS, // IS54 High Pitch Short Short
- TONE_CDMA_MED_SS, // IS54 Medium Pitch Short Short
- TONE_CDMA_LOW_SS, // IS54 Low Pitch Short Short
- TONE_CDMA_HIGH_SSL, // IS54 High Pitch Short Short Long
- TONE_CDMA_MED_SSL, // IS54 Medium Pitch Short Short Long
- TONE_CDMA_LOW_SSL, // IS54 Low Pitch Short Short Long
- TONE_CDMA_HIGH_SS_2, // IS54 High Pitch Short Short 2
- TONE_CDMA_MED_SS_2, // IS54 Med Pitch Short Short 2
- TONE_CDMA_LOW_SS_2, // IS54 Low Pitch Short Short 2
- TONE_CDMA_HIGH_SLS, // IS54 High Pitch Short Long Short
- TONE_CDMA_MED_SLS, // IS54 Med Pitch Short Long Short
- TONE_CDMA_LOW_SLS, // IS54 Low Pitch Short Long Short
- TONE_CDMA_HIGH_S_X4, // IS54 High Pitch Short Short Short Short
- TONE_CDMA_MED_S_X4, // IS54 Med Pitch Short Short Short Short
- TONE_CDMA_LOW_S_X4, // IS54 Low Pitch Short Short Short Short
- TONE_CDMA_HIGH_PBX_L, // PBX High Pitch Long
- TONE_CDMA_MED_PBX_L, // PBX Med Pitch Long
- TONE_CDMA_LOW_PBX_L, // PBX Low Pitch Long
- TONE_CDMA_HIGH_PBX_SS, // PBX High Short Short
- TONE_CDMA_MED_PBX_SS, // PBX Med Short Short
- TONE_CDMA_LOW_PBX_SS, // PBX Low Short Short
- TONE_CDMA_HIGH_PBX_SSL, // PBX High Short Short Long
- TONE_CDMA_MED_PBX_SSL, // PBX Med Short Short Long
- TONE_CDMA_LOW_PBX_SSL, // PBX Low Short Short Long
- TONE_CDMA_HIGH_PBX_SLS, // PBX High SLS
- TONE_CDMA_MED_PBX_SLS, // PBX Med SLS
- TONE_CDMA_LOW_PBX_SLS, // PBX Low SLS
- TONE_CDMA_HIGH_PBX_S_X4, // PBX High SSSS
- TONE_CDMA_MED_PBX_S_X4, // PBX Med SSSS
- TONE_CDMA_LOW_PBX_S_X4, // PBX LOW SSSS
- //IS54 end
- // proprietary
- TONE_CDMA_ALERT_NETWORK_LITE,
- TONE_CDMA_ALERT_AUTOREDIAL_LITE,
- TONE_CDMA_ONE_MIN_BEEP,
- TONE_CDMA_KEYPAD_VOLUME_KEY_LITE,
- TONE_CDMA_PRESSHOLDKEY_LITE,
- TONE_CDMA_ALERT_INCALL_LITE,
- TONE_CDMA_EMERGENCY_RINGBACK,
- TONE_CDMA_ALERT_CALL_GUARD,
- TONE_CDMA_SOFT_ERROR_LITE,
- TONE_CDMA_CALLDROP_LITE,
- // proprietary end
- TONE_CDMA_NETWORK_BUSY_ONE_SHOT,
- TONE_CDMA_ABBR_ALERT,
- TONE_CDMA_SIGNAL_OFF,
- //CDMA end
- NUM_TONES,
- NUM_SUP_TONES = LAST_SUP_TONE-FIRST_SUP_TONE+1
- };
-
- ToneGenerator(audio_stream_type_t streamType, float volume, bool threadCanCallJava = false);
- ~ToneGenerator();
-
- bool startTone(tone_type toneType, int durationMs = -1);
- void stopTone();
-
- bool isInited() { return (mState == TONE_IDLE)?false:true;}
-
- // returns the audio session this ToneGenerator belongs to or 0 if an error occured.
- int getSessionId() { return (mpAudioTrack == 0) ? 0 : mpAudioTrack->getSessionId(); }
-
-private:
-
- enum tone_state {
- TONE_IDLE, // ToneGenerator is being initialized or initialization failed
- TONE_INIT, // ToneGenerator has been successfully initialized and is not playing
- TONE_STARTING, // ToneGenerator is starting playing
- TONE_PLAYING, // ToneGenerator is playing
- TONE_STOPPING, // ToneGenerator is stoping
- TONE_STOPPED, // ToneGenerator is stopped: the AudioTrack will be stopped
- TONE_RESTARTING // A start request was received in active state (playing or stopping)
- };
-
-
- // Region specific tones.
- // These supervisory tones are different depending on the region (USA/CANADA, JAPAN, rest of the world).
- // When a tone in the range [FIRST_SUP_TONE, LAST_SUP_TONE] is requested, the region is determined
- // from system property gsm.operator.iso-country and the proper tone descriptor is selected with the
- // help of sToneMappingTable[]
- enum regional_tone_type {
- // ANSI supervisory tones
- TONE_ANSI_DIAL = NUM_TONES, // Dial tone: a continuous 350 Hz + 440 Hz tone.
- TONE_ANSI_BUSY, // Busy tone on: a 480 Hz + 620 Hz tone repeated in a 500 ms on, 500 ms off cycle.
- TONE_ANSI_CONGESTION, // Network congestion (reorder) tone on: a 480 Hz + 620 Hz tone repeated in a 250 ms on, 250 ms off cycle.
- TONE_ANSI_CALL_WAITING, // Call waiting tone on: 440 Hz, on for 300 ms, 9,7 s off followed by
- // (440 Hz, on for 100 ms off for 100 ms, on for 100 ms, 9,7s off and repeated as necessary).
- TONE_ANSI_RINGTONE, // Ring Tone: a 440 Hz + 480 Hz tone repeated in a 2 s on, 4 s off pattern.
- // JAPAN Supervisory tones
- TONE_JAPAN_DIAL, // Dial tone: 400Hz, continuous
- TONE_JAPAN_BUSY, // Busy tone: 400Hz, 500ms ON, 500ms OFF...
- TONE_JAPAN_RADIO_ACK, // Radio path acknowlegment: 400Hz, 1s ON, 2s OFF...
- // GB Supervisory tones
- TONE_GB_RINGTONE, // Ring Tone: A 400Hz + 450Hz tone repeated in a 0.4s on, 0.2s off, 0.4s on, 2.0s off pattern.
- // AUSTRALIA Supervisory tones
- TONE_AUSTRALIA_RINGTONE, // Ring tone: A 400Hz + 450Hz tone repeated in a 0.4s on, 0.2s off, 0.4s on, 2.0s off pattern.
- TONE_AUSTRALIA_BUSY, // Busy tone: 425 Hz repeated in a 0.375s on, 0.375s off pattern.
- TONE_AUSTRALIA_CALL_WAITING,// Call waiting tone: 425Hz tone repeated in a 0.2s on, 0.2s off, 0.2s on, 4.4s off pattern.
- TONE_AUSTRALIA_CONGESTION, // Congestion tone: 425Hz tone repeated in a 0.375s on, 0.375s off pattern
- NUM_ALTERNATE_TONES
- };
-
- enum region {
- ANSI,
- JAPAN,
- GB,
- AUSTRALIA,
- CEPT,
- NUM_REGIONS
- };
-
- static const unsigned char sToneMappingTable[NUM_REGIONS-1][NUM_SUP_TONES];
-
- static const unsigned int TONEGEN_MAX_WAVES = 3; // Maximun number of sine waves in a tone segment
- static const unsigned int TONEGEN_MAX_SEGMENTS = 12; // Maximun number of segments in a tone descriptor
- static const unsigned int TONEGEN_INF = 0xFFFFFFFF; // Represents infinite time duration
- static const CONSTEXPR float TONEGEN_GAIN = 0.9; // Default gain passed to WaveGenerator().
-
- // ToneDescriptor class contains all parameters needed to generate a tone:
- // - The array waveFreq[]:
- // 1 for static tone descriptors: contains the frequencies of all individual waves making the multi-tone.
- // 2 for active tone descritors: contains the indexes of the WaveGenerator objects in mWaveGens
- // The number of sine waves varies from 1 to TONEGEN_MAX_WAVES.
- // The first null value indicates that no more waves are needed.
- // - The array segments[] is used to generate the tone pulses. A segment is a period of time
- // during which the tone is ON or OFF. Segments with even index (starting from 0)
- // correspond to tone ON state and segments with odd index to OFF state.
- // The data stored in segments[] is the duration of the corresponding period in ms.
- // The first segment encountered with a 0 duration indicates that no more segment follows.
- // - loopCnt - Number of times to repeat a sequence of seqments after playing this
- // - loopIndx - The segment index to go back and play is loopcnt > 0
- // - repeatCnt indicates the number of times the sequence described by segments[] array must be repeated.
- // When the tone generator encounters the first 0 duration segment, it will compare repeatCnt to mCurCount.
- // If mCurCount > repeatCnt, the tone is stopped automatically. Otherwise, tone sequence will be
- // restarted from segment repeatSegment.
- // - repeatSegment number of the first repeated segment when repeatCnt is not null
-
- class ToneSegment {
- public:
- unsigned int duration;
- unsigned short waveFreq[TONEGEN_MAX_WAVES+1];
- unsigned short loopCnt;
- unsigned short loopIndx;
- };
-
- class ToneDescriptor {
- public:
- ToneSegment segments[TONEGEN_MAX_SEGMENTS+1];
- unsigned long repeatCnt;
- unsigned long repeatSegment;
- };
-
- static const ToneDescriptor sToneDescriptors[];
-
- bool mThreadCanCallJava;
- unsigned int mTotalSmp; // Total number of audio samples played (gives current time)
- unsigned int mNextSegSmp; // Position of next segment transition expressed in samples
- // NOTE: because mTotalSmp, mNextSegSmp are stored on 32 bit, current design will operate properly
- // only if tone duration is less than about 27 Hours(@44100Hz sampling rate). If this time is exceeded,
- // no crash will occur but tone sequence will show a glitch.
- unsigned int mMaxSmp; // Maximum number of audio samples played (maximun tone duration)
- int mDurationMs; // Maximum tone duration in ms
-
- unsigned short mCurSegment; // Current segment index in ToneDescriptor segments[]
- unsigned short mCurCount; // Current sequence repeat count
- volatile unsigned short mState; // ToneGenerator state (tone_state)
- unsigned short mRegion;
- const ToneDescriptor *mpToneDesc; // pointer to active tone descriptor
- const ToneDescriptor *mpNewToneDesc; // pointer to next active tone descriptor
-
- unsigned short mLoopCounter; // Current tone loopback count
-
- uint32_t mSamplingRate; // AudioFlinger Sampling rate
- sp<AudioTrack> mpAudioTrack; // Pointer to audio track used for playback
- Mutex mLock; // Mutex to control concurent access to ToneGenerator object from audio callback and application API
- Mutex mCbkCondLock; // Mutex associated to mWaitCbkCond
- Condition mWaitCbkCond; // condition enabling interface to wait for audio callback completion after a change is requested
- float mVolume; // Volume applied to audio track
- audio_stream_type_t mStreamType; // Audio stream used for output
- unsigned int mProcessSize; // Size of audio blocks generated at a time by audioCallback() (in PCM frames).
- struct timespec mStartTime; // tone start time: needed to guaranty actual tone duration
-
- bool initAudioTrack();
- static void audioCallback(int event, void* user, void *info);
- bool prepareWave();
- unsigned int numWaves(unsigned int segmentIdx);
- void clearWaveGens();
- tone_type getToneForRegion(tone_type toneType);
-
- // WaveGenerator generates a single sine wave
- class WaveGenerator {
- public:
- enum gen_command {
- WAVEGEN_START, // Start/restart wave from phase 0
- WAVEGEN_CONT, // Continue wave from current phase
- WAVEGEN_STOP // Stop wave on zero crossing
- };
-
- WaveGenerator(unsigned short samplingRate, unsigned short frequency,
- float volume);
- ~WaveGenerator();
-
- void getSamples(short *outBuffer, unsigned int count,
- unsigned int command);
-
- private:
- static const short GEN_AMP = 32000; // amplitude of generator
- static const short S_Q14 = 14; // shift for Q14
- static const short S_Q15 = 15; // shift for Q15
-
- short mA1_Q14; // Q14 coefficient
- // delay line of full amplitude generator
- long mS1, mS2; // delay line S2 oldest
- short mS2_0; // saved value for reinitialisation
- short mAmplitude_Q15; // Q15 amplitude
- };
-
- KeyedVector<unsigned short, WaveGenerator *> mWaveGens; // list of active wave generators.
-};
-
-}
-; // namespace android
-
-#endif /*ANDROID_TONEGENERATOR_H_*/
diff --git a/include/media/ToneGenerator.h b/include/media/ToneGenerator.h
new file mode 120000
index 0000000..33df0e3
--- /dev/null
+++ b/include/media/ToneGenerator.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/ToneGenerator.h
\ No newline at end of file
diff --git a/include/media/TypeConverter.h b/include/media/TypeConverter.h
new file mode 120000
index 0000000..837af44
--- /dev/null
+++ b/include/media/TypeConverter.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/TypeConverter.h
\ No newline at end of file
diff --git a/include/media/Visualizer.h b/include/media/Visualizer.h
deleted file mode 100644
index 7bb9e8b..0000000
--- a/include/media/Visualizer.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_MEDIA_VISUALIZER_H
-#define ANDROID_MEDIA_VISUALIZER_H
-
-#include <media/AudioEffect.h>
-#include <audio_effects/effect_visualizer.h>
-#include <utils/Thread.h>
-
-/**
- * The Visualizer class enables application to retrieve part of the currently playing audio for
- * visualization purpose. It is not an audio recording interface and only returns partial and low
- * quality audio content. However, to protect privacy of certain audio data (e.g voice mail) the use
- * of the visualizer requires the permission android.permission.RECORD_AUDIO.
- * The audio session ID passed to the constructor indicates which audio content should be
- * visualized:
- * - If the session is 0, the audio output mix is visualized
- * - If the session is not 0, the audio from a particular MediaPlayer or AudioTrack
- * using this audio session is visualized
- * Two types of representation of audio content can be captured:
- * - Waveform data: consecutive 8-bit (unsigned) mono samples by using the getWaveForm() method
- * - Frequency data: 8-bit magnitude FFT by using the getFft() method
- *
- * The length of the capture can be retrieved or specified by calling respectively
- * getCaptureSize() and setCaptureSize() methods. Note that the size of the FFT
- * is half of the specified capture size but both sides of the spectrum are returned yielding in a
- * number of bytes equal to the capture size. The capture size must be a power of 2 in the range
- * returned by getMinCaptureSize() and getMaxCaptureSize().
- * In addition to the polling capture mode, a callback mode is also available by installing a
- * callback function by use of the setCaptureCallBack() method. The rate at which the callback
- * is called as well as the type of data returned is specified.
- * Before capturing data, the Visualizer must be enabled by calling the setEnabled() method.
- * When data capture is not needed any more, the Visualizer should be disabled.
- */
-
-
-namespace android {
-
-// ----------------------------------------------------------------------------
-
-class Visualizer: public AudioEffect {
-public:
-
- enum callback_flags {
- CAPTURE_WAVEFORM = 0x00000001, // capture callback returns a PCM wave form
- CAPTURE_FFT = 0x00000002, // apture callback returns a frequency representation
- CAPTURE_CALL_JAVA = 0x00000004 // the callback thread can call java
- };
-
-
- /* Constructor.
- * See AudioEffect constructor for details on parameters.
- */
- Visualizer(const String16& opPackageName,
- int32_t priority = 0,
- effect_callback_t cbf = NULL,
- void* user = NULL,
- audio_session_t sessionId = AUDIO_SESSION_OUTPUT_MIX);
-
- ~Visualizer();
-
- virtual status_t setEnabled(bool enabled);
-
- // maximum capture size in samples
- static uint32_t getMaxCaptureSize() { return VISUALIZER_CAPTURE_SIZE_MAX; }
- // minimum capture size in samples
- static uint32_t getMinCaptureSize() { return VISUALIZER_CAPTURE_SIZE_MIN; }
- // maximum capture rate in millihertz
- static uint32_t getMaxCaptureRate() { return CAPTURE_RATE_MAX; }
-
- // callback used to return periodic PCM or FFT captures to the application. Either one or both
- // types of data are returned (PCM and FFT) according to flags indicated when installing the
- // callback. When a type of data is not present, the corresponding size (waveformSize or
- // fftSize) is 0.
- typedef void (*capture_cbk_t)(void* user,
- uint32_t waveformSize,
- uint8_t *waveform,
- uint32_t fftSize,
- uint8_t *fft,
- uint32_t samplingrate);
-
- // install a callback to receive periodic captures. The capture rate is specified in milliHertz
- // and the capture format is according to flags (see callback_flags).
- status_t setCaptureCallBack(capture_cbk_t cbk, void* user, uint32_t flags, uint32_t rate);
-
- // set the capture size capture size must be a power of two in the range
- // [VISUALIZER_CAPTURE_SIZE_MAX. VISUALIZER_CAPTURE_SIZE_MIN]
- // must be called when the visualizer is not enabled
- status_t setCaptureSize(uint32_t size);
- uint32_t getCaptureSize() { return mCaptureSize; }
-
- // returns the capture rate indicated when installing the callback
- uint32_t getCaptureRate() { return mCaptureRate; }
-
- // returns the sampling rate of the audio being captured
- uint32_t getSamplingRate() { return mSampleRate; }
-
- // set the way volume affects the captured data
- // mode must one of VISUALIZER_SCALING_MODE_NORMALIZED,
- // VISUALIZER_SCALING_MODE_AS_PLAYED
- status_t setScalingMode(uint32_t mode);
- uint32_t getScalingMode() { return mScalingMode; }
-
- // set which measurements are done on the audio buffers processed by the effect.
- // valid measurements (mask): MEASUREMENT_MODE_PEAK_RMS
- status_t setMeasurementMode(uint32_t mode);
- uint32_t getMeasurementMode() { return mMeasurementMode; }
-
- // return a set of int32_t measurements
- status_t getIntMeasurements(uint32_t type, uint32_t number, int32_t *measurements);
-
- // return a capture in PCM 8 bit unsigned format. The size of the capture is equal to
- // getCaptureSize()
- status_t getWaveForm(uint8_t *waveform);
-
- // return a capture in FFT 8 bit signed format. The size of the capture is equal to
- // getCaptureSize() but the length of the FFT is half of the size (both parts of the spectrum
- // are returned
- status_t getFft(uint8_t *fft);
-
-protected:
- // from IEffectClient
- virtual void controlStatusChanged(bool controlGranted);
-
-private:
-
- static const uint32_t CAPTURE_RATE_MAX = 20000;
- static const uint32_t CAPTURE_RATE_DEF = 10000;
- static const uint32_t CAPTURE_SIZE_DEF = VISUALIZER_CAPTURE_SIZE_MAX;
-
- /* internal class to handle the callback */
- class CaptureThread : public Thread
- {
- public:
- CaptureThread(Visualizer& receiver, uint32_t captureRate, bool bCanCallJava = false);
-
- private:
- friend class Visualizer;
- virtual bool threadLoop();
- Visualizer& mReceiver;
- Mutex mLock;
- uint32_t mSleepTimeUs;
- };
-
- status_t doFft(uint8_t *fft, uint8_t *waveform);
- void periodicCapture();
- uint32_t initCaptureSize();
-
- Mutex mCaptureLock;
- uint32_t mCaptureRate;
- uint32_t mCaptureSize;
- uint32_t mSampleRate;
- uint32_t mScalingMode;
- uint32_t mMeasurementMode;
- capture_cbk_t mCaptureCallBack;
- void *mCaptureCbkUser;
- sp<CaptureThread> mCaptureThread;
- uint32_t mCaptureFlags;
-};
-
-
-}; // namespace android
-
-#endif // ANDROID_MEDIA_VISUALIZER_H
diff --git a/include/media/Visualizer.h b/include/media/Visualizer.h
new file mode 120000
index 0000000..ed2ec15
--- /dev/null
+++ b/include/media/Visualizer.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/Visualizer.h
\ No newline at end of file
diff --git a/include/media/VolumeShaper.h b/include/media/VolumeShaper.h
new file mode 100644
index 0000000..302641f
--- /dev/null
+++ b/include/media/VolumeShaper.h
@@ -0,0 +1,1027 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_VOLUME_SHAPER_H
+#define ANDROID_VOLUME_SHAPER_H
+
+#include <cmath>
+#include <list>
+#include <math.h>
+#include <sstream>
+
+#include <binder/Parcel.h>
+#include <media/Interpolator.h>
+#include <utils/Mutex.h>
+#include <utils/RefBase.h>
+
+#pragma push_macro("LOG_TAG")
+#undef LOG_TAG
+#define LOG_TAG "VolumeShaper"
+
+// turn on VolumeShaper logging
+#define VS_LOGGING 0
+#define VS_LOG(...) ALOGD_IF(VS_LOGGING, __VA_ARGS__)
+
+namespace android {
+
+// The native VolumeShaper class mirrors the java VolumeShaper class;
+// in addition, the native class contains implementation for actual operation.
+//
+// VolumeShaper methods are not safe for multiple thread access.
+// Use VolumeHandler for thread-safe encapsulation of multiple VolumeShapers.
+//
+// Classes below written are to avoid naked pointers so there are no
+// explicit destructors required.
+
+class VolumeShaper {
+public:
+ // S and T are like template typenames (matching the Interpolator<S, T>)
+ using S = float; // time type
+ using T = float; // volume type
+
+// Curve and dimension information
+// TODO: member static const or constexpr float initialization not permitted in C++11
+#define MIN_CURVE_TIME 0.f // type S: start of VolumeShaper curve (normalized)
+#define MAX_CURVE_TIME 1.f // type S: end of VolumeShaper curve (normalized)
+#define MIN_LINEAR_VOLUME 0.f // type T: silence / mute audio
+#define MAX_LINEAR_VOLUME 1.f // type T: max volume, unity gain
+#define MAX_LOG_VOLUME 0.f // type T: max volume, unity gain in dBFS
+
+ /* kSystemVolumeShapersMax is the maximum number of system VolumeShapers.
+ * Each system VolumeShapers has a predefined Id, which ranges from 0
+ * to kSystemVolumeShapersMax - 1 and is unique for its usage.
+ *
+ * "1" is reserved for system ducking.
+ */
+ static const int kSystemVolumeShapersMax = 16;
+
+ /* kUserVolumeShapersMax is the maximum number of application
+ * VolumeShapers for a player/track. Application VolumeShapers are
+ * assigned on creation by the client, and have Ids ranging
+ * from kSystemVolumeShapersMax to INT32_MAX.
+ *
+ * The number of user/application volume shapers is independent to the
+ * system volume shapers. If an application tries to create more than
+ * kUserVolumeShapersMax to a player, then the apply() will fail.
+ * This prevents exhausting server side resources by a potentially malicious
+ * application.
+ */
+ static const int kUserVolumeShapersMax = 16;
+
+ /* VolumeShaper::Status is equivalent to status_t if negative
+ * but if non-negative represents the id operated on.
+ * It must be expressible as an int32_t for binder purposes.
+ */
+ using Status = status_t;
+
+ // Local definition for clamp as std::clamp is included in C++17 only.
+ // TODO: use the std::clamp version when Android build uses C++17.
+    template<typename R>
+    static constexpr const R &clamp(const R &v, const R &lo, const R &hi) {
+        // Precondition (unchecked): lo <= hi. If v is NaN both comparisons
+        // are false and v is returned unchanged.
+        return (v < lo) ? lo : (hi < v) ? hi : v;
+    }
+
+ /* VolumeShaper.Configuration derives from the Interpolator class and adds
+ * parameters relating to the volume shape.
+ *
+ * This parallels the Java implementation and the enums must match.
+ * See "frameworks/base/media/java/android/media/VolumeShaper.java" for
+ * details on the Java implementation.
+ */
+    class Configuration : public Interpolator<S, T>, public RefBase {
+    public:
+        // Must match with VolumeShaper.java in frameworks/base.
+        enum Type : int32_t {
+            TYPE_ID,
+            TYPE_SCALE,
+        };
+
+        // Must match with VolumeShaper.java in frameworks/base.
+        enum OptionFlag : int32_t {
+            OPTION_FLAG_NONE = 0,
+            OPTION_FLAG_VOLUME_IN_DBFS = (1 << 0),
+            OPTION_FLAG_CLOCK_TIME = (1 << 1),
+
+            OPTION_FLAG_ALL = (OPTION_FLAG_VOLUME_IN_DBFS | OPTION_FLAG_CLOCK_TIME),
+        };
+
+        // Bring from base class; must match with VolumeShaper.java in frameworks/base.
+        using InterpolatorType = Interpolator<S, T>::InterpolatorType;
+
+        // Default: a TYPE_SCALE curve with invalid id, linear volume, 1 second duration.
+        Configuration()
+            : Interpolator<S, T>()
+            , RefBase()
+            , mType(TYPE_SCALE)
+            , mId(-1)
+            , mOptionFlags(OPTION_FLAG_NONE)
+            , mDurationMs(1000.) {
+        }
+
+        // Deep copy: curve points come from the Interpolator slice, scalars are copied.
+        explicit Configuration(const Configuration &configuration)
+            : Interpolator<S, T>(*static_cast<const Interpolator<S, T> *>(&configuration))
+            , RefBase()
+            , mType(configuration.mType)
+            , mId(configuration.mId)
+            , mOptionFlags(configuration.mOptionFlags)
+            , mDurationMs(configuration.mDurationMs) {
+        }
+
+        Type getType() const {
+            return mType;
+        }
+
+        // Validates and sets the type; BAD_VALUE on anything outside the enum.
+        status_t setType(Type type) {
+            switch (type) {
+            case TYPE_ID:
+            case TYPE_SCALE:
+                mType = type;
+                return NO_ERROR;
+            default:
+                ALOGE("invalid Type: %d", type);
+                return BAD_VALUE;
+            }
+        }
+
+        OptionFlag getOptionFlags() const {
+            return mOptionFlags;
+        }
+
+        // Rejects any bit outside OPTION_FLAG_ALL.
+        status_t setOptionFlags(OptionFlag optionFlags) {
+            if ((optionFlags & ~OPTION_FLAG_ALL) != 0) {
+                ALOGE("optionFlags has invalid bits: %#x", optionFlags);
+                return BAD_VALUE;
+            }
+            mOptionFlags = optionFlags;
+            return NO_ERROR;
+        }
+
+        double getDurationMs() const {
+            return mDurationMs;
+        }
+
+        // Duration must be strictly positive; a NaN fails the > 0. test.
+        status_t setDurationMs(double durationMs) {
+            if (durationMs > 0.) {
+                mDurationMs = durationMs;
+                return NO_ERROR;
+            }
+            // zero, negative, or nan. These values not possible from Java.
+            return BAD_VALUE;
+        }
+
+        int32_t getId() const {
+            return mId;
+        }
+
+        void setId(int32_t id) {
+            // We permit a negative id here (representing invalid).
+            mId = id;
+        }
+
+        /* Adjust the volume to be in linear range from MIN_LINEAR_VOLUME to MAX_LINEAR_VOLUME
+         * and compensate for log dbFS volume as needed.
+         */
+        T adjustVolume(T volume) const {
+            if ((getOptionFlags() & OPTION_FLAG_VOLUME_IN_DBFS) != 0) {
+                // NOTE(review): converts with 10^(dB/10) (power dB), not the
+                // 10^(dB/20) amplitude convention — confirm this matches the
+                // Java-side VolumeShaper behavior.
+                const T out = powf(10.f, volume / 10.f);
+                VS_LOG("in: %f out: %f", volume, out);
+                volume = out;
+            }
+            return clamp(volume, MIN_LINEAR_VOLUME /* lo */, MAX_LINEAR_VOLUME /* hi */);
+        }
+
+        /* Check if the existing curve is valid.
+         */
+        status_t checkCurve() const {
+            // TYPE_ID configurations carry no curve, so nothing to validate.
+            if (mType == TYPE_ID) return NO_ERROR;
+            if (this->size() < 2) {
+                ALOGE("curve must have at least 2 points");
+                return BAD_VALUE;
+            }
+            if (first().first != MIN_CURVE_TIME || last().first != MAX_CURVE_TIME) {
+                ALOGE("curve must start at MIN_CURVE_TIME and end at MAX_CURVE_TIME");
+                return BAD_VALUE;
+            }
+            if ((getOptionFlags() & OPTION_FLAG_VOLUME_IN_DBFS) != 0) {
+                for (const auto &pt : *this) {
+                    // Negated <= form so a NaN volume also fails the check.
+                    if (!(pt.second <= MAX_LOG_VOLUME) /* handle nan */) {
+                        ALOGE("positive volume dbFS");
+                        return BAD_VALUE;
+                    }
+                }
+            } else {
+                for (const auto &pt : *this) {
+                    if (!(pt.second >= MIN_LINEAR_VOLUME)
+                            || !(pt.second <= MAX_LINEAR_VOLUME) /* handle nan */) {
+                        ALOGE("volume < MIN_LINEAR_VOLUME or > MAX_LINEAR_VOLUME");
+                        return BAD_VALUE;
+                    }
+                }
+            }
+            return NO_ERROR;
+        }
+
+        /* Clamps the volume curve in the configuration to
+         * the valid range for log or linear scale.
+         */
+        void clampVolume() {
+            if ((mOptionFlags & OPTION_FLAG_VOLUME_IN_DBFS) != 0) {
+                for (auto it = this->begin(); it != this->end(); ++it) {
+                    // Negated comparison also catches NaN and replaces it.
+                    if (!(it->second <= MAX_LOG_VOLUME) /* handle nan */) {
+                        it->second = MAX_LOG_VOLUME;
+                    }
+                }
+            } else {
+                for (auto it = this->begin(); it != this->end(); ++it) {
+                    if (!(it->second >= MIN_LINEAR_VOLUME) /* handle nan */) {
+                        it->second = MIN_LINEAR_VOLUME;
+                    } else if (!(it->second <= MAX_LINEAR_VOLUME)) {
+                        it->second = MAX_LINEAR_VOLUME;
+                    }
+                }
+            }
+        }
+
+        /* scaleToStartVolume() is used to set the start volume of a
+         * new VolumeShaper curve, when replacing one VolumeShaper
+         * with another using the "join" (volume match) option.
+         *
+         * It works best for monotonic volume ramps or ducks.
+         */
+        void scaleToStartVolume(T volume) {
+            if (this->size() < 2) {
+                return;
+            }
+            const T startVolume = first().second;
+            const T endVolume = last().second;
+            if (endVolume == startVolume) {
+                // match with linear ramp
+                const T offset = volume - startVolume;
+                static const T scale = 1.f / (MAX_CURVE_TIME - MIN_CURVE_TIME); // nominally 1.f
+                for (auto it = this->begin(); it != this->end(); ++it) {
+                    it->second = it->second + offset * (MAX_CURVE_TIME - it->first) * scale;
+                }
+            } else {
+                // Affine rescale so the first point lands on `volume`
+                // while the last point is left at endVolume.
+                const T scale = (volume - endVolume) / (startVolume - endVolume);
+                for (auto it = this->begin(); it != this->end(); ++it) {
+                    it->second = scale * (it->second - endVolume) + endVolume;
+                }
+            }
+            clampVolume();
+        }
+
+        // The parcel layout must match VolumeShaper.java
+        // The chained "?:" below is the GNU elvis extension: each step runs
+        // only if the previous returned 0 (NO_ERROR), yielding the first error.
+        status_t writeToParcel(Parcel *parcel) const {
+            if (parcel == nullptr) return BAD_VALUE;
+            return parcel->writeInt32((int32_t)mType)
+                    ?: parcel->writeInt32(mId)
+                    ?: mType == TYPE_ID
+                        ? NO_ERROR
+                        : parcel->writeInt32((int32_t)mOptionFlags)
+                            ?: parcel->writeDouble(mDurationMs)
+                            ?: Interpolator<S, T>::writeToParcel(parcel);
+        }
+
+        // Mirrors writeToParcel; TYPE_ID parcels stop after the id, and the
+        // curve is validated with checkCurve() as the final step.
+        status_t readFromParcel(const Parcel &parcel) {
+            int32_t type, optionFlags;
+            return parcel.readInt32(&type)
+                    ?: setType((Type)type)
+                    ?: parcel.readInt32(&mId)
+                    ?: mType == TYPE_ID
+                        ? NO_ERROR
+                        : parcel.readInt32(&optionFlags)
+                            ?: setOptionFlags((OptionFlag)optionFlags)
+                            ?: parcel.readDouble(&mDurationMs)
+                            ?: Interpolator<S, T>::readFromParcel(parcel)
+                            ?: checkCurve();
+        }
+
+        // Returns a string for debug printing.
+        std::string toString() const {
+            std::stringstream ss;
+            ss << "VolumeShaper::Configuration{mType=" << static_cast<int32_t>(mType);
+            ss << ", mId=" << mId;
+            if (mType != TYPE_ID) {
+                ss << ", mOptionFlags=" << static_cast<int32_t>(mOptionFlags);
+                ss << ", mDurationMs=" << mDurationMs;
+                ss << ", " << Interpolator<S, T>::toString().c_str();
+            }
+            ss << "}";
+            return ss.str();
+        }
+
+    private:
+        Type mType;              // type of configuration
+        int32_t mId;             // A valid id is >= 0.
+        OptionFlag mOptionFlags; // option flags for the configuration.
+        double mDurationMs;      // duration, must be > 0; default is 1000 ms.
+    }; // Configuration
+
+ /* VolumeShaper::Operation expresses an operation to perform on the
+ * configuration (either explicitly specified or an id).
+ *
+ * This parallels the Java implementation and the enums must match.
+ * See "frameworks/base/media/java/android/media/VolumeShaper.java" for
+ * details on the Java implementation.
+ */
+    class Operation : public RefBase {
+    public:
+        // Must match with VolumeShaper.java.
+        enum Flag : int32_t {
+            FLAG_NONE = 0,
+            FLAG_REVERSE = (1 << 0), // the absence of this indicates "play"
+            FLAG_TERMINATE = (1 << 1),
+            FLAG_JOIN = (1 << 2),
+            FLAG_DELAY = (1 << 3),
+            FLAG_CREATE_IF_NECESSARY = (1 << 4),
+
+            FLAG_ALL = (FLAG_REVERSE | FLAG_TERMINATE | FLAG_JOIN | FLAG_DELAY
+                    | FLAG_CREATE_IF_NECESSARY),
+        };
+
+        // Default: no flags, no replacement target.
+        Operation()
+            : Operation(FLAG_NONE, -1 /* replaceId */) {
+        }
+
+        // NaN xOffset means "no explicit position" (see getXOffset() consumers).
+        Operation(Flag flags, int replaceId)
+            : Operation(flags, replaceId, std::numeric_limits<S>::quiet_NaN() /* xOffset */) {
+        }
+
+        explicit Operation(const Operation &operation)
+            : Operation(operation.mFlags, operation.mReplaceId, operation.mXOffset) {
+        }
+
+        // NOTE: dereferences operation without a null check.
+        explicit Operation(const sp<Operation> &operation)
+            : Operation(*operation.get()) {
+        }
+
+        Operation(Flag flags, int replaceId, S xOffset)
+            : mFlags(flags)
+            , mReplaceId(replaceId)
+            , mXOffset(xOffset) {
+        }
+
+        int32_t getReplaceId() const {
+            return mReplaceId;
+        }
+
+        void setReplaceId(int32_t replaceId) {
+            mReplaceId = replaceId;
+        }
+
+        S getXOffset() const {
+            return mXOffset;
+        }
+
+        // Clamped to the curve domain [MIN_CURVE_TIME, MAX_CURVE_TIME].
+        void setXOffset(S xOffset) {
+            mXOffset = clamp(xOffset, MIN_CURVE_TIME /* lo */, MAX_CURVE_TIME /* hi */);
+        }
+
+        Flag getFlags() const {
+            return mFlags;
+        }
+
+        /* xOffset is the position on the volume curve and may go backwards
+         * if you are in reverse mode. This must be in the range from
+         * [MIN_CURVE_TIME, MAX_CURVE_TIME].
+         *
+         * normalizedTime always increases as time or framecount increases.
+         * normalizedTime is nominally from MIN_CURVE_TIME to MAX_CURVE_TIME when
+         * running through the curve, but could be outside this range afterwards.
+         * If you are reversing, this means the position on the curve, or xOffset,
+         * is computed as MAX_CURVE_TIME - normalizedTime, clamped to
+         * [MIN_CURVE_TIME, MAX_CURVE_TIME].
+         */
+        void setNormalizedTime(S normalizedTime) {
+            setXOffset((mFlags & FLAG_REVERSE) != 0
+                    ? MAX_CURVE_TIME - normalizedTime : normalizedTime);
+        }
+
+        // Rejects any bit outside FLAG_ALL.
+        status_t setFlags(Flag flags) {
+            if ((flags & ~FLAG_ALL) != 0) {
+                ALOGE("flags has invalid bits: %#x", flags);
+                return BAD_VALUE;
+            }
+            mFlags = flags;
+            return NO_ERROR;
+        }
+
+        // Parcel layout must match VolumeShaper.java; GNU "?:" chains the
+        // first non-zero (error) status.
+        status_t writeToParcel(Parcel *parcel) const {
+            if (parcel == nullptr) return BAD_VALUE;
+            return parcel->writeInt32((int32_t)mFlags)
+                    ?: parcel->writeInt32(mReplaceId)
+                    ?: parcel->writeFloat(mXOffset);
+        }
+
+        // NOTE(review): mReplaceId and mXOffset are stored before setFlags()
+        // validates flags; a bad flags value leaves the object partially
+        // updated (caller is expected to discard it on error).
+        status_t readFromParcel(const Parcel &parcel) {
+            int32_t flags;
+            return parcel.readInt32(&flags)
+                    ?: parcel.readInt32(&mReplaceId)
+                    ?: parcel.readFloat(&mXOffset)
+                    ?: setFlags((Flag)flags);
+        }
+
+        std::string toString() const {
+            std::stringstream ss;
+            ss << "VolumeShaper::Operation{mFlags=" << static_cast<int32_t>(mFlags) ;
+            ss << ", mReplaceId=" << mReplaceId;
+            ss << ", mXOffset=" << mXOffset;
+            ss << "}";
+            return ss.str();
+        }
+
+    private:
+        Flag mFlags;        // operation to do
+        int32_t mReplaceId; // if >= 0 the id to remove in a replace operation.
+        S mXOffset;         // position in the curve to set if a valid number (not nan)
+    }; // Operation
+
+ /* VolumeShaper.State is returned when requesting the last
+ * state of the VolumeShaper.
+ *
+ * This parallels the Java implementation.
+ * See "frameworks/base/media/java/android/media/VolumeShaper.java" for
+ * details on the Java implementation.
+ */
+    class State : public RefBase {
+    public:
+        State(T volume, S xOffset)
+            : mVolume(volume)
+            , mXOffset(xOffset) {
+        }
+
+        // Default-constructed state carries NAN volume/offset (nothing computed yet).
+        State()
+            : State(NAN, NAN) { }
+
+        T getVolume() const {
+            return mVolume;
+        }
+
+        void setVolume(T volume) {
+            mVolume = volume;
+        }
+
+        S getXOffset() const {
+            return mXOffset;
+        }
+
+        void setXOffset(S xOffset) {
+            mXOffset = xOffset;
+        }
+
+        // Parcel layout: volume then xOffset, both as float.
+        status_t writeToParcel(Parcel *parcel) const {
+            if (parcel == nullptr) return BAD_VALUE;
+            return parcel->writeFloat(mVolume)
+                    ?: parcel->writeFloat(mXOffset);
+        }
+
+        status_t readFromParcel(const Parcel &parcel) {
+            return parcel.readFloat(&mVolume)
+                    ?: parcel.readFloat(&mXOffset);
+        }
+
+        std::string toString() const {
+            std::stringstream ss;
+            ss << "VolumeShaper::State{mVolume=" << mVolume;
+            ss << ", mXOffset=" << mXOffset;
+            ss << "}";
+            return ss.str();
+        }
+
+    private:
+        T mVolume;  // linear volume in the range MIN_LINEAR_VOLUME to MAX_LINEAR_VOLUME
+        S mXOffset; // position on curve expressed from MIN_CURVE_TIME to MAX_CURVE_TIME
+    }; // State
+
+ // Internal helper class to do an affine transform for time and amplitude scaling.
+    template <typename R>
+    class Translate {
+    public:
+        // Identity transform by default (offset 0, scale 1).
+        Translate()
+            : mOffset(0)
+            , mScale(1) {
+        }
+
+        R getOffset() const {
+            return mOffset;
+        }
+
+        void setOffset(R offset) {
+            mOffset = offset;
+        }
+
+        R getScale() const {
+            return mScale;
+        }
+
+        void setScale(R scale) {
+            mScale = scale;
+        }
+
+        // Affine map: out = mScale * (in - mOffset).
+        R operator()(R in) const {
+            return mScale * (in - mOffset);
+        }
+
+        std::string toString() const {
+            std::stringstream ss;
+            ss << "VolumeShaper::Translate{mOffset=" << mOffset;
+            ss << ", mScale=" << mScale;
+            ss << "}";
+            return ss.str();
+        }
+
+    private:
+        R mOffset;
+        R mScale;
+    }; // Translate
+
+    static int64_t convertTimespecToUs(const struct timespec &tv)
+    {
+        // The 1000000ll literal forces 64-bit arithmetic so tv_sec does not
+        // overflow a 32-bit multiply; sub-microsecond nanoseconds are truncated.
+        return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
+    }
+
+    // current monotonic time in microseconds.
+    static int64_t getNowUs()
+    {
+        struct timespec tv;
+        if (clock_gettime(CLOCK_MONOTONIC, &tv) != 0) {
+            return 0; // system is really sick, just return 0 for consistency.
+        }
+        return convertTimespecToUs(tv);
+    }
+
+ /* Native implementation of VolumeShaper. This is NOT mirrored
+ * on the Java side, so we don't need to mimic Java side layout
+ * and data; furthermore, this isn't refcounted as a "RefBase" object.
+ *
+ * Since we pass configuration and operation as shared pointers (like
+ * Java) there is a potential risk that the caller may modify
+ * these after delivery.
+ */
+    VolumeShaper(
+            const sp<VolumeShaper::Configuration> &configuration,
+            const sp<VolumeShaper::Operation> &operation)
+        : mConfiguration(configuration) // we do not make a copy
+        , mOperation(operation)         // ditto
+        , mStartFrame(-1)
+        , mLastVolume(T(1))
+        , mLastXOffset(MIN_CURVE_TIME)
+        , mDelayXOffset(MIN_CURVE_TIME) {
+        // Unless delayed, prime the last volume from the curve's first point.
+        if (configuration.get() != nullptr
+                && (getFlags() & VolumeShaper::Operation::FLAG_DELAY) == 0) {
+            mLastVolume = configuration->first().second;
+        }
+    }
+
+    // We allow a null operation here, though VolumeHandler always provides one.
+    VolumeShaper::Operation::Flag getFlags() const {
+        return mOperation == nullptr
+                ? VolumeShaper::Operation::FLAG_NONE : mOperation->getFlags();
+    }
+
+    /* Returns the last volume and xoffset reported to the AudioFlinger.
+     * If the VolumeShaper has not been started, compute what the volume
+     * should be based on the initial offset specified.
+     */
+    sp<VolumeShaper::State> getState() const {
+        if (!isStarted()) {
+            const T volume = computeVolumeFromXOffset(mDelayXOffset);
+            VS_LOG("delayed VolumeShaper, using cached offset:%f for volume:%f",
+                    mDelayXOffset, volume);
+            return new VolumeShaper::State(volume, mDelayXOffset);
+        } else {
+            return new VolumeShaper::State(mLastVolume, mLastXOffset);
+        }
+    }
+
+    S getDelayXOffset() const {
+        return mDelayXOffset;
+    }
+
+    // Clamped to the curve domain [MIN_CURVE_TIME, MAX_CURVE_TIME].
+    void setDelayXOffset(S xOffset) {
+        mDelayXOffset = clamp(xOffset, MIN_CURVE_TIME /* lo */, MAX_CURVE_TIME /* hi */);
+    }
+
+    // mStartFrame is set on the first getVolume() call (see updatePosition()).
+    bool isStarted() const {
+        return mStartFrame >= 0;
+    }
+
+    /* getVolume() updates the last volume/xoffset state so it is not
+     * const, even though logically it may be viewed as const.
+     */
+    std::pair<T /* volume */, bool /* active */> getVolume(
+            int64_t trackFrameCount, double trackSampleRate) {
+        if ((getFlags() & VolumeShaper::Operation::FLAG_DELAY) != 0) {
+            // We haven't had PLAY called yet, so just return the value
+            // as if PLAY were called just now.
+            VS_LOG("delayed VolumeShaper, using cached offset %f", mDelayXOffset);
+            const T volume = computeVolumeFromXOffset(mDelayXOffset);
+            return std::make_pair(volume, false);
+        }
+        // Clock-time shapers measure in microseconds (sampleRate 1e6);
+        // otherwise position is measured in track frames.
+        const bool clockTime = (mConfiguration->getOptionFlags()
+                & VolumeShaper::Configuration::OPTION_FLAG_CLOCK_TIME) != 0;
+        const int64_t frameCount = clockTime ? getNowUs() : trackFrameCount;
+        const double sampleRate = clockTime ? 1000000 : trackSampleRate;
+
+        if (mStartFrame < 0) {
+            updatePosition(frameCount, sampleRate, mDelayXOffset);
+            mStartFrame = frameCount;
+        }
+        VS_LOG("frameCount: %lld", (long long)frameCount);
+        // NOTE(review): frameCount is narrowed to float (T) here; precision is
+        // lost for very large frame counts.
+        const S x = mXTranslate((T)frameCount);
+        VS_LOG("translation to normalized time: %f", x);
+
+        std::tuple<T /* volume */, S /* position */, bool /* active */> vt =
+                computeStateFromNormalizedTime(x);
+
+        mLastVolume = std::get<0>(vt);
+        mLastXOffset = std::get<1>(vt);
+        const bool active = std::get<2>(vt);
+        VS_LOG("rescaled time:%f volume:%f xOffset:%f active:%s",
+                x, mLastVolume, mLastXOffset, active ? "true" : "false");
+        return std::make_pair(mLastVolume, active);
+    }
+
+    std::string toString() const {
+        std::stringstream ss;
+        ss << "VolumeShaper{mStartFrame=" << mStartFrame;
+        ss << ", mXTranslate=" << mXTranslate.toString().c_str();
+        ss << ", mConfiguration=" <<
+                (mConfiguration.get() == nullptr
+                        ? "nullptr" : mConfiguration->toString().c_str());
+        ss << ", mOperation=" <<
+                (mOperation.get() == nullptr
+                        ? "nullptr" : mOperation->toString().c_str());
+        ss << "}";
+        return ss.str();
+    }
+
+    Translate<S> mXTranslate; // translation from frames (usec for clock time) to normalized time.
+    sp<VolumeShaper::Configuration> mConfiguration;
+    sp<VolumeShaper::Operation> mOperation;
+
+private:
+    int64_t mStartFrame; // starting frame, non-negative when started (in usec for clock time)
+    T mLastVolume;       // last computed interpolated volume (y-axis)
+    S mLastXOffset;      // last computed interpolated xOffset/time (x-axis)
+    S mDelayXOffset;     // xOffset to use for first invocation of VolumeShaper.
+
+    // Called internally to adjust mXTranslate for first time start.
+    void updatePosition(int64_t startFrame, double sampleRate, S xOffset) {
+        double scale = (mConfiguration->last().first - mConfiguration->first().first)
+                / (mConfiguration->getDurationMs() * 0.001 * sampleRate);
+        // Bound scale away from zero: normalizedTime / scale below would
+        // otherwise produce an unbounded offset (integer-overflow abort guard).
+        const double minScale = 1. / static_cast<double>(INT64_MAX);
+        scale = std::max(scale, minScale);
+        VS_LOG("update position: scale %lf frameCount:%lld, sampleRate:%lf, xOffset:%f",
+                scale, (long long) startFrame, sampleRate, xOffset);
+
+        S normalizedTime = (getFlags() & VolumeShaper::Operation::FLAG_REVERSE) != 0 ?
+                MAX_CURVE_TIME - xOffset : xOffset;
+        mXTranslate.setOffset(static_cast<float>(static_cast<double>(startFrame)
+                - static_cast<double>(normalizedTime) / scale));
+        mXTranslate.setScale(static_cast<float>(scale));
+        VS_LOG("translate: %s", mXTranslate.toString().c_str());
+    }
+
+    // Curve lookup at xOffset followed by dBFS-to-linear adjustment.
+    T computeVolumeFromXOffset(S xOffset) const {
+        const T unscaledVolume = mConfiguration->findY(xOffset);
+        const T volume = mConfiguration->adjustVolume(unscaledVolume); // handle log scale
+        VS_LOG("computeVolumeFromXOffset %f -> %f -> %f", xOffset, unscaledVolume, volume);
+        return volume;
+    }
+
+    // Maps normalized time to (volume, clamped curve position, active flag);
+    // "active" turns false once the end of the curve is passed.
+    std::tuple<T /* volume */, S /* position */, bool /* active */>
+    computeStateFromNormalizedTime(S x) const {
+        bool active = true;
+        // handle reversal of position
+        if (getFlags() & VolumeShaper::Operation::FLAG_REVERSE) {
+            x = MAX_CURVE_TIME - x;
+            VS_LOG("reversing to %f", x);
+            if (x < MIN_CURVE_TIME) {
+                x = MIN_CURVE_TIME;
+                active = false; // at the end
+            } else if (x > MAX_CURVE_TIME) {
+                x = MAX_CURVE_TIME; //early
+            }
+        } else {
+            if (x < MIN_CURVE_TIME) {
+                x = MIN_CURVE_TIME; // early
+            } else if (x > MAX_CURVE_TIME) {
+                x = MAX_CURVE_TIME;
+                active = false; // at end
+            }
+        }
+        const S xOffset = x;
+        const T volume = computeVolumeFromXOffset(xOffset);
+        return std::make_tuple(volume, xOffset, active);
+    }
+}; // VolumeShaper
+
+/* VolumeHandler combines the volume factors of multiple VolumeShapers associated
+ * with a player. It is thread safe by synchronizing all public methods.
+ *
+ * This is a native-only implementation.
+ *
+ * The server side VolumeHandler is used to maintain a list of volume handlers,
+ * keep state, and obtain volume.
+ *
+ * The client side VolumeHandler is used to maintain a list of volume handlers,
+ * keep some partial state, and restore if the server dies.
+ */
+class VolumeHandler : public RefBase {
+public:
+    using S = float;
+    using T = float;
+
+    // A volume handler which just keeps track of active VolumeShapers does not need sampleRate.
+    VolumeHandler()
+        : VolumeHandler(0 /* sampleRate */) {
+    }
+
+    explicit VolumeHandler(uint32_t sampleRate)
+        : mSampleRate((double)sampleRate)
+        , mLastFrame(0)
+        , mVolumeShaperIdCounter(VolumeShaper::kSystemVolumeShapersMax)
+        , mLastVolume(1.f, false) {
+    }
+
+    // Applies a configuration + operation; returns the shaper id on success
+    // or a negative status_t on failure.
+    VolumeShaper::Status applyVolumeShaper(
+            const sp<VolumeShaper::Configuration> &configuration,
+            const sp<VolumeShaper::Operation> &operation_in) {
+        // make a local copy of operation, as we modify it.
+        // NOTE(review): operation_in is dereferenced here (and configuration in
+        // the VS_LOG below) BEFORE the null checks further down; a null argument
+        // would crash before BAD_VALUE is returned — confirm callers never pass null.
+        sp<VolumeShaper::Operation> operation(new VolumeShaper::Operation(operation_in));
+        VS_LOG("applyVolumeShaper:configuration: %s", configuration->toString().c_str());
+        VS_LOG("applyVolumeShaper:operation: %s", operation->toString().c_str());
+        AutoMutex _l(mLock);
+        if (configuration == nullptr) {
+            ALOGE("null configuration");
+            return VolumeShaper::Status(BAD_VALUE);
+        }
+        if (operation == nullptr) {
+            ALOGE("null operation");
+            return VolumeShaper::Status(BAD_VALUE);
+        }
+        const int32_t id = configuration->getId();
+        if (id < 0) {
+            ALOGE("negative id: %d", id);
+            return VolumeShaper::Status(BAD_VALUE);
+        }
+        VS_LOG("applyVolumeShaper id: %d", id);
+
+        switch (configuration->getType()) {
+        case VolumeShaper::Configuration::TYPE_SCALE: {
+            const int replaceId = operation->getReplaceId();
+            if (replaceId >= 0) {
+                VS_LOG("replacing %d", replaceId);
+                auto replaceIt = findId_l(replaceId);
+                if (replaceIt == mVolumeShapers.end()) {
+                    ALOGW("cannot find replace id: %d", replaceId);
+                } else {
+                    if ((operation->getFlags() & VolumeShaper::Operation::FLAG_JOIN) != 0) {
+                        // For join, we scale the start volume of the current configuration
+                        // to match the last-used volume of the replacing VolumeShaper.
+                        auto state = replaceIt->getState();
+                        ALOGD("join: state:%s", state->toString().c_str());
+                        if (state->getXOffset() >= 0) { // valid
+                            const T volume = state->getVolume();
+                            ALOGD("join: scaling start volume to %f", volume);
+                            configuration->scaleToStartVolume(volume);
+                        }
+                    }
+                    (void)mVolumeShapers.erase(replaceIt);
+                }
+                operation->setReplaceId(-1);
+            }
+            // check if we have another of the same id.
+            auto oldIt = findId_l(id);
+            if (oldIt != mVolumeShapers.end()) {
+                if ((operation->getFlags()
+                        & VolumeShaper::Operation::FLAG_CREATE_IF_NECESSARY) != 0) {
+                    // TODO: move the case to a separate function.
+                    goto HANDLE_TYPE_ID; // no need to create, take over existing id.
+                }
+                ALOGW("duplicate id, removing old %d", id);
+                (void)mVolumeShapers.erase(oldIt);
+            }
+
+            /* Check if too many application VolumeShapers (with id >= kSystemVolumeShapersMax).
+             * We check on the server side to ensure synchronization and robustness.
+             *
+             * This shouldn't fail on a replace command unless the replaced id is
+             * already invalid (which *should* be checked in the Java layer).
+             */
+            if (id >= VolumeShaper::kSystemVolumeShapersMax
+                    && numberOfUserVolumeShapers_l() >= VolumeShaper::kUserVolumeShapersMax) {
+                ALOGW("Too many app VolumeShapers, cannot add to VolumeHandler");
+                return VolumeShaper::Status(INVALID_OPERATION);
+            }
+
+            // create new VolumeShaper with default behavior.
+            mVolumeShapers.emplace_back(configuration, new VolumeShaper::Operation());
+            VS_LOG("after adding, number of volumeShapers:%zu", mVolumeShapers.size());
+        }
+        // fall through to handle the operation
+        HANDLE_TYPE_ID:
+        case VolumeShaper::Configuration::TYPE_ID: {
+            VS_LOG("trying to find id: %d", id);
+            auto it = findId_l(id);
+            if (it == mVolumeShapers.end()) {
+                VS_LOG("couldn't find id: %d", id);
+                return VolumeShaper::Status(INVALID_OPERATION);
+            }
+            if ((operation->getFlags() & VolumeShaper::Operation::FLAG_TERMINATE) != 0) {
+                VS_LOG("terminate id: %d", id);
+                mVolumeShapers.erase(it);
+                break;
+            }
+            const bool clockTime = (it->mConfiguration->getOptionFlags()
+                    & VolumeShaper::Configuration::OPTION_FLAG_CLOCK_TIME) != 0;
+            // A change of the REVERSE flag reflects the current curve position
+            // so playback continues from the same volume in the new direction.
+            if ((it->getFlags() & VolumeShaper::Operation::FLAG_REVERSE) !=
+                    (operation->getFlags() & VolumeShaper::Operation::FLAG_REVERSE)) {
+                if (it->isStarted()) {
+                    const int64_t frameCount = clockTime ? VolumeShaper::getNowUs() : mLastFrame;
+                    const S x = it->mXTranslate((T)frameCount);
+                    VS_LOG("reverse normalizedTime: %f", x);
+                    // reflect position
+                    S target = MAX_CURVE_TIME - x;
+                    if (target < MIN_CURVE_TIME) {
+                        VS_LOG("clamp to start - begin immediately");
+                        target = MIN_CURVE_TIME;
+                    }
+                    VS_LOG("reverse normalizedTime target: %f", target);
+                    it->mXTranslate.setOffset(it->mXTranslate.getOffset()
+                            + (x - target) / it->mXTranslate.getScale());
+                }
+                // if not started, the delay offset doesn't change.
+            }
+            const S xOffset = operation->getXOffset();
+            if (!std::isnan(xOffset)) {
+                if (it->isStarted()) {
+                    const int64_t frameCount = clockTime ? VolumeShaper::getNowUs() : mLastFrame;
+                    const S x = it->mXTranslate((T)frameCount);
+                    VS_LOG("normalizedTime translation: %f", x);
+                    const S target =
+                            (operation->getFlags() & VolumeShaper::Operation::FLAG_REVERSE) != 0 ?
+                                    MAX_CURVE_TIME - xOffset : xOffset;
+                    VS_LOG("normalizedTime target x offset: %f", target);
+                    it->mXTranslate.setOffset(it->mXTranslate.getOffset()
+                            + (x - target) / it->mXTranslate.getScale());
+                } else {
+                    it->setDelayXOffset(xOffset);
+                }
+            }
+            it->mOperation = operation; // replace the operation
+        } break;
+        }
+        return VolumeShaper::Status(id);
+    }
+
+    // Returns the state of the shaper with the given id, or nullptr if absent.
+    sp<VolumeShaper::State> getVolumeShaperState(int id) {
+        AutoMutex _l(mLock);
+        auto it = findId_l(id);
+        if (it == mVolumeShapers.end()) {
+            VS_LOG("cannot find state for id: %d", id);
+            return nullptr;
+        }
+        return it->getState();
+    }
+
+    /* getVolume() is not const, as it updates internal state.
+     * Once called, any VolumeShapers not already started begin running.
+     */
+    std::pair<T /* volume */, bool /* active */> getVolume(int64_t trackFrameCount) {
+        AutoMutex _l(mLock);
+        mLastFrame = trackFrameCount;
+        // Product of all shaper volumes; active if any shaper is still active.
+        T volume(1);
+        size_t activeCount = 0;
+        for (auto it = mVolumeShapers.begin(); it != mVolumeShapers.end();) {
+            const std::pair<T, bool> shaperVolume =
+                    it->getVolume(trackFrameCount, mSampleRate);
+            volume *= shaperVolume.first;
+            activeCount += shaperVolume.second;
+            ++it;
+        }
+        mLastVolume = std::make_pair(volume, activeCount != 0);
+        VS_LOG("getVolume: <%f, %s>", mLastVolume.first, mLastVolume.second ? "true" : "false");
+        return mLastVolume;
+    }
+
+    /* Used by a client side VolumeHandler to ensure all the VolumeShapers
+     * indicate that they have been started. Upon a change in audioserver
+     * output sink, this information is used for restoration of the server side
+     * VolumeHandler.
+     */
+    void setStarted() {
+        // NOTE(review): mLastFrame is read here without holding mLock;
+        // getVolume() itself re-acquires the lock.
+        (void)getVolume(mLastFrame); // getVolume() will start the individual VolumeShapers.
+    }
+
+    std::pair<T /* volume */, bool /* active */> getLastVolume() const {
+        AutoMutex _l(mLock);
+        return mLastVolume;
+    }
+
+    std::string toString() const {
+        AutoMutex _l(mLock);
+        std::stringstream ss;
+        ss << "VolumeHandler{mSampleRate=" << mSampleRate;
+        ss << ", mLastFrame=" << mLastFrame;
+        ss << ", mVolumeShapers={";
+        bool first = true;
+        for (const auto &shaper : mVolumeShapers) {
+            if (first) {
+                first = false;
+            } else {
+                ss << ", ";
+            }
+            ss << shaper.toString().c_str();
+        }
+        ss << "}}";
+        return ss.str();
+    }
+
+    // Invokes lambda on every shaper under the lock (status is only logged).
+    void forall(const std::function<VolumeShaper::Status (const VolumeShaper &)> &lambda) {
+        AutoMutex _l(mLock);
+        VS_LOG("forall: mVolumeShapers.size() %zu", mVolumeShapers.size());
+        for (const auto &shaper : mVolumeShapers) {
+            VolumeShaper::Status status = lambda(shaper);
+            VS_LOG("forall applying lambda on shaper (%p): %d", &shaper, (int)status);
+        }
+    }
+
+    // Drops all shapers and resets the frame counter.
+    void reset() {
+        AutoMutex _l(mLock);
+        mVolumeShapers.clear();
+        mLastFrame = 0;
+        // keep mVolumeShaperIdCounter as is.
+    }
+
+    /* Sets the configuration id if necessary - This is based on the counter
+     * internal to the VolumeHandler.
+     */
+    void setIdIfNecessary(const sp<VolumeShaper::Configuration> &configuration) {
+        if (configuration->getType() == VolumeShaper::Configuration::TYPE_SCALE) {
+            const int id = configuration->getId();
+            if (id == -1) {
+                // Reassign to a unique id, skipping system ids.
+                AutoMutex _l(mLock);
+                while (true) {
+                    if (mVolumeShaperIdCounter == INT32_MAX) {
+                        // wrap back to the first user id rather than overflow.
+                        mVolumeShaperIdCounter = VolumeShaper::kSystemVolumeShapersMax;
+                    } else {
+                        ++mVolumeShaperIdCounter;
+                    }
+                    if (findId_l(mVolumeShaperIdCounter) != mVolumeShapers.end()) {
+                        continue; // collision with an existing id.
+                    }
+                    configuration->setId(mVolumeShaperIdCounter);
+                    ALOGD("setting id to %d", mVolumeShaperIdCounter);
+                    break;
+                }
+            }
+        }
+    }
+
+private:
+    // Linear scan for the shaper with the given id; returns end() if absent.
+    // Caller must hold mLock (the _l suffix convention).
+    std::list<VolumeShaper>::iterator findId_l(int32_t id) {
+        std::list<VolumeShaper>::iterator it = mVolumeShapers.begin();
+        for (; it != mVolumeShapers.end(); ++it) {
+            if (it->mConfiguration->getId() == id) {
+                break;
+            }
+        }
+        return it;
+    }
+
+    // Counts shapers whose id is in the application range. Caller holds mLock.
+    size_t numberOfUserVolumeShapers_l() const {
+        size_t count = 0;
+        for (const auto &shaper : mVolumeShapers) {
+            count += (shaper.mConfiguration->getId() >= VolumeShaper::kSystemVolumeShapersMax);
+        }
+        return count;
+    }
+
+    mutable Mutex mLock;
+    double mSampleRate;             // in samples (frames) per second
+    int64_t mLastFrame;             // logging purpose only, 0 on start
+    int32_t mVolumeShaperIdCounter; // a counter to return a unique volume shaper id.
+    std::pair<T /* volume */, bool /* active */> mLastVolume;
+    std::list<VolumeShaper> mVolumeShapers; // list provides stable iterators on erase
+}; // VolumeHandler
+
+} // namespace android
+
+#pragma pop_macro("LOG_TAG")
+
+#endif // ANDROID_VOLUME_SHAPER_H
diff --git a/include/media/audiohal b/include/media/audiohal
new file mode 120000
index 0000000..37e2c39
--- /dev/null
+++ b/include/media/audiohal
@@ -0,0 +1 @@
+../../media/libaudiohal/include
\ No newline at end of file
diff --git a/include/media/convert.h b/include/media/convert.h
new file mode 120000
index 0000000..cb0d00d
--- /dev/null
+++ b/include/media/convert.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/convert.h
\ No newline at end of file
diff --git a/include/media/mediametadataretriever.h b/include/media/mediametadataretriever.h
new file mode 120000
index 0000000..b401bab
--- /dev/null
+++ b/include/media/mediametadataretriever.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/mediametadataretriever.h
\ No newline at end of file
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
deleted file mode 100644
index 389ec01..0000000
--- a/include/media/mediaplayer.h
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- * Copyright (C) 2007 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_MEDIAPLAYER_H
-#define ANDROID_MEDIAPLAYER_H
-
-#include <arpa/inet.h>
-
-#include <binder/IMemory.h>
-
-#include <media/AudioResamplerPublic.h>
-#include <media/IMediaPlayerClient.h>
-#include <media/IMediaPlayer.h>
-#include <media/IMediaDeathNotifier.h>
-#include <media/IStreamSource.h>
-
-#include <utils/KeyedVector.h>
-#include <utils/String8.h>
-
-struct ANativeWindow;
-
-namespace android {
-
-struct AVSyncSettings;
-class IGraphicBufferProducer;
-class Surface;
-
-enum media_event_type {
- MEDIA_NOP = 0, // interface test message
- MEDIA_PREPARED = 1,
- MEDIA_PLAYBACK_COMPLETE = 2,
- MEDIA_BUFFERING_UPDATE = 3,
- MEDIA_SEEK_COMPLETE = 4,
- MEDIA_SET_VIDEO_SIZE = 5,
- MEDIA_STARTED = 6,
- MEDIA_PAUSED = 7,
- MEDIA_STOPPED = 8,
- MEDIA_SKIPPED = 9,
- MEDIA_TIMED_TEXT = 99,
- MEDIA_ERROR = 100,
- MEDIA_INFO = 200,
- MEDIA_SUBTITLE_DATA = 201,
- MEDIA_META_DATA = 202,
-};
-
-// Generic error codes for the media player framework. Errors are fatal, the
-// playback must abort.
-//
-// Errors are communicated back to the client using the
-// MediaPlayerListener::notify method defined below.
-// In this situation, 'notify' is invoked with the following:
-// 'msg' is set to MEDIA_ERROR.
-// 'ext1' should be a value from the enum media_error_type.
-// 'ext2' contains an implementation dependant error code to provide
-// more details. Should default to 0 when not used.
-//
-// The codes are distributed as follow:
-// 0xx: Reserved
-// 1xx: Android Player errors. Something went wrong inside the MediaPlayer.
-// 2xx: Media errors (e.g Codec not supported). There is a problem with the
-// media itself.
-// 3xx: Runtime errors. Some extraordinary condition arose making the playback
-// impossible.
-//
-enum media_error_type {
- // 0xx
- MEDIA_ERROR_UNKNOWN = 1,
- // 1xx
- MEDIA_ERROR_SERVER_DIED = 100,
- // 2xx
- MEDIA_ERROR_NOT_VALID_FOR_PROGRESSIVE_PLAYBACK = 200,
- // 3xx
-};
-
-
-// Info and warning codes for the media player framework. These are non fatal,
-// the playback is going on but there might be some user visible issues.
-//
-// Info and warning messages are communicated back to the client using the
-// MediaPlayerListener::notify method defined below. In this situation,
-// 'notify' is invoked with the following:
-// 'msg' is set to MEDIA_INFO.
-// 'ext1' should be a value from the enum media_info_type.
-// 'ext2' contains an implementation dependant info code to provide
-// more details. Should default to 0 when not used.
-//
-// The codes are distributed as follow:
-// 0xx: Reserved
-// 7xx: Android Player info/warning (e.g player lagging behind.)
-// 8xx: Media info/warning (e.g media badly interleaved.)
-//
-enum media_info_type {
- // 0xx
- MEDIA_INFO_UNKNOWN = 1,
- // The player was started because it was used as the next player for another
- // player, which just completed playback
- MEDIA_INFO_STARTED_AS_NEXT = 2,
- // The player just pushed the very first video frame for rendering
- MEDIA_INFO_RENDERING_START = 3,
- // 7xx
- // The video is too complex for the decoder: it can't decode frames fast
- // enough. Possibly only the audio plays fine at this stage.
- MEDIA_INFO_VIDEO_TRACK_LAGGING = 700,
- // MediaPlayer is temporarily pausing playback internally in order to
- // buffer more data.
- MEDIA_INFO_BUFFERING_START = 701,
- // MediaPlayer is resuming playback after filling buffers.
- MEDIA_INFO_BUFFERING_END = 702,
- // Bandwidth in recent past
- MEDIA_INFO_NETWORK_BANDWIDTH = 703,
-
- // 8xx
- // Bad interleaving means that a media has been improperly interleaved or not
- // interleaved at all, e.g has all the video samples first then all the audio
- // ones. Video is playing but a lot of disk seek may be happening.
- MEDIA_INFO_BAD_INTERLEAVING = 800,
- // The media is not seekable (e.g live stream).
- MEDIA_INFO_NOT_SEEKABLE = 801,
- // New media metadata is available.
- MEDIA_INFO_METADATA_UPDATE = 802,
-
- //9xx
- MEDIA_INFO_TIMED_TEXT_ERROR = 900,
-};
-
-
-
-enum media_player_states {
- MEDIA_PLAYER_STATE_ERROR = 0,
- MEDIA_PLAYER_IDLE = 1 << 0,
- MEDIA_PLAYER_INITIALIZED = 1 << 1,
- MEDIA_PLAYER_PREPARING = 1 << 2,
- MEDIA_PLAYER_PREPARED = 1 << 3,
- MEDIA_PLAYER_STARTED = 1 << 4,
- MEDIA_PLAYER_PAUSED = 1 << 5,
- MEDIA_PLAYER_STOPPED = 1 << 6,
- MEDIA_PLAYER_PLAYBACK_COMPLETE = 1 << 7
-};
-
-// Keep KEY_PARAMETER_* in sync with MediaPlayer.java.
-// The same enum space is used for both set and get, in case there are future keys that
-// can be both set and get. But as of now, all parameters are either set only or get only.
-enum media_parameter_keys {
- // Streaming/buffering parameters
- KEY_PARAMETER_CACHE_STAT_COLLECT_FREQ_MS = 1100, // set only
-
- // Return a Parcel containing a single int, which is the channel count of the
- // audio track, or zero for error (e.g. no audio track) or unknown.
- KEY_PARAMETER_AUDIO_CHANNEL_COUNT = 1200, // get only
-
- // Playback rate expressed in permille (1000 is normal speed), saved as int32_t, with negative
- // values used for rewinding or reverse playback.
- KEY_PARAMETER_PLAYBACK_RATE_PERMILLE = 1300, // set only
-
- // Set a Parcel containing the value of a parcelled Java AudioAttribute instance
- KEY_PARAMETER_AUDIO_ATTRIBUTES = 1400 // set only
-};
-
-// Keep INVOKE_ID_* in sync with MediaPlayer.java.
-enum media_player_invoke_ids {
- INVOKE_ID_GET_TRACK_INFO = 1,
- INVOKE_ID_ADD_EXTERNAL_SOURCE = 2,
- INVOKE_ID_ADD_EXTERNAL_SOURCE_FD = 3,
- INVOKE_ID_SELECT_TRACK = 4,
- INVOKE_ID_UNSELECT_TRACK = 5,
- INVOKE_ID_SET_VIDEO_SCALING_MODE = 6,
- INVOKE_ID_GET_SELECTED_TRACK = 7
-};
-
-// Keep MEDIA_TRACK_TYPE_* in sync with MediaPlayer.java.
-enum media_track_type {
- MEDIA_TRACK_TYPE_UNKNOWN = 0,
- MEDIA_TRACK_TYPE_VIDEO = 1,
- MEDIA_TRACK_TYPE_AUDIO = 2,
- MEDIA_TRACK_TYPE_TIMEDTEXT = 3,
- MEDIA_TRACK_TYPE_SUBTITLE = 4,
- MEDIA_TRACK_TYPE_METADATA = 5,
-};
-
-// ----------------------------------------------------------------------------
-// ref-counted object for callbacks
-class MediaPlayerListener: virtual public RefBase
-{
-public:
- virtual void notify(int msg, int ext1, int ext2, const Parcel *obj) = 0;
-};
-
-struct IMediaHTTPService;
-
-class MediaPlayer : public BnMediaPlayerClient,
- public virtual IMediaDeathNotifier
-{
-public:
- MediaPlayer();
- ~MediaPlayer();
- void died();
- void disconnect();
-
- status_t setDataSource(
- const sp<IMediaHTTPService> &httpService,
- const char *url,
- const KeyedVector<String8, String8> *headers);
-
- status_t setDataSource(int fd, int64_t offset, int64_t length);
- status_t setDataSource(const sp<IDataSource> &source);
- status_t setVideoSurfaceTexture(
- const sp<IGraphicBufferProducer>& bufferProducer);
- status_t setListener(const sp<MediaPlayerListener>& listener);
- status_t prepare();
- status_t prepareAsync();
- status_t start();
- status_t stop();
- status_t pause();
- bool isPlaying();
- status_t setPlaybackSettings(const AudioPlaybackRate& rate);
- status_t getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */);
- status_t setSyncSettings(const AVSyncSettings& sync, float videoFpsHint);
- status_t getSyncSettings(
- AVSyncSettings* sync /* nonnull */,
- float* videoFps /* nonnull */);
- status_t getVideoWidth(int *w);
- status_t getVideoHeight(int *h);
- status_t seekTo(int msec);
- status_t getCurrentPosition(int *msec);
- status_t getDuration(int *msec);
- status_t reset();
- status_t setAudioStreamType(audio_stream_type_t type);
- status_t getAudioStreamType(audio_stream_type_t *type);
- status_t setLooping(int loop);
- bool isLooping();
- status_t setVolume(float leftVolume, float rightVolume);
- void notify(int msg, int ext1, int ext2, const Parcel *obj = NULL);
- status_t invoke(const Parcel& request, Parcel *reply);
- status_t setMetadataFilter(const Parcel& filter);
- status_t getMetadata(bool update_only, bool apply_filter, Parcel *metadata);
- status_t setAudioSessionId(audio_session_t sessionId);
- audio_session_t getAudioSessionId();
- status_t setAuxEffectSendLevel(float level);
- status_t attachAuxEffect(int effectId);
- status_t setParameter(int key, const Parcel& request);
- status_t getParameter(int key, Parcel* reply);
- status_t setRetransmitEndpoint(const char* addrString, uint16_t port);
- status_t setNextMediaPlayer(const sp<MediaPlayer>& player);
-
-private:
- void clear_l();
- status_t seekTo_l(int msec);
- status_t prepareAsync_l();
- status_t getDuration_l(int *msec);
- status_t attachNewPlayer(const sp<IMediaPlayer>& player);
- status_t reset_l();
- status_t doSetRetransmitEndpoint(const sp<IMediaPlayer>& player);
- status_t checkStateForKeySet_l(int key);
-
- sp<IMediaPlayer> mPlayer;
- thread_id_t mLockThreadId;
- Mutex mLock;
- Mutex mNotifyLock;
- Condition mSignal;
- sp<MediaPlayerListener> mListener;
- void* mCookie;
- media_player_states mCurrentState;
- int mCurrentPosition;
- int mSeekPosition;
- bool mPrepareSync;
- status_t mPrepareStatus;
- audio_stream_type_t mStreamType;
- Parcel* mAudioAttributesParcel;
- bool mLoop;
- float mLeftVolume;
- float mRightVolume;
- int mVideoWidth;
- int mVideoHeight;
- audio_session_t mAudioSessionId;
- float mSendLevel;
- struct sockaddr_in mRetransmitEndpoint;
- bool mRetransmitEndpointValid;
-};
-
-}; // namespace android
-
-#endif // ANDROID_MEDIAPLAYER_H
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
new file mode 120000
index 0000000..06d537b
--- /dev/null
+++ b/include/media/mediaplayer.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/mediaplayer.h
\ No newline at end of file
diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h
deleted file mode 100644
index c3f39a2..0000000
--- a/include/media/mediarecorder.h
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- ** Copyright (C) 2008 The Android Open Source Project
- **
- ** Licensed under the Apache License, Version 2.0 (the "License");
- ** you may not use this file except in compliance with the License.
- ** You may obtain a copy of the License at
- **
- ** http://www.apache.org/licenses/LICENSE-2.0
- **
- ** Unless required by applicable law or agreed to in writing, software
- ** distributed under the License is distributed on an "AS IS" BASIS,
- ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- ** See the License for the specific language governing permissions and
- **
- ** limitations under the License.
- */
-
-#ifndef ANDROID_MEDIARECORDER_H
-#define ANDROID_MEDIARECORDER_H
-
-#include <utils/Log.h>
-#include <utils/threads.h>
-#include <utils/List.h>
-#include <utils/Errors.h>
-#include <media/IMediaRecorderClient.h>
-#include <media/IMediaDeathNotifier.h>
-
-namespace android {
-
-class Surface;
-class IMediaRecorder;
-class ICameraRecordingProxy;
-class IGraphicBufferProducer;
-struct PersistentSurface;
-class Surface;
-
-namespace hardware {
-class ICamera;
-}
-
-typedef void (*media_completion_f)(status_t status, void *cookie);
-
-enum video_source {
- VIDEO_SOURCE_DEFAULT = 0,
- VIDEO_SOURCE_CAMERA = 1,
- VIDEO_SOURCE_SURFACE = 2,
-
- VIDEO_SOURCE_LIST_END // must be last - used to validate audio source type
-};
-
-//Please update media/java/android/media/MediaRecorder.java if the following is updated.
-enum output_format {
- OUTPUT_FORMAT_DEFAULT = 0,
- OUTPUT_FORMAT_THREE_GPP = 1,
- OUTPUT_FORMAT_MPEG_4 = 2,
-
-
- OUTPUT_FORMAT_AUDIO_ONLY_START = 3, // Used in validating the output format. Should be the
- // at the start of the audio only output formats.
-
- /* These are audio only file formats */
- OUTPUT_FORMAT_RAW_AMR = 3, //to be backward compatible
- OUTPUT_FORMAT_AMR_NB = 3,
- OUTPUT_FORMAT_AMR_WB = 4,
- OUTPUT_FORMAT_AAC_ADIF = 5,
- OUTPUT_FORMAT_AAC_ADTS = 6,
-
- OUTPUT_FORMAT_AUDIO_ONLY_END = 7, // Used in validating the output format. Should be the
- // at the end of the audio only output formats.
-
- /* Stream over a socket, limited to a single stream */
- OUTPUT_FORMAT_RTP_AVP = 7,
-
- /* H.264/AAC data encapsulated in MPEG2/TS */
- OUTPUT_FORMAT_MPEG2TS = 8,
-
- /* VP8/VORBIS data in a WEBM container */
- OUTPUT_FORMAT_WEBM = 9,
-
- OUTPUT_FORMAT_LIST_END // must be last - used to validate format type
-};
-
-enum audio_encoder {
- AUDIO_ENCODER_DEFAULT = 0,
- AUDIO_ENCODER_AMR_NB = 1,
- AUDIO_ENCODER_AMR_WB = 2,
- AUDIO_ENCODER_AAC = 3,
- AUDIO_ENCODER_HE_AAC = 4,
- AUDIO_ENCODER_AAC_ELD = 5,
- AUDIO_ENCODER_VORBIS = 6,
-
- AUDIO_ENCODER_LIST_END // must be the last - used to validate the audio encoder type
-};
-
-enum video_encoder {
- VIDEO_ENCODER_DEFAULT = 0,
- VIDEO_ENCODER_H263 = 1,
- VIDEO_ENCODER_H264 = 2,
- VIDEO_ENCODER_MPEG_4_SP = 3,
- VIDEO_ENCODER_VP8 = 4,
- VIDEO_ENCODER_HEVC = 5,
-
- VIDEO_ENCODER_LIST_END // must be the last - used to validate the video encoder type
-};
-
-/*
- * The state machine of the media_recorder.
- */
-enum media_recorder_states {
- // Error state.
- MEDIA_RECORDER_ERROR = 0,
-
- // Recorder was just created.
- MEDIA_RECORDER_IDLE = 1 << 0,
-
- // Recorder has been initialized.
- MEDIA_RECORDER_INITIALIZED = 1 << 1,
-
- // Configuration of the recorder has been completed.
- MEDIA_RECORDER_DATASOURCE_CONFIGURED = 1 << 2,
-
- // Recorder is ready to start.
- MEDIA_RECORDER_PREPARED = 1 << 3,
-
- // Recording is in progress.
- MEDIA_RECORDER_RECORDING = 1 << 4,
-};
-
-// The "msg" code passed to the listener in notify.
-enum media_recorder_event_type {
- MEDIA_RECORDER_EVENT_LIST_START = 1,
- MEDIA_RECORDER_EVENT_ERROR = 1,
- MEDIA_RECORDER_EVENT_INFO = 2,
- MEDIA_RECORDER_EVENT_LIST_END = 99,
-
- // Track related event types
- MEDIA_RECORDER_TRACK_EVENT_LIST_START = 100,
- MEDIA_RECORDER_TRACK_EVENT_ERROR = 100,
- MEDIA_RECORDER_TRACK_EVENT_INFO = 101,
- MEDIA_RECORDER_TRACK_EVENT_LIST_END = 1000,
-};
-
-/*
- * The (part of) "what" code passed to the listener in notify.
- * When the error or info type is track specific, the what has
- * the following layout:
- * the left-most 16-bit is meant for error or info type.
- * the right-most 4-bit is meant for track id.
- * the rest is reserved.
- *
- * | track id | reserved | error or info type |
- * 31 28 16 0
- *
- */
-enum media_recorder_error_type {
- MEDIA_RECORDER_ERROR_UNKNOWN = 1,
-
- // Track related error type
- MEDIA_RECORDER_TRACK_ERROR_LIST_START = 100,
- MEDIA_RECORDER_TRACK_ERROR_GENERAL = 100,
- MEDIA_RECORDER_ERROR_VIDEO_NO_SYNC_FRAME = 200,
- MEDIA_RECORDER_TRACK_ERROR_LIST_END = 1000,
-};
-
-// The codes are distributed as follow:
-// 0xx: Reserved
-// 8xx: General info/warning
-//
-enum media_recorder_info_type {
- MEDIA_RECORDER_INFO_UNKNOWN = 1,
-
- MEDIA_RECORDER_INFO_MAX_DURATION_REACHED = 800,
- MEDIA_RECORDER_INFO_MAX_FILESIZE_REACHED = 801,
-
- // All track related informtional events start here
- MEDIA_RECORDER_TRACK_INFO_LIST_START = 1000,
- MEDIA_RECORDER_TRACK_INFO_COMPLETION_STATUS = 1000,
- MEDIA_RECORDER_TRACK_INFO_PROGRESS_IN_TIME = 1001,
- MEDIA_RECORDER_TRACK_INFO_TYPE = 1002,
- MEDIA_RECORDER_TRACK_INFO_DURATION_MS = 1003,
-
- // The time to measure the max chunk duration
- MEDIA_RECORDER_TRACK_INFO_MAX_CHUNK_DUR_MS = 1004,
-
- MEDIA_RECORDER_TRACK_INFO_ENCODED_FRAMES = 1005,
-
- // The time to measure how well the audio and video
- // track data is interleaved.
- MEDIA_RECORDER_TRACK_INTER_CHUNK_TIME_MS = 1006,
-
- // The time to measure system response. Note that
- // the delay does not include the intentional delay
- // we use to eliminate the recording sound.
- MEDIA_RECORDER_TRACK_INFO_INITIAL_DELAY_MS = 1007,
-
- // The time used to compensate for initial A/V sync.
- MEDIA_RECORDER_TRACK_INFO_START_OFFSET_MS = 1008,
-
- // Total number of bytes of the media data.
- MEDIA_RECORDER_TRACK_INFO_DATA_KBYTES = 1009,
-
- MEDIA_RECORDER_TRACK_INFO_LIST_END = 2000,
-};
-
-// ----------------------------------------------------------------------------
-// ref-counted object for callbacks
-class MediaRecorderListener: virtual public RefBase
-{
-public:
- virtual void notify(int msg, int ext1, int ext2) = 0;
-};
-
-class MediaRecorder : public BnMediaRecorderClient,
- public virtual IMediaDeathNotifier
-{
-public:
- MediaRecorder(const String16& opPackageName);
- ~MediaRecorder();
-
- void died();
- status_t initCheck();
- status_t setCamera(const sp<hardware::ICamera>& camera,
- const sp<ICameraRecordingProxy>& proxy);
- status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface);
- status_t setVideoSource(int vs);
- status_t setAudioSource(int as);
- status_t setOutputFormat(int of);
- status_t setVideoEncoder(int ve);
- status_t setAudioEncoder(int ae);
- status_t setOutputFile(int fd, int64_t offset, int64_t length);
- status_t setVideoSize(int width, int height);
- status_t setVideoFrameRate(int frames_per_second);
- status_t setParameters(const String8& params);
- status_t setListener(const sp<MediaRecorderListener>& listener);
- status_t setClientName(const String16& clientName);
- status_t prepare();
- status_t getMaxAmplitude(int* max);
- status_t start();
- status_t stop();
- status_t reset();
- status_t pause();
- status_t resume();
- status_t init();
- status_t close();
- status_t release();
- void notify(int msg, int ext1, int ext2);
- status_t setInputSurface(const sp<PersistentSurface>& surface);
- sp<IGraphicBufferProducer> querySurfaceMediaSourceFromMediaServer();
-
-private:
- void doCleanUp();
- status_t doReset();
-
- sp<IMediaRecorder> mMediaRecorder;
- sp<MediaRecorderListener> mListener;
-
- // Reference to IGraphicBufferProducer
- // for encoding GL Frames. That is useful only when the
- // video source is set to VIDEO_SOURCE_GRALLOC_BUFFER
- sp<IGraphicBufferProducer> mSurfaceMediaSource;
-
- media_recorder_states mCurrentState;
- bool mIsAudioSourceSet;
- bool mIsVideoSourceSet;
- bool mIsAudioEncoderSet;
- bool mIsVideoEncoderSet;
- bool mIsOutputFileSet;
- Mutex mLock;
- Mutex mNotifyLock;
-};
-
-}; // namespace android
-
-#endif // ANDROID_MEDIARECORDER_H
diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h
new file mode 120000
index 0000000..a24deb3
--- /dev/null
+++ b/include/media/mediarecorder.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/mediarecorder.h
\ No newline at end of file
diff --git a/include/media/mediascanner.h b/include/media/mediascanner.h
new file mode 120000
index 0000000..91479e0
--- /dev/null
+++ b/include/media/mediascanner.h
@@ -0,0 +1 @@
+../../media/libmedia/include/media/mediascanner.h
\ No newline at end of file
diff --git a/include/media/nbaio b/include/media/nbaio
new file mode 120000
index 0000000..67d0ba6
--- /dev/null
+++ b/include/media/nbaio
@@ -0,0 +1 @@
+../../media/libnbaio/include
\ No newline at end of file
diff --git a/include/media/nbaio/AudioStreamInSource.h b/include/media/nbaio/AudioStreamInSource.h
deleted file mode 100644
index a6e7992..0000000
--- a/include/media/nbaio/AudioStreamInSource.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_AUDIO_STREAM_IN_SOURCE_H
-#define ANDROID_AUDIO_STREAM_IN_SOURCE_H
-
-#include <hardware/audio.h>
-#include "NBAIO.h"
-
-namespace android {
-
-// not multi-thread safe
-class AudioStreamInSource : public NBAIO_Source {
-
-public:
- AudioStreamInSource(audio_stream_in *stream);
- virtual ~AudioStreamInSource();
-
- // NBAIO_Port interface
-
- virtual ssize_t negotiate(const NBAIO_Format offers[], size_t numOffers,
- NBAIO_Format counterOffers[], size_t& numCounterOffers);
- //virtual NBAIO_Format format() const;
-
- // NBAIO_Sink interface
-
- //virtual size_t framesRead() const;
- virtual int64_t framesOverrun();
- virtual int64_t overruns() { (void) framesOverrun(); return mOverruns; }
-
- // This is an over-estimate, and could dupe the caller into making a blocking read()
- // FIXME Use an audio HAL API to query the buffer filling status when it's available.
- virtual ssize_t availableToRead() { return mStreamBufferSizeBytes / mFrameSize; }
-
- virtual ssize_t read(void *buffer, size_t count);
-
- // NBAIO_Sink end
-
-#if 0 // until necessary
- audio_stream_in *stream() const { return mStream; }
-#endif
-
-private:
- audio_stream_in * const mStream;
- size_t mStreamBufferSizeBytes; // as reported by get_buffer_size()
- int64_t mFramesOverrun;
- int64_t mOverruns;
-};
-
-} // namespace android
-
-#endif // ANDROID_AUDIO_STREAM_IN_SOURCE_H
diff --git a/include/media/nbaio/AudioStreamOutSink.h b/include/media/nbaio/AudioStreamOutSink.h
deleted file mode 100644
index e86b018..0000000
--- a/include/media/nbaio/AudioStreamOutSink.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_AUDIO_STREAM_OUT_SINK_H
-#define ANDROID_AUDIO_STREAM_OUT_SINK_H
-
-#include <hardware/audio.h>
-#include "NBAIO.h"
-
-namespace android {
-
-// not multi-thread safe
-class AudioStreamOutSink : public NBAIO_Sink {
-
-public:
- AudioStreamOutSink(audio_stream_out *stream);
- virtual ~AudioStreamOutSink();
-
- // NBAIO_Port interface
-
- virtual ssize_t negotiate(const NBAIO_Format offers[], size_t numOffers,
- NBAIO_Format counterOffers[], size_t& numCounterOffers);
- //virtual NBAIO_Format format();
-
- // NBAIO_Sink interface
-
- //virtual size_t framesWritten() const;
- //virtual size_t framesUnderrun() const;
- //virtual size_t underruns() const;
-
- // This is an over-estimate, and could dupe the caller into making a blocking write()
- // FIXME Use an audio HAL API to query the buffer emptying status when it's available.
- virtual ssize_t availableToWrite() const { return mStreamBufferSizeBytes / mFrameSize; }
-
- virtual ssize_t write(const void *buffer, size_t count);
-
- virtual status_t getTimestamp(ExtendedTimestamp ×tamp);
-
- // NBAIO_Sink end
-
-#if 0 // until necessary
- audio_stream_out *stream() const { return mStream; }
-#endif
-
-private:
- audio_stream_out * const mStream;
- size_t mStreamBufferSizeBytes; // as reported by get_buffer_size()
-};
-
-} // namespace android
-
-#endif // ANDROID_AUDIO_STREAM_OUT_SINK_H
diff --git a/include/media/nbaio/LibsndfileSink.h b/include/media/nbaio/LibsndfileSink.h
deleted file mode 100644
index f5d53d5..0000000
--- a/include/media/nbaio/LibsndfileSink.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_AUDIO_LIBSNDFILE_SINK_H
-#define ANDROID_AUDIO_LIBSNDFILE_SINK_H
-
-#include "NBAIO.h"
-#include "sndfile.h"
-
-// Implementation of NBAIO_Sink that wraps a libsndfile opened in SFM_WRITE mode
-
-namespace android {
-
-class LibsndfileSink : public NBAIO_Sink {
-
-public:
- LibsndfileSink(SNDFILE *sndfile, const SF_INFO &sfinfo);
- virtual ~LibsndfileSink();
-
- // NBAIO_Port interface
-
- //virtual ssize_t negotiate(const NBAIO_Format offers[], size_t numOffers,
- // NBAIO_Format counterOffers[], size_t& numCounterOffers);
- //virtual NBAIO_Format format() const;
-
- // NBAIO_Sink interface
-
- //virtual size_t framesWritten() const;
- //virtual size_t framesUnderrun() const;
- //virtual size_t underruns() const;
- //virtual ssize_t availableToWrite() const;
- virtual ssize_t write(const void *buffer, size_t count);
- //virtual ssize_t writeVia(writeVia_t via, size_t total, void *user, size_t block);
-
-private:
- SNDFILE * mSndfile;
-};
-
-} // namespace android
-
-#endif // ANDROID_AUDIO_LIBSNDFILE_SINK_H
diff --git a/include/media/nbaio/MonoPipe.h b/include/media/nbaio/MonoPipe.h
deleted file mode 100644
index d2cd218..0000000
--- a/include/media/nbaio/MonoPipe.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_AUDIO_MONO_PIPE_H
-#define ANDROID_AUDIO_MONO_PIPE_H
-
-#include <time.h>
-#include "NBAIO.h"
-#include <media/SingleStateQueue.h>
-
-namespace android {
-
-typedef SingleStateQueue<ExtendedTimestamp> ExtendedTimestampSingleStateQueue;
-
-// MonoPipe is similar to Pipe except:
-// - supports only a single reader, called MonoPipeReader
-// - write() cannot overrun; instead it will return a short actual count if insufficient space
-// - write() can optionally block if the pipe is full
-// Like Pipe, it is not multi-thread safe for either writer or reader
-// but writer and reader can be different threads.
-class MonoPipe : public NBAIO_Sink {
-
- friend class MonoPipeReader;
-
-public:
- // reqFrames will be rounded up to a power of 2, and all slots are available. Must be >= 2.
- // Note: whatever shares this object with another thread needs to do so in an SMP-safe way (like
- // creating it the object before creating the other thread, or storing the object with a
- // release_store). Otherwise the other thread could see a partially-constructed object.
- MonoPipe(size_t reqFrames, const NBAIO_Format& format, bool writeCanBlock = false);
- virtual ~MonoPipe();
-
- // NBAIO_Port interface
-
- //virtual ssize_t negotiate(const NBAIO_Format offers[], size_t numOffers,
- // NBAIO_Format counterOffers[], size_t& numCounterOffers);
- //virtual NBAIO_Format format() const;
-
- // NBAIO_Sink interface
-
- //virtual int64_t framesWritten() const;
- //virtual int64_t framesUnderrun() const;
- //virtual int64_t underruns() const;
-
- virtual ssize_t availableToWrite() const;
- virtual ssize_t write(const void *buffer, size_t count);
- //virtual ssize_t writeVia(writeVia_t via, size_t total, void *user, size_t block);
-
- // average number of frames present in the pipe under normal conditions.
- // See throttling mechanism in MonoPipe::write()
- size_t getAvgFrames() const { return mSetpoint; }
- void setAvgFrames(size_t setpoint);
- size_t maxFrames() const { return mMaxFrames; }
-
- // Set the shutdown state for the write side of a pipe.
- // This may be called by an unrelated thread. When shutdown state is 'true',
- // a write that would otherwise block instead returns a short transfer count.
- // There is no guarantee how long it will take for the shutdown to be recognized,
- // but it will not be an unbounded amount of time.
- // The state can be restored to normal by calling shutdown(false).
- void shutdown(bool newState = true);
-
- // Return true if the write side of a pipe is currently shutdown.
- bool isShutdown();
-
- // Return NO_ERROR if there is a timestamp available
- status_t getTimestamp(ExtendedTimestamp ×tamp);
-
-private:
- const size_t mReqFrames; // as requested in constructor, unrounded
- const size_t mMaxFrames; // always a power of 2
- void * const mBuffer;
- // mFront and mRear will never be separated by more than mMaxFrames.
- // 32-bit overflow is possible if the pipe is active for a long time, but if that happens it's
- // safe because we "&" with (mMaxFrames-1) at end of computations to calculate a buffer index.
- volatile int32_t mFront; // written by reader with android_atomic_release_store,
- // read by writer with android_atomic_acquire_load
- volatile int32_t mRear; // written by writer with android_atomic_release_store,
- // read by reader with android_atomic_acquire_load
- bool mWriteTsValid; // whether mWriteTs is valid
- struct timespec mWriteTs; // time that the previous write() completed
- size_t mSetpoint; // target value for pipe fill depth
- const bool mWriteCanBlock; // whether write() should block if the pipe is full
-
- bool mIsShutdown; // whether shutdown(true) was called, no barriers are needed
-
- ExtendedTimestampSingleStateQueue::Shared mTimestampShared;
- ExtendedTimestampSingleStateQueue::Mutator mTimestampMutator;
- ExtendedTimestampSingleStateQueue::Observer mTimestampObserver;
-};
-
-} // namespace android
-
-#endif // ANDROID_AUDIO_MONO_PIPE_H
diff --git a/include/media/nbaio/MonoPipeReader.h b/include/media/nbaio/MonoPipeReader.h
deleted file mode 100644
index b3c891d..0000000
--- a/include/media/nbaio/MonoPipeReader.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_AUDIO_MONO_PIPE_READER_H
-#define ANDROID_AUDIO_MONO_PIPE_READER_H
-
-#include "MonoPipe.h"
-
-namespace android {
-
-// MonoPipeReader is safe for only a single reader thread
-class MonoPipeReader : public NBAIO_Source {
-
-public:
-
- // Construct a MonoPipeReader and associate it with a MonoPipe;
- // any data already in the pipe is visible to this PipeReader.
- // There can be only a single MonoPipeReader per MonoPipe.
- // FIXME make this constructor a factory method of MonoPipe.
- MonoPipeReader(MonoPipe* pipe);
- virtual ~MonoPipeReader();
-
- // NBAIO_Port interface
-
- //virtual ssize_t negotiate(const NBAIO_Format offers[], size_t numOffers,
- // NBAIO_Format counterOffers[], size_t& numCounterOffers);
- //virtual NBAIO_Format format() const;
-
- // NBAIO_Source interface
-
- //virtual size_t framesRead() const;
- //virtual size_t framesOverrun();
- //virtual size_t overruns();
-
- virtual ssize_t availableToRead();
-
- virtual ssize_t read(void *buffer, size_t count);
-
- virtual void onTimestamp(const ExtendedTimestamp ×tamp);
-
- // NBAIO_Source end
-
-#if 0 // until necessary
- MonoPipe* pipe() const { return mPipe; }
-#endif
-
-private:
- MonoPipe * const mPipe;
-};
-
-} // namespace android
-
-#endif // ANDROID_AUDIO_MONO_PIPE_READER_H
diff --git a/include/media/nbaio/NBAIO.h b/include/media/nbaio/NBAIO.h
deleted file mode 100644
index 212f8e8..0000000
--- a/include/media/nbaio/NBAIO.h
+++ /dev/null
@@ -1,315 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_AUDIO_NBAIO_H
-#define ANDROID_AUDIO_NBAIO_H
-
-// Non-blocking audio I/O interface
-//
-// This header file has the abstract interfaces only. Concrete implementation classes are declared
-// elsewhere. Implementations _should_ be non-blocking for all methods, especially read() and
-// write(), but this is not enforced. In general, implementations do not need to be multi-thread
-// safe, and any exceptions are noted in the particular implementation.
-
-#include <limits.h>
-#include <stdlib.h>
-#include <utils/Errors.h>
-#include <utils/RefBase.h>
-#include <media/AudioTimestamp.h>
-#include <system/audio.h>
-
-namespace android {
-
-// In addition to the usual status_t
-enum {
- NEGOTIATE = 0x80000010, // Must (re-)negotiate format. For negotiate() only, the offeree
- // doesn't accept offers, and proposes counter-offers
- OVERRUN = 0x80000011, // availableToRead(), read(), or readVia() detected lost input due
- // to overrun; an event is counted and the caller should re-try
- UNDERRUN = 0x80000012, // availableToWrite(), write(), or writeVia() detected a gap in
- // output due to underrun (not being called often enough, or with
- // enough data); an event is counted and the caller should re-try
-};
-
-// Negotiation of format is based on the data provider and data sink, or the data consumer and
-// data source, exchanging prioritized arrays of offers and counter-offers until a single offer is
-// mutually agreed upon. Each offer is an NBAIO_Format. For simplicity and performance,
-// NBAIO_Format is a typedef that ties together the most important combinations of the various
-// attributes, rather than a struct with separate fields for format, sample rate, channel count,
-// interleave, packing, alignment, etc. The reason is that NBAIO_Format tries to abstract out only
-// the combinations that are actually needed within AudioFlinger. If the list of combinations grows
-// too large, then this decision should be re-visited.
-// Sample rate and channel count are explicit, PCM interleaved 16-bit is assumed.
-struct NBAIO_Format {
-// FIXME make this a class, and change Format_... global methods to class methods
-//private:
- unsigned mSampleRate;
- unsigned mChannelCount;
- audio_format_t mFormat;
- size_t mFrameSize;
-};
-
-extern const NBAIO_Format Format_Invalid;
-
-// Return the frame size of an NBAIO_Format in bytes
-size_t Format_frameSize(const NBAIO_Format& format);
-
-// Convert a sample rate in Hz and channel count to an NBAIO_Format
-// FIXME rename
-NBAIO_Format Format_from_SR_C(unsigned sampleRate, unsigned channelCount, audio_format_t format);
-
-// Return the sample rate in Hz of an NBAIO_Format
-unsigned Format_sampleRate(const NBAIO_Format& format);
-
-// Return the channel count of an NBAIO_Format
-unsigned Format_channelCount(const NBAIO_Format& format);
-
-// Callbacks used by NBAIO_Sink::writeVia() and NBAIO_Source::readVia() below.
-typedef ssize_t (*writeVia_t)(void *user, void *buffer, size_t count);
-typedef ssize_t (*readVia_t)(void *user, const void *buffer, size_t count);
-
-// Check whether an NBAIO_Format is valid
-bool Format_isValid(const NBAIO_Format& format);
-
-// Compare two NBAIO_Format values
-bool Format_isEqual(const NBAIO_Format& format1, const NBAIO_Format& format2);
-
-// Abstract class (interface) representing a data port.
-class NBAIO_Port : public RefBase {
-
-public:
-
- // negotiate() must called first. The purpose of negotiate() is to check compatibility of
- // formats, not to automatically adapt if they are incompatible. It's the responsibility of
- // whoever sets up the graph connections to make sure formats are compatible, and this method
- // just verifies that. The edges are "dumb" and don't attempt to adapt to bad connections.
- // How it works: offerer proposes an array of formats, in descending order of preference from
- // offers[0] to offers[numOffers - 1]. If offeree accepts one of these formats, it returns
- // the index of that offer. Otherwise, offeree sets numCounterOffers to the number of
- // counter-offers (up to a maximumum of the entry value of numCounterOffers), fills in the
- // provided array counterOffers[] with its counter-offers, in descending order of preference
- // from counterOffers[0] to counterOffers[numCounterOffers - 1], and returns NEGOTIATE.
- // Note that since the offerer allocates space for counter-offers, but only the offeree knows
- // how many counter-offers it has, there may be insufficient space for all counter-offers.
- // In that case, the offeree sets numCounterOffers to the requested number of counter-offers
- // (which is greater than the entry value of numCounterOffers), fills in as many of the most
- // important counterOffers as will fit, and returns NEGOTIATE. As this implies a re-allocation,
- // it should be used as a last resort. It is preferable for the offerer to simply allocate a
- // larger space to begin with, and/or for the offeree to tolerate a smaller space than desired.
- // Alternatively, the offerer can pass NULL for offers and counterOffers, and zero for
- // numOffers. This indicates that it has not allocated space for any counter-offers yet.
- // In this case, the offerree should set numCounterOffers appropriately and return NEGOTIATE.
- // Then the offerer will allocate the correct amount of memory and retry.
- // Format_Invalid is not allowed as either an offer or counter-offer.
- // Returns:
- // >= 0 Offer accepted.
- // NEGOTIATE No offer accepted, and counter-offer(s) optionally made. See above for details.
- virtual ssize_t negotiate(const NBAIO_Format offers[], size_t numOffers,
- NBAIO_Format counterOffers[], size_t& numCounterOffers);
-
- // Return the current negotiated format, or Format_Invalid if negotiation has not been done,
- // or if re-negotiation is required.
- virtual NBAIO_Format format() const { return mNegotiated ? mFormat : Format_Invalid; }
-
-protected:
- NBAIO_Port(const NBAIO_Format& format) : mNegotiated(false), mFormat(format),
- mFrameSize(Format_frameSize(format)) { }
- virtual ~NBAIO_Port() { }
-
- // Implementations are free to ignore these if they don't need them
-
- bool mNegotiated; // mNegotiated implies (mFormat != Format_Invalid)
- NBAIO_Format mFormat; // (mFormat != Format_Invalid) does not imply mNegotiated
- size_t mFrameSize; // assign in parallel with any assignment to mFormat
-};
-
-// Abstract class (interface) representing a non-blocking data sink, for use by a data provider.
-class NBAIO_Sink : public NBAIO_Port {
-
-public:
-
- // For the next two APIs:
- // 32 bits rolls over after 27 hours at 44.1 kHz; if that concerns you then poll periodically.
-
- // Return the number of frames written successfully since construction.
- virtual int64_t framesWritten() const { return mFramesWritten; }
-
- // Number of frames lost due to underrun since construction.
- virtual int64_t framesUnderrun() const { return 0; }
-
- // Number of underruns since construction, where a set of contiguous lost frames is one event.
- virtual int64_t underruns() const { return 0; }
-
- // Estimate of number of frames that could be written successfully now without blocking.
- // When a write() is actually attempted, the implementation is permitted to return a smaller or
- // larger transfer count, however it will make a good faith effort to give an accurate estimate.
- // Errors:
- // NEGOTIATE (Re-)negotiation is needed.
- // UNDERRUN write() has not been called frequently enough, or with enough frames to keep up.
- // An underrun event is counted, and the caller should re-try this operation.
- // WOULD_BLOCK Determining how many frames can be written without blocking would itself block.
- virtual ssize_t availableToWrite() const { return SSIZE_MAX; }
-
- // Transfer data to sink from single input buffer. Implies a copy.
- // Inputs:
- // buffer Non-NULL buffer owned by provider.
- // count Maximum number of frames to transfer.
- // Return value:
- // > 0 Number of frames successfully transferred prior to first error.
- // = 0 Count was zero.
- // < 0 status_t error occurred prior to the first frame transfer.
- // Errors:
- // NEGOTIATE (Re-)negotiation is needed.
- // WOULD_BLOCK No frames can be transferred without blocking.
- // UNDERRUN write() has not been called frequently enough, or with enough frames to keep up.
- // An underrun event is counted, and the caller should re-try this operation.
- virtual ssize_t write(const void *buffer, size_t count) = 0;
-
- // Transfer data to sink using a series of callbacks. More suitable for zero-fill, synthesis,
- // and non-contiguous transfers (e.g. circular buffer or writev).
- // Inputs:
- // via Callback function that the sink will call as many times as needed to consume data.
- // total Estimate of the number of frames the provider has available. This is an estimate,
- // and it can provide a different number of frames during the series of callbacks.
- // user Arbitrary void * reserved for data provider.
- // block Number of frames per block, that is a suggested value for 'count' in each callback.
- // Zero means no preference. This parameter is a hint only, and may be ignored.
- // Return value:
- // > 0 Total number of frames successfully transferred prior to first error.
- // = 0 Count was zero.
- // < 0 status_t error occurred prior to the first frame transfer.
- // Errors:
- // NEGOTIATE (Re-)negotiation is needed.
- // WOULD_BLOCK No frames can be transferred without blocking.
- // UNDERRUN write() has not been called frequently enough, or with enough frames to keep up.
- // An underrun event is counted, and the caller should re-try this operation.
- //
- // The 'via' callback is called by the data sink as follows:
- // Inputs:
- // user Arbitrary void * reserved for data provider.
- // buffer Non-NULL buffer owned by sink that callback should fill in with data,
- // up to a maximum of 'count' frames.
- // count Maximum number of frames to transfer during this callback.
- // Return value:
- // > 0 Number of frames successfully transferred during this callback prior to first error.
- // = 0 Count was zero.
- // < 0 status_t error occurred prior to the first frame transfer during this callback.
- virtual ssize_t writeVia(writeVia_t via, size_t total, void *user, size_t block = 0);
-
- // Returns NO_ERROR if a timestamp is available. The timestamp includes the total number
- // of frames presented to an external observer, together with the value of CLOCK_MONOTONIC
- // as of this presentation count. The timestamp parameter is undefined if error is returned.
- virtual status_t getTimestamp(ExtendedTimestamp& /*timestamp*/) { return INVALID_OPERATION; }
-
-protected:
- NBAIO_Sink(const NBAIO_Format& format = Format_Invalid) : NBAIO_Port(format), mFramesWritten(0)
- { }
- virtual ~NBAIO_Sink() { }
-
- // Implementations are free to ignore these if they don't need them
- int64_t mFramesWritten;
-};
-
-// Abstract class (interface) representing a non-blocking data source, for use by a data consumer.
-class NBAIO_Source : public NBAIO_Port {
-
-public:
-
- // For the next two APIs:
- // 32 bits rolls over after 27 hours at 44.1 kHz; if that concerns you then poll periodically.
-
- // Number of frames read successfully since construction.
- virtual int64_t framesRead() const { return mFramesRead; }
-
- // Number of frames lost due to overrun since construction.
- // Not const because implementations may need to do I/O.
- virtual int64_t framesOverrun() /*const*/ { return 0; }
-
- // Number of overruns since construction, where a set of contiguous lost frames is one event.
- // Not const because implementations may need to do I/O.
- virtual int64_t overruns() /*const*/ { return 0; }
-
- // Estimate of number of frames that could be read successfully now.
- // When a read() is actually attempted, the implementation is permitted to return a smaller or
- // larger transfer count, however it will make a good faith effort to give an accurate estimate.
- // Errors:
- // NEGOTIATE (Re-)negotiation is needed.
- // OVERRUN One or more frames were lost due to overrun, try again to read more recent data.
- // WOULD_BLOCK Determining how many frames can be read without blocking would itself block.
- virtual ssize_t availableToRead() { return SSIZE_MAX; }
-
- // Transfer data from source into single destination buffer. Implies a copy.
- // Inputs:
- // buffer Non-NULL destination buffer owned by consumer.
- // count Maximum number of frames to transfer.
- // Return value:
- // > 0 Number of frames successfully transferred prior to first error.
- // = 0 Count was zero.
- // < 0 status_t error occurred prior to the first frame transfer.
- // Errors:
- // NEGOTIATE (Re-)negotiation is needed.
- // WOULD_BLOCK No frames can be transferred without blocking.
- // OVERRUN read() has not been called frequently enough, or with enough frames to keep up.
- // One or more frames were lost due to overrun, try again to read more recent data.
- virtual ssize_t read(void *buffer, size_t count) = 0;
-
- // Transfer data from source using a series of callbacks. More suitable for zero-fill,
- // synthesis, and non-contiguous transfers (e.g. circular buffer or readv).
- // Inputs:
- // via Callback function that the source will call as many times as needed to provide data.
- // total Estimate of the number of frames the consumer desires. This is an estimate,
- // and it can consume a different number of frames during the series of callbacks.
- // user Arbitrary void * reserved for data consumer.
- // block Number of frames per block, that is a suggested value for 'count' in each callback.
- // Zero means no preference. This parameter is a hint only, and may be ignored.
- // Return value:
- // > 0 Total number of frames successfully transferred prior to first error.
- // = 0 Count was zero.
- // < 0 status_t error occurred prior to the first frame transfer.
- // Errors:
- // NEGOTIATE (Re-)negotiation is needed.
- // WOULD_BLOCK No frames can be transferred without blocking.
- // OVERRUN read() has not been called frequently enough, or with enough frames to keep up.
- // One or more frames were lost due to overrun, try again to read more recent data.
- //
- // The 'via' callback is called by the data source as follows:
- // Inputs:
- // user Arbitrary void * reserved for data consumer.
- // dest Non-NULL buffer owned by source that callback should consume data from,
- // up to a maximum of 'count' frames.
- // count Maximum number of frames to transfer during this callback.
- // Return value:
- // > 0 Number of frames successfully transferred during this callback prior to first error.
- // = 0 Count was zero.
- // < 0 status_t error occurred prior to the first frame transfer during this callback.
- virtual ssize_t readVia(readVia_t via, size_t total, void *user, size_t block = 0);
-
- // Invoked asynchronously by corresponding sink when a new timestamp is available.
- // Default implementation ignores the timestamp.
- virtual void onTimestamp(const ExtendedTimestamp& /*timestamp*/) { }
-
-protected:
- NBAIO_Source(const NBAIO_Format& format = Format_Invalid) : NBAIO_Port(format), mFramesRead(0)
- { }
- virtual ~NBAIO_Source() { }
-
- // Implementations are free to ignore these if they don't need them
- int64_t mFramesRead;
-};
-
-} // namespace android
-
-#endif // ANDROID_AUDIO_NBAIO_H
diff --git a/include/media/nbaio/NBLog.h b/include/media/nbaio/NBLog.h
deleted file mode 100644
index 1297b51..0000000
--- a/include/media/nbaio/NBLog.h
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Non-blocking event logger intended for safe communication between processes via shared memory
-
-#ifndef ANDROID_MEDIA_NBLOG_H
-#define ANDROID_MEDIA_NBLOG_H
-
-#include <binder/IMemory.h>
-#include <utils/Mutex.h>
-#include <audio_utils/roundup.h>
-
-namespace android {
-
-class String8;
-
-class NBLog {
-
-public:
-
-class Writer;
-class Reader;
-
-private:
-
-enum Event {
- EVENT_RESERVED,
- EVENT_STRING, // ASCII string, not NUL-terminated
- EVENT_TIMESTAMP, // clock_gettime(CLOCK_MONOTONIC)
-};
-
-// ---------------------------------------------------------------------------
-
-// representation of a single log entry in private memory
-struct Entry {
- Entry(Event event, const void *data, size_t length)
- : mEvent(event), mLength(length), mData(data) { }
- /*virtual*/ ~Entry() { }
-
- int readAt(size_t offset) const;
-
-private:
- friend class Writer;
- Event mEvent; // event type
- size_t mLength; // length of additional data, 0 <= mLength <= 255
- const void *mData; // event type-specific data
-};
-
-// representation of a single log entry in shared memory
-// byte[0] mEvent
-// byte[1] mLength
-// byte[2] mData[0]
-// ...
-// byte[2+i] mData[i]
-// ...
-// byte[2+mLength-1] mData[mLength-1]
-// byte[2+mLength] duplicate copy of mLength to permit reverse scan
-// byte[3+mLength] start of next log entry
-
-// located in shared memory
-struct Shared {
- Shared() : mRear(0) { }
- /*virtual*/ ~Shared() { }
-
- volatile int32_t mRear; // index one byte past the end of most recent Entry
- char mBuffer[0]; // circular buffer for entries
-};
-
-public:
-
-// ---------------------------------------------------------------------------
-
-// FIXME Timeline was intended to wrap Writer and Reader, but isn't actually used yet.
-// For now it is just a namespace for sharedSize().
-class Timeline : public RefBase {
-public:
-#if 0
- Timeline(size_t size, void *shared = NULL);
- virtual ~Timeline();
-#endif
-
- // Input parameter 'size' is the desired size of the timeline in byte units.
- // Returns the size rounded up to a power-of-2, plus the constant size overhead for indices.
- static size_t sharedSize(size_t size);
-
-#if 0
-private:
- friend class Writer;
- friend class Reader;
-
- const size_t mSize; // circular buffer size in bytes, must be a power of 2
- bool mOwn; // whether I own the memory at mShared
- Shared* const mShared; // pointer to shared memory
-#endif
-};
-
-// ---------------------------------------------------------------------------
-
-// Writer is thread-safe with respect to Reader, but not with respect to multiple threads
-// calling Writer methods. If you need multi-thread safety for writing, use LockedWriter.
-class Writer : public RefBase {
-public:
- Writer(); // dummy nop implementation without shared memory
-
- // Input parameter 'size' is the desired size of the timeline in byte units.
- // The size of the shared memory must be at least Timeline::sharedSize(size).
- Writer(size_t size, void *shared);
- Writer(size_t size, const sp<IMemory>& iMemory);
-
- virtual ~Writer() { }
-
- virtual void log(const char *string);
- virtual void logf(const char *fmt, ...) __attribute__ ((format (printf, 2, 3)));
- virtual void logvf(const char *fmt, va_list ap);
- virtual void logTimestamp();
- virtual void logTimestamp(const struct timespec& ts);
-
- virtual bool isEnabled() const;
-
- // return value for all of these is the previous isEnabled()
- virtual bool setEnabled(bool enabled); // but won't enable if no shared memory
- bool enable() { return setEnabled(true); }
- bool disable() { return setEnabled(false); }
-
- sp<IMemory> getIMemory() const { return mIMemory; }
-
-private:
- void log(Event event, const void *data, size_t length);
- void log(const Entry *entry, bool trusted = false);
-
- const size_t mSize; // circular buffer size in bytes, must be a power of 2
- Shared* const mShared; // raw pointer to shared memory
- const sp<IMemory> mIMemory; // ref-counted version
- int32_t mRear; // my private copy of mShared->mRear
- bool mEnabled; // whether to actually log
-};
-
-// ---------------------------------------------------------------------------
-
-// Similar to Writer, but safe for multiple threads to call concurrently
-class LockedWriter : public Writer {
-public:
- LockedWriter();
- LockedWriter(size_t size, void *shared);
-
- virtual void log(const char *string);
- virtual void logf(const char *fmt, ...) __attribute__ ((format (printf, 2, 3)));
- virtual void logvf(const char *fmt, va_list ap);
- virtual void logTimestamp();
- virtual void logTimestamp(const struct timespec& ts);
-
- virtual bool isEnabled() const;
- virtual bool setEnabled(bool enabled);
-
-private:
- mutable Mutex mLock;
-};
-
-// ---------------------------------------------------------------------------
-
-class Reader : public RefBase {
-public:
-
- // Input parameter 'size' is the desired size of the timeline in byte units.
- // The size of the shared memory must be at least Timeline::sharedSize(size).
- Reader(size_t size, const void *shared);
- Reader(size_t size, const sp<IMemory>& iMemory);
-
- virtual ~Reader() { }
-
- void dump(int fd, size_t indent = 0);
- bool isIMemory(const sp<IMemory>& iMemory) const;
-
-private:
- const size_t mSize; // circular buffer size in bytes, must be a power of 2
- const Shared* const mShared; // raw pointer to shared memory
- const sp<IMemory> mIMemory; // ref-counted version
- int32_t mFront; // index of oldest acknowledged Entry
- int mFd; // file descriptor
- int mIndent; // indentation level
-
- void dumpLine(const String8& timestamp, String8& body);
-
- static const size_t kSquashTimestamp = 5; // squash this many or more adjacent timestamps
-};
-
-}; // class NBLog
-
-} // namespace android
-
-#endif // ANDROID_MEDIA_NBLOG_H
diff --git a/include/media/nbaio/Pipe.h b/include/media/nbaio/Pipe.h
deleted file mode 100644
index cc95ff7..0000000
--- a/include/media/nbaio/Pipe.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_AUDIO_PIPE_H
-#define ANDROID_AUDIO_PIPE_H
-
-#include "NBAIO.h"
-
-namespace android {
-
-// Pipe is multi-thread safe for readers (see PipeReader), but safe for only a single writer thread.
-// It cannot UNDERRUN on write, unless we allow designation of a master reader that provides the
-// time-base. Readers can be added and removed dynamically, and it's OK to have no readers.
-class Pipe : public NBAIO_Sink {
-
- friend class PipeReader;
-
-public:
- // maxFrames will be rounded up to a power of 2, and all slots are available. Must be >= 2.
- // buffer is an optional parameter specifying the virtual address of the pipe buffer,
- // which must be of size roundup(maxFrames) * Format_frameSize(format) bytes.
- Pipe(size_t maxFrames, const NBAIO_Format& format, void *buffer = NULL);
-
- // If a buffer was specified in the constructor, it is not automatically freed by destructor.
- virtual ~Pipe();
-
- // NBAIO_Port interface
-
- //virtual ssize_t negotiate(const NBAIO_Format offers[], size_t numOffers,
- // NBAIO_Format counterOffers[], size_t& numCounterOffers);
- //virtual NBAIO_Format format() const;
-
- // NBAIO_Sink interface
-
- //virtual int64_t framesWritten() const;
- //virtual int64_t framesUnderrun() const;
- //virtual int64_t underruns() const;
-
- // The write side of a pipe permits overruns; flow control is the caller's responsibility.
- // It doesn't return +infinity because that would guarantee an overrun.
- virtual ssize_t availableToWrite() const { return mMaxFrames; }
-
- virtual ssize_t write(const void *buffer, size_t count);
- //virtual ssize_t writeVia(writeVia_t via, size_t total, void *user, size_t block);
-
-private:
- const size_t mMaxFrames; // always a power of 2
- void * const mBuffer;
- volatile int32_t mRear; // written by android_atomic_release_store
- volatile int32_t mReaders; // number of PipeReader clients currently attached to this Pipe
- const bool mFreeBufferInDestructor;
-};
-
-} // namespace android
-
-#endif // ANDROID_AUDIO_PIPE_H
diff --git a/include/media/nbaio/PipeReader.h b/include/media/nbaio/PipeReader.h
deleted file mode 100644
index 7c733ad..0000000
--- a/include/media/nbaio/PipeReader.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_AUDIO_PIPE_READER_H
-#define ANDROID_AUDIO_PIPE_READER_H
-
-#include "Pipe.h"
-
-namespace android {
-
-// PipeReader is safe for only a single thread
-class PipeReader : public NBAIO_Source {
-
-public:
-
- // Construct a PipeReader and associate it with a Pipe
- // FIXME make this constructor a factory method of Pipe.
- PipeReader(Pipe& pipe);
- virtual ~PipeReader();
-
- // NBAIO_Port interface
-
- //virtual ssize_t negotiate(const NBAIO_Format offers[], size_t numOffers,
- // NBAIO_Format counterOffers[], size_t& numCounterOffers);
- //virtual NBAIO_Format format() const;
-
- // NBAIO_Source interface
-
- //virtual size_t framesRead() const;
- virtual int64_t framesOverrun() { return mFramesOverrun; }
- virtual int64_t overruns() { return mOverruns; }
-
- virtual ssize_t availableToRead();
-
- virtual ssize_t read(void *buffer, size_t count);
-
- // NBAIO_Source end
-
-#if 0 // until necessary
- Pipe& pipe() const { return mPipe; }
-#endif
-
-private:
- Pipe& mPipe;
- int32_t mFront; // follows behind mPipe.mRear
- int64_t mFramesOverrun;
- int64_t mOverruns;
-};
-
-} // namespace android
-
-#endif // ANDROID_AUDIO_PIPE_READER_H
diff --git a/include/media/omx/1.0/Conversion.h b/include/media/omx/1.0/Conversion.h
new file mode 100644
index 0000000..9816fe1
--- /dev/null
+++ b/include/media/omx/1.0/Conversion.h
@@ -0,0 +1,899 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0__CONVERSION_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0__CONVERSION_H
+
+#include <vector>
+#include <list>
+
+#include <unistd.h>
+
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+#include <hidlmemory/mapping.h>
+
+#include <binder/Binder.h>
+#include <binder/Status.h>
+#include <ui/FenceTime.h>
+#include <cutils/native_handle.h>
+
+#include <ui/GraphicBuffer.h>
+#include <media/OMXFenceParcelable.h>
+#include <media/OMXBuffer.h>
+#include <media/hardware/VideoAPI.h>
+
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <android/hardware/media/omx/1.0/types.h>
+#include <android/hardware/media/omx/1.0/IOmx.h>
+#include <android/hardware/media/omx/1.0/IOmxNode.h>
+#include <android/hardware/media/omx/1.0/IOmxBufferSource.h>
+#include <android/hardware/media/omx/1.0/IOmxObserver.h>
+#include <android/hardware/media/omx/1.0/IGraphicBufferSource.h>
+
+#include <android/IGraphicBufferSource.h>
+#include <android/IOMXBufferSource.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace utils {
+
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::hidl_handle;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+using ::android::String8;
+using ::android::OMXFenceParcelable;
+
+using ::android::hardware::media::omx::V1_0::Message;
+using ::android::omx_message;
+
+using ::android::hardware::media::omx::V1_0::ColorAspects;
+using ::android::hardware::media::V1_0::Rect;
+using ::android::hardware::media::V1_0::Region;
+
+using ::android::hardware::graphics::common::V1_0::Dataspace;
+
+using ::android::hardware::graphics::common::V1_0::PixelFormat;
+
+using ::android::OMXBuffer;
+
+using ::android::hardware::media::V1_0::AnwBuffer;
+using ::android::GraphicBuffer;
+
+using ::android::hardware::media::omx::V1_0::IOmx;
+using ::android::IOMX;
+
+using ::android::hardware::media::omx::V1_0::IOmxNode;
+using ::android::IOMXNode;
+
+using ::android::hardware::media::omx::V1_0::IOmxObserver;
+using ::android::IOMXObserver;
+
+using ::android::hardware::media::omx::V1_0::IOmxBufferSource;
+using ::android::IOMXBufferSource;
+
+// native_handle_t helper functions.
+
+/**
+ * \brief Take an fd and create a native handle containing only the given fd.
+ * The created handle will need to be deleted manually with
+ * `native_handle_delete()`.
+ *
+ * \param[in] fd The source file descriptor (of type `int`).
+ * \return The created `native_handle_t*` that contains the given \p fd. If the
+ * supplied \p fd is negative, the created native handle will contain no file
+ * descriptors.
+ *
+ * If the native handle cannot be created, the return value will be
+ * `nullptr`.
+ *
+ * This function does not duplicate the file descriptor.
+ */
+inline native_handle_t* native_handle_create_from_fd(int fd) {
+ if (fd < 0) {
+ return native_handle_create(0, 0);
+ }
+ native_handle_t* nh = native_handle_create(1, 0);
+ if (nh == nullptr) {
+ return nullptr;
+ }
+ nh->data[0] = fd;
+ return nh;
+}
+
+/**
+ * \brief Extract a file descriptor from a native handle.
+ *
+ * \param[in] nh The source `native_handle_t*`.
+ * \param[in] index The index of the file descriptor in \p nh to read from. This
+ * input has the default value of `0`.
+ * \return The `index`-th file descriptor in \p nh. If \p nh does not have
+ * enough file descriptors, the returned value will be `-1`.
+ *
+ * This function does not duplicate the file descriptor.
+ */
+inline int native_handle_read_fd(native_handle_t const* nh, int index = 0) {
+ return ((nh == nullptr) || (nh->numFds == 0) ||
+ (nh->numFds <= index) || (index < 0)) ?
+ -1 : nh->data[index];
+}
+
+/**
+ * Conversion functions
+ * ====================
+ *
+ * There are two main directions of conversion:
+ * - `inTargetType(...)`: Create a wrapper whose lifetime depends on the
+ * input. The wrapper has type `TargetType`.
+ * - `toTargetType(...)`: Create a standalone object of type `TargetType` that
+ * corresponds to the input. The lifetime of the output does not depend on the
+ * lifetime of the input.
+ * - `wrapIn(TargetType*, ...)`: Same as `inTargetType()`, but for `TargetType`
+ * that cannot be copied and/or moved efficiently, or when there are multiple
+ * output arguments.
+ * - `convertTo(TargetType*, ...)`: Same as `toTargetType()`, but for
+ * `TargetType` that cannot be copied and/or moved efficiently, or when there
+ * are multiple output arguments.
+ *
+ * `wrapIn()` and `convertTo()` functions will take output arguments before
+ * input arguments. Some of these functions might return a value to indicate
+ * success or error.
+ *
+ * In converting or wrapping something as a Treble type that contains a
+ * `hidl_handle`, `native_handle_t*` will need to be created and returned as
+ * an additional output argument, hence only `wrapIn()` or `convertTo()` would
+ * be available. The caller must call `native_handle_delete()` to deallocate the
+ * returned native handle when it is no longer needed.
+ *
+ * For types that contain file descriptors, `inTargetType()` and `wrapAs()` do
+ * not perform duplication of file descriptors, while `toTargetType()` and
+ * `convertTo()` do.
+ */
+
+/**
+ * \brief Convert `Return<void>` to `binder::Status`.
+ *
+ * \param[in] t The source `Return<void>`.
+ * \return The corresponding `binder::Status`.
+ */
+// convert: Return<void> -> ::android::binder::Status
+inline ::android::binder::Status toBinderStatus(
+ Return<void> const& t) {
+ return ::android::binder::Status::fromExceptionCode(
+ t.isOk() ? OK : UNKNOWN_ERROR,
+ t.description().c_str());
+}
+
+/**
+ * \brief Convert `Return<Status>` to `binder::Status`.
+ *
+ * \param[in] t The source `Return<Status>`.
+ * \return The corresponding `binder::Status`.
+ */
+// convert: Return<Status> -> ::android::binder::Status
+inline ::android::binder::Status toBinderStatus(
+ Return<Status> const& t) {
+ return ::android::binder::Status::fromStatusT(
+ t.isOk() ? static_cast<status_t>(static_cast<Status>(t)) : UNKNOWN_ERROR);
+}
+
+/**
+ * \brief Convert `Return<Status>` to `status_t`. This is for legacy binder
+ * calls.
+ *
+ * \param[in] t The source `Return<Status>`.
+ * \return The corresponding `status_t`.
+ *
+ * This function first check if \p t has a transport error. If it does, then the
+ * return value is the transport error code. Otherwise, the return value is
+ * converted from `Status` contained inside \p t.
+ *
+ * Note:
+ * - This `Status` is omx-specific. It is defined in `types.hal`.
+ * - The name of this function is not `convert`.
+ */
+// convert: Status -> status_t
+inline status_t toStatusT(Return<Status> const& t) {
+ return t.isOk() ? static_cast<status_t>(static_cast<Status>(t)) : UNKNOWN_ERROR;
+}
+
+/**
+ * \brief Convert `Return<void>` to `status_t`. This is for legacy binder calls.
+ *
+ * \param[in] t The source `Return<void>`.
+ * \return The corresponding `status_t`.
+ */
+// convert: Return<void> -> status_t
+inline status_t toStatusT(Return<void> const& t) {
+ return t.isOk() ? OK : UNKNOWN_ERROR;
+}
+
+/**
+ * \brief Convert `Status` to `status_t`. This is for legacy binder calls.
+ *
+ * \param[in] t The source `Status`.
+ * \return the corresponding `status_t`.
+ */
+// convert: Status -> status_t
+inline status_t toStatusT(Status const& t) {
+ return static_cast<status_t>(t);
+}
+
+/**
+ * \brief Convert `status_t` to `Status`.
+ *
+ * \param[in] l The source `status_t`.
+ * \return The corresponding `Status`.
+ */
+// convert: status_t -> Status
+inline Status toStatus(status_t l) {
+ return static_cast<Status>(l);
+}
+
+/**
+ * \brief Wrap `native_handle_t*` in `hidl_handle`.
+ *
+ * \param[in] nh The source `native_handle_t*`.
+ * \return The `hidl_handle` that points to \p nh.
+ */
+// wrap: native_handle_t* -> hidl_handle
+inline hidl_handle inHidlHandle(native_handle_t const* nh) {
+ return hidl_handle(nh);
+}
+
+/**
+ * \brief Wrap an `omx_message` and construct the corresponding `Message`.
+ *
+ * \param[out] t The wrapper of type `Message`.
+ * \param[out] nh The native_handle_t referred to by `t->fence`.
+ * \param[in] l The source `omx_message`.
+ * \return `true` if the wrapping is successful; `false` otherwise.
+ *
+ * Upon success, \p nh will be created to hold the file descriptor stored in
+ * `l.fenceFd`, and `t->fence` will point to \p nh. \p nh will need to be
+ * destroyed manually by `native_handle_delete()` when \p t is no longer needed.
+ *
+ * Upon failure, \p nh will not be created and will not need to be deleted. \p t
+ * will be invalid.
+ */
+// wrap, omx_message -> Message, native_handle_t*
+inline bool wrapAs(Message* t, native_handle_t** nh, omx_message const& l) {
+ *nh = native_handle_create_from_fd(l.fenceFd);
+ if (!*nh) {
+ return false;
+ }
+ t->fence = *nh;
+ switch (l.type) {
+ case omx_message::EVENT:
+ t->type = Message::Type::EVENT;
+ t->data.eventData.event = uint32_t(l.u.event_data.event);
+ t->data.eventData.data1 = l.u.event_data.data1;
+ t->data.eventData.data2 = l.u.event_data.data2;
+ t->data.eventData.data3 = l.u.event_data.data3;
+ t->data.eventData.data4 = l.u.event_data.data4;
+ break;
+ case omx_message::EMPTY_BUFFER_DONE:
+ t->type = Message::Type::EMPTY_BUFFER_DONE;
+ t->data.bufferData.buffer = l.u.buffer_data.buffer;
+ break;
+ case omx_message::FILL_BUFFER_DONE:
+ t->type = Message::Type::FILL_BUFFER_DONE;
+ t->data.extendedBufferData.buffer = l.u.extended_buffer_data.buffer;
+ t->data.extendedBufferData.rangeOffset =
+ l.u.extended_buffer_data.range_offset;
+ t->data.extendedBufferData.rangeLength =
+ l.u.extended_buffer_data.range_length;
+ t->data.extendedBufferData.flags = l.u.extended_buffer_data.flags;
+ t->data.extendedBufferData.timestampUs =
+ l.u.extended_buffer_data.timestamp;
+ break;
+ case omx_message::FRAME_RENDERED:
+ t->type = Message::Type::FRAME_RENDERED;
+ t->data.renderData.timestampUs = l.u.render_data.timestamp;
+ t->data.renderData.systemTimeNs = l.u.render_data.nanoTime;
+ break;
+ default:
+ native_handle_delete(*nh);
+ return false;
+ }
+ return true;
+}
+
+/**
+ * \brief Wrap a `Message` inside an `omx_message`.
+ *
+ * \param[out] l The wrapper of type `omx_message`.
+ * \param[in] t The source `Message`.
+ * \return `true` if the wrapping is successful; `false` otherwise.
+ */
+// wrap: Message -> omx_message
+inline bool wrapAs(omx_message* l, Message const& t) {
+ l->fenceFd = native_handle_read_fd(t.fence);
+ switch (t.type) {
+ case Message::Type::EVENT:
+ l->type = omx_message::EVENT;
+ l->u.event_data.event = OMX_EVENTTYPE(t.data.eventData.event);
+ l->u.event_data.data1 = t.data.eventData.data1;
+ l->u.event_data.data2 = t.data.eventData.data2;
+ l->u.event_data.data3 = t.data.eventData.data3;
+ l->u.event_data.data4 = t.data.eventData.data4;
+ break;
+ case Message::Type::EMPTY_BUFFER_DONE:
+ l->type = omx_message::EMPTY_BUFFER_DONE;
+ l->u.buffer_data.buffer = t.data.bufferData.buffer;
+ break;
+ case Message::Type::FILL_BUFFER_DONE:
+ l->type = omx_message::FILL_BUFFER_DONE;
+ l->u.extended_buffer_data.buffer = t.data.extendedBufferData.buffer;
+ l->u.extended_buffer_data.range_offset =
+ t.data.extendedBufferData.rangeOffset;
+ l->u.extended_buffer_data.range_length =
+ t.data.extendedBufferData.rangeLength;
+ l->u.extended_buffer_data.flags = t.data.extendedBufferData.flags;
+ l->u.extended_buffer_data.timestamp =
+ t.data.extendedBufferData.timestampUs;
+ break;
+ case Message::Type::FRAME_RENDERED:
+ l->type = omx_message::FRAME_RENDERED;
+ l->u.render_data.timestamp = t.data.renderData.timestampUs;
+ l->u.render_data.nanoTime = t.data.renderData.systemTimeNs;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+/**
+ * \brief Similar to `wrapAs(omx_message*, Message const&)`, but the output will
+ * have an extended lifetime.
+ *
+ * \param[out] l The output `omx_message`.
+ * \param[in] t The source `Message`.
+ * \return `true` if the conversion is successful; `false` otherwise.
+ *
+ * This function calls `wrapAs()`, then attempts to duplicate the file
+ * descriptor for the fence if it is not `-1`. If duplication fails, `false`
+ * will be returned.
+ */
+// convert: Message -> omx_message
+inline bool convertTo(omx_message* l, Message const& t) {
+ if (!wrapAs(l, t)) {
+ return false;
+ }
+ if (l->fenceFd == -1) {
+ return true;
+ }
+ l->fenceFd = dup(l->fenceFd);
+ return l->fenceFd != -1;
+}
+
+/**
+ * \brief Wrap an `OMXFenceParcelable` inside a `hidl_handle`.
+ *
+ * \param[out] t The wrapper of type `hidl_handle`.
+ * \param[out] nh The native handle created to hold the file descriptor inside
+ * \p l.
+ * \param[in] l The source `OMXFenceParcelable`, which essentially contains one
+ * file descriptor.
+ * \return `true` if \p t and \p nh are successfully created to wrap around \p
+ * l; `false` otherwise.
+ *
+ * On success, \p nh needs to be deleted by the caller with
+ * `native_handle_delete()` after \p t and \p nh are no longer needed.
+ *
+ * On failure, \p nh will not need to be deleted, and \p t will hold an invalid
+ * value.
+ */
+// wrap: OMXFenceParcelable -> hidl_handle, native_handle_t*
+inline bool wrapAs(hidl_handle* t, native_handle_t** nh,
+ OMXFenceParcelable const& l) {
+ *nh = native_handle_create_from_fd(l.get());
+ if (!*nh) {
+ return false;
+ }
+ *t = *nh;
+ return true;
+}
+
+/**
+ * \brief Wrap a `hidl_handle` inside an `OMXFenceParcelable`.
+ *
+ * \param[out] l The wrapper of type `OMXFenceParcelable`.
+ * \param[in] t The source `hidl_handle`.
+ */
+// wrap: hidl_handle -> OMXFenceParcelable
+inline void wrapAs(OMXFenceParcelable* l, hidl_handle const& t) {
+ l->mFenceFd = native_handle_read_fd(t);
+}
+
+/**
+ * \brief Convert a `hidl_handle` to `OMXFenceParcelable`. If `hidl_handle`
+ * contains file descriptors, the first file descriptor will be duplicated and
+ * stored in the output `OMXFenceParcelable`.
+ *
+ * \param[out] l The output `OMXFenceParcelable`.
+ * \param[in] t The input `hidl_handle`.
+ * \return `false` if \p t contains a valid file descriptor but duplication
+ * fails; `true` otherwise.
+ */
+// convert: hidl_handle -> OMXFenceParcelable
+inline bool convertTo(OMXFenceParcelable* l, hidl_handle const& t) {
+ int fd = native_handle_read_fd(t);
+ if (fd != -1) {
+ fd = dup(fd);
+ if (fd == -1) {
+ return false;
+ }
+ }
+ l->mFenceFd = fd;
+ return true;
+}
+
+/**
+ * \brief Convert `::android::ColorAspects` to `ColorAspects`.
+ *
+ * \param[in] l The source `::android::ColorAspects`.
+ * \return The corresponding `ColorAspects`.
+ */
+// convert: ::android::ColorAspects -> ColorAspects
+inline ColorAspects toHardwareColorAspects(::android::ColorAspects const& l) {
+ return ColorAspects{
+ static_cast<ColorAspects::Range>(l.mRange),
+ static_cast<ColorAspects::Primaries>(l.mPrimaries),
+ static_cast<ColorAspects::Transfer>(l.mTransfer),
+ static_cast<ColorAspects::MatrixCoeffs>(l.mMatrixCoeffs)};
+}
+
+/**
+ * \brief Convert `int32_t` to `ColorAspects`.
+ *
+ * \param[in] l The source `int32_t`.
+ * \return The corresponding `ColorAspects`.
+ */
+// convert: int32_t -> ColorAspects
+inline ColorAspects toHardwareColorAspects(int32_t l) {
+ return ColorAspects{
+ static_cast<ColorAspects::Range>((l >> 24) & 0xFF),
+ static_cast<ColorAspects::Primaries>((l >> 16) & 0xFF),
+ static_cast<ColorAspects::Transfer>(l & 0xFF),
+ static_cast<ColorAspects::MatrixCoeffs>((l >> 8) & 0xFF)};
+}
+
+/**
+ * \brief Convert `ColorAspects` to `int32_t`.
+ *
+ * \param[in] t The source `ColorAspects`.
+ * \return The corresponding `int32_t` (compact representation).
+ */
+// convert: ColorAspects -> int32_t
+inline int32_t toCompactColorAspects(ColorAspects const& t) {
+ return static_cast<int32_t>(
+ (static_cast<uint32_t>(t.range) << 24) |
+ (static_cast<uint32_t>(t.primaries) << 16) |
+ (static_cast<uint32_t>(t.transfer)) |
+ (static_cast<uint32_t>(t.matrixCoeffs) << 8));
+}
+
+/**
+ * \brief Convert `int32_t` to `Dataspace`.
+ *
+ * \param[in] l The source `int32_t`.
+ * \result The corresponding `Dataspace`.
+ */
+// convert: int32_t -> Dataspace
+inline Dataspace toHardwareDataspace(int32_t l) {
+ return static_cast<Dataspace>(l);
+}
+
+/**
+ * \brief Convert `Dataspace` to `int32_t`.
+ *
+ * \param[in] t The source `Dataspace`.
+ * \result The corresponding `int32_t`.
+ */
+// convert: Dataspace -> int32_t
+inline int32_t toRawDataspace(Dataspace const& t) {
+ return static_cast<int32_t>(t);
+}
+
+/**
+ * \brief Wrap an opaque buffer inside a `hidl_vec<uint8_t>`.
+ *
+ * \param[in] l The pointer to the beginning of the opaque buffer.
+ * \param[in] size The size of the buffer.
+ * \return A `hidl_vec<uint8_t>` that points to the buffer.
+ */
+// wrap: void*, size_t -> hidl_vec<uint8_t>
+inline hidl_vec<uint8_t> inHidlBytes(void const* l, size_t size) {
+ hidl_vec<uint8_t> t;
+ t.setToExternal(static_cast<uint8_t*>(const_cast<void*>(l)), size, false);
+ return t;
+}
+
+/**
+ * \brief Create a `hidl_vec<uint8_t>` that is a copy of an opaque buffer.
+ *
+ * \param[in] l The pointer to the beginning of the opaque buffer.
+ * \param[in] size The size of the buffer.
+ * \return A `hidl_vec<uint8_t>` that is a copy of the input buffer.
+ */
+// convert: void*, size_t -> hidl_vec<uint8_t>
+inline hidl_vec<uint8_t> toHidlBytes(void const* l, size_t size) {
+ hidl_vec<uint8_t> t;
+ t.resize(size);
+ uint8_t const* src = static_cast<uint8_t const*>(l);
+ std::copy(src, src + size, t.data());
+ return t;
+}
+
+/**
+ * \brief Wrap `GraphicBuffer` in `AnwBuffer`.
+ *
+ * \param[out] t The wrapper of type `AnwBuffer`.
+ * \param[in] l The source `GraphicBuffer`.
+ */
+// wrap: GraphicBuffer -> AnwBuffer
+inline void wrapAs(AnwBuffer* t, GraphicBuffer const& l) {
+ t->attr.width = l.getWidth();
+ t->attr.height = l.getHeight();
+ t->attr.stride = l.getStride();
+ t->attr.format = static_cast<PixelFormat>(l.getPixelFormat());
+ t->attr.layerCount = l.getLayerCount();
+ t->attr.usage = l.getUsage();
+ t->attr.id = l.getId();
+ t->attr.generationNumber = l.getGenerationNumber();
+ t->nativeHandle = hidl_handle(l.handle);
+}
+
+/**
+ * \brief Convert `AnwBuffer` to `GraphicBuffer`.
+ *
+ * \param[out] l The destination `GraphicBuffer`.
+ * \param[in] t The source `AnwBuffer`.
+ *
+ * This function will duplicate all file descriptors in \p t.
+ */
+// convert: AnwBuffer -> GraphicBuffer
+// Ref: frameworks/native/libs/ui/GraphicBuffer.cpp: GraphicBuffer::flatten
+inline bool convertTo(GraphicBuffer* l, AnwBuffer const& t) {
+ native_handle_t* handle = t.nativeHandle == nullptr ?
+ nullptr : native_handle_clone(t.nativeHandle);
+
+ size_t const numInts = 12 + (handle ? handle->numInts : 0);
+ int32_t* ints = new int32_t[numInts];
+
+ size_t numFds = static_cast<size_t>(handle ? handle->numFds : 0);
+ int* fds = new int[numFds];
+
+ ints[0] = 'GBFR';
+ ints[1] = static_cast<int32_t>(t.attr.width);
+ ints[2] = static_cast<int32_t>(t.attr.height);
+ ints[3] = static_cast<int32_t>(t.attr.stride);
+ ints[4] = static_cast<int32_t>(t.attr.format);
+ ints[5] = static_cast<int32_t>(t.attr.layerCount);
+ ints[6] = static_cast<int32_t>(t.attr.usage);
+ ints[7] = static_cast<int32_t>(t.attr.id >> 32);
+ ints[8] = static_cast<int32_t>(t.attr.id & 0xFFFFFFFF);
+ ints[9] = static_cast<int32_t>(t.attr.generationNumber);
+ ints[10] = 0;
+ ints[11] = 0;
+ if (handle) {
+ ints[10] = static_cast<int32_t>(handle->numFds);
+ ints[11] = static_cast<int32_t>(handle->numInts);
+ int* intsStart = handle->data + handle->numFds;
+ std::copy(handle->data, intsStart, fds);
+ std::copy(intsStart, intsStart + handle->numInts, &ints[12]);
+ }
+
+ void const* constBuffer = static_cast<void const*>(ints);
+ size_t size = numInts * sizeof(int32_t);
+ int const* constFds = static_cast<int const*>(fds);
+ status_t status = l->unflatten(constBuffer, size, constFds, numFds);
+
+ delete [] fds;
+ delete [] ints;
+ native_handle_delete(handle);
+ return status == NO_ERROR;
+}
+
+/**
+ * \brief Wrap `OMXBuffer` in `CodecBuffer`.
+ *
+ * \param[out] t The wrapper of type `CodecBuffer`.
+ * \param[in] l The source `OMXBuffer`.
+ * \return `true` if the wrapping is successful; `false` otherwise.
+ */
+// wrap: OMXBuffer -> CodecBuffer
+inline bool wrapAs(CodecBuffer* t, OMXBuffer const& l) {
+ t->sharedMemory = hidl_memory();
+ t->nativeHandle = hidl_handle();
+ switch (l.mBufferType) {
+ case OMXBuffer::kBufferTypeInvalid: {
+ t->type = CodecBuffer::Type::INVALID;
+ return true;
+ }
+ case OMXBuffer::kBufferTypePreset: {
+ t->type = CodecBuffer::Type::PRESET;
+ t->attr.preset.rangeLength = static_cast<uint32_t>(l.mRangeLength);
+ t->attr.preset.rangeOffset = static_cast<uint32_t>(l.mRangeOffset);
+ return true;
+ }
+ case OMXBuffer::kBufferTypeHidlMemory: {
+ t->type = CodecBuffer::Type::SHARED_MEM;
+ t->sharedMemory = l.mHidlMemory;
+ return true;
+ }
+ case OMXBuffer::kBufferTypeSharedMem: {
+ // This is not supported.
+ return false;
+ }
+ case OMXBuffer::kBufferTypeANWBuffer: {
+ t->type = CodecBuffer::Type::ANW_BUFFER;
+ if (l.mGraphicBuffer == nullptr) {
+ t->attr.anwBuffer.width = 0;
+ t->attr.anwBuffer.height = 0;
+ t->attr.anwBuffer.stride = 0;
+ t->attr.anwBuffer.format = static_cast<PixelFormat>(1);
+ t->attr.anwBuffer.layerCount = 0;
+ t->attr.anwBuffer.usage = 0;
+ return true;
+ }
+ t->attr.anwBuffer.width = l.mGraphicBuffer->getWidth();
+ t->attr.anwBuffer.height = l.mGraphicBuffer->getHeight();
+ t->attr.anwBuffer.stride = l.mGraphicBuffer->getStride();
+ t->attr.anwBuffer.format = static_cast<PixelFormat>(
+ l.mGraphicBuffer->getPixelFormat());
+ t->attr.anwBuffer.layerCount = l.mGraphicBuffer->getLayerCount();
+ t->attr.anwBuffer.usage = l.mGraphicBuffer->getUsage();
+ t->nativeHandle = l.mGraphicBuffer->handle;
+ return true;
+ }
+ case OMXBuffer::kBufferTypeNativeHandle: {
+ t->type = CodecBuffer::Type::NATIVE_HANDLE;
+ t->nativeHandle = l.mNativeHandle->handle();
+ return true;
+ }
+ }
+ return false;
+}
+
+/**
+ * \brief Convert `CodecBuffer` to `OMXBuffer`.
+ *
+ * \param[out] l The destination `OMXBuffer`.
+ * \param[in] t The source `CodecBuffer`.
+ * \return `true` if successful; `false` otherwise.
+ */
+// convert: CodecBuffer -> OMXBuffer
+inline bool convertTo(OMXBuffer* l, CodecBuffer const& t) {
+ switch (t.type) {
+ case CodecBuffer::Type::INVALID: {
+ *l = OMXBuffer();
+ return true;
+ }
+ case CodecBuffer::Type::PRESET: {
+ *l = OMXBuffer(
+ t.attr.preset.rangeOffset,
+ t.attr.preset.rangeLength);
+ return true;
+ }
+ case CodecBuffer::Type::SHARED_MEM: {
+ *l = OMXBuffer(t.sharedMemory);
+ return true;
+ }
+ case CodecBuffer::Type::ANW_BUFFER: {
+ if (t.nativeHandle.getNativeHandle() == nullptr) {
+ *l = OMXBuffer(sp<GraphicBuffer>(nullptr));
+ return true;
+ }
+ AnwBuffer anwBuffer;
+ anwBuffer.nativeHandle = t.nativeHandle;
+ anwBuffer.attr = t.attr.anwBuffer;
+ sp<GraphicBuffer> graphicBuffer = new GraphicBuffer();
+ if (!convertTo(graphicBuffer.get(), anwBuffer)) {
+ return false;
+ }
+ *l = OMXBuffer(graphicBuffer);
+ return true;
+ }
+ case CodecBuffer::Type::NATIVE_HANDLE: {
+ *l = OMXBuffer(NativeHandle::create(
+ native_handle_clone(t.nativeHandle), true));
+ return true;
+ }
+ }
+ return false;
+}
+
+/**
+ * \brief Convert `IOMX::ComponentInfo` to `IOmx::ComponentInfo`.
+ *
+ * \param[out] t The destination `IOmx::ComponentInfo`.
+ * \param[in] l The source `IOMX::ComponentInfo`.
+ */
+// convert: IOMX::ComponentInfo -> IOmx::ComponentInfo
+inline bool convertTo(IOmx::ComponentInfo* t, IOMX::ComponentInfo const& l) {
+ t->mName = l.mName.string();
+ t->mRoles.resize(l.mRoles.size());
+ size_t i = 0;
+ for (auto& role : l.mRoles) {
+ t->mRoles[i++] = role.string();
+ }
+ return true;
+}
+
+/**
+ * \brief Convert `IOmx::ComponentInfo` to `IOMX::ComponentInfo`.
+ *
+ * \param[out] l The destination `IOMX::ComponentInfo`.
+ * \param[in] t The source `IOmx::ComponentInfo`.
+ */
+// convert: IOmx::ComponentInfo -> IOMX::ComponentInfo
+inline bool convertTo(IOMX::ComponentInfo* l, IOmx::ComponentInfo const& t) {
+ l->mName = t.mName.c_str();
+ l->mRoles.clear();
+ for (size_t i = 0; i < t.mRoles.size(); ++i) {
+ l->mRoles.push_back(String8(t.mRoles[i].c_str()));
+ }
+ return true;
+}
+
+/**
+ * \brief Convert `OMX_BOOL` to `bool`.
+ *
+ * \param[in] l The source `OMX_BOOL`.
+ * \return The destination `bool`.
+ */
+// convert: OMX_BOOL -> bool
+inline bool toRawBool(OMX_BOOL l) {
+ return l == OMX_FALSE ? false : true;
+}
+
+/**
+ * \brief Convert `bool` to `OMX_BOOL`.
+ *
+ * \param[in] t The source `bool`.
+ * \return The destination `OMX_BOOL`.
+ */
+// convert: bool -> OMX_BOOL
+inline OMX_BOOL toEnumBool(bool t) {
+ return t ? OMX_TRUE : OMX_FALSE;
+}
+
+/**
+ * \brief Convert `OMX_COMMANDTYPE` to `uint32_t`.
+ *
+ * \param[in] l The source `OMX_COMMANDTYPE`.
+ * \return The underlying value of type `uint32_t`.
+ *
+ * `OMX_COMMANDTYPE` is an enum type whose underlying type is `uint32_t`.
+ */
+// convert: OMX_COMMANDTYPE -> uint32_t
+inline uint32_t toRawCommandType(OMX_COMMANDTYPE l) {
+ return static_cast<uint32_t>(l);
+}
+
+/**
+ * \brief Convert `uint32_t` to `OMX_COMMANDTYPE`.
+ *
+ * \param[in] t The source `uint32_t`.
+ * \return The corresponding enum value of type `OMX_COMMANDTYPE`.
+ *
+ * `OMX_COMMANDTYPE` is an enum type whose underlying type is `uint32_t`.
+ */
+// convert: uint32_t -> OMX_COMMANDTYPE
+inline OMX_COMMANDTYPE toEnumCommandType(uint32_t t) {
+ return static_cast<OMX_COMMANDTYPE>(t);
+}
+
+/**
+ * \brief Convert `OMX_INDEXTYPE` to `uint32_t`.
+ *
+ * \param[in] l The source `OMX_INDEXTYPE`.
+ * \return The underlying value of type `uint32_t`.
+ *
+ * `OMX_INDEXTYPE` is an enum type whose underlying type is `uint32_t`.
+ */
+// convert: OMX_INDEXTYPE -> uint32_t
+inline uint32_t toRawIndexType(OMX_INDEXTYPE l) {
+ return static_cast<uint32_t>(l);
+}
+
+/**
+ * \brief Convert `uint32_t` to `OMX_INDEXTYPE`.
+ *
+ * \param[in] t The source `uint32_t`.
+ * \return The corresponding enum value of type `OMX_INDEXTYPE`.
+ *
+ * `OMX_INDEXTYPE` is an enum type whose underlying type is `uint32_t`.
+ */
+// convert: uint32_t -> OMX_INDEXTYPE
+inline OMX_INDEXTYPE toEnumIndexType(uint32_t t) {
+ return static_cast<OMX_INDEXTYPE>(t);
+}
+
+/**
+ * \brief Convert `IOMX::PortMode` to `PortMode`.
+ *
+ * \param[in] l The source `IOMX::PortMode`.
+ * \return The destination `PortMode`.
+ */
+// convert: IOMX::PortMode -> PortMode
+inline PortMode toHardwarePortMode(IOMX::PortMode l) {
+ return static_cast<PortMode>(l);
+}
+
+/**
+ * \brief Convert `PortMode` to `IOMX::PortMode`.
+ *
+ * \param[in] t The source `PortMode`.
+ * \return The destination `IOMX::PortMode`.
+ */
+// convert: PortMode -> IOMX::PortMode
+inline IOMX::PortMode toIOMXPortMode(PortMode t) {
+ return static_cast<IOMX::PortMode>(t);
+}
+
+/**
+ * \brief Convert `OMX_TICKS` to `uint64_t`.
+ *
+ * \param[in] l The source `OMX_TICKS`.
+ * \return The destination `uint64_t`.
+ */
+// convert: OMX_TICKS -> uint64_t
+inline uint64_t toRawTicks(OMX_TICKS l) {
+#ifndef OMX_SKIP64BIT
+ return static_cast<uint64_t>(l);
+#else
+    return static_cast<uint64_t>(l.nLowPart) |
+            (static_cast<uint64_t>(l.nHighPart) << 32);
+#endif
+}
+
+/**
+ * \brief Convert `uint64_t` to `OMX_TICKS`.
+ *
+ * \param[in] l The source `uint64_t`.
+ * \return The destination `OMX_TICKS`.
+ */
+// convert: uint64_t -> OMX_TICKS
+inline OMX_TICKS toOMXTicks(uint64_t t) {
+#ifndef OMX_SKIP64BIT
+ return static_cast<OMX_TICKS>(t);
+#else
+ return OMX_TICKS{
+ static_cast<uint32_t>(t & 0xFFFFFFFF),
+ static_cast<uint32_t>(t >> 32)};
+#endif
+}
+
+} // namespace utils
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_MEDIA_OMX_V1_0__CONVERSION_H
diff --git a/include/media/omx/1.0/WGraphicBufferSource.h b/include/media/omx/1.0/WGraphicBufferSource.h
new file mode 100644
index 0000000..397e576
--- /dev/null
+++ b/include/media/omx/1.0/WGraphicBufferSource.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WGRAPHICBUFFERSOURCE_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WGRAPHICBUFFERSOURCE_H
+
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+#include <binder/Binder.h>
+#include <media/IOMX.h>
+
+#include <android/hardware/graphics/common/1.0/types.h>
+#include <android/hardware/media/omx/1.0/IOmxNode.h>
+#include <android/hardware/media/omx/1.0/IGraphicBufferSource.h>
+
+#include <android/BnGraphicBufferSource.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace utils {
+
+using ::android::hardware::graphics::common::V1_0::Dataspace;
+using ::android::hardware::media::omx::V1_0::ColorAspects;
+using ::android::hardware::media::omx::V1_0::IGraphicBufferSource;
+using ::android::hardware::media::omx::V1_0::IOmxNode;
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+using ::android::IOMXNode;
+
+/**
+ * Wrapper classes for conversion
+ * ==============================
+ *
+ * Naming convention:
+ * - LW = Legacy Wrapper --- It wraps a Treble object inside a legacy object.
+ * - TW = Treble Wrapper --- It wraps a legacy object inside a Treble object.
+ */
+
+typedef ::android::binder::Status BnStatus;
+typedef ::android::BnGraphicBufferSource BnGraphicBufferSource;
+typedef ::android::hardware::media::omx::V1_0::IGraphicBufferSource
+ TGraphicBufferSource;
+
+struct LWGraphicBufferSource : public BnGraphicBufferSource {
+ sp<TGraphicBufferSource> mBase;
+ LWGraphicBufferSource(sp<TGraphicBufferSource> const& base);
+ BnStatus configure(const sp<IOMXNode>& omxNode, int32_t dataSpace) override;
+ BnStatus setSuspend(bool suspend, int64_t timeUs) override;
+ BnStatus setRepeatPreviousFrameDelayUs(int64_t repeatAfterUs) override;
+ BnStatus setMaxFps(float maxFps) override;
+ BnStatus setTimeLapseConfig(double fps, double captureFps) override;
+ BnStatus setStartTimeUs(int64_t startTimeUs) override;
+ BnStatus setStopTimeUs(int64_t stopTimeUs) override;
+ BnStatus setColorAspects(int32_t aspects) override;
+ BnStatus setTimeOffsetUs(int64_t timeOffsetsUs) override;
+ BnStatus signalEndOfInputStream() override;
+};
+
+} // namespace utils
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_MEDIA_OMX_V1_0_WGRAPHICBUFFERSOURCE_H
diff --git a/include/media/omx/1.0/WOmx.h b/include/media/omx/1.0/WOmx.h
new file mode 100644
index 0000000..f13546e
--- /dev/null
+++ b/include/media/omx/1.0/WOmx.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMX_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMX_H
+
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+#include <media/IOMX.h>
+
+#include <hidl/HybridInterface.h>
+#include <android/hardware/media/omx/1.0/IOmx.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace utils {
+
+using ::android::hardware::media::omx::V1_0::IOmx;
+using ::android::hardware::media::omx::V1_0::IOmxNode;
+using ::android::hardware::media::omx::V1_0::IOmxObserver;
+using ::android::hardware::media::omx::V1_0::Status;
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+using ::android::List;
+using ::android::IOMX;
+using ::android::BnOMX;
+
+/**
+ * Wrapper classes for conversion
+ * ==============================
+ *
+ * Naming convention:
+ * - LW = Legacy Wrapper --- It wraps a Treble object inside a legacy object.
+ * - TW = Treble Wrapper --- It wraps a legacy object inside a Treble object.
+ */
+
+struct LWOmx : public BnOMX {
+ sp<IOmx> mBase;
+ LWOmx(sp<IOmx> const& base);
+ status_t listNodes(List<IOMX::ComponentInfo>* list) override;
+ status_t allocateNode(
+ char const* name,
+ sp<IOMXObserver> const& observer,
+ sp<IOMXNode>* omxNode) override;
+ status_t createInputSurface(
+ sp<::android::IGraphicBufferProducer>* bufferProducer,
+ sp<::android::IGraphicBufferSource>* bufferSource) override;
+};
+
+} // namespace utils
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMX_H
diff --git a/include/media/omx/1.0/WOmxBufferSource.h b/include/media/omx/1.0/WOmxBufferSource.h
new file mode 100644
index 0000000..86322da
--- /dev/null
+++ b/include/media/omx/1.0/WOmxBufferSource.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXBUFFERSOURCE_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXBUFFERSOURCE_H
+
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+#include <binder/Binder.h>
+#include <OMXFenceParcelable.h>
+
+#include <android/hardware/media/omx/1.0/IOmxBufferSource.h>
+#include <android/BnOMXBufferSource.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace utils {
+
+using ::android::hardware::media::omx::V1_0::IOmxBufferSource;
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::hidl_handle;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+using ::android::OMXFenceParcelable;
+using ::android::IOMXBufferSource;
+using ::android::BnOMXBufferSource;
+
+/**
+ * Wrapper classes for conversion
+ * ==============================
+ *
+ * Naming convention:
+ * - LW = Legacy Wrapper --- It wraps a Treble object inside a legacy object.
+ * - TW = Treble Wrapper --- It wraps a legacy object inside a Treble object.
+ */
+
+struct LWOmxBufferSource : public BnOMXBufferSource {
+ sp<IOmxBufferSource> mBase;
+ LWOmxBufferSource(sp<IOmxBufferSource> const& base);
+ ::android::binder::Status onOmxExecuting() override;
+ ::android::binder::Status onOmxIdle() override;
+ ::android::binder::Status onOmxLoaded() override;
+ ::android::binder::Status onInputBufferAdded(int32_t bufferID) override;
+ ::android::binder::Status onInputBufferEmptied(
+ int32_t bufferID, OMXFenceParcelable const& fenceParcel) override;
+};
+
+struct TWOmxBufferSource : public IOmxBufferSource {
+ sp<IOMXBufferSource> mBase;
+ TWOmxBufferSource(sp<IOMXBufferSource> const& base);
+ Return<void> onOmxExecuting() override;
+ Return<void> onOmxIdle() override;
+ Return<void> onOmxLoaded() override;
+ Return<void> onInputBufferAdded(uint32_t buffer) override;
+ Return<void> onInputBufferEmptied(
+ uint32_t buffer, hidl_handle const& fence) override;
+};
+
+
+} // namespace utils
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXBUFFERSOURCE_H
diff --git a/include/media/omx/1.0/WOmxNode.h b/include/media/omx/1.0/WOmxNode.h
new file mode 100644
index 0000000..eebc8c6
--- /dev/null
+++ b/include/media/omx/1.0/WOmxNode.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXNODE_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXNODE_H
+
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+#include <utils/Errors.h>
+#include <media/IOMX.h>
+#include <hidl/HybridInterface.h>
+
+#include <android/hardware/media/omx/1.0/IOmxNode.h>
+#include <android/hardware/media/omx/1.0/IOmxObserver.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace utils {
+
+using ::android::hardware::media::omx::V1_0::CodecBuffer;
+using ::android::hardware::media::omx::V1_0::IOmxBufferSource;
+using ::android::hardware::media::omx::V1_0::IOmxNode;
+using ::android::hardware::media::omx::V1_0::IOmxObserver;
+using ::android::hardware::media::omx::V1_0::Message;
+using ::android::hardware::media::omx::V1_0::PortMode;
+using ::android::hardware::media::omx::V1_0::Status;
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+/**
+ * Wrapper classes for conversion
+ * ==============================
+ *
+ * Naming convention:
+ * - LW = Legacy Wrapper --- It wraps a Treble object inside a legacy object.
+ * - TW = Treble Wrapper --- It wraps a legacy object inside a Treble object.
+ */
+
+struct LWOmxNode : public H2BConverter<IOmxNode, IOMXNode, BnOMXNode> {
+ LWOmxNode(sp<IOmxNode> const& base) : CBase(base) {}
+ status_t freeNode() override;
+ status_t sendCommand(
+ OMX_COMMANDTYPE cmd, OMX_S32 param) override;
+ status_t getParameter(
+ OMX_INDEXTYPE index, void *params, size_t size) override;
+ status_t setParameter(
+ OMX_INDEXTYPE index, const void *params, size_t size) override;
+ status_t getConfig(
+ OMX_INDEXTYPE index, void *params, size_t size) override;
+ status_t setConfig(
+ OMX_INDEXTYPE index, const void *params, size_t size) override;
+ status_t setPortMode(
+ OMX_U32 port_index, IOMX::PortMode mode) override;
+ status_t prepareForAdaptivePlayback(
+ OMX_U32 portIndex, OMX_BOOL enable,
+ OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight) override;
+ status_t configureVideoTunnelMode(
+ OMX_U32 portIndex, OMX_BOOL tunneled,
+ OMX_U32 audioHwSync, native_handle_t **sidebandHandle) override;
+ status_t getGraphicBufferUsage(
+ OMX_U32 port_index, OMX_U32* usage) override;
+ status_t setInputSurface(
+ const sp<IOMXBufferSource> &bufferSource) override;
+ status_t allocateSecureBuffer(
+ OMX_U32 port_index, size_t size, buffer_id *buffer,
+ void **buffer_data, sp<NativeHandle> *native_handle) override;
+ status_t useBuffer(
+ OMX_U32 port_index, const OMXBuffer &omxBuf,
+ buffer_id *buffer) override;
+ status_t freeBuffer(
+ OMX_U32 port_index, buffer_id buffer) override;
+ status_t fillBuffer(
+ buffer_id buffer, const OMXBuffer &omxBuf,
+ int fenceFd = -1) override;
+ status_t emptyBuffer(
+ buffer_id buffer, const OMXBuffer &omxBuf,
+ OMX_U32 flags, OMX_TICKS timestamp, int fenceFd = -1) override;
+ status_t getExtensionIndex(
+ const char *parameter_name,
+ OMX_INDEXTYPE *index) override;
+ status_t dispatchMessage(const omx_message &msg) override;
+};
+
+struct TWOmxNode : public IOmxNode {
+ sp<IOMXNode> mBase;
+ TWOmxNode(sp<IOMXNode> const& base);
+
+ Return<Status> freeNode() override;
+ Return<Status> sendCommand(uint32_t cmd, int32_t param) override;
+ Return<void> getParameter(
+ uint32_t index, hidl_vec<uint8_t> const& inParams,
+ getParameter_cb _hidl_cb) override;
+ Return<Status> setParameter(
+ uint32_t index, hidl_vec<uint8_t> const& params) override;
+ Return<void> getConfig(
+ uint32_t index, hidl_vec<uint8_t> const& inConfig,
+ getConfig_cb _hidl_cb) override;
+ Return<Status> setConfig(
+ uint32_t index, hidl_vec<uint8_t> const& config) override;
+ Return<Status> setPortMode(uint32_t portIndex, PortMode mode) override;
+ Return<Status> prepareForAdaptivePlayback(
+ uint32_t portIndex, bool enable,
+ uint32_t maxFrameWidth, uint32_t maxFrameHeight) override;
+ Return<void> configureVideoTunnelMode(
+ uint32_t portIndex, bool tunneled, uint32_t audioHwSync,
+ configureVideoTunnelMode_cb _hidl_cb) override;
+ Return<void> getGraphicBufferUsage(
+ uint32_t portIndex,
+ getGraphicBufferUsage_cb _hidl_cb) override;
+ Return<Status> setInputSurface(
+ sp<IOmxBufferSource> const& bufferSource) override;
+ Return<void> allocateSecureBuffer(
+ uint32_t portIndex, uint64_t size,
+ allocateSecureBuffer_cb _hidl_cb) override;
+ Return<void> useBuffer(
+ uint32_t portIndex, CodecBuffer const& codecBuffer,
+ useBuffer_cb _hidl_cb) override;
+ Return<Status> freeBuffer(uint32_t portIndex, uint32_t buffer) override;
+ Return<Status> fillBuffer(
+ uint32_t buffer, CodecBuffer const& codecBuffer,
+ const hidl_handle& fence) override;
+ Return<Status> emptyBuffer(
+ uint32_t buffer, CodecBuffer const& codecBuffer,
+ uint32_t flags, uint64_t timestampUs,
+ hidl_handle const& fence) override;
+ Return<void> getExtensionIndex(
+ hidl_string const& parameterName,
+ getExtensionIndex_cb _hidl_cb) override;
+ Return<Status> dispatchMessage(Message const& msg) override;
+};
+
+} // namespace utils
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXNODE_H
diff --git a/include/media/omx/1.0/WOmxObserver.h b/include/media/omx/1.0/WOmxObserver.h
new file mode 100644
index 0000000..d442218
--- /dev/null
+++ b/include/media/omx/1.0/WOmxObserver.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXOBSERVER_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXOBSERVER_H
+
+#include <list>
+
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+#include <media/IOMX.h>
+
+#include <android/hardware/media/omx/1.0/IOmxObserver.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace utils {
+
+using ::android::hardware::media::omx::V1_0::IOmxObserver;
+using ::android::hardware::media::omx::V1_0::Message;
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+using ::android::IOMXObserver;
+using ::android::BnOMXObserver;
+using ::android::omx_message;
+
+/**
+ * Wrapper classes for conversion
+ * ==============================
+ *
+ * Naming convention:
+ * - LW = Legacy Wrapper --- It wraps a Treble object inside a legacy object.
+ * - TW = Treble Wrapper --- It wraps a legacy object inside a Treble object.
+ */
+
+struct LWOmxObserver : public BnOMXObserver {
+ sp<IOmxObserver> mBase;
+ LWOmxObserver(sp<IOmxObserver> const& base);
+ void onMessages(std::list<omx_message> const& lMessages) override;
+};
+
+struct TWOmxObserver : public IOmxObserver {
+ sp<IOMXObserver> mBase;
+ TWOmxObserver(sp<IOMXObserver> const& base);
+ Return<void> onMessages(const hidl_vec<Message>& tMessages) override;
+};
+
+} // namespace utils
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXOBSERVER_H
diff --git a/include/media/vndk/xmlparser/1.0/MediaCodecsXmlParser.h b/include/media/vndk/xmlparser/1.0/MediaCodecsXmlParser.h
new file mode 100644
index 0000000..b324cd8
--- /dev/null
+++ b/include/media/vndk/xmlparser/1.0/MediaCodecsXmlParser.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_CODECS_XML_PARSER_H_
+
+#define MEDIA_CODECS_XML_PARSER_H_
+
+#include <map>
+#include <vector>
+
+#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/foundation/AString.h>
+
+#include <sys/types.h>
+#include <utils/Errors.h>
+#include <utils/Vector.h>
+#include <utils/StrongPointer.h>
+
+namespace android {
+
+struct AMessage;
+
+// Quirk still supported, even though deprecated
+enum Quirks {
+ kRequiresAllocateBufferOnInputPorts = 1,
+ kRequiresAllocateBufferOnOutputPorts = 2,
+
+ kQuirksMask = kRequiresAllocateBufferOnInputPorts
+ | kRequiresAllocateBufferOnOutputPorts,
+};
+
+// Lightweight struct for querying components.
+struct TypeInfo {
+ AString mName;
+ std::map<AString, AString> mStringFeatures;
+ std::map<AString, bool> mBoolFeatures;
+ std::map<AString, AString> mDetails;
+};
+
+struct ProfileLevel {
+ uint32_t mProfile;
+ uint32_t mLevel;
+};
+
+struct CodecInfo {
+ std::vector<TypeInfo> mTypes;
+ std::vector<ProfileLevel> mProfileLevels;
+ std::vector<uint32_t> mColorFormats;
+ uint32_t mFlags;
+ bool mIsEncoder;
+};
+
+class MediaCodecsXmlParser {
+public:
+ MediaCodecsXmlParser();
+ ~MediaCodecsXmlParser();
+
+ void getGlobalSettings(std::map<AString, AString> *settings) const;
+
+ status_t getCodecInfo(const char *name, CodecInfo *info) const;
+
+ status_t getQuirks(const char *name, std::vector<AString> *quirks) const;
+
+private:
+ enum Section {
+ SECTION_TOPLEVEL,
+ SECTION_SETTINGS,
+ SECTION_DECODERS,
+ SECTION_DECODER,
+ SECTION_DECODER_TYPE,
+ SECTION_ENCODERS,
+ SECTION_ENCODER,
+ SECTION_ENCODER_TYPE,
+ SECTION_INCLUDE,
+ };
+
+ status_t mInitCheck;
+ Section mCurrentSection;
+ bool mUpdate;
+ Vector<Section> mPastSections;
+ int32_t mDepth;
+ AString mHrefBase;
+
+ std::map<AString, AString> mGlobalSettings;
+
+ // name -> CodecInfo
+ std::map<AString, CodecInfo> mCodecInfos;
+ std::map<AString, std::vector<AString>> mQuirks;
+ AString mCurrentName;
+ std::vector<TypeInfo>::iterator mCurrentType;
+
+ status_t initCheck() const;
+ void parseTopLevelXMLFile(const char *path, bool ignore_errors = false);
+
+ void parseXMLFile(const char *path);
+
+ static void StartElementHandlerWrapper(
+ void *me, const char *name, const char **attrs);
+
+ static void EndElementHandlerWrapper(void *me, const char *name);
+
+ void startElementHandler(const char *name, const char **attrs);
+ void endElementHandler(const char *name);
+
+ status_t includeXMLFile(const char **attrs);
+ status_t addSettingFromAttributes(const char **attrs);
+ status_t addMediaCodecFromAttributes(bool encoder, const char **attrs);
+ void addMediaCodec(bool encoder, const char *name, const char *type = NULL);
+
+ status_t addQuirk(const char **attrs);
+ status_t addTypeFromAttributes(const char **attrs, bool encoder);
+ status_t addLimit(const char **attrs);
+ status_t addFeature(const char **attrs);
+ void addType(const char *name);
+
+ DISALLOW_EVIL_CONSTRUCTORS(MediaCodecsXmlParser);
+};
+
+} // namespace android
+
+#endif // MEDIA_CODECS_XML_PARSER_H_
+
diff --git a/media/audioserver/Android.mk b/media/audioserver/Android.mk
index c620e7c..afd1189 100644
--- a/media/audioserver/Android.mk
+++ b/media/audioserver/Android.mk
@@ -6,16 +6,20 @@
main_audioserver.cpp
LOCAL_SHARED_LIBRARIES := \
+ libaaudioservice \
libaudioflinger \
libaudiopolicyservice \
libbinder \
libcutils \
liblog \
libmedialogservice \
+ libnbaio \
libradioservice \
libsoundtriggerservice \
- libutils
+ libutils \
+ libhwbinder
+# TODO oboeservice is the old folder name for aaudioservice. It will be changed.
LOCAL_C_INCLUDES := \
frameworks/av/services/audioflinger \
frameworks/av/services/audiopolicy \
@@ -24,8 +28,12 @@
frameworks/av/services/audiopolicy/engine/interface \
frameworks/av/services/audiopolicy/service \
frameworks/av/services/medialog \
+ frameworks/av/services/oboeservice \
frameworks/av/services/radio \
frameworks/av/services/soundtrigger \
+ frameworks/av/media/libaaudio/include \
+ frameworks/av/media/libaaudio/src \
+ frameworks/av/media/libaaudio/src/binding \
$(call include-path-for, audio-utils) \
external/sonic \
diff --git a/media/audioserver/audioserver.rc b/media/audioserver/audioserver.rc
index 80f78b6..9d42bce 100644
--- a/media/audioserver/audioserver.rc
+++ b/media/audioserver/audioserver.rc
@@ -5,3 +5,9 @@
group audio camera drmrpc inet media mediadrm net_bt net_bt_admin net_bw_acct
ioprio rt 4
writepid /dev/cpuset/foreground/tasks /dev/stune/foreground/tasks
+ onrestart restart audio-hal-2-0
+
+on property:vts.native_server.on=1
+ stop audioserver
+on property:vts.native_server.on=0
+ start audioserver
diff --git a/media/audioserver/main_audioserver.cpp b/media/audioserver/main_audioserver.cpp
index 4a7a988..ee02d23 100644
--- a/media/audioserver/main_audioserver.cpp
+++ b/media/audioserver/main_audioserver.cpp
@@ -27,9 +27,14 @@
#include <binder/IServiceManager.h>
#include <utils/Log.h>
+// FIXME: remove when BUG 31748996 is fixed
+#include <hwbinder/IPCThreadState.h>
+#include <hwbinder/ProcessState.h>
+
// from LOCAL_C_INCLUDES
#include "AudioFlinger.h"
#include "AudioPolicyService.h"
+#include "AAudioService.h"
#include "MediaLogService.h"
#include "RadioService.h"
#include "SoundTriggerHwService.h"
@@ -127,9 +132,14 @@
ALOGI("ServiceManager: %p", sm.get());
AudioFlinger::instantiate();
AudioPolicyService::instantiate();
+ AAudioService::instantiate();
RadioService::instantiate();
SoundTriggerHwService::instantiate();
ProcessState::self()->startThreadPool();
+
+// FIXME: remove when BUG 31748996 is fixed
+ android::hardware::ProcessState::self()->startThreadPool();
+
IPCThreadState::self()->joinThreadPool();
}
}
diff --git a/media/img_utils/src/TiffWriter.cpp b/media/img_utils/src/TiffWriter.cpp
index 564474f..1711242 100644
--- a/media/img_utils/src/TiffWriter.cpp
+++ b/media/img_utils/src/TiffWriter.cpp
@@ -350,7 +350,7 @@
if (nextIfd == NULL) {
break;
}
- ifd = nextIfd;
+ ifd = std::move(nextIfd);
}
return ifd;
}
diff --git a/media/libaaudio/Android.bp b/media/libaaudio/Android.bp
new file mode 100644
index 0000000..f539ba9
--- /dev/null
+++ b/media/libaaudio/Android.bp
@@ -0,0 +1,28 @@
+// Copyright (C) 2016 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+ndk_headers {
+ name: "libAAudio_headers",
+ from: "include",
+ to: "",
+ srcs: ["include/aaudio/*.h"],
+ license: "include/aaudio/NOTICE",
+}
+
+ndk_library {
+ name: "libaaudio",
+ symbol_file: "libaaudio.map.txt",
+ first_version: "26",
+ unversioned_until: "current",
+}
diff --git a/media/libaaudio/Android.mk b/media/libaaudio/Android.mk
new file mode 100644
index 0000000..5053e7d
--- /dev/null
+++ b/media/libaaudio/Android.mk
@@ -0,0 +1 @@
+include $(call all-subdir-makefiles)
diff --git a/media/libaaudio/Doxyfile b/media/libaaudio/Doxyfile
new file mode 100644
index 0000000..e2c4960
--- /dev/null
+++ b/media/libaaudio/Doxyfile
@@ -0,0 +1,2312 @@
+# Doxyfile 1.8.6
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a double hash (##) is considered a comment and is placed in
+# front of the TAG it is preceding.
+#
+# All text after a single hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (\" \").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all text
+# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
+# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
+# for the list of possible encodings.
+# The default value is: UTF-8.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+# double-quotes, unless you are using Doxywizard) that should identify the
+# project for which the documentation is generated. This name is used in the
+# title of most generated pages and in a few other places.
+# The default value is: My Project.
+
+PROJECT_NAME = "AAudio"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.
+
+PROJECT_NUMBER =
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give viewer a
+# quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF =
+
+# With the PROJECT_LOGO tag one can specify a logo or icon that is included in
+# the documentation. The maximum height of the logo should not exceed 55 pixels
+# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
+# to the output directory.
+
+PROJECT_LOGO =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.
+
+OUTPUT_DIRECTORY =
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
+# directories (in 2 levels) under the output directory of each output format and
+# will distribute the generated files over these directories. Enabling this
+# option can be useful when feeding doxygen a huge amount of source files, where
+# putting all generated files in the same directory would otherwise cause
+# performance problems for the file system.
+# The default value is: NO.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
+# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
+# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
+# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
+# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
+# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
+# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
+# Ukrainian and Vietnamese.
+# The default value is: English.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member
+# descriptions after the members that are listed in the file and class
+# documentation (similar to Javadoc). Set to NO to disable this.
+# The default value is: YES.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief
+# description of a member or function before the detailed description
+#
+# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+# The default value is: YES.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator that is
+# used to form the text in various listings. Each string in this list, if found
+# as the leading text of the brief description, will be stripped from the text
+# and the result, after processing the whole list, is used as the annotated
+# text. Otherwise, the brief description is used as-is. If left blank, the
+# following values are used ($name is automatically replaced with the name of
+# the entity):The $name class, The $name widget, The $name file, is, provides,
+# specifies, contains, represents, a, an and the.
+
+ABBREVIATE_BRIEF =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# doxygen will generate a detailed section even if there is only a brief
+# description.
+# The default value is: NO.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+# The default value is: NO.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path
+# before files name in the file list and in the header files. If set to NO the
+# shortest path that makes the file name unique will be used
+# The default value is: YES.
+
+FULL_PATH_NAMES = YES
+
+# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
+# Stripping is only done if one of the specified strings matches the left-hand
+# part of the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the path to
+# strip.
+#
+# Note that you can specify absolute paths here, but also relative paths, which
+# will be relative from the directory where doxygen is started.
+# This tag requires that the tag FULL_PATH_NAMES is set to YES.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+# path mentioned in the documentation of a class, which tells the reader which
+# header file to include in order to use a class. If left blank only the name of
+# the header file containing the class definition is used. Otherwise one should
+# specify the list of include paths that are normally passed to the compiler
+# using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+# less readable) file names. This can be useful if your file system doesn't
+# support long names like on DOS, Mac, or CD-ROM.
+# The default value is: NO.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
+# first line (until the first dot) of a Javadoc-style comment as the brief
+# description. If set to NO, the Javadoc-style will behave just like regular Qt-
+# style comments (thus requiring an explicit @brief command for a brief
+# description.)
+# The default value is: NO.
+
+JAVADOC_AUTOBRIEF = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+# line (until the first dot) of a Qt-style comment as the brief description. If
+# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+# requiring an explicit \brief command for a brief description.)
+# The default value is: NO.
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
+# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
+# a brief description. This used to be the default behavior. The new default is
+# to treat a multi-line C++ comment block as a detailed description. Set this
+# tag to YES if you prefer the old behavior instead.
+#
+# Note that setting this tag to YES also means that rational rose comments are
+# not recognized any more.
+# The default value is: NO.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+# documentation from any documented member that it re-implements.
+# The default value is: YES.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a
+# new page for each member. If set to NO, the documentation of a member will be
+# part of the file/class/namespace that contains it.
+# The default value is: NO.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
+# uses this value to replace tabs by spaces in code fragments.
+# Minimum value: 1, maximum value: 16, default value: 4.
+
+TAB_SIZE = 4
+
+# This tag can be used to specify a number of aliases that act as commands in
+# the documentation. An alias has the form:
+# name=value
+# For example adding
+# "sideeffect=@par Side Effects:\n"
+# will allow you to put the command \sideeffect (or @sideeffect) in the
+# documentation, which will result in a user-defined paragraph with heading
+# "Side Effects:". You can put \n's in the value part of an alias to insert
+# newlines.
+
+ALIASES =
+
+# This tag can be used to specify a number of word-keyword mappings (TCL only).
+# A mapping has the form "name=value". For example adding "class=itcl::class"
+# will allow you to use the command class in the itcl::class meaning.
+
+TCL_SUBST =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C. For
+# instance, some of the names that are used will be different. The list of all
+# members will be omitted, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_FOR_C = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
+# Python sources only. Doxygen will then generate output that is more tailored
+# for that language. For instance, namespaces will be presented as packages,
+# qualified scopes will look different, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources. Doxygen will then generate output that is tailored for Fortran.
+# The default value is: NO.
+
+OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for VHDL.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension, and
+# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
+# C#, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL. For instance to make
+# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
+# (default is Fortran), use: inc=Fortran f=C.
+#
+# Note: For files without extension you can use no_extension as a placeholder.
+#
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen.
+
+EXTENSION_MAPPING =
+
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibilities issues.
+# The default value is: YES.
+
+MARKDOWN_SUPPORT = YES
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word
+# or globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+# The default value is: NO.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen to replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+# The default value is: NO.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES to allow class member groups of the same type
+# (for instance a group of public functions) to be put as a subgroup of that
+# type (e.g. under the Public Functions section). Set it to NO to prevent
+# subgrouping. Alternatively, this can be done per class using the
+# \nosubgrouping command.
+# The default value is: YES.
+
+SUBGROUPING = YES
+
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
+# are shown inside the group in which they are included (e.g. using \ingroup)
+# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
+# and RTF).
+#
+# Note that this feature does not work in combination with
+# SEPARATE_MEMBER_PAGES.
+# The default value is: NO.
+
+INLINE_GROUPED_CLASSES = NO
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
+# with only public data fields or simple typedef fields will be shown inline in
+# the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO, structs, classes, and unions are shown on a separate page (for HTML and
+# Man pages) or section (for LaTeX and RTF).
+# The default value is: NO.
+
+INLINE_SIMPLE_STRUCTS = NO
+
+# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
+# enum is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically be
+# useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+# The default value is: NO.
+
+TYPEDEF_HIDES_STRUCT = NO
+
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can be
+# an expensive process and often the same symbol appears multiple times in the
+# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
+# doxygen will become slower. If the cache is too large, memory is wasted. The
+# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+# symbols. At the end of a run doxygen will report the cache usage and suggest
+# the optimal cache size from a speed point of view.
+# Minimum value: 0, maximum value: 9, default value: 0.
+
+LOOKUP_CACHE_SIZE = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
+
+EXTRACT_ALL = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will
+# be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
+# scope will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PACKAGE = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file will be
+# included in the documentation.
+# The default value is: NO.
+
+EXTRACT_STATIC = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO
+# only classes defined in header files are included. Does not have any effect
+# for Java sources.
+# The default value is: YES.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. When set to YES local methods,
+# which are defined in the implementation section but not in the interface are
+# included in the documentation. If set to NO only methods in the interface are
+# included.
+# The default value is: NO.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base name of
+# the file that contains the anonymous namespace. By default anonymous namespace
+# are hidden.
+# The default value is: NO.
+
+EXTRACT_ANON_NSPACES = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+# undocumented members inside documented classes or files. If set to NO these
+# members will be included in the various overviews, but no documentation
+# section is generated. This option has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy. If set
+# to NO these classes will be included in the various overviews. This option has
+# no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+# (class|struct|union) declarations. If set to NO these declarations will be
+# included in the documentation.
+# The default value is: NO.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+# documentation blocks found inside the body of a function. If set to NO these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
+# names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+# The default value is: system dependent.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+# their full class and namespace scopes in the documentation. If set to YES the
+# scope will be hidden.
+# The default value is: NO.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+# the files that are included by a file in the documentation of that file.
+# The default value is: YES.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
+# grouped member an include statement to the documentation, telling the reader
+# which file to include in order to use the member.
+# The default value is: NO.
+
+SHOW_GROUPED_MEMB_INC = NO
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
+# files with double quotes in the documentation rather than with sharp brackets.
+# The default value is: NO.
+
+FORCE_LOCAL_INCLUDES = NO
+
+# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
+# documentation for inline members.
+# The default value is: YES.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+# (detailed) documentation of file and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order.
+# The default value is: YES.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+# descriptions of file, namespace and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order. Note that
+# this will also influence the order of the classes in the class list.
+# The default value is: NO.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+# (brief and detailed) documentation of class members so that constructors and
+# destructors are listed first. If set to NO the constructors will appear in the
+# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
+# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
+# member documentation.
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
+# detailed member documentation.
+# The default value is: NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
+# of group names into alphabetical order. If set to NO the group names will
+# appear in their defined order.
+# The default value is: NO.
+
+SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
+# fully-qualified names, including namespaces. If set to NO, the class list will
+# be sorted only by class name, not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the alphabetical
+# list.
+# The default value is: NO.
+
+SORT_BY_SCOPE_NAME = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+# type resolution of all parameters of a function it will reject a match between
+# the prototype and the implementation of a member function even if there is
+# only one candidate or it is obvious which candidate to choose by doing a
+# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+# accept a match between prototype and implementation in such cases.
+# The default value is: NO.
+
+STRICT_PROTO_MATCHING = NO
+
+# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the
+# todo list. This list is created by putting \todo commands in the
+# documentation.
+# The default value is: YES.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the
+# test list. This list is created by putting \test commands in the
+# documentation.
+# The default value is: YES.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if <section_label> ... \endif and \cond <section_label>
+# ... \endcond blocks.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES the list
+# will mention the files that were used to generate the documentation.
+# The default value is: YES.
+
+SHOW_USED_FILES = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command command input-file, where command is the value of the
+# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
+# by doxygen. Whatever the program writes to standard output is used as the file
+# version. For an example see the documentation.
+
+FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
+
+LAYOUT_FILE =
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. Do not use file names with spaces, bibtex cannot handle them. See
+# also \cite for info how to create references.
+
+CITE_BIB_FILES =
+
+#---------------------------------------------------------------------------
+# Configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
+
+QUIET = YES
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
+
+WARNINGS = YES
+
+# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some parameters
+# in a documented function, or documenting parameters that don't exist or using
+# markup commands wrongly.
+# The default value is: YES.
+
+WARN_IF_DOC_ERROR = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+# are documented, but have no documentation for their parameters or return
+# value. If set to NO doxygen will only warn about wrong or incomplete parameter
+# documentation, but not about the absence of documentation.
+# The default value is: NO.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that doxygen
+# can produce. The string should contain the $file, $line, and $text tags, which
+# will be replaced by the file and line number from which the warning originated
+# and the warning text. Optionally the format may contain $version, which will
+# be replaced by the version of the file (if it could be obtained via
+# FILE_VERSION_FILTER)
+# The default value is: $file:$line: $text.
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning and error
+# messages should be written. If left blank the output is written to standard
+# error (stderr).
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces.
+# Note: If this tag is empty the current directory is searched.
+
+INPUT = include/aaudio/AAudio.h \
+ src/legacy/AudioStreamTrack.h \
+ src/legacy/AudioStreamRecord.h \
+ src/legacy/AAudioLegacy.h \
+ src/core/AudioStreamBuilder.h \
+ src/core/AudioStream.h \
+ src/utility/HandleTracker.h \
+ src/utility/MonotonicCounter.h \
+ src/utility/AudioClock.h \
+ src/utility/AAudioUtilities.h
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+# documentation (see: http://www.gnu.org/software/libiconv) for the list of
+# possible encodings.
+# The default value is: UTF-8.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank the
+# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
+# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
+# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
+# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
+# *.qsf, *.as and *.js.
+
+FILE_PATTERNS =
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories should
+# be searched for input files as well.
+# The default value is: NO.
+
+RECURSIVE = NO
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+#
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+# The default value is: NO.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
+
+EXCLUDE_PATTERNS =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER ) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
+
+FILTER_SOURCE_FILES = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on for instance GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# classes and enums directly into the documentation.
+# The default value is: NO.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments. Normal C, C++ and
+# Fortran comments will always remain visible.
+# The default value is: YES.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# function all documented functions referencing it will be listed.
+# The default value is: NO.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
+
+REFERENCES_RELATION = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES, then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+SOURCE_TOOLTIPS = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see http://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
+
+ALPHABETICAL_INDEX = YES
+
+# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
+# which the alphabetical index list will be split.
+# Minimum value: 1, maximum value: 20, default value: 5.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output
+# The default value is: YES.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# To get valid HTML the header file that includes any scripts and style sheets
+# that doxygen needs, which is dependent on the configuration options used (e.g.
+# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
+# default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_STYLESHEET =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user-
+# defined cascading style sheet that is included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet file to the output directory. For an example
+# see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_STYLESHEET =
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_FILES =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the stylesheet and background images according to
+# this color. Hue is specified as an angle on a colorwheel, see
+# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
+# purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use grayscales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting this
+# to NO can help when comparing the output of multiple runs.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_TIMESTAMP = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries 1 will produce a full collapsed tree by default. 0 is a special value
+# representing an infinite number of entries and will result in a full expanded
+# tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see: http://developer.apple.com/tools/xcode/), introduced with
+# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
+# Makefile in the HTML output directory. Running make will produce the docset in
+# that directory and running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_DOCSET = NO
+
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_NAME = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
+# Windows.
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_HTMLHELP = NO
+
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
+# written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_FILE =
+
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler ( hhc.exe). If non-empty
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+HHC_LOCATION =
+
+# The GENERATE_CHI flag controls if a separate .chi index file is generated (
+# YES) or that it should be included in the master .chm file ( NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+GENERATE_CHI = NO
+
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_INDEX_ENCODING =
+
+# The BINARY_TOC flag controls whether a binary table of contents is generated (
+# YES) or a normal table of contents ( NO) in the .chm file.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+TOC_EXPAND = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_NAMESPACE = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
+# folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. Qt Help Project / Filter Attributes (see:
+# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_SECT_FILTER_ATTRS =
+
+# The QHG_LOCATION tag can be used to specify the location of Qt's
+# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
+# generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHG_LOCATION =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated, together with the HTML files, they form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files needs
+# to be copied into the plugins directory of eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
+
+ECLIPSE_DOC_ID = org.doxygen.Project
+
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
+# of each HTML page. A value of NO enables the index and the value YES disables
+# it. Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+DISABLE_INDEX = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_TREEVIEW = NO
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+TREEVIEW_WIDTH = 250
+
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes have effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client side Javascript for the rendering
+# instead of using prerendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want the formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
+# key> to jump into the search results window, the results can be navigated
+# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
+# the search. The filter options can be selected when the cursor is inside the
+# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
+# to select a filter and <Enter> or <escape> to activate or cancel the filter
+# option.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+SEARCHENGINE = NO
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using Javascript. There
+# are two flavours of web server based searching depending on the
+# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for
+# searching and an index file used by the script. When EXTERNAL_SEARCH is
+# enabled the indexing and searching needs to be provided by external tools. See
+# the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SERVER_BASED_SEARCH = NO
+
+# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
+# search results.
+#
+# Doxygen ships with an example indexer ( doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/).
+#
+# See the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+#
+# Doxygen ships with an example indexer ( doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/). See the section "External Indexing and
+# Searching" for details.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHENGINE_URL =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+# The default file is: searchdata.xml.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHDATA_FILE = searchdata.xml
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH_ID =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of
+# the project to a relative location where the documentation can be found. The
+# format is:
+# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTRA_SEARCH_MAPPINGS =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output.
+# The default value is: YES.
+
+GENERATE_LATEX = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked.
+#
+# Note that when enabling USE_PDFLATEX this option is only used for generating
+# bitmaps for formulas in the HTML output, but not in the Makefile that is
+# written to the output directory.
+# The default file is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+# index for LaTeX.
+# The default file is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES doxygen generates more compact LaTeX
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used by the
+# printer.
+# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
+# 14 inches) and executive (7.25 x 10.5 inches).
+# The default value is: a4.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PAPER_TYPE = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+# that should be included in the LaTeX output. To get the times font for
+# instance you can specify
+# EXTRA_PACKAGES=times
+# If left blank no extra packages will be included.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+# generated LaTeX document. The header should contain everything until the first
+# chapter. If it is left blank doxygen will generate a standard header. See
+# section "Doxygen usage" for information on how to let doxygen write the
+# default header to a separate file.
+#
+# Note: Only use a user-defined header if you know what you are doing! The
+# following commands have a special meaning inside the header: $title,
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber. Doxygen will
+# replace them by respectively the title of the page, the current date and time,
+# only the current date, the version number of doxygen, the project name (see
+# PROJECT_NAME), or the project number (see PROJECT_NUMBER).
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HEADER =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
+# generated LaTeX document. The footer should contain everything after the last
+# chapter. If it is left blank doxygen will generate a standard footer.
+#
+# Note: Only use a user-defined footer if you know what you are doing!
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_FOOTER =
+
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the LATEX_OUTPUT output
+# directory. Note that the files will be copied as-is; there are no commands or
+# markers available.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_FILES =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
+# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
+# contain links (just like the HTML output) instead of page references. This
+# makes the output suitable for online browsing using a PDF viewer.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PDF_HYPERLINKS = YES
+
+# If the LATEX_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
+# the PDF file directly from the LaTeX files. Set this option to YES to get a
+# higher quality PDF documentation.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep running
+# if errors occur, instead of asking the user for help. This option is also used
+# when generating formulas in HTML.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BATCHMODE = NO
+
+# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
+# index chapters (such as File Index, Compound Index, etc.) in the output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HIDE_INDICES = NO
+
+# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
+# code with syntax highlighting in the LaTeX output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_SOURCE_CODE = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. See
+# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
+# The default value is: plain.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BIB_STYLE = plain
+
+#---------------------------------------------------------------------------
+# Configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES doxygen will generate RTF output. The
+# RTF output is optimized for Word 97 and may not look too pretty with other RTF
+# readers/editors.
+# The default value is: NO.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: rtf.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES doxygen generates more compact RTF
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
+# contain hyperlink fields. The RTF file will contain links (just like the HTML
+# output) instead of page references. This makes the output suitable for online
+# browsing using Word or some other Word compatible readers that support those
+# fields.
+#
+# Note: WordPad (write) and others do not support links.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's config
+# file, i.e. a series of assignments. You only have to provide replacements,
+# missing definitions are set to their default value.
+#
+# See also section "Doxygen usage" for information on how to generate the
+# default style sheet that doxygen normally uses.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an RTF document. Syntax is
+# similar to doxygen's config file. A template extensions file can be generated
+# using doxygen -e rtf extensionFile.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES doxygen will generate man pages for
+# classes and files.
+# The default value is: NO.
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it. A directory man3 will be created inside the directory specified by
+# MAN_OUTPUT.
+# The default directory is: man.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to the generated
+# man pages. In case the manual section does not start with a number, the number
+# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
+# optional.
+# The default value is: .3.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
+# will generate one additional man file for each entity documented in the real
+# man page(s). These additional files only source the real man page, but without
+# them the man command would be unable to find the correct page.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES doxygen will generate an XML file that
+# captures the structure of the code including all documentation.
+# The default value is: NO.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: xml.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify a XML schema, which can be used by a
+# validating XML parser to check the syntax of the XML files.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify a XML DTD, which can be used by a
+# validating XML parser to check the syntax of the XML files.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program
+# listings (including syntax highlighting and cross-referencing information) to
+# the XML output. Note that enabling this will significantly increase the size
+# of the XML output.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_DOCBOOK tag is set to YES doxygen will generate Docbook files
+# that can be used to generate PDF.
+# The default value is: NO.
+
+GENERATE_DOCBOOK = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it.
+# The default directory is: docbook.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_OUTPUT = docbook
+
+#---------------------------------------------------------------------------
+# Configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES doxygen will generate an AutoGen
+# Definitions (see http://autogen.sf.net) file that captures the structure of
+# the code including all documentation. Note that this feature is still
+# experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES doxygen will generate a Perl module
+# file that captures the structure of the code including all documentation.
+#
+# Note that this feature is still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES doxygen will generate the necessary
+# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
+# output from the Perl module output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be nicely
+# formatted so it can be parsed by a human reader. This is useful if you want to
+# understand what is going on. On the other hand, if this tag is set to NO the
+# size of the Perl module output will be much smaller and Perl will parse it
+# just the same.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file are
+# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
+# so different doxyrules.make files included by the same Makefile don't
+# overwrite each other's variables.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES doxygen will evaluate all
+# C-preprocessor directives found in the sources and include files.
+# The default value is: YES.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES doxygen will expand all macro names
+# in the source code. If set to NO only conditional compilation will be
+# performed. Macro expansion can be done in a controlled way by setting
+# EXPAND_ONLY_PREDEF to YES.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+MACRO_EXPANSION = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
+# the macro expansion is limited to the macros specified with the PREDEFINED and
+# EXPAND_AS_DEFINED tags.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES the includes files in the
+# INCLUDE_PATH will be searched if a #include is found.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by the
+# preprocessor.
+# This tag requires that the tag SEARCH_INCLUDES is set to YES.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will be
+# used.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that are
+# defined before the preprocessor is started (similar to the -D option of e.g.
+# gcc). The argument of the tag is a list of macros of the form: name or
+# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
+# is assumed. To prevent a macro definition from being undefined via #undef or
+# recursively expanded use the := operator instead of the = operator.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+PREDEFINED =
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
+# tag can be used to specify a list of macro names that should be expanded. The
+# macro definition that is found in the sources will be used. Use the PREDEFINED
+# tag if you want to use a different macro definition that overrules the
+# definition found in the source code.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
+# remove all references to function-like macros that are alone on a line, have an
+# all uppercase name, and do not end with a semicolon. Such function macros are
+# typically used for boiler-plate code, and will confuse the parser if not
+# removed.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES tag can be used to specify one or more tag files. For each tag
+# file the location of the external documentation should be added. The format of
+# a tag file without this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where loc1 and loc2 can be relative or absolute paths or URLs. See the
+# section "Linking to external documentation" for more information about the use
+# of tag files.
+# Note: Each tag file must have an unique name (where the name does NOT include
+# the path). If a tag file is not located in the directory in which doxygen is
+# run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
+# tag file that is based on the input files it reads. See section "Linking to
+# external documentation" for more information about the usage of tag files.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external class will be listed in the
+# class index. If set to NO only the inherited external classes will be listed.
+# The default value is: NO.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed in
+# the modules index. If set to NO, only the current project's groups will be
+# listed.
+# The default value is: YES.
+
+EXTERNAL_GROUPS = YES
+
+# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in
+# the related pages index. If set to NO, only the current project's pages will
+# be listed.
+# The default value is: YES.
+
+EXTERNAL_PAGES = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of 'which perl').
+# The default file (with absolute path) is: /usr/bin/perl.
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram
+# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
+# NO turns the diagrams off. Note that this option also works with HAVE_DOT
+# disabled, but it is recommended to install and use dot, since it yields more
+# powerful graphs.
+# The default value is: YES.
+
+CLASS_DIAGRAMS = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see:
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH =
+
+# You can include diagrams made with dia in doxygen documentation. Doxygen will
+# then run dia to produce the diagram and insert it in the documentation. The
+# DIA_PATH tag allows you to specify the directory where the dia binary resides.
+# If left empty dia is assumed to be found in the default search path.
+
+DIA_PATH =
+
+# If set to YES, the inheritance and collaboration graphs will hide inheritance
+# and usage relations if the target is undocumented or is not a class.
+# The default value is: YES.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz (see:
+# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
+# Bell Labs. The other options in this section have no effect if this option is
+# set to NO
+# The default value is: NO.
+
+HAVE_DOT = NO
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
+# to run in parallel. When set to 0 doxygen will base this on the number of
+# processors available in the system. You can set it explicitly to a value
+# larger than 0 to get control over the balance between CPU load and processing
+# speed.
+# Minimum value: 0, maximum value: 32, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_NUM_THREADS = 0
+
+# When you want a differently looking font in the dot files that doxygen
+# generates you can specify the font name using DOT_FONTNAME. You need to make
+# sure dot is able to find the font, which can be done by putting it in a
+# standard location or by setting the DOTFONTPATH environment variable or by
+# setting DOT_FONTPATH to the directory containing the font.
+# The default value is: Helvetica.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTNAME = Helvetica
+
+# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
+# dot graphs.
+# Minimum value: 4, maximum value: 24, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTSIZE = 10
+
+# By default doxygen will tell dot to use the default font as specified with
+# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
+# the path where dot can find it using this tag.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
+# each documented class showing the direct and indirect inheritance relations.
+# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
+# graph for each documented class showing the direct and indirect implementation
+# dependencies (inheritance, containment, and class references variables) of the
+# class with other documented classes.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
+# groups, showing the direct groups dependencies.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LOOK = NO
+
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
+# class node. If there are many fields or methods and many nodes the graph may
+# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
+# number of items for each type to make the size more manageable. Set this to 0
+# for no limit. Note that the threshold may be exceeded by 50% before the limit
+# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
+# but if the number exceeds 15, the total amount of fields shown is limited to
+# 10.
+# Minimum value: 0, maximum value: 100, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LIMIT_NUM_FIELDS = 10
+
+# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
+# collaboration graphs will show the relations between templates and their
+# instances.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+TEMPLATE_RELATIONS = NO
+
+# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
+# YES then doxygen will generate a graph for each documented file showing the
+# direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDE_GRAPH = YES
+
+# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
+# set to YES then doxygen will generate a graph for each documented file showing
+# the direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a
+# graphical hierarchy of all classes instead of a textual one.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
+# dependencies a directory has on other directories in a graphical way. The
+# dependency relations are determined by the #include relations between the
+# files in the directories.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot.
+# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
+# to make the SVG files visible in IE 9+ (other browsers do not have this
+# requirement).
+# Possible values are: png, jpg, gif and svg.
+# The default value is: png.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_IMAGE_FORMAT = png
+
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+#
+# Note that this requires a modern browser other than Internet Explorer. Tested
+# and working are Firefox, Chrome, Safari, and Opera.
+# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
+# the SVG files visible. Older versions of IE do not have SVG support.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INTERACTIVE_SVG = NO
+
+# The DOT_PATH tag can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the \dotfile
+# command).
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOTFILE_DIRS =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the \mscfile
+# command).
+
+MSCFILE_DIRS =
+
+# The DIAFILE_DIRS tag can be used to specify one or more directories that
+# contain dia files that are included in the documentation (see the \diafile
+# command).
+
+DIAFILE_DIRS =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
+# that will be shown in the graph. If the number of nodes in a graph becomes
+# larger than this value, doxygen will truncate the graph, which is visualized
+# by representing a node as a red box. Note that if the number of direct
+# children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that
+# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+# Minimum value: 0, maximum value: 10000, default value: 50.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
+# generated by dot. A depth value of 3 means that only nodes reachable from the
+# root by following a path via at most 3 edges will be shown. Nodes that lay
+# further from the root node will be omitted. Note that setting this option to 1
+# or 2 may greatly reduce the computation time needed for large code bases. Also
+# note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+# Minimum value: 0, maximum value: 1000, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not seem
+# to support this out of the box.
+#
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10) support
+# this, this feature is disabled by default.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_MULTI_TARGETS = YES
+
+# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
+# explaining the meaning of the various boxes and arrows in the dot generated
+# graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES doxygen will remove the intermediate dot
+# files that are used to generate the various graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_CLEANUP = YES
diff --git a/media/libaaudio/Doxyfile.orig b/media/libaaudio/Doxyfile.orig
new file mode 100644
index 0000000..137facb
--- /dev/null
+++ b/media/libaaudio/Doxyfile.orig
@@ -0,0 +1,2303 @@
+# Doxyfile 1.8.6
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a double hash (##) is considered a comment and is placed in
+# front of the TAG it is preceding.
+#
+# All text after a single hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (\" \").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all text
+# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
+# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
+# for the list of possible encodings.
+# The default value is: UTF-8.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+# double-quotes, unless you are using Doxywizard) that should identify the
+# project for which the documentation is generated. This name is used in the
+# title of most generated pages and in a few other places.
+# The default value is: My Project.
+
+PROJECT_NAME = "My Project"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.
+
+PROJECT_NUMBER =
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give viewer a
+# quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF =
+
+# With the PROJECT_LOGO tag one can specify a logo or icon that is included in
+# the documentation. The maximum height of the logo should not exceed 55 pixels
+# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
+# to the output directory.
+
+PROJECT_LOGO =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.
+
+OUTPUT_DIRECTORY =
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
+# directories (in 2 levels) under the output directory of each output format and
+# will distribute the generated files over these directories. Enabling this
+# option can be useful when feeding doxygen a huge amount of source files, where
+# putting all generated files in the same directory would otherwise cause
+# performance problems for the file system.
+# The default value is: NO.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
+# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
+# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
+# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
+# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
+# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
+# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
+# Ukrainian and Vietnamese.
+# The default value is: English.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member
+# descriptions after the members that are listed in the file and class
+# documentation (similar to Javadoc). Set to NO to disable this.
+# The default value is: YES.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief
+# description of a member or function before the detailed description
+#
+# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+# The default value is: YES.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator that is
+# used to form the text in various listings. Each string in this list, if found
+# as the leading text of the brief description, will be stripped from the text
+# and the result, after processing the whole list, is used as the annotated
+# text. Otherwise, the brief description is used as-is. If left blank, the
+# following values are used ($name is automatically replaced with the name of
+# the entity):The $name class, The $name widget, The $name file, is, provides,
+# specifies, contains, represents, a, an and the.
+
+ABBREVIATE_BRIEF =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# doxygen will generate a detailed section even if there is only a brief
+# description.
+# The default value is: NO.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+# The default value is: NO.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path
+# before file names in the file list and in the header files. If set to NO the
+# shortest path that makes the file name unique will be used.
+# The default value is: YES.
+
+FULL_PATH_NAMES = YES
+
+# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
+# Stripping is only done if one of the specified strings matches the left-hand
+# part of the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the path to
+# strip.
+#
+# Note that you can specify absolute paths here, but also relative paths, which
+# will be relative from the directory where doxygen is started.
+# This tag requires that the tag FULL_PATH_NAMES is set to YES.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+# path mentioned in the documentation of a class, which tells the reader which
+# header file to include in order to use a class. If left blank only the name of
+# the header file containing the class definition is used. Otherwise one should
+# specify the list of include paths that are normally passed to the compiler
+# using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+# less readable) file names. This can be useful if your file system doesn't
+# support long names like on DOS, Mac, or CD-ROM.
+# The default value is: NO.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
+# first line (until the first dot) of a Javadoc-style comment as the brief
+# description. If set to NO, the Javadoc-style will behave just like regular Qt-
+# style comments (thus requiring an explicit @brief command for a brief
+# description.)
+# The default value is: NO.
+
+JAVADOC_AUTOBRIEF = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+# line (until the first dot) of a Qt-style comment as the brief description. If
+# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+# requiring an explicit \brief command for a brief description.)
+# The default value is: NO.
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
+# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
+# a brief description. This used to be the default behavior. The new default is
+# to treat a multi-line C++ comment block as a detailed description. Set this
+# tag to YES if you prefer the old behavior instead.
+#
+# Note that setting this tag to YES also means that Rational Rose comments are
+# not recognized any more.
+# The default value is: NO.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+# documentation from any documented member that it re-implements.
+# The default value is: YES.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a
+# new page for each member. If set to NO, the documentation of a member will be
+# part of the file/class/namespace that contains it.
+# The default value is: NO.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
+# uses this value to replace tabs by spaces in code fragments.
+# Minimum value: 1, maximum value: 16, default value: 4.
+
+TAB_SIZE = 4
+
+# This tag can be used to specify a number of aliases that act as commands in
+# the documentation. An alias has the form:
+# name=value
+# For example adding
+# "sideeffect=@par Side Effects:\n"
+# will allow you to put the command \sideeffect (or @sideeffect) in the
+# documentation, which will result in a user-defined paragraph with heading
+# "Side Effects:". You can put \n's in the value part of an alias to insert
+# newlines.
+
+ALIASES =
+
+# This tag can be used to specify a number of word-keyword mappings (TCL only).
+# A mapping has the form "name=value". For example adding "class=itcl::class"
+# will allow you to use the command class in the itcl::class meaning.
+
+TCL_SUBST =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C. For
+# instance, some of the names that are used will be different. The list of all
+# members will be omitted, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_FOR_C = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
+# Python sources only. Doxygen will then generate output that is more tailored
+# for that language. For instance, namespaces will be presented as packages,
+# qualified scopes will look different, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources. Doxygen will then generate output that is tailored for Fortran.
+# The default value is: NO.
+
+OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for VHDL.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension, and
+# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
+# C#, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL. For instance to make
+# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
+# (default is Fortran), use: inc=Fortran f=C.
+#
+# Note For files without extension you can use no_extension as a placeholder.
+#
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen.
+
+EXTENSION_MAPPING =
+
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibilities issues.
+# The default value is: YES.
+
+MARKDOWN_SUPPORT = YES
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word
+# or globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+# The default value is: NO.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen to replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+# The default value is: NO.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES to allow class member groups of the same type
+# (for instance a group of public functions) to be put as a subgroup of that
+# type (e.g. under the Public Functions section). Set it to NO to prevent
+# subgrouping. Alternatively, this can be done per class using the
+# \nosubgrouping command.
+# The default value is: YES.
+
+SUBGROUPING = YES
+
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
+# are shown inside the group in which they are included (e.g. using \ingroup)
+# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
+# and RTF).
+#
+# Note that this feature does not work in combination with
+# SEPARATE_MEMBER_PAGES.
+# The default value is: NO.
+
+INLINE_GROUPED_CLASSES = NO
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
+# with only public data fields or simple typedef fields will be shown inline in
+# the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO, structs, classes, and unions are shown on a separate page (for HTML and
+# Man pages) or section (for LaTeX and RTF).
+# The default value is: NO.
+
+INLINE_SIMPLE_STRUCTS = NO
+
+# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
+# enum is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically be
+# useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+# The default value is: NO.
+
+TYPEDEF_HIDES_STRUCT = NO
+
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can be
+# an expensive process and often the same symbol appears multiple times in the
+# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
+# doxygen will become slower. If the cache is too large, memory is wasted. The
+# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+# symbols. At the end of a run doxygen will report the cache usage and suggest
+# the optimal cache size from a speed point of view.
+# Minimum value: 0, maximum value: 9, default value: 0.
+
+LOOKUP_CACHE_SIZE = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
+
+EXTRACT_ALL = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will
+# be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
+# scope will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PACKAGE = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file will be
+# included in the documentation.
+# The default value is: NO.
+
+EXTRACT_STATIC = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO
+# only classes defined in header files are included. Does not have any effect
+# for Java sources.
+# The default value is: YES.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. When set to YES local methods,
+# which are defined in the implementation section but not in the interface are
+# included in the documentation. If set to NO only methods in the interface are
+# included.
+# The default value is: NO.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base name of
+# the file that contains the anonymous namespace. By default anonymous namespace
+# are hidden.
+# The default value is: NO.
+
+EXTRACT_ANON_NSPACES = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+# undocumented members inside documented classes or files. If set to NO these
+# members will be included in the various overviews, but no documentation
+# section is generated. This option has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy. If set
+# to NO these classes will be included in the various overviews. This option has
+# no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+# (class|struct|union) declarations. If set to NO these declarations will be
+# included in the documentation.
+# The default value is: NO.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+# documentation blocks found inside the body of a function. If set to NO these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
+# names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+# The default value is: system dependent.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+# their full class and namespace scopes in the documentation. If set to YES the
+# scope will be hidden.
+# The default value is: NO.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+# the files that are included by a file in the documentation of that file.
+# The default value is: YES.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
+# grouped member an include statement to the documentation, telling the reader
+# which file to include in order to use the member.
+# The default value is: NO.
+
+SHOW_GROUPED_MEMB_INC = NO
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
+# files with double quotes in the documentation rather than with sharp brackets.
+# The default value is: NO.
+
+FORCE_LOCAL_INCLUDES = NO
+
+# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
+# documentation for inline members.
+# The default value is: YES.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+# (detailed) documentation of file and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order.
+# The default value is: YES.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+# descriptions of file, namespace and class members alphabetically by member
+# name. If set to NO the members will appear in declaration order. Note that
+# this will also influence the order of the classes in the class list.
+# The default value is: NO.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+# (brief and detailed) documentation of class members so that constructors and
+# destructors are listed first. If set to NO the constructors will appear in the
+# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
+# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
+# member documentation.
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
+# detailed member documentation.
+# The default value is: NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
+# of group names into alphabetical order. If set to NO the group names will
+# appear in their defined order.
+# The default value is: NO.
+
+SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
+# fully-qualified names, including namespaces. If set to NO, the class list will
+# be sorted only by class name, not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the alphabetical
+# list.
+# The default value is: NO.
+
+SORT_BY_SCOPE_NAME = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+# type resolution of all parameters of a function it will reject a match between
+# the prototype and the implementation of a member function even if there is
+# only one candidate or it is obvious which candidate to choose by doing a
+# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+# accept a match between prototype and implementation in such cases.
+# The default value is: NO.
+
+STRICT_PROTO_MATCHING = NO
+
+# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the
+# todo list. This list is created by putting \todo commands in the
+# documentation.
+# The default value is: YES.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the
+# test list. This list is created by putting \test commands in the
+# documentation.
+# The default value is: YES.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if <section_label> ... \endif and \cond <section_label>
+# ... \endcond blocks.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES the list
+# will mention the files that were used to generate the documentation.
+# The default value is: YES.
+
+SHOW_USED_FILES = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command command input-file, where command is the value of the
+# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
+# by doxygen. Whatever the program writes to standard output is used as the file
+# version. For an example see the documentation.
+
+FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
+
+LAYOUT_FILE =
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. Do not use file names with spaces, bibtex cannot handle them. See
+# also \cite for info how to create references.
+
+CITE_BIB_FILES =
+
+#---------------------------------------------------------------------------
+# Configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
+
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
+
+WARNINGS = YES
+
+# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some parameters
+# in a documented function, or documenting parameters that don't exist or using
+# markup commands wrongly.
+# The default value is: YES.
+
+WARN_IF_DOC_ERROR = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+# are documented, but have no documentation for their parameters or return
+# value. If set to NO doxygen will only warn about wrong or incomplete parameter
+# documentation, but not about the absence of documentation.
+# The default value is: NO.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that doxygen
+# can produce. The string should contain the $file, $line, and $text tags, which
+# will be replaced by the file and line number from which the warning originated
+# and the warning text. Optionally the format may contain $version, which will
+# be replaced by the version of the file (if it could be obtained via
+# FILE_VERSION_FILTER)
+# The default value is: $file:$line: $text.
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning and error
+# messages should be written. If left blank the output is written to standard
+# error (stderr).
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces.
+# Note: If this tag is empty the current directory is searched.
+
+INPUT =
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+# documentation (see: http://www.gnu.org/software/libiconv) for the list of
+# possible encodings.
+# The default value is: UTF-8.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank the
+# following patterns are tested: *.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
+# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
+# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
+# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
+# *.qsf, *.as and *.js.
+
+FILE_PATTERNS =
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories should
+# be searched for input files as well.
+# The default value is: NO.
+
+RECURSIVE = NO
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+#
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+# The default value is: NO.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
+
+EXCLUDE_PATTERNS =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER ) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
+
+FILTER_SOURCE_FILES = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on for instance GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# classes and enums directly into the documentation.
+# The default value is: NO.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments. Normal C, C++ and
+# Fortran comments will always remain visible.
+# The default value is: YES.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# function all documented functions referencing it will be listed.
+# The default value is: NO.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
+
+REFERENCES_RELATION = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES, then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+SOURCE_TOOLTIPS = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see http://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
+
+ALPHABETICAL_INDEX = YES
+
+# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
+# which the alphabetical index list will be split.
+# Minimum value: 1, maximum value: 20, default value: 5.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output
+# The default value is: YES.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# To get valid HTML the header file that includes any scripts and style sheets
+# that doxygen needs, which is dependent on the configuration options used (e.g.
+# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
+# default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_STYLESHEET =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user-
+# defined cascading style sheet that is included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet file to the output directory. For an example
+# see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_STYLESHEET =
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_FILES =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the stylesheet and background images according to
+# this color. Hue is specified as an angle on a colorwheel, see
+# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
+# purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use grayscales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting this
+# to NO can help when comparing the output of multiple runs.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_TIMESTAMP = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries 1 will produce a full collapsed tree by default. 0 is a special value
+# representing an infinite number of entries and will result in a full expanded
+# tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see: http://developer.apple.com/tools/xcode/), introduced with
+# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
+# Makefile in the HTML output directory. Running make will produce the docset in
+# that directory and running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_DOCSET = NO
+
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_NAME = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
+# Windows.
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_HTMLHELP = NO
+
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
+# written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_FILE =
+
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler ( hhc.exe). If non-empty
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+HHC_LOCATION =
+
+# The GENERATE_CHI flag controls if a separate .chi index file is generated (
+# YES) or that it should be included in the master .chm file ( NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+GENERATE_CHI = NO
+
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_INDEX_ENCODING =
+
+# The BINARY_TOC flag controls whether a binary table of contents is generated (
+# YES) or a normal table of contents ( NO) in the .chm file.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+TOC_EXPAND = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_NAMESPACE = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
+# folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. Qt Help Project / Filter Attributes (see:
+# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_SECT_FILTER_ATTRS =
+
+# The QHG_LOCATION tag can be used to specify the location of Qt's
+# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
+# generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHG_LOCATION =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated, together with the HTML files, they form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files needs
+# to be copied into the plugins directory of eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
+
+ECLIPSE_DOC_ID = org.doxygen.Project
+
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
+# of each HTML page. A value of NO enables the index and the value YES disables
+# it. Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+DISABLE_INDEX = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_TREEVIEW = NO
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+TREEVIEW_WIDTH = 250
+
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes have effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client side Javascript for the rendering
+# instead of using prerendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want the formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
+# key> to jump into the search results window, the results can be navigated
+# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
+# the search. The filter options can be selected when the cursor is inside the
+# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
+# to select a filter and <Enter> or <escape> to activate or cancel the filter
+# option.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+SEARCHENGINE = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using Javascript. There
+# are two flavours of web server based searching depending on the
+# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for
+# searching and an index file used by the script. When EXTERNAL_SEARCH is
+# enabled the indexing and searching needs to be provided by external tools. See
+# the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SERVER_BASED_SEARCH = NO
+
+# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
+# search results.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/).
+#
+# See the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/). See the section "External Indexing and
+# Searching" for details.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHENGINE_URL =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+# The default file is: searchdata.xml.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHDATA_FILE = searchdata.xml
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH_ID =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of
+# to a relative location where the documentation can be found. The format is:
+# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTRA_SEARCH_MAPPINGS =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output.
+# The default value is: YES.
+
+GENERATE_LATEX = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked.
+#
+# Note that when enabling USE_PDFLATEX this option is only used for generating
+# bitmaps for formulas in the HTML output, but not in the Makefile that is
+# written to the output directory.
+# The default file is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+# index for LaTeX.
+# The default file is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES doxygen generates more compact LaTeX
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used by the
+# printer.
+# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
+# 14 inches) and executive (7.25 x 10.5 inches).
+# The default value is: a4.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PAPER_TYPE = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+# that should be included in the LaTeX output. To get the times font for
+# instance you can specify
+# EXTRA_PACKAGES=times
+# If left blank no extra packages will be included.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+# generated LaTeX document. The header should contain everything until the first
+# chapter. If it is left blank doxygen will generate a standard header. See
+# section "Doxygen usage" for information on how to let doxygen write the
+# default header to a separate file.
+#
+# Note: Only use a user-defined header if you know what you are doing! The
+# following commands have a special meaning inside the header: $title,
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber. Doxygen will
+# replace them by respectively the title of the page, the current date and time,
+# only the current date, the version number of doxygen, the project name (see
+# PROJECT_NAME), or the project number (see PROJECT_NUMBER).
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HEADER =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
+# generated LaTeX document. The footer should contain everything after the last
+# chapter. If it is left blank doxygen will generate a standard footer.
+#
+# Note: Only use a user-defined footer if you know what you are doing!
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_FOOTER =
+
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the LATEX_OUTPUT output
+# directory. Note that the files will be copied as-is; there are no commands or
+# markers available.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_FILES =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
+# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
+# contain links (just like the HTML output) instead of page references. This
+# makes the output suitable for online browsing using a PDF viewer.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PDF_HYPERLINKS = YES
+
+# If the LATEX_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
+# the PDF file directly from the LaTeX files. Set this option to YES to get a
+# higher quality PDF documentation.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep running
+# if errors occur, instead of asking the user for help. This option is also used
+# when generating formulas in HTML.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BATCHMODE = NO
+
+# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
+# index chapters (such as File Index, Compound Index, etc.) in the output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HIDE_INDICES = NO
+
+# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
+# code with syntax highlighting in the LaTeX output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_SOURCE_CODE = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. See
+# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
+# The default value is: plain.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BIB_STYLE = plain
+
+#---------------------------------------------------------------------------
+# Configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES doxygen will generate RTF output. The
+# RTF output is optimized for Word 97 and may not look too pretty with other RTF
+# readers/editors.
+# The default value is: NO.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: rtf.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES doxygen generates more compact RTF
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
+# contain hyperlink fields. The RTF file will contain links (just like the HTML
+# output) instead of page references. This makes the output suitable for online
+# browsing using Word or some other Word compatible readers that support those
+# fields.
+#
+# Note: WordPad (write) and others do not support links.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's config
+# file, i.e. a series of assignments. You only have to provide replacements,
+# missing definitions are set to their default value.
+#
+# See also section "Doxygen usage" for information on how to generate the
+# default style sheet that doxygen normally uses.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an RTF document. Syntax is
+# similar to doxygen's config file. A template extensions file can be generated
+# using doxygen -e rtf extensionFile.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES doxygen will generate man pages for
+# classes and files.
+# The default value is: NO.
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it. A directory man3 will be created inside the directory specified by
+# MAN_OUTPUT.
+# The default directory is: man.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to the generated
+# man pages. In case the manual section does not start with a number, the number
+# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
+# optional.
+# The default value is: .3.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
+# will generate one additional man file for each entity documented in the real
+# man page(s). These additional files only source the real man page, but without
+# them the man command would be unable to find the correct page.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES doxygen will generate an XML file that
+# captures the structure of the code including all documentation.
+# The default value is: NO.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: xml.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify a XML schema, which can be used by a
+# validating XML parser to check the syntax of the XML files.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify a XML DTD, which can be used by a
+# validating XML parser to check the syntax of the XML files.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program
+# listings (including syntax highlighting and cross-referencing information) to
+# the XML output. Note that enabling this will significantly increase the size
+# of the XML output.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_DOCBOOK tag is set to YES doxygen will generate Docbook files
+# that can be used to generate PDF.
+# The default value is: NO.
+
+GENERATE_DOCBOOK = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it.
+# The default directory is: docbook.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_OUTPUT = docbook
+
+#---------------------------------------------------------------------------
+# Configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES doxygen will generate an AutoGen
+# Definitions (see http://autogen.sf.net) file that captures the structure of
+# the code including all documentation. Note that this feature is still
+# experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES doxygen will generate a Perl module
+# file that captures the structure of the code including all documentation.
+#
+# Note that this feature is still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES doxygen will generate the necessary
+# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
+# output from the Perl module output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be nicely
+# formatted so it can be parsed by a human reader. This is useful if you want to
+# understand what is going on. On the other hand, if this tag is set to NO the
+# size of the Perl module output will be much smaller and Perl will parse it
+# just the same.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file are
+# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
+# so different doxyrules.make files included by the same Makefile don't
+# overwrite each other's variables.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES doxygen will evaluate all
+# C-preprocessor directives found in the sources and include files.
+# The default value is: YES.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES doxygen will expand all macro names
+# in the source code. If set to NO only conditional compilation will be
+# performed. Macro expansion can be done in a controlled way by setting
+# EXPAND_ONLY_PREDEF to YES.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+MACRO_EXPANSION = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
+# the macro expansion is limited to the macros specified with the PREDEFINED and
+# EXPAND_AS_DEFINED tags.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES the includes files in the
+# INCLUDE_PATH will be searched if a #include is found.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by the
+# preprocessor.
+# This tag requires that the tag SEARCH_INCLUDES is set to YES.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will be
+# used.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that are
+# defined before the preprocessor is started (similar to the -D option of e.g.
+# gcc). The argument of the tag is a list of macros of the form: name or
+# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
+# is assumed. To prevent a macro definition from being undefined via #undef or
+# recursively expanded use the := operator instead of the = operator.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+PREDEFINED =
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
+# tag can be used to specify a list of macro names that should be expanded. The
+# macro definition that is found in the sources will be used. Use the PREDEFINED
+# tag if you want to use a different macro definition that overrules the
+# definition found in the source code.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
+# remove all references to function-like macros that are alone on a line, have an
+# all uppercase name, and do not end with a semicolon. Such function macros are
+# typically used for boiler-plate code, and will confuse the parser if not
+# removed.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES tag can be used to specify one or more tag files. For each tag
+# file the location of the external documentation should be added. The format of
+# a tag file without this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where loc1 and loc2 can be relative or absolute paths or URLs. See the
+# section "Linking to external documentation" for more information about the use
+# of tag files.
+# Note: Each tag file must have an unique name (where the name does NOT include
+# the path). If a tag file is not located in the directory in which doxygen is
+# run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
+# tag file that is based on the input files it reads. See section "Linking to
+# external documentation" for more information about the usage of tag files.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external class will be listed in the
+# class index. If set to NO only the inherited external classes will be listed.
+# The default value is: NO.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed in
+# the modules index. If set to NO, only the current project's groups will be
+# listed.
+# The default value is: YES.
+
+EXTERNAL_GROUPS = YES
+
+# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in
+# the related pages index. If set to NO, only the current project's pages will
+# be listed.
+# The default value is: YES.
+
+EXTERNAL_PAGES = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of 'which perl').
+# The default file (with absolute path) is: /usr/bin/perl.
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram
+# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
+# NO turns the diagrams off. Note that this option also works with HAVE_DOT
+# disabled, but it is recommended to install and use dot, since it yields more
+# powerful graphs.
+# The default value is: YES.
+
+CLASS_DIAGRAMS = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see:
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH =
+
+# You can include diagrams made with dia in doxygen documentation. Doxygen will
+# then run dia to produce the diagram and insert it in the documentation. The
+# DIA_PATH tag allows you to specify the directory where the dia binary resides.
+# If left empty dia is assumed to be found in the default search path.
+
+DIA_PATH =
+
+# If set to YES, the inheritance and collaboration graphs will hide inheritance
+# and usage relations if the target is undocumented or is not a class.
+# The default value is: YES.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz (see:
+# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
+# Bell Labs. The other options in this section have no effect if this option is
+# set to NO
+# The default value is: NO.
+
+HAVE_DOT = NO
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
+# to run in parallel. When set to 0 doxygen will base this on the number of
+# processors available in the system. You can set it explicitly to a value
+# larger than 0 to get control over the balance between CPU load and processing
+# speed.
+# Minimum value: 0, maximum value: 32, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_NUM_THREADS = 0
+
+# When you want a differently looking font in the dot files that doxygen
+# generates you can specify the font name using DOT_FONTNAME. You need to make
+# sure dot is able to find the font, which can be done by putting it in a
+# standard location or by setting the DOTFONTPATH environment variable or by
+# setting DOT_FONTPATH to the directory containing the font.
+# The default value is: Helvetica.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTNAME = Helvetica
+
+# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
+# dot graphs.
+# Minimum value: 4, maximum value: 24, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTSIZE = 10
+
+# By default doxygen will tell dot to use the default font as specified with
+# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
+# the path where dot can find it using this tag.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
+# each documented class showing the direct and indirect inheritance relations.
+# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
+# graph for each documented class showing the direct and indirect implementation
+# dependencies (inheritance, containment, and class references variables) of the
+# class with other documented classes.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
+# groups, showing the direct groups dependencies.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LOOK = NO
+
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
+# class node. If there are many fields or methods and many nodes the graph may
+# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
+# number of items for each type to make the size more manageable. Set this to 0
+# for no limit. Note that the threshold may be exceeded by 50% before the limit
+# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
+# but if the number exceeds 15, the total amount of fields shown is limited to
+# 10.
+# Minimum value: 0, maximum value: 100, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LIMIT_NUM_FIELDS = 10
+
+# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
+# collaboration graphs will show the relations between templates and their
+# instances.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+TEMPLATE_RELATIONS = NO
+
+# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
+# YES then doxygen will generate a graph for each documented file showing the
+# direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDE_GRAPH = YES
+
+# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
+# set to YES then doxygen will generate a graph for each documented file showing
+# the direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a
+# graphical hierarchy of all classes instead of a textual one.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
+# dependencies a directory has on other directories in a graphical way. The
+# dependency relations are determined by the #include relations between the
+# files in the directories.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot.
+# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
+# to make the SVG files visible in IE 9+ (other browsers do not have this
+# requirement).
+# Possible values are: png, jpg, gif and svg.
+# The default value is: png.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_IMAGE_FORMAT = png
+
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+#
+# Note that this requires a modern browser other than Internet Explorer. Tested
+# and working are Firefox, Chrome, Safari, and Opera.
+# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
+# the SVG files visible. Older versions of IE do not have SVG support.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INTERACTIVE_SVG = NO
+
+# The DOT_PATH tag can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the \dotfile
+# command).
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOTFILE_DIRS =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the \mscfile
+# command).
+
+MSCFILE_DIRS =
+
+# The DIAFILE_DIRS tag can be used to specify one or more directories that
+# contain dia files that are included in the documentation (see the \diafile
+# command).
+
+DIAFILE_DIRS =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
+# that will be shown in the graph. If the number of nodes in a graph becomes
+# larger than this value, doxygen will truncate the graph, which is visualized
+# by representing a node as a red box. Note that doxygen if the number of direct
+# children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that
+# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+# Minimum value: 0, maximum value: 10000, default value: 50.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
+# generated by dot. A depth value of 3 means that only nodes reachable from the
+# root by following a path via at most 3 edges will be shown. Nodes that lay
+# further from the root node will be omitted. Note that setting this option to 1
+# or 2 may greatly reduce the computation time needed for large code bases. Also
+# note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+# Minimum value: 0, maximum value: 1000, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not seem
+# to support this out of the box.
+#
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10) support
+# this, this feature is disabled by default.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_MULTI_TARGETS = YES
+
+# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
+# explaining the meaning of the various boxes and arrows in the dot generated
+# graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES doxygen will remove the intermediate dot
+# files that are used to generate the various graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_CLEANUP = YES
diff --git a/media/libaaudio/README.md b/media/libaaudio/README.md
new file mode 100644
index 0000000..0c9050e
--- /dev/null
+++ b/media/libaaudio/README.md
@@ -0,0 +1,3 @@
+AAudio input/output API
+
+To generate Doxygen output, run command "doxygen" in this directory.
diff --git a/media/libaaudio/examples/Android.mk b/media/libaaudio/examples/Android.mk
new file mode 100644
index 0000000..5053e7d
--- /dev/null
+++ b/media/libaaudio/examples/Android.mk
@@ -0,0 +1 @@
+include $(call all-subdir-makefiles)
diff --git a/media/libaaudio/examples/input_monitor/Android.mk b/media/libaaudio/examples/input_monitor/Android.mk
new file mode 100644
index 0000000..b56328b
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/Android.mk
@@ -0,0 +1,6 @@
+# include $(call all-subdir-makefiles)
+
+# Just include static/ for now.
+LOCAL_PATH := $(call my-dir)
+#include $(LOCAL_PATH)/jni/Android.mk
+include $(LOCAL_PATH)/static/Android.mk
diff --git a/media/libaaudio/examples/input_monitor/README.md b/media/libaaudio/examples/input_monitor/README.md
new file mode 100644
index 0000000..3e54ef0
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/README.md
@@ -0,0 +1 @@
+Monitor input level and print value.
diff --git a/media/libaaudio/examples/input_monitor/jni/Android.mk b/media/libaaudio/examples/input_monitor/jni/Android.mk
new file mode 100644
index 0000000..3e24f9f
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/jni/Android.mk
@@ -0,0 +1,33 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+ $(call include-path-for, audio-utils) \
+ frameworks/av/media/libaaudio/include \
+ frameworks/av/media/libaaudio/src \
+ frameworks/av/media/libaaudio/examples/utils
+
+# NDK recommends using this kind of relative path instead of an absolute path.
+LOCAL_SRC_FILES:= ../src/input_monitor.cpp
+LOCAL_SHARED_LIBRARIES := libaaudio
+LOCAL_MODULE := input_monitor_ndk
+include $(BUILD_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+ $(call include-path-for, audio-utils) \
+ frameworks/av/media/libaaudio/include \
+ frameworks/av/media/libaaudio/examples/utils
+
+LOCAL_SRC_FILES:= ../src/input_monitor_callback.cpp
+LOCAL_SHARED_LIBRARIES := libaaudio
+LOCAL_MODULE := input_monitor_callback_ndk
+include $(BUILD_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := libaaudio_prebuilt
+LOCAL_SRC_FILES := libaaudio.so
+LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
+include $(PREBUILT_SHARED_LIBRARY)
\ No newline at end of file
diff --git a/media/libaaudio/examples/input_monitor/jni/Application.mk b/media/libaaudio/examples/input_monitor/jni/Application.mk
new file mode 100644
index 0000000..e74475c
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/jni/Application.mk
@@ -0,0 +1,3 @@
+# TODO remove this when we support other architectures
+APP_ABI := arm64-v8a
+APP_CPPFLAGS += -std=c++11
diff --git a/media/libaaudio/examples/input_monitor/src/input_monitor.cpp b/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
new file mode 100644
index 0000000..715c5f8
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Record input using AAudio and display the peak amplitudes.
+
+#include <new>
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <aaudio/AAudio.h>
+#include "AAudioExampleUtils.h"
+#include "AAudioSimpleRecorder.h"
+
+#define SAMPLE_RATE 48000
+
+#define NUM_SECONDS 10
+
+#define MIN_FRAMES_TO_READ 48 /* arbitrary, 1 msec at 48000 Hz */
+
+int main(int argc, char **argv)
+{
+ (void)argc; // unused
+
+ aaudio_result_t result;
+ AAudioSimpleRecorder recorder;
+ int actualSamplesPerFrame;
+ int actualSampleRate;
+ const aaudio_format_t requestedDataFormat = AAUDIO_FORMAT_PCM_I16;
+ aaudio_format_t actualDataFormat;
+
+ const int requestedInputChannelCount = 1; // Can affect whether we get a FAST path.
+
+ //aaudio_performance_mode_t requestedPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
+ const aaudio_performance_mode_t requestedPerformanceMode = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
+ //aaudio_performance_mode_t requestedPerformanceMode = AAUDIO_PERFORMANCE_MODE_POWER_SAVING;
+ const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_SHARED;
+ //const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE;
+ aaudio_sharing_mode_t actualSharingMode;
+
+ AAudioStream *aaudioStream = nullptr;
+ aaudio_stream_state_t state;
+ int32_t framesPerBurst = 0;
+ int32_t framesPerRead = 0;
+ int32_t framesToRecord = 0;
+ int32_t framesLeft = 0;
+ int32_t xRunCount = 0;
+ int16_t *data = nullptr;
+ float peakLevel = 0.0;
+ int loopCounter = 0;
+
+ // Make printf print immediately so that debug info is not stuck
+ // in a buffer if we hang or crash.
+ setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+
+ printf("%s - Monitor input level using AAudio\n", argv[0]);
+
+ recorder.setPerformanceMode(requestedPerformanceMode);
+ recorder.setSharingMode(requestedSharingMode);
+
+ result = recorder.open(requestedInputChannelCount, 48000, requestedDataFormat,
+ nullptr, nullptr, nullptr);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - recorder.open() returned %d\n", result);
+ goto finish;
+ }
+ aaudioStream = recorder.getStream();
+
+ actualSamplesPerFrame = AAudioStream_getSamplesPerFrame(aaudioStream);
+ printf("SamplesPerFrame = %d\n", actualSamplesPerFrame);
+ actualSampleRate = AAudioStream_getSampleRate(aaudioStream);
+    printf("SampleRate = %d\n", actualSampleRate);
+
+ actualSharingMode = AAudioStream_getSharingMode(aaudioStream);
+ printf("SharingMode: requested = %s, actual = %s\n",
+ getSharingModeText(requestedSharingMode),
+ getSharingModeText(actualSharingMode));
+
+ // This is the number of frames that are written in one chunk by a DMA controller
+ // or a DSP.
+ framesPerBurst = AAudioStream_getFramesPerBurst(aaudioStream);
+ printf("DataFormat: framesPerBurst = %d\n",framesPerBurst);
+
+ // Some DMA might use very short bursts of 16 frames. We don't need to read such small
+ // buffers. But it helps to use a multiple of the burst size for predictable scheduling.
+ framesPerRead = framesPerBurst;
+ while (framesPerRead < MIN_FRAMES_TO_READ) {
+ framesPerRead *= 2;
+ }
+ printf("DataFormat: framesPerRead = %d\n",framesPerRead);
+
+ actualDataFormat = AAudioStream_getFormat(aaudioStream);
+ printf("DataFormat: requested = %d, actual = %d\n", requestedDataFormat, actualDataFormat);
+ // TODO handle other data formats
+ assert(actualDataFormat == AAUDIO_FORMAT_PCM_I16);
+
+ printf("PerformanceMode: requested = %d, actual = %d\n", requestedPerformanceMode,
+ AAudioStream_getPerformanceMode(aaudioStream));
+
+ // Allocate a buffer for the audio data.
+ data = new(std::nothrow) int16_t[framesPerRead * actualSamplesPerFrame];
+ if (data == nullptr) {
+ fprintf(stderr, "ERROR - could not allocate data buffer\n");
+ result = AAUDIO_ERROR_NO_MEMORY;
+ goto finish;
+ }
+
+ // Start the stream.
+ result = recorder.start();
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - recorder.start() returned %d\n", result);
+ goto finish;
+ }
+
+ state = AAudioStream_getState(aaudioStream);
+ printf("after start, state = %s\n", AAudio_convertStreamStateToText(state));
+
+ // Record for a while.
+ framesToRecord = actualSampleRate * NUM_SECONDS;
+ framesLeft = framesToRecord;
+ while (framesLeft > 0) {
+ // Read audio data from the stream.
+ const int64_t timeoutNanos = 100 * NANOS_PER_MILLISECOND;
+ int minFrames = (framesToRecord < framesPerRead) ? framesToRecord : framesPerRead;
+ int actual = AAudioStream_read(aaudioStream, data, minFrames, timeoutNanos);
+ if (actual < 0) {
+ fprintf(stderr, "ERROR - AAudioStream_read() returned %d\n", actual);
+ result = actual;
+ goto finish;
+ } else if (actual == 0) {
+ fprintf(stderr, "WARNING - AAudioStream_read() returned %d\n", actual);
+ goto finish;
+ }
+ framesLeft -= actual;
+
+ // Peak finder.
+ for (int frameIndex = 0; frameIndex < actual; frameIndex++) {
+ float sample = data[frameIndex * actualSamplesPerFrame] * (1.0/32768);
+ if (sample > peakLevel) {
+ peakLevel = sample;
+ }
+ }
+
+ // Display level as stars, eg. "******".
+ if ((loopCounter++ % 10) == 0) {
+ displayPeakLevel(peakLevel);
+ peakLevel = 0.0;
+ }
+ }
+
+ xRunCount = AAudioStream_getXRunCount(aaudioStream);
+ printf("AAudioStream_getXRunCount %d\n", xRunCount);
+
+ result = recorder.stop();
+ if (result != AAUDIO_OK) {
+ goto finish;
+ }
+
+finish:
+ recorder.close();
+ delete[] data;
+ printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+ return (result != AAUDIO_OK) ? EXIT_FAILURE : EXIT_SUCCESS;
+}
+
diff --git a/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp b/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
new file mode 100644
index 0000000..9de2eb0
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Record input using AAudio and display the peak amplitudes.
+
+#include <assert.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <math.h>
+#include <time.h>
+#include <aaudio/AAudio.h>
+#include "AAudioExampleUtils.h"
+#include "AAudioSimpleRecorder.h"
+
+#define NUM_SECONDS 5
+
+int main(int argc, char **argv)
+{
+ (void)argc; // unused
+ AAudioSimpleRecorder recorder;
+ PeakTrackerData_t myData = {0.0};
+ aaudio_result_t result;
+ aaudio_stream_state_t state;
+ const int displayRateHz = 20; // arbitrary
+ const int loopsNeeded = NUM_SECONDS * displayRateHz;
+
+ // Make printf print immediately so that debug info is not stuck
+ // in a buffer if we hang or crash.
+ setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+ printf("%s - Display audio input using an AAudio callback\n", argv[0]);
+
+ result = recorder.open(2, 48000, AAUDIO_FORMAT_PCM_I16,
+ SimpleRecorderDataCallbackProc, SimpleRecorderErrorCallbackProc, &myData);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - recorder.open() returned %d\n", result);
+ goto error;
+ }
+ printf("recorder.getFramesPerSecond() = %d\n", recorder.getFramesPerSecond());
+ printf("recorder.getSamplesPerFrame() = %d\n", recorder.getSamplesPerFrame());
+
+ result = recorder.start();
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - recorder.start() returned %d\n", result);
+ goto error;
+ }
+
+    printf("Sleep for %d seconds while audio records in a callback thread.\n", NUM_SECONDS);
+ for (int i = 0; i < loopsNeeded; i++)
+ {
+ const struct timespec request = { .tv_sec = 0,
+ .tv_nsec = NANOS_PER_SECOND / displayRateHz };
+ (void) clock_nanosleep(CLOCK_MONOTONIC, 0 /*flags*/, &request, NULL /*remain*/);
+ printf("%08d: ", (int)recorder.getFramesRead());
+ displayPeakLevel(myData.peakLevel);
+
+ result = AAudioStream_waitForStateChange(recorder.getStream(),
+ AAUDIO_STREAM_STATE_CLOSED,
+ &state,
+ 0);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - AAudioStream_waitForStateChange() returned %d\n", result);
+ goto error;
+ }
+ if (state != AAUDIO_STREAM_STATE_STARTING && state != AAUDIO_STREAM_STATE_STARTED) {
+ printf("Stream state is %d %s!\n", state, AAudio_convertStreamStateToText(state));
+ break;
+ }
+ }
+ printf("Woke up. Stop for a moment.\n");
+
+ result = recorder.stop();
+ if (result != AAUDIO_OK) {
+ goto error;
+ }
+ usleep(2000 * 1000);
+ result = recorder.start();
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - recorder.start() returned %d\n", result);
+ goto error;
+ }
+
+ printf("Sleep for %d seconds while audio records in a callback thread.\n", NUM_SECONDS);
+ for (int i = 0; i < loopsNeeded; i++)
+ {
+ const struct timespec request = { .tv_sec = 0,
+ .tv_nsec = NANOS_PER_SECOND / displayRateHz };
+ (void) clock_nanosleep(CLOCK_MONOTONIC, 0 /*flags*/, &request, NULL /*remain*/);
+ printf("%08d: ", (int)recorder.getFramesRead());
+ displayPeakLevel(myData.peakLevel);
+
+ state = AAudioStream_getState(recorder.getStream());
+ if (state != AAUDIO_STREAM_STATE_STARTING && state != AAUDIO_STREAM_STATE_STARTED) {
+ printf("Stream state is %d %s!\n", state, AAudio_convertStreamStateToText(state));
+ break;
+ }
+ }
+ printf("Woke up now.\n");
+
+ result = recorder.stop();
+ if (result != AAUDIO_OK) {
+ goto error;
+ }
+ result = recorder.close();
+ if (result != AAUDIO_OK) {
+ goto error;
+ }
+
+ printf("SUCCESS\n");
+ return EXIT_SUCCESS;
+error:
+ recorder.close();
+ printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+ return EXIT_FAILURE;
+}
+
diff --git a/media/libaaudio/examples/input_monitor/static/Android.mk b/media/libaaudio/examples/input_monitor/static/Android.mk
new file mode 100644
index 0000000..61fc3b8
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/static/Android.mk
@@ -0,0 +1,37 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := examples
+LOCAL_C_INCLUDES := \
+ $(call include-path-for, audio-utils) \
+ frameworks/av/media/libaaudio/include \
+ frameworks/av/media/libaaudio/examples/utils
+
+# TODO reorganize folders to avoid using ../
+LOCAL_SRC_FILES:= ../src/input_monitor.cpp
+
+LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
+ libbinder libcutils libutils \
+ libaudioclient liblog libtinyalsa libaudiomanager
+LOCAL_STATIC_LIBRARIES := libaaudio
+
+LOCAL_MODULE := input_monitor
+include $(BUILD_EXECUTABLE)
+
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+ $(call include-path-for, audio-utils) \
+ frameworks/av/media/libaaudio/include \
+ frameworks/av/media/libaaudio/examples/utils
+
+LOCAL_SRC_FILES:= ../src/input_monitor_callback.cpp
+
+LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
+ libbinder libcutils libutils \
+ libaudioclient liblog libaudiomanager
+LOCAL_STATIC_LIBRARIES := libaaudio
+
+LOCAL_MODULE := input_monitor_callback
+include $(BUILD_EXECUTABLE)
diff --git a/media/libaaudio/examples/input_monitor/static/README.md b/media/libaaudio/examples/input_monitor/static/README.md
new file mode 100644
index 0000000..6e26d7b
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/static/README.md
@@ -0,0 +1,2 @@
+Makefile for building simple command line examples.
+They link with AAudio as a static library.
diff --git a/media/libaaudio/examples/loopback/Android.mk b/media/libaaudio/examples/loopback/Android.mk
new file mode 100644
index 0000000..5053e7d
--- /dev/null
+++ b/media/libaaudio/examples/loopback/Android.mk
@@ -0,0 +1 @@
+include $(call all-subdir-makefiles)
diff --git a/media/libaaudio/examples/loopback/jni/Android.mk b/media/libaaudio/examples/loopback/jni/Android.mk
new file mode 100644
index 0000000..dc933e3
--- /dev/null
+++ b/media/libaaudio/examples/loopback/jni/Android.mk
@@ -0,0 +1,13 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+ $(call include-path-for, audio-utils) \
+ frameworks/av/media/libaaudio/include
+
+# NDK recommends using this kind of relative path instead of an absolute path.
+LOCAL_SRC_FILES:= ../src/loopback.cpp
+LOCAL_SHARED_LIBRARIES := libaaudio
+LOCAL_MODULE := aaudio_loopback
+include $(BUILD_EXECUTABLE)
diff --git a/media/libaaudio/examples/loopback/jni/Application.mk b/media/libaaudio/examples/loopback/jni/Application.mk
new file mode 100644
index 0000000..ba44f37
--- /dev/null
+++ b/media/libaaudio/examples/loopback/jni/Application.mk
@@ -0,0 +1 @@
+APP_CPPFLAGS += -std=c++11
diff --git a/media/libaaudio/examples/loopback/src/loopback.cpp b/media/libaaudio/examples/loopback/src/loopback.cpp
new file mode 100644
index 0000000..9f06ee7
--- /dev/null
+++ b/media/libaaudio/examples/loopback/src/loopback.cpp
@@ -0,0 +1,528 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Play an impulse and then record it.
+// Measure the round trip latency.
+
+#include <assert.h>
+#include <cctype>
+#include <math.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <aaudio/AAudio.h>
+
+#define INPUT_PEAK_THRESHOLD 0.1f
+#define SILENCE_FRAMES 10000
+#define SAMPLE_RATE 48000
+#define NUM_SECONDS 7
+#define FILENAME "/data/oboe_input.raw"
+
+#define NANOS_PER_MICROSECOND ((int64_t)1000)
+#define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
+#define MILLIS_PER_SECOND 1000
+#define NANOS_PER_SECOND (NANOS_PER_MILLISECOND * MILLIS_PER_SECOND)
+
+class AudioRecorder
+{
+public:
+ AudioRecorder() {
+ }
+ ~AudioRecorder() {
+ delete[] mData;
+ }
+
+ void allocate(int maxFrames) {
+ delete[] mData;
+ mData = new float[maxFrames];
+ mMaxFrames = maxFrames;
+ }
+
+ void record(int16_t *inputData, int inputChannelCount, int numFrames) {
+ // stop at end of buffer
+ if ((mFrameCounter + numFrames) > mMaxFrames) {
+ numFrames = mMaxFrames - mFrameCounter;
+ }
+ for (int i = 0; i < numFrames; i++) {
+ mData[mFrameCounter++] = inputData[i * inputChannelCount] * (1.0f / 32768);
+ }
+ }
+
+ void record(float *inputData, int inputChannelCount, int numFrames) {
+ // stop at end of buffer
+ if ((mFrameCounter + numFrames) > mMaxFrames) {
+ numFrames = mMaxFrames - mFrameCounter;
+ }
+ for (int i = 0; i < numFrames; i++) {
+ mData[mFrameCounter++] = inputData[i * inputChannelCount];
+ }
+ }
+
+ int save(const char *fileName) {
+ FILE *fid = fopen(fileName, "wb");
+ if (fid == NULL) {
+ return errno;
+ }
+ int written = fwrite(mData, sizeof(float), mFrameCounter, fid);
+ fclose(fid);
+ return written;
+ }
+
+private:
+ float *mData = NULL;
+ int32_t mFrameCounter = 0;
+ int32_t mMaxFrames = 0;
+};
+
+// ====================================================================================
+// ========================= Loopback Processor =======================================
+// ====================================================================================
+class LoopbackProcessor {
+public:
+
+ // Calculate mean and standard deviation.
+ double calculateAverageLatency(double *deviation) {
+ if (mLatencyCount <= 0) {
+ return -1.0;
+ }
+ double sum = 0.0;
+ for (int i = 0; i < mLatencyCount; i++) {
+ sum += mLatencyArray[i];
+ }
+ double average = sum / mLatencyCount;
+ sum = 0.0;
+ for (int i = 0; i < mLatencyCount; i++) {
+ double error = average - mLatencyArray[i];
+ sum += error * error; // squared
+ }
+ *deviation = sqrt(sum / mLatencyCount);
+ return average;
+ }
+
+ float getMaxAmplitude() const { return mMaxAmplitude; }
+ int getMeasurementCount() const { return mLatencyCount; }
+ float getAverageAmplitude() const { return mAmplitudeTotal / mAmplitudeCount; }
+
+ // TODO Convert this to a feedback circuit and then use auto-correlation to measure the period.
+ void process(float *inputData, int inputChannelCount,
+ float *outputData, int outputChannelCount,
+ int numFrames) {
+ (void) outputChannelCount;
+
+ // Measure peak and average amplitude.
+ for (int i = 0; i < numFrames; i++) {
+ float sample = inputData[i * inputChannelCount];
+ if (sample > mMaxAmplitude) {
+ mMaxAmplitude = sample;
+ }
+ if (sample < 0) {
+ sample = 0 - sample;
+ }
+ mAmplitudeTotal += sample;
+ mAmplitudeCount++;
+ }
+
+ // Clear output.
+ memset(outputData, 0, numFrames * outputChannelCount * sizeof(float));
+
+ // Wait a while between hearing the pulse and starting a new one.
+ if (mState == STATE_SILENT) {
+ mCounter += numFrames;
+ if (mCounter > SILENCE_FRAMES) {
+ //printf("LoopbackProcessor send impulse, burst #%d\n", mBurstCounter);
+ // copy impulse
+ for (float sample : mImpulse) {
+ *outputData = sample;
+ outputData += outputChannelCount;
+ }
+ mState = STATE_LISTENING;
+ mCounter = 0;
+ }
+ }
+ // Start listening as soon as we send the impulse.
+ if (mState == STATE_LISTENING) {
+ for (int i = 0; i < numFrames; i++) {
+ float sample = inputData[i * inputChannelCount];
+ if (sample >= INPUT_PEAK_THRESHOLD) {
+ mLatencyArray[mLatencyCount++] = mCounter;
+ if (mLatencyCount >= MAX_LATENCY_VALUES) {
+ mState = STATE_DONE;
+ } else {
+ mState = STATE_SILENT;
+ }
+ mCounter = 0;
+ break;
+ } else {
+ mCounter++;
+ }
+ }
+ }
+ }
+
+ void echo(float *inputData, int inputChannelCount,
+ float *outputData, int outputChannelCount,
+ int numFrames) {
+ int channelsValid = (inputChannelCount < outputChannelCount)
+ ? inputChannelCount : outputChannelCount;
+ for (int i = 0; i < numFrames; i++) {
+ int ic;
+ for (ic = 0; ic < channelsValid; ic++) {
+ outputData[ic] = inputData[ic];
+ }
+        for (ic = channelsValid; ic < outputChannelCount; ic++) {
+ outputData[ic] = 0;
+ }
+ inputData += inputChannelCount;
+ outputData += outputChannelCount;
+ }
+ }
+private:
+ enum {
+ STATE_SILENT,
+ STATE_LISTENING,
+ STATE_DONE
+ };
+
+ enum {
+ MAX_LATENCY_VALUES = 64
+ };
+
+ int mState = STATE_SILENT;
+ int32_t mCounter = 0;
+ int32_t mLatencyArray[MAX_LATENCY_VALUES];
+ int32_t mLatencyCount = 0;
+ float mMaxAmplitude = 0;
+ float mAmplitudeTotal = 0;
+ int32_t mAmplitudeCount = 0;
+ static const float mImpulse[5];
+};
+
+const float LoopbackProcessor::mImpulse[5] = {0.5f, 0.9f, 0.0f, -0.9f, -0.5f};
+
+// TODO make this a class that manages its own buffer allocation
+struct LoopbackData {
+ AAudioStream *inputStream = nullptr;
+ int32_t inputFramesMaximum = 0;
+ int16_t *inputData = nullptr;
+ float *conversionBuffer = nullptr;
+ int32_t actualInputChannelCount = 0;
+ int32_t actualOutputChannelCount = 0;
+ int32_t inputBuffersToDiscard = 10;
+
+ aaudio_result_t inputError;
+ LoopbackProcessor loopbackProcessor;
+ AudioRecorder audioRecorder;
+};
+
+static void convertPcm16ToFloat(const int16_t *source,
+ float *destination,
+ int32_t numSamples) {
+ const float scaler = 1.0f / 32768.0f;
+ for (int i = 0; i < numSamples; i++) {
+ destination[i] = source[i] * scaler;
+ }
+}
+
+// ====================================================================================
+// ========================= CALLBACK =================================================
+// ====================================================================================
+// Callback function that fills the audio output buffer.
+static aaudio_data_callback_result_t MyDataCallbackProc(
+ AAudioStream *outputStream,
+ void *userData,
+ void *audioData,
+ int32_t numFrames
+) {
+ (void) outputStream;
+ LoopbackData *myData = (LoopbackData *) userData;
+ float *outputData = (float *) audioData;
+
+ // Read audio data from the input stream.
+ int32_t framesRead;
+
+ if (numFrames > myData->inputFramesMaximum) {
+ myData->inputError = AAUDIO_ERROR_OUT_OF_RANGE;
+ return AAUDIO_CALLBACK_RESULT_STOP;
+ }
+
+ if (myData->inputBuffersToDiscard > 0) {
+ // Drain the input.
+ do {
+ framesRead = AAudioStream_read(myData->inputStream, myData->inputData,
+ numFrames, 0);
+ if (framesRead < 0) {
+ myData->inputError = framesRead;
+ } else if (framesRead > 0) {
+ myData->inputBuffersToDiscard--;
+ }
+ } while(framesRead > 0);
+ } else {
+ framesRead = AAudioStream_read(myData->inputStream, myData->inputData,
+ numFrames, 0);
+ if (framesRead < 0) {
+ myData->inputError = framesRead;
+ } else if (framesRead > 0) {
+ // Process valid input data.
+ myData->audioRecorder.record(myData->inputData,
+ myData->actualInputChannelCount,
+ framesRead);
+
+ int32_t numSamples = framesRead * myData->actualInputChannelCount;
+ convertPcm16ToFloat(myData->inputData, myData->conversionBuffer, numSamples);
+
+ myData->loopbackProcessor.process(myData->conversionBuffer,
+ myData->actualInputChannelCount,
+ outputData,
+ myData->actualOutputChannelCount,
+ framesRead);
+ }
+ }
+
+ return AAUDIO_CALLBACK_RESULT_CONTINUE;
+}
+
+static void usage() {
+ printf("loopback: -b{burstsPerBuffer} -p{outputPerfMode} -P{inputPerfMode}\n");
+ printf(" -b{burstsPerBuffer} for example 2 for double buffered\n");
+ printf(" -p{outputPerfMode} set output AAUDIO_PERFORMANCE_MODE*\n");
+ printf(" -P{inputPerfMode} set input AAUDIO_PERFORMANCE_MODE*\n");
+ printf(" n for _NONE\n");
+ printf(" l for _LATENCY\n");
+ printf(" p for _POWER_SAVING;\n");
+ printf("For example: loopback -b2 -pl -Pn\n");
+}
+
+static aaudio_performance_mode_t parsePerformanceMode(char c) {
+ aaudio_performance_mode_t mode = AAUDIO_PERFORMANCE_MODE_NONE;
+ c = tolower(c);
+ switch (c) {
+ case 'n':
+ mode = AAUDIO_PERFORMANCE_MODE_NONE;
+ break;
+ case 'l':
+ mode = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
+ break;
+ case 'p':
+ mode = AAUDIO_PERFORMANCE_MODE_POWER_SAVING;
+ break;
+ default:
+            printf("ERROR invalid performance mode %c\n", c);
+ break;
+ }
+ return mode;
+}
+
+// ====================================================================================
+// TODO break up this large main() function into smaller functions
+int main(int argc, const char **argv)
+{
+ aaudio_result_t result = AAUDIO_OK;
+ LoopbackData loopbackData;
+ AAudioStream *outputStream = nullptr;
+
+ const int requestedInputChannelCount = 1;
+ const int requestedOutputChannelCount = AAUDIO_UNSPECIFIED;
+ const int requestedSampleRate = SAMPLE_RATE;
+ int actualSampleRate = 0;
+ const aaudio_format_t requestedInputFormat = AAUDIO_FORMAT_PCM_I16;
+ const aaudio_format_t requestedOutputFormat = AAUDIO_FORMAT_PCM_FLOAT;
+ aaudio_format_t actualInputFormat;
+ aaudio_format_t actualOutputFormat;
+
+ const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE;
+ //const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_SHARED;
+ aaudio_sharing_mode_t actualSharingMode;
+
+ AAudioStreamBuilder *builder = nullptr;
+ aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNINITIALIZED;
+ int32_t framesPerBurst = 0;
+ float *outputData = NULL;
+ double deviation;
+ double latency;
+ aaudio_performance_mode_t outputPerformanceLevel = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
+ aaudio_performance_mode_t inputPerformanceLevel = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
+
+ int32_t burstsPerBuffer = 1; // single buffered
+
+ for (int i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (arg[0] == '-') {
+ char option = arg[1];
+ switch (option) {
+ case 'b':
+ burstsPerBuffer = atoi(&arg[2]);
+ break;
+ case 'p':
+ outputPerformanceLevel = parsePerformanceMode(arg[2]);
+ break;
+ case 'P':
+ inputPerformanceLevel = parsePerformanceMode(arg[2]);
+ break;
+ default:
+ usage();
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+
+ loopbackData.audioRecorder.allocate(NUM_SECONDS * SAMPLE_RATE);
+
+ // Make printf print immediately so that debug info is not stuck
+ // in a buffer if we hang or crash.
+ setvbuf(stdout, NULL, _IONBF, (size_t) 0);
+
+ printf("%s - Audio loopback using AAudio\n", argv[0]);
+
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ result = AAudio_createStreamBuilder(&builder);
+ if (result < 0) {
+ goto finish;
+ }
+
+ // Request common stream properties.
+ AAudioStreamBuilder_setSampleRate(builder, requestedSampleRate);
+ AAudioStreamBuilder_setFormat(builder, requestedInputFormat);
+ AAudioStreamBuilder_setSharingMode(builder, requestedSharingMode);
+
+ // Open the input stream.
+ AAudioStreamBuilder_setDirection(builder, AAUDIO_DIRECTION_INPUT);
+ AAudioStreamBuilder_setPerformanceMode(builder, inputPerformanceLevel);
+ AAudioStreamBuilder_setChannelCount(builder, requestedInputChannelCount);
+
+ result = AAudioStreamBuilder_openStream(builder, &loopbackData.inputStream);
+ printf("AAudioStreamBuilder_openStream(input) returned %d = %s\n",
+ result, AAudio_convertResultToText(result));
+ if (result < 0) {
+ goto finish;
+ }
+
+ // Create an output stream using the Builder.
+ AAudioStreamBuilder_setDirection(builder, AAUDIO_DIRECTION_OUTPUT);
+ AAudioStreamBuilder_setFormat(builder, requestedOutputFormat);
+ AAudioStreamBuilder_setPerformanceMode(builder, outputPerformanceLevel);
+ AAudioStreamBuilder_setChannelCount(builder, requestedOutputChannelCount);
+ AAudioStreamBuilder_setDataCallback(builder, MyDataCallbackProc, &loopbackData);
+
+ result = AAudioStreamBuilder_openStream(builder, &outputStream);
+ printf("AAudioStreamBuilder_openStream(output) returned %d = %s\n",
+ result, AAudio_convertResultToText(result));
+ if (result != AAUDIO_OK) {
+ goto finish;
+ }
+
+ printf("Stream INPUT ---------------------\n");
+ loopbackData.actualInputChannelCount = AAudioStream_getChannelCount(loopbackData.inputStream);
+ printf(" channelCount: requested = %d, actual = %d\n", requestedInputChannelCount,
+ loopbackData.actualInputChannelCount);
+ printf(" framesPerBurst = %d\n", AAudioStream_getFramesPerBurst(loopbackData.inputStream));
+
+ actualInputFormat = AAudioStream_getFormat(loopbackData.inputStream);
+ printf(" dataFormat: requested = %d, actual = %d\n", requestedInputFormat, actualInputFormat);
+ assert(actualInputFormat == AAUDIO_FORMAT_PCM_I16);
+
+ printf("Stream OUTPUT ---------------------\n");
+ // Check to see what kind of stream we actually got.
+ actualSampleRate = AAudioStream_getSampleRate(outputStream);
+ printf(" sampleRate: requested = %d, actual = %d\n", requestedSampleRate, actualSampleRate);
+
+ loopbackData.actualOutputChannelCount = AAudioStream_getChannelCount(outputStream);
+ printf(" channelCount: requested = %d, actual = %d\n", requestedOutputChannelCount,
+ loopbackData.actualOutputChannelCount);
+
+ actualSharingMode = AAudioStream_getSharingMode(outputStream);
+ printf(" sharingMode: requested = %d, actual = %d\n", requestedSharingMode, actualSharingMode);
+
+ // This is the number of frames that are read in one chunk by a DMA controller
+ // or a DSP or a mixer.
+ framesPerBurst = AAudioStream_getFramesPerBurst(outputStream);
+ printf(" framesPerBurst = %d\n", framesPerBurst);
+
+ printf(" bufferCapacity = %d\n", AAudioStream_getBufferCapacityInFrames(outputStream));
+
+ actualOutputFormat = AAudioStream_getFormat(outputStream);
+ printf(" dataFormat: requested = %d, actual = %d\n", requestedOutputFormat, actualOutputFormat);
+ assert(actualOutputFormat == AAUDIO_FORMAT_PCM_FLOAT);
+
+ // Allocate a buffer for the audio data.
+ loopbackData.inputFramesMaximum = 32 * framesPerBurst;
+
+ loopbackData.inputData = new int16_t[loopbackData.inputFramesMaximum * loopbackData.actualInputChannelCount];
+ loopbackData.conversionBuffer = new float[loopbackData.inputFramesMaximum *
+ loopbackData.actualInputChannelCount];
+
+ result = AAudioStream_setBufferSizeInFrames(outputStream, burstsPerBuffer * framesPerBurst);
+ if (result < 0) { // may be positive buffer size
+ fprintf(stderr, "ERROR - AAudioStream_setBufferSize() returned %d\n", result);
+ goto finish;
+ }
+ printf("AAudioStream_setBufferSize() actual = %d\n",result);
+
+ // Start output first so input stream runs low.
+ result = AAudioStream_requestStart(outputStream);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - AAudioStream_requestStart(output) returned %d = %s\n",
+ result, AAudio_convertResultToText(result));
+ goto finish;
+ }
+
+ result = AAudioStream_requestStart(loopbackData.inputStream);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - AAudioStream_requestStart(input) returned %d = %s\n",
+ result, AAudio_convertResultToText(result));
+ goto finish;
+ }
+
+ printf("------- sleep while the callback runs --------------\n");
+ fflush(stdout);
+ sleep(NUM_SECONDS);
+
+
+ printf("input error = %d = %s\n",
+ loopbackData.inputError, AAudio_convertResultToText(loopbackData.inputError));
+
+ printf("AAudioStream_getXRunCount %d\n", AAudioStream_getXRunCount(outputStream));
+ printf("framesRead = %d\n", (int) AAudioStream_getFramesRead(outputStream));
+ printf("framesWritten = %d\n", (int) AAudioStream_getFramesWritten(outputStream));
+
+ latency = loopbackData.loopbackProcessor.calculateAverageLatency(&deviation);
+ printf("measured peak = %8.5f\n", loopbackData.loopbackProcessor.getMaxAmplitude());
+ printf("threshold = %8.5f\n", INPUT_PEAK_THRESHOLD);
+ printf("measured average = %8.5f\n", loopbackData.loopbackProcessor.getAverageAmplitude());
+ printf("# latency measurements = %d\n", loopbackData.loopbackProcessor.getMeasurementCount());
+ printf("measured latency = %8.2f +/- %4.5f frames\n", latency, deviation);
+ printf("measured latency = %8.2f msec <===== !!\n", (1000.0 * latency / actualSampleRate));
+
+ {
+ int written = loopbackData.audioRecorder.save(FILENAME);
+ printf("wrote %d samples to %s\n", written, FILENAME);
+ }
+
+finish:
+ AAudioStream_close(outputStream);
+ AAudioStream_close(loopbackData.inputStream);
+ delete[] loopbackData.conversionBuffer;
+ delete[] loopbackData.inputData;
+ delete[] outputData;
+ AAudioStreamBuilder_delete(builder);
+
+ printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+ return (result != AAUDIO_OK) ? EXIT_FAILURE : EXIT_SUCCESS;
+}
+
diff --git a/media/libaaudio/examples/utils/AAudioExampleUtils.h b/media/libaaudio/examples/utils/AAudioExampleUtils.h
new file mode 100644
index 0000000..66de25f
--- /dev/null
+++ b/media/libaaudio/examples/utils/AAudioExampleUtils.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_EXAMPLE_UTILS_H
+#define AAUDIO_EXAMPLE_UTILS_H
+
+#include <unistd.h>
+#include <sched.h>
+#include <aaudio/AAudio.h>
+
+#define NANOS_PER_MICROSECOND ((int64_t)1000)
+#define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
+#define NANOS_PER_SECOND (NANOS_PER_MILLISECOND * 1000)
+
+static const char *getSharingModeText(aaudio_sharing_mode_t mode) {
+ const char *modeText = "unknown";
+ switch (mode) {
+ case AAUDIO_SHARING_MODE_EXCLUSIVE:
+ modeText = "EXCLUSIVE";
+ break;
+ case AAUDIO_SHARING_MODE_SHARED:
+ modeText = "SHARED";
+ break;
+ default:
+ break;
+ }
+ return modeText;
+}
+
+static int64_t getNanoseconds(clockid_t clockId = CLOCK_MONOTONIC) {
+ struct timespec time;
+ int result = clock_gettime(clockId, &time);
+ if (result < 0) {
+ return -errno;
+ }
+ return (time.tv_sec * NANOS_PER_SECOND) + time.tv_nsec;
+}
+
+static void displayPeakLevel(float peakLevel) {
+ printf("%5.3f ", peakLevel);
+ const int maxStars = 50; // arbitrary, fits on one line
+ int numStars = (int) (peakLevel * maxStars);
+ for (int i = 0; i < numStars; i++) {
+ printf("*");
+ }
+ printf("\n");
+}
+
+#endif // AAUDIO_EXAMPLE_UTILS_H
diff --git a/media/libaaudio/examples/utils/AAudioSimplePlayer.h b/media/libaaudio/examples/utils/AAudioSimplePlayer.h
new file mode 100644
index 0000000..aaeb25f
--- /dev/null
+++ b/media/libaaudio/examples/utils/AAudioSimplePlayer.h
@@ -0,0 +1,253 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Play sine waves using an AAudio callback.
+
+#ifndef AAUDIO_SIMPLE_PLAYER_H
+#define AAUDIO_SIMPLE_PLAYER_H
+
+#include <unistd.h>
+#include <sched.h>
+
+#include <aaudio/AAudio.h>
+#include "SineGenerator.h"
+
+//#define SHARING_MODE AAUDIO_SHARING_MODE_EXCLUSIVE
+#define SHARING_MODE AAUDIO_SHARING_MODE_SHARED
+#define PERFORMANCE_MODE AAUDIO_PERFORMANCE_MODE_NONE
+
+/**
+ * Simple wrapper for AAudio that opens an output stream either in callback or blocking write mode.
+ */
+class AAudioSimplePlayer {
+public:
+ AAudioSimplePlayer() {}
+ ~AAudioSimplePlayer() {
+ close();
+ };
+
+ /**
+ * Call this before calling open().
+ * @param requestedSharingMode
+ */
+ void setSharingMode(aaudio_sharing_mode_t requestedSharingMode) {
+ mRequestedSharingMode = requestedSharingMode;
+ }
+
+ /**
+ * Call this before calling open().
+ * @param requestedPerformanceMode
+ */
+ void setPerformanceMode(aaudio_performance_mode_t requestedPerformanceMode) {
+ mRequestedPerformanceMode = requestedPerformanceMode;
+ }
+
+ /**
+ * Also known as "sample rate"
+ * Only call this after open() has been called.
+ */
+ int32_t getFramesPerSecond() {
+ if (mStream == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+        return AAudioStream_getSampleRate(mStream);
+ }
+
+ /**
+ * Only call this after open() has been called.
+ */
+ int32_t getChannelCount() {
+ if (mStream == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+        return AAudioStream_getChannelCount(mStream);
+ }
+
+ /**
+ * Open a stream
+ */
+ aaudio_result_t open(int channelCount, int sampSampleRate, aaudio_format_t format,
+ AAudioStream_dataCallback dataProc, AAudioStream_errorCallback errorProc,
+ void *userContext) {
+ aaudio_result_t result = AAUDIO_OK;
+
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ result = AAudio_createStreamBuilder(&mBuilder);
+ if (result != AAUDIO_OK) return result;
+
+ //AAudioStreamBuilder_setSampleRate(mBuilder, 44100);
+ AAudioStreamBuilder_setPerformanceMode(mBuilder, mRequestedPerformanceMode);
+ AAudioStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
+ if (dataProc != nullptr) {
+ AAudioStreamBuilder_setDataCallback(mBuilder, dataProc, userContext);
+ }
+ if (errorProc != nullptr) {
+ AAudioStreamBuilder_setErrorCallback(mBuilder, errorProc, userContext);
+ }
+ AAudioStreamBuilder_setChannelCount(mBuilder, channelCount);
+ AAudioStreamBuilder_setSampleRate(mBuilder, sampSampleRate);
+ AAudioStreamBuilder_setFormat(mBuilder, format);
+ //AAudioStreamBuilder_setFramesPerDataCallback(mBuilder, CALLBACK_SIZE_FRAMES);
+ AAudioStreamBuilder_setBufferCapacityInFrames(mBuilder, 48 * 8);
+
+ //aaudio_performance_mode_t perfMode = AAUDIO_PERFORMANCE_MODE_NONE;
+ aaudio_performance_mode_t perfMode = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
+ //aaudio_performance_mode_t perfMode = AAUDIO_PERFORMANCE_MODE_POWER_SAVING;
+ AAudioStreamBuilder_setPerformanceMode(mBuilder, perfMode);
+
+ // Open an AAudioStream using the Builder.
+ result = AAudioStreamBuilder_openStream(mBuilder, &mStream);
+ if (result != AAUDIO_OK) goto finish1;
+
+ printf("AAudioStream_getFramesPerBurst() = %d\n",
+ AAudioStream_getFramesPerBurst(mStream));
+ printf("AAudioStream_getBufferSizeInFrames() = %d\n",
+ AAudioStream_getBufferSizeInFrames(mStream));
+ printf("AAudioStream_getBufferCapacityInFrames() = %d\n",
+ AAudioStream_getBufferCapacityInFrames(mStream));
+ printf("AAudioStream_getPerformanceMode() = %d, requested %d\n",
+ AAudioStream_getPerformanceMode(mStream), perfMode);
+
+ finish1:
+ AAudioStreamBuilder_delete(mBuilder);
+ mBuilder = nullptr;
+ return result;
+ }
+
+ aaudio_result_t close() {
+ if (mStream != nullptr) {
+ printf("call AAudioStream_close(%p)\n", mStream); fflush(stdout);
+ AAudioStream_close(mStream);
+ mStream = nullptr;
+ AAudioStreamBuilder_delete(mBuilder);
+ mBuilder = nullptr;
+ }
+ return AAUDIO_OK;
+ }
+
+ // Write zero data to fill up the buffer and prevent underruns.
+ aaudio_result_t prime() {
+ int32_t samplesPerFrame = AAudioStream_getChannelCount(mStream);
+ const int numFrames = 32;
+ float zeros[numFrames * samplesPerFrame];
+ memset(zeros, 0, sizeof(zeros));
+ aaudio_result_t result = numFrames;
+ while (result == numFrames) {
+ result = AAudioStream_write(mStream, zeros, numFrames, 0);
+ }
+ return result;
+ }
+
+ // Start the stream. AAudio will start calling your callback function.
+ aaudio_result_t start() {
+ aaudio_result_t result = AAudioStream_requestStart(mStream);
+ if (result != AAUDIO_OK) {
+ printf("ERROR - AAudioStream_requestStart() returned %d %s\n",
+ result, AAudio_convertResultToText(result));
+ }
+ return result;
+ }
+
+ // Stop the stream. AAudio will stop calling your callback function.
+ aaudio_result_t stop() {
+ aaudio_result_t result = AAudioStream_requestStop(mStream);
+ if (result != AAUDIO_OK) {
+ printf("ERROR - AAudioStream_requestStop() returned %d %s\n",
+ result, AAudio_convertResultToText(result));
+ }
+ int32_t xRunCount = AAudioStream_getXRunCount(mStream);
+ printf("AAudioStream_getXRunCount %d\n", xRunCount);
+ return result;
+ }
+
+ AAudioStream *getStream() const {
+ return mStream;
+ }
+
+private:
+ AAudioStreamBuilder *mBuilder = nullptr;
+ AAudioStream *mStream = nullptr;
+ aaudio_sharing_mode_t mRequestedSharingMode = SHARING_MODE;
+ aaudio_performance_mode_t mRequestedPerformanceMode = PERFORMANCE_MODE;
+};
+
+typedef struct SineThreadedData_s {
+ SineGenerator sineOsc1;
+ SineGenerator sineOsc2;
+ int scheduler;
+ bool schedulerChecked;
+} SineThreadedData_t;
+
+// Callback function that fills the audio output buffer.
+inline aaudio_data_callback_result_t SimplePlayerDataCallbackProc(
+ AAudioStream *stream,
+ void *userData,
+ void *audioData,
+ int32_t numFrames
+ ) {
+
+ // should not happen but just in case...
+ if (userData == nullptr) {
+ fprintf(stderr, "ERROR - SimplePlayerDataCallbackProc needs userData\n");
+ return AAUDIO_CALLBACK_RESULT_STOP;
+ }
+ SineThreadedData_t *sineData = (SineThreadedData_t *) userData;
+
+ if (!sineData->schedulerChecked) {
+ sineData->scheduler = sched_getscheduler(gettid());
+ sineData->schedulerChecked = true;
+ }
+
+ int32_t samplesPerFrame = AAudioStream_getChannelCount(stream);
+ // This code only plays on the first one or two channels.
+ // TODO Support arbitrary number of channels.
+ switch (AAudioStream_getFormat(stream)) {
+ case AAUDIO_FORMAT_PCM_I16: {
+ int16_t *audioBuffer = (int16_t *) audioData;
+ // Render sine waves as shorts to first channel.
+ sineData->sineOsc1.render(&audioBuffer[0], samplesPerFrame, numFrames);
+ // Render sine waves to second channel if there is one.
+ if (samplesPerFrame > 1) {
+ sineData->sineOsc2.render(&audioBuffer[1], samplesPerFrame, numFrames);
+ }
+ }
+ break;
+ case AAUDIO_FORMAT_PCM_FLOAT: {
+ float *audioBuffer = (float *) audioData;
+ // Render sine waves as floats to first channel.
+ sineData->sineOsc1.render(&audioBuffer[0], samplesPerFrame, numFrames);
+ // Render sine waves to second channel if there is one.
+ if (samplesPerFrame > 1) {
+ sineData->sineOsc2.render(&audioBuffer[1], samplesPerFrame, numFrames);
+ }
+ }
+ break;
+ default:
+ return AAUDIO_CALLBACK_RESULT_STOP;
+ }
+
+ return AAUDIO_CALLBACK_RESULT_CONTINUE;
+}
+
+inline void SimplePlayerErrorCallbackProc(
+ AAudioStream *stream __unused,
+ void *userData __unused,
+ aaudio_result_t error)
+{
+ printf("Error Callback, error: %d\n",(int)error);
+}
+
+#endif //AAUDIO_SIMPLE_PLAYER_H
diff --git a/media/libaaudio/examples/utils/AAudioSimpleRecorder.h b/media/libaaudio/examples/utils/AAudioSimpleRecorder.h
new file mode 100644
index 0000000..9e7c463
--- /dev/null
+++ b/media/libaaudio/examples/utils/AAudioSimpleRecorder.h
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Record input using AAudio and display the peak amplitudes.
+
+#ifndef AAUDIO_SIMPLE_RECORDER_H
+#define AAUDIO_SIMPLE_RECORDER_H
+
+#include <aaudio/AAudio.h>
+
+//#define SHARING_MODE AAUDIO_SHARING_MODE_EXCLUSIVE
+#define SHARING_MODE AAUDIO_SHARING_MODE_SHARED
+#define PERFORMANCE_MODE AAUDIO_PERFORMANCE_MODE_NONE
+/**
+ * Simple wrapper for AAudio that opens an input stream either in callback or blocking read mode.
+ */
+class AAudioSimpleRecorder {
+public:
+ AAudioSimpleRecorder() {}
+ ~AAudioSimpleRecorder() {
+ close();
+ };
+
+ /**
+ * Call this before calling open().
+ * @param requestedSharingMode
+ */
+ void setSharingMode(aaudio_sharing_mode_t requestedSharingMode) {
+ mRequestedSharingMode = requestedSharingMode;
+ }
+
+ /**
+ * Call this before calling open().
+ * @param requestedPerformanceMode
+ */
+ void setPerformanceMode(aaudio_performance_mode_t requestedPerformanceMode) {
+ mRequestedPerformanceMode = requestedPerformanceMode;
+ }
+
+ /**
+ * Also known as "sample rate"
+ * Only call this after open() has been called.
+ */
+ int32_t getFramesPerSecond() {
+ if (mStream == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+        return AAudioStream_getSampleRate(mStream);
+ }
+
+ /**
+ * Only call this after open() has been called.
+ */
+ int32_t getSamplesPerFrame() {
+ if (mStream == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+        return AAudioStream_getSamplesPerFrame(mStream);
+ }
+ /**
+ * Only call this after open() has been called.
+ */
+ int64_t getFramesRead() {
+ if (mStream == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+        return AAudioStream_getFramesRead(mStream);
+ }
+
+ /**
+ * Open a stream
+ */
+ aaudio_result_t open(int channelCount, int sampSampleRate, aaudio_format_t format,
+ AAudioStream_dataCallback dataProc, AAudioStream_errorCallback errorProc,
+ void *userContext) {
+ aaudio_result_t result = AAUDIO_OK;
+
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ result = AAudio_createStreamBuilder(&mBuilder);
+ if (result != AAUDIO_OK) return result;
+
+ AAudioStreamBuilder_setDirection(mBuilder, AAUDIO_DIRECTION_INPUT);
+ AAudioStreamBuilder_setPerformanceMode(mBuilder, mRequestedPerformanceMode);
+ AAudioStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
+ if (dataProc != nullptr) {
+ AAudioStreamBuilder_setDataCallback(mBuilder, dataProc, userContext);
+ }
+ if (errorProc != nullptr) {
+ AAudioStreamBuilder_setErrorCallback(mBuilder, errorProc, userContext);
+ }
+ AAudioStreamBuilder_setChannelCount(mBuilder, channelCount);
+ AAudioStreamBuilder_setSampleRate(mBuilder, sampSampleRate);
+ AAudioStreamBuilder_setFormat(mBuilder, format);
+
+ // Open an AAudioStream using the Builder.
+ result = AAudioStreamBuilder_openStream(mBuilder, &mStream);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - AAudioStreamBuilder_openStream() returned %d %s\n",
+ result, AAudio_convertResultToText(result));
+ goto finish1;
+ }
+
+ printf("AAudioStream_getFramesPerBurst() = %d\n",
+ AAudioStream_getFramesPerBurst(mStream));
+ printf("AAudioStream_getBufferSizeInFrames() = %d\n",
+ AAudioStream_getBufferSizeInFrames(mStream));
+ printf("AAudioStream_getBufferCapacityInFrames() = %d\n",
+ AAudioStream_getBufferCapacityInFrames(mStream));
+ return result;
+
+ finish1:
+ AAudioStreamBuilder_delete(mBuilder);
+ mBuilder = nullptr;
+ return result;
+ }
+
+ aaudio_result_t close() {
+ if (mStream != nullptr) {
+ printf("call AAudioStream_close(%p)\n", mStream); fflush(stdout);
+ AAudioStream_close(mStream);
+ mStream = nullptr;
+ AAudioStreamBuilder_delete(mBuilder);
+ mBuilder = nullptr;
+ }
+ return AAUDIO_OK;
+ }
+
+ // Write zero data to fill up the buffer and prevent underruns.
+ aaudio_result_t prime() {
+ int32_t samplesPerFrame = AAudioStream_getSamplesPerFrame(mStream);
+ const int numFrames = 32; // arbitrary
+ float zeros[numFrames * samplesPerFrame];
+ memset(zeros, 0, sizeof(zeros));
+ aaudio_result_t result = numFrames;
+ while (result == numFrames) {
+ result = AAudioStream_write(mStream, zeros, numFrames, 0);
+ }
+ return result;
+ }
+
+ // Start the stream. AAudio will start calling your callback function.
+ aaudio_result_t start() {
+ aaudio_result_t result = AAudioStream_requestStart(mStream);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d %s\n",
+ result, AAudio_convertResultToText(result));
+ }
+ return result;
+ }
+
+ // Stop the stream. AAudio will stop calling your callback function.
+ aaudio_result_t stop() {
+ aaudio_result_t result = AAudioStream_requestStop(mStream);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - AAudioStream_requestStop() returned %d %s\n",
+ result, AAudio_convertResultToText(result));
+ }
+ return result;
+ }
+
+ // Pause the stream. AAudio will stop calling your callback function.
+ aaudio_result_t pause() {
+ aaudio_result_t result = AAudioStream_requestPause(mStream);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - AAudioStream_requestPause() returned %d %s\n",
+ result, AAudio_convertResultToText(result));
+ }
+ return result;
+ }
+
+ AAudioStream *getStream() const {
+ return mStream;
+ }
+
+private:
+ AAudioStreamBuilder *mBuilder = nullptr;
+ AAudioStream *mStream = nullptr;
+ aaudio_sharing_mode_t mRequestedSharingMode = SHARING_MODE;
+ aaudio_performance_mode_t mRequestedPerformanceMode = PERFORMANCE_MODE;
+};
+
+// Application data that gets passed to the callback.
+typedef struct PeakTrackerData {
+ float peakLevel;
+} PeakTrackerData_t;
+
+#define DECAY_FACTOR 0.999
+
+// Callback function that fills the audio output buffer.
+inline aaudio_data_callback_result_t SimpleRecorderDataCallbackProc(
+ AAudioStream *stream,
+ void *userData,
+ void *audioData,
+ int32_t numFrames
+ ) {
+
+ // should not happen but just in case...
+ if (userData == nullptr) {
+ fprintf(stderr, "ERROR - SimpleRecorderDataCallbackProc needs userData\n");
+ return AAUDIO_CALLBACK_RESULT_STOP;
+ }
+ PeakTrackerData_t *data = (PeakTrackerData_t *) userData;
+ // printf("MyCallbackProc(): frameCount = %d\n", numFrames);
+ int32_t samplesPerFrame = AAudioStream_getSamplesPerFrame(stream);
+ float sample;
+ // This code assume mono or stereo.
+ switch (AAudioStream_getFormat(stream)) {
+ case AAUDIO_FORMAT_PCM_I16: {
+ int16_t *audioBuffer = (int16_t *) audioData;
+ // Peak follower
+ for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
+ sample = audioBuffer[frameIndex * samplesPerFrame] * (1.0/32768);
+ data->peakLevel *= DECAY_FACTOR;
+ if (sample > data->peakLevel) {
+ data->peakLevel = sample;
+ }
+ }
+ }
+ break;
+ case AAUDIO_FORMAT_PCM_FLOAT: {
+ float *audioBuffer = (float *) audioData;
+ // Peak follower
+ for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
+ sample = audioBuffer[frameIndex * samplesPerFrame];
+ data->peakLevel *= DECAY_FACTOR;
+ if (sample > data->peakLevel) {
+ data->peakLevel = sample;
+ }
+ }
+ }
+ break;
+ default:
+ return AAUDIO_CALLBACK_RESULT_STOP;
+ }
+
+ return AAUDIO_CALLBACK_RESULT_CONTINUE;
+}
+
+inline void SimpleRecorderErrorCallbackProc(
+ AAudioStream *stream __unused,
+ void *userData __unused,
+ aaudio_result_t error)
+{
+ printf("Error Callback, error: %d\n",(int)error);
+}
+
+#endif //AAUDIO_SIMPLE_RECORDER_H
diff --git a/media/libaaudio/examples/utils/SineGenerator.h b/media/libaaudio/examples/utils/SineGenerator.h
new file mode 100644
index 0000000..64b772d
--- /dev/null
+++ b/media/libaaudio/examples/utils/SineGenerator.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SINE_GENERATOR_H
+#define SINE_GENERATOR_H
+
+#include <math.h>
+
+class SineGenerator
+{
+public:
+ SineGenerator() {}
+ virtual ~SineGenerator() = default;
+
+ void setup(double frequency, double frameRate) {
+ mFrameRate = frameRate;
+ mPhaseIncrement = frequency * M_PI * 2 / frameRate;
+ }
+
+ void setSweep(double frequencyLow, double frequencyHigh, double seconds) {
+ mPhaseIncrementLow = frequencyLow * M_PI * 2 / mFrameRate;
+ mPhaseIncrementHigh = frequencyHigh * M_PI * 2 / mFrameRate;
+
+ double numFrames = seconds * mFrameRate;
+ mUpScaler = pow((frequencyHigh / frequencyLow), (1.0 / numFrames));
+ mDownScaler = 1.0 / mUpScaler;
+ mGoingUp = true;
+ mSweeping = true;
+ }
+
+ void render(int16_t *buffer, int32_t channelStride, int32_t numFrames) {
+ int sampleIndex = 0;
+ for (int i = 0; i < numFrames; i++) {
+ buffer[sampleIndex] = (int16_t) (32767 * sin(mPhase) * mAmplitude);
+ sampleIndex += channelStride;
+ advancePhase();
+ }
+ }
+ void render(float *buffer, int32_t channelStride, int32_t numFrames) {
+ int sampleIndex = 0;
+ for (int i = 0; i < numFrames; i++) {
+ buffer[sampleIndex] = sin(mPhase) * mAmplitude;
+ sampleIndex += channelStride;
+ advancePhase();
+ }
+ }
+
+private:
+ void advancePhase() {
+ mPhase += mPhaseIncrement;
+ if (mPhase > M_PI * 2) {
+ mPhase -= M_PI * 2;
+ }
+ if (mSweeping) {
+ if (mGoingUp) {
+ mPhaseIncrement *= mUpScaler;
+ if (mPhaseIncrement > mPhaseIncrementHigh) {
+ mGoingUp = false;
+ }
+ } else {
+ mPhaseIncrement *= mDownScaler;
+ if (mPhaseIncrement < mPhaseIncrementLow) {
+ mGoingUp = true;
+ }
+ }
+ }
+ }
+
+ double mAmplitude = 0.05; // unitless scaler
+ double mPhase = 0.0;
+ double mPhaseIncrement = 440 * M_PI * 2 / 48000;
+ double mFrameRate = 48000;
+ double mPhaseIncrementLow;
+ double mPhaseIncrementHigh;
+ double mUpScaler = 1.0;
+ double mDownScaler = 1.0;
+ bool mGoingUp = false;
+ bool mSweeping = false;
+};
+
+#endif /* SINE_GENERATOR_H */
+
diff --git a/media/libaaudio/examples/write_sine/Android.mk b/media/libaaudio/examples/write_sine/Android.mk
new file mode 100644
index 0000000..5053e7d
--- /dev/null
+++ b/media/libaaudio/examples/write_sine/Android.mk
@@ -0,0 +1 @@
+include $(call all-subdir-makefiles)
diff --git a/media/libaaudio/examples/write_sine/README.md b/media/libaaudio/examples/write_sine/README.md
new file mode 100644
index 0000000..b150471
--- /dev/null
+++ b/media/libaaudio/examples/write_sine/README.md
@@ -0,0 +1,7 @@
+# cd to this directory
+mkdir -p jni/include/aaudio
+ln -s $PLATFORM/frameworks/av/media/liboboe/include/aaudio/*.h jni/include/aaudio
+ln -s $PLATFORM/out/target/product/$TARGET_PRODUCT/symbols/out/soong/ndk/platforms/android-current/arch-arm64/usr/lib/liboboe.so jni
+$NDK/ndk-build
+adb push libs/arm64-v8a/write_sine_threaded /data
+adb shell /data/write_sine_threaded
diff --git a/media/libaaudio/examples/write_sine/jni/Android.mk b/media/libaaudio/examples/write_sine/jni/Android.mk
new file mode 100644
index 0000000..c306ed3
--- /dev/null
+++ b/media/libaaudio/examples/write_sine/jni/Android.mk
@@ -0,0 +1,33 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+ $(call include-path-for, audio-utils) \
+ frameworks/av/media/libaaudio/include \
+ frameworks/av/media/libaaudio/src \
+ frameworks/av/media/libaaudio/examples/utils
+
+# NDK recommends using this kind of relative path instead of an absolute path.
+LOCAL_SRC_FILES:= ../src/write_sine.cpp
+LOCAL_SHARED_LIBRARIES := libaaudio
+LOCAL_MODULE := write_sine_ndk
+include $(BUILD_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+ $(call include-path-for, audio-utils) \
+ frameworks/av/media/libaaudio/include \
+ frameworks/av/media/libaaudio/examples/utils
+
+LOCAL_SRC_FILES:= ../src/write_sine_callback.cpp
+LOCAL_SHARED_LIBRARIES := libaaudio
+LOCAL_MODULE := write_sine_callback_ndk
+include $(BUILD_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := libaaudio_prebuilt
+LOCAL_SRC_FILES := libaaudio.so
+LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
+include $(PREBUILT_SHARED_LIBRARY)
diff --git a/media/libaaudio/examples/write_sine/jni/Application.mk b/media/libaaudio/examples/write_sine/jni/Application.mk
new file mode 100644
index 0000000..ba44f37
--- /dev/null
+++ b/media/libaaudio/examples/write_sine/jni/Application.mk
@@ -0,0 +1 @@
+APP_CPPFLAGS += -std=c++11
diff --git a/media/libaaudio/examples/write_sine/src/write_sine.cpp b/media/libaaudio/examples/write_sine/src/write_sine.cpp
new file mode 100644
index 0000000..6522ba4
--- /dev/null
+++ b/media/libaaudio/examples/write_sine/src/write_sine.cpp
@@ -0,0 +1,229 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Play sine waves using AAudio.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <aaudio/AAudio.h>
+#include <aaudio/AAudioTesting.h>
+#include "AAudioExampleUtils.h"
+#include "AAudioSimplePlayer.h"
+
+#define SAMPLE_RATE 48000
+#define NUM_SECONDS 20
+
+#define MMAP_POLICY AAUDIO_UNSPECIFIED
+//#define MMAP_POLICY AAUDIO_POLICY_NEVER
+//#define MMAP_POLICY AAUDIO_POLICY_AUTO
+//#define MMAP_POLICY AAUDIO_POLICY_ALWAYS
+
+#define REQUESTED_FORMAT AAUDIO_FORMAT_PCM_I16
+
+#define REQUESTED_SHARING_MODE AAUDIO_SHARING_MODE_SHARED
+//#define REQUESTED_SHARING_MODE AAUDIO_SHARING_MODE_EXCLUSIVE
+
+
+int main(int argc, char **argv)
+{
+ (void)argc; // unused
+
+ AAudioSimplePlayer player;
+ SineThreadedData_t myData;
+ aaudio_result_t result = AAUDIO_OK;
+
+ const int requestedChannelCount = 2;
+ int actualChannelCount = 0;
+ const int requestedSampleRate = SAMPLE_RATE;
+ int actualSampleRate = 0;
+ aaudio_format_t requestedDataFormat = REQUESTED_FORMAT;
+ aaudio_format_t actualDataFormat = AAUDIO_FORMAT_UNSPECIFIED;
+ aaudio_sharing_mode_t actualSharingMode = AAUDIO_SHARING_MODE_SHARED;
+
+ AAudioStream *aaudioStream = nullptr;
+ aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNINITIALIZED;
+ int32_t framesPerBurst = 0;
+ int32_t framesPerWrite = 0;
+ int32_t bufferCapacity = 0;
+ int32_t framesToPlay = 0;
+ int32_t framesLeft = 0;
+ int32_t xRunCount = 0;
+ float *floatData = nullptr;
+ int16_t *shortData = nullptr;
+
+ // Make printf print immediately so that debug info is not stuck
+ // in a buffer if we hang or crash.
+ setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+
+ printf("%s - Play a sine wave using AAudio\n", argv[0]);
+
+ AAudio_setMMapPolicy(MMAP_POLICY);
+ printf("requested MMapPolicy = %d\n", AAudio_getMMapPolicy());
+
+ player.setSharingMode(REQUESTED_SHARING_MODE);
+
+ result = player.open(requestedChannelCount, requestedSampleRate, requestedDataFormat,
+ nullptr, nullptr, &myData);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - player.open() returned %d\n", result);
+ goto finish;
+ }
+
+ aaudioStream = player.getStream();
+ // Request stream properties.
+
+ state = AAudioStream_getState(aaudioStream);
+ printf("after open, state = %s\n", AAudio_convertStreamStateToText(state));
+
+ // Check to see what kind of stream we actually got.
+ actualSampleRate = AAudioStream_getSampleRate(aaudioStream);
+ printf("SampleRate: requested = %d, actual = %d\n", requestedSampleRate, actualSampleRate);
+
+ myData.sineOsc1.setup(440.0, actualSampleRate);
+ myData.sineOsc2.setup(660.0, actualSampleRate);
+
+ actualChannelCount = AAudioStream_getChannelCount(aaudioStream);
+ printf("ChannelCount: requested = %d, actual = %d\n",
+ requestedChannelCount, actualChannelCount);
+
+ actualSharingMode = AAudioStream_getSharingMode(aaudioStream);
+ printf("SharingMode: requested = %s, actual = %s\n",
+ getSharingModeText(REQUESTED_SHARING_MODE),
+ getSharingModeText(actualSharingMode));
+
+ // This is the number of frames that are read in one chunk by a DMA controller
+ // or a DSP or a mixer.
+ framesPerBurst = AAudioStream_getFramesPerBurst(aaudioStream);
+ printf("Buffer: bufferSize = %d\n", AAudioStream_getBufferSizeInFrames(aaudioStream));
+ bufferCapacity = AAudioStream_getBufferCapacityInFrames(aaudioStream);
+ printf("Buffer: bufferCapacity = %d, remainder = %d\n",
+ bufferCapacity, bufferCapacity % framesPerBurst);
+
+ // Some DMA might use very short bursts of 16 frames. We don't need to write such small
+ // buffers. But it helps to use a multiple of the burst size for predictable scheduling.
+ framesPerWrite = framesPerBurst;
+ while (framesPerWrite < 48) {
+ framesPerWrite *= 2;
+ }
+ printf("Buffer: framesPerBurst = %d\n",framesPerBurst);
+ printf("Buffer: framesPerWrite = %d\n",framesPerWrite);
+
+ printf("PerformanceMode = %d\n", AAudioStream_getPerformanceMode(aaudioStream));
+ printf("is MMAP used? = %s\n", AAudioStream_isMMapUsed(aaudioStream) ? "yes" : "no");
+
+ actualDataFormat = AAudioStream_getFormat(aaudioStream);
+ printf("DataFormat: requested = %d, actual = %d\n", REQUESTED_FORMAT, actualDataFormat);
+ // TODO handle other data formats
+
+ // Allocate a buffer for the audio data.
+ if (actualDataFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+ floatData = new float[framesPerWrite * actualChannelCount];
+ } else if (actualDataFormat == AAUDIO_FORMAT_PCM_I16) {
+ shortData = new int16_t[framesPerWrite * actualChannelCount];
+ } else {
+ printf("ERROR Unsupported data format!\n");
+ goto finish;
+ }
+
+ // Start the stream.
+ printf("call player.start()\n");
+ result = player.start();
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d\n", result);
+ goto finish;
+ }
+
+ state = AAudioStream_getState(aaudioStream);
+ printf("after start, state = %s\n", AAudio_convertStreamStateToText(state));
+
+ // Play for a while.
+ framesToPlay = actualSampleRate * NUM_SECONDS;
+ framesLeft = framesToPlay;
+ while (framesLeft > 0) {
+
+ if (actualDataFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+ // Render sine waves to left and right channels.
+ myData.sineOsc1.render(&floatData[0], actualChannelCount, framesPerWrite);
+ if (actualChannelCount > 1) {
+ myData.sineOsc2.render(&floatData[1], actualChannelCount, framesPerWrite);
+ }
+ } else if (actualDataFormat == AAUDIO_FORMAT_PCM_I16) {
+ // Render sine waves to left and right channels.
+ myData.sineOsc1.render(&shortData[0], actualChannelCount, framesPerWrite);
+ if (actualChannelCount > 1) {
+ myData.sineOsc2.render(&shortData[1], actualChannelCount, framesPerWrite);
+ }
+ }
+
+ // Write audio data to the stream.
+ int64_t timeoutNanos = 1000 * NANOS_PER_MILLISECOND;
+ int32_t minFrames = (framesToPlay < framesPerWrite) ? framesToPlay : framesPerWrite;
+ int32_t actual = 0;
+ if (actualDataFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+ actual = AAudioStream_write(aaudioStream, floatData, minFrames, timeoutNanos);
+ } else if (actualDataFormat == AAUDIO_FORMAT_PCM_I16) {
+ actual = AAudioStream_write(aaudioStream, shortData, minFrames, timeoutNanos);
+ }
+ if (actual < 0) {
+ fprintf(stderr, "ERROR - AAudioStream_write() returned %d\n", actual);
+ goto finish;
+ } else if (actual == 0) {
+ fprintf(stderr, "WARNING - AAudioStream_write() returned %d\n", actual);
+ goto finish;
+ }
+ framesLeft -= actual;
+
+ // Use timestamp to estimate latency.
+ /*
+ {
+ int64_t presentationFrame;
+ int64_t presentationTime;
+ result = AAudioStream_getTimestamp(aaudioStream,
+ CLOCK_MONOTONIC,
+ &presentationFrame,
+ &presentationTime
+ );
+ if (result == AAUDIO_OK) {
+ int64_t elapsedNanos = getNanoseconds() - presentationTime;
+ int64_t elapsedFrames = actualSampleRate * elapsedNanos / NANOS_PER_SECOND;
+ int64_t currentFrame = presentationFrame + elapsedFrames;
+ int64_t framesWritten = AAudioStream_getFramesWritten(aaudioStream);
+ int64_t estimatedLatencyFrames = framesWritten - currentFrame;
+ int64_t estimatedLatencyMillis = estimatedLatencyFrames * 1000 / actualSampleRate;
+ printf("estimatedLatencyMillis %d\n", (int)estimatedLatencyMillis);
+ }
+ }
+ */
+ }
+
+ xRunCount = AAudioStream_getXRunCount(aaudioStream);
+ printf("AAudioStream_getXRunCount %d\n", xRunCount);
+
+ printf("call stop()\n");
+ result = player.stop();
+ if (result != AAUDIO_OK) {
+ goto finish;
+ }
+
+finish:
+ player.close();
+ delete[] floatData;
+ delete[] shortData;
+ printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+ return (result != AAUDIO_OK) ? EXIT_FAILURE : EXIT_SUCCESS;
+}
+
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
new file mode 100644
index 0000000..69145aa
--- /dev/null
+++ b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Play sine waves using an AAudio callback.
+
+#include <assert.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <sched.h>
+#include <stdio.h>
+#include <math.h>
+#include <time.h>
+#include <aaudio/AAudio.h>
+#include "AAudioExampleUtils.h"
+#include "AAudioSimplePlayer.h"
+
+#define NUM_SECONDS 5
+
+// Application data that gets passed to the callback.
+#define MAX_FRAME_COUNT_RECORDS 256
+
+int main(int argc, char **argv)
+{
+ (void)argc; // unused
+ AAudioSimplePlayer player;
+ SineThreadedData_t myData;
+ aaudio_result_t result;
+
+ // Make printf print immediately so that debug info is not stuck
+ // in a buffer if we hang or crash.
+ setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+ printf("%s - Play a sine sweep using an AAudio callback\n", argv[0]);
+
+ myData.schedulerChecked = false;
+
+ result = player.open(2, 44100, AAUDIO_FORMAT_PCM_FLOAT,
+ SimplePlayerDataCallbackProc, SimplePlayerErrorCallbackProc, &myData);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - player.open() returned %d\n", result);
+ goto error;
+ }
+ printf("player.getFramesPerSecond() = %d\n", player.getFramesPerSecond());
+ printf("player.getChannelCount() = %d\n", player.getChannelCount());
+ myData.sineOsc1.setup(440.0, 48000);
+ myData.sineOsc1.setSweep(300.0, 600.0, 5.0);
+ myData.sineOsc2.setup(660.0, 48000);
+ myData.sineOsc2.setSweep(350.0, 900.0, 7.0);
+
+#if 0
+ result = player.prime(); // FIXME crashes AudioTrack.cpp
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - player.prime() returned %d\n", result);
+ goto error;
+ }
+#endif
+
+ result = player.start();
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - player.start() returned %d\n", result);
+ goto error;
+ }
+
+ printf("Sleep for %d seconds while audio plays in a callback thread.\n", NUM_SECONDS);
+ for (int second = 0; second < NUM_SECONDS; second++)
+ {
+ const struct timespec request = { .tv_sec = 1, .tv_nsec = 0 };
+ (void) clock_nanosleep(CLOCK_MONOTONIC, 0 /*flags*/, &request, NULL /*remain*/);
+
+ aaudio_stream_state_t state;
+ result = AAudioStream_waitForStateChange(player.getStream(),
+ AAUDIO_STREAM_STATE_CLOSED,
+ &state,
+ 0);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - AAudioStream_waitForStateChange() returned %d\n", result);
+ goto error;
+ }
+ if (state != AAUDIO_STREAM_STATE_STARTING && state != AAUDIO_STREAM_STATE_STARTED) {
+ printf("Stream state is %d %s!\n", state, AAudio_convertStreamStateToText(state));
+ break;
+ }
+ printf("framesWritten = %d\n", (int) AAudioStream_getFramesWritten(player.getStream()));
+ }
+ printf("Woke up now.\n");
+
+ printf("call stop()\n");
+ result = player.stop();
+ if (result != AAUDIO_OK) {
+ goto error;
+ }
+ printf("call close()\n");
+ result = player.close();
+ if (result != AAUDIO_OK) {
+ goto error;
+ }
+
+ if (myData.schedulerChecked) {
+ printf("scheduler = 0x%08x, SCHED_FIFO = 0x%08X\n",
+ myData.scheduler,
+ SCHED_FIFO);
+ }
+
+ printf("SUCCESS\n");
+ return EXIT_SUCCESS;
+error:
+ player.close();
+ printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+ return EXIT_FAILURE;
+}
+
diff --git a/media/libaaudio/examples/write_sine/static/Android.mk b/media/libaaudio/examples/write_sine/static/Android.mk
new file mode 100644
index 0000000..40dca34
--- /dev/null
+++ b/media/libaaudio/examples/write_sine/static/Android.mk
@@ -0,0 +1,38 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := examples
+LOCAL_C_INCLUDES := \
+ $(call include-path-for, audio-utils) \
+ frameworks/av/media/libaaudio/src \
+ frameworks/av/media/libaaudio/include \
+ frameworks/av/media/libaaudio/examples/utils
+
+# NDK recommends using this kind of relative path instead of an absolute path.
+LOCAL_SRC_FILES:= ../src/write_sine.cpp
+
+LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
+ libbinder libcutils libutils \
+ libaudioclient liblog libtinyalsa libaudiomanager
+LOCAL_STATIC_LIBRARIES := libaaudio
+
+LOCAL_MODULE := write_sine
+include $(BUILD_EXECUTABLE)
+
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+ $(call include-path-for, audio-utils) \
+ frameworks/av/media/libaaudio/include \
+ frameworks/av/media/libaaudio/examples/utils
+
+LOCAL_SRC_FILES:= ../src/write_sine_callback.cpp
+
+LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
+ libbinder libcutils libutils \
+ libaudioclient liblog libaudiomanager
+LOCAL_STATIC_LIBRARIES := libaaudio
+
+LOCAL_MODULE := write_sine_callback
+include $(BUILD_EXECUTABLE)
diff --git a/media/libaaudio/examples/write_sine/static/README.md b/media/libaaudio/examples/write_sine/static/README.md
new file mode 100644
index 0000000..6e26d7b
--- /dev/null
+++ b/media/libaaudio/examples/write_sine/static/README.md
@@ -0,0 +1,2 @@
+Makefile for building simple command line examples.
+They link with AAudio as a static library.
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
new file mode 100644
index 0000000..e1886ac
--- /dev/null
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -0,0 +1,829 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @addtogroup Audio
+ * @{
+ */
+
+/**
+ * @file AAudio.h
+ */
+
+/**
+ * This is the 'C' API for AAudio.
+ */
+#ifndef AAUDIO_AAUDIO_H
+#define AAUDIO_AAUDIO_H
+
+#include <time.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * This is used to represent a value that has not been specified.
+ * For example, an application could use AAUDIO_UNSPECIFIED to indicate
+ * that is did not not care what the specific value of a parameter was
+ * and would accept whatever it was given.
+ */
+#define AAUDIO_UNSPECIFIED 0
+
+enum {
+ AAUDIO_DIRECTION_OUTPUT,
+ AAUDIO_DIRECTION_INPUT
+};
+typedef int32_t aaudio_direction_t;
+
+enum {
+ AAUDIO_FORMAT_INVALID = -1,
+ AAUDIO_FORMAT_UNSPECIFIED = 0,
+ AAUDIO_FORMAT_PCM_I16,
+ AAUDIO_FORMAT_PCM_FLOAT
+};
+typedef int32_t aaudio_format_t;
+
+enum {
+ AAUDIO_OK,
+ AAUDIO_ERROR_BASE = -900, // TODO review
+ AAUDIO_ERROR_DISCONNECTED,
+ AAUDIO_ERROR_ILLEGAL_ARGUMENT,
+ // reserved
+ AAUDIO_ERROR_INTERNAL = AAUDIO_ERROR_ILLEGAL_ARGUMENT + 2,
+ AAUDIO_ERROR_INVALID_STATE,
+ // reserved
+ // reserved
+ AAUDIO_ERROR_INVALID_HANDLE = AAUDIO_ERROR_INVALID_STATE + 3,
+ // reserved
+ AAUDIO_ERROR_UNIMPLEMENTED = AAUDIO_ERROR_INVALID_HANDLE + 2,
+ AAUDIO_ERROR_UNAVAILABLE,
+ AAUDIO_ERROR_NO_FREE_HANDLES,
+ AAUDIO_ERROR_NO_MEMORY,
+ AAUDIO_ERROR_NULL,
+ AAUDIO_ERROR_TIMEOUT,
+ AAUDIO_ERROR_WOULD_BLOCK,
+ AAUDIO_ERROR_INVALID_FORMAT,
+ AAUDIO_ERROR_OUT_OF_RANGE,
+ AAUDIO_ERROR_NO_SERVICE,
+ AAUDIO_ERROR_INVALID_RATE
+};
+typedef int32_t aaudio_result_t;
+
+enum
+{
+ AAUDIO_STREAM_STATE_UNINITIALIZED = 0,
+ AAUDIO_STREAM_STATE_UNKNOWN,
+ AAUDIO_STREAM_STATE_OPEN,
+ AAUDIO_STREAM_STATE_STARTING,
+ AAUDIO_STREAM_STATE_STARTED,
+ AAUDIO_STREAM_STATE_PAUSING,
+ AAUDIO_STREAM_STATE_PAUSED,
+ AAUDIO_STREAM_STATE_FLUSHING,
+ AAUDIO_STREAM_STATE_FLUSHED,
+ AAUDIO_STREAM_STATE_STOPPING,
+ AAUDIO_STREAM_STATE_STOPPED,
+ AAUDIO_STREAM_STATE_CLOSING,
+ AAUDIO_STREAM_STATE_CLOSED,
+ AAUDIO_STREAM_STATE_DISCONNECTED
+};
+typedef int32_t aaudio_stream_state_t;
+
+
+enum {
+ /**
+ * This will be the only stream using a particular source or sink.
+ * This mode will provide the lowest possible latency.
+ * You should close EXCLUSIVE streams immediately when you are not using them.
+ */
+ AAUDIO_SHARING_MODE_EXCLUSIVE,
+ /**
+ * Multiple applications will be mixed by the AAudio Server.
+ * This will have higher latency than the EXCLUSIVE mode.
+ */
+ AAUDIO_SHARING_MODE_SHARED
+};
+typedef int32_t aaudio_sharing_mode_t;
+
+
+enum {
+ /**
+ * No particular performance needs. Default.
+ */
+ AAUDIO_PERFORMANCE_MODE_NONE = 10,
+
+ /**
+ * Extending battery life is most important.
+ */
+ AAUDIO_PERFORMANCE_MODE_POWER_SAVING,
+
+ /**
+ * Reducing latency is most important.
+ */
+ AAUDIO_PERFORMANCE_MODE_LOW_LATENCY
+};
+typedef int32_t aaudio_performance_mode_t;
+
+typedef struct AAudioStreamStruct AAudioStream;
+typedef struct AAudioStreamBuilderStruct AAudioStreamBuilder;
+
+#ifndef AAUDIO_API
+#define AAUDIO_API /* export this symbol */
+#endif
+
+// ============================================================
+// Audio System
+// ============================================================
+
+/**
+ * The text is the ASCII symbol corresponding to the returnCode,
+ * or an English message saying the returnCode is unrecognized.
+ * This is intended for developers to use when debugging.
+ * It is not for display to users.
+ *
+ * @return pointer to a text representation of an AAudio result code.
+ */
+AAUDIO_API const char * AAudio_convertResultToText(aaudio_result_t returnCode);
+
+/**
+ * The text is the ASCII symbol corresponding to the stream state,
+ * or an English message saying the state is unrecognized.
+ * This is intended for developers to use when debugging.
+ * It is not for display to users.
+ *
+ * @return pointer to a text representation of an AAudio state.
+ */
+AAUDIO_API const char * AAudio_convertStreamStateToText(aaudio_stream_state_t state);
+
+// ============================================================
+// StreamBuilder
+// ============================================================
+
+/**
+ * Create a StreamBuilder that can be used to open a Stream.
+ *
+ * The deviceId is initially unspecified, meaning that the current default device will be used.
+ *
+ * The default direction is AAUDIO_DIRECTION_OUTPUT.
+ * The default sharing mode is AAUDIO_SHARING_MODE_SHARED.
+ * The data format, samplesPerFrames and sampleRate are unspecified and will be
+ * chosen by the device when it is opened.
+ *
+ * AAudioStreamBuilder_delete() must be called when you are done using the builder.
+ */
+AAUDIO_API aaudio_result_t AAudio_createStreamBuilder(AAudioStreamBuilder** builder);
+
+/**
+ * Request an audio device identified device using an ID.
+ * On Android, for example, the ID could be obtained from the Java AudioManager.
+ *
+ * The default, if you do not call this function, is AAUDIO_UNSPECIFIED,
+ * in which case the primary device will be used.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param deviceId device identifier or AAUDIO_UNSPECIFIED
+ */
+AAUDIO_API void AAudioStreamBuilder_setDeviceId(AAudioStreamBuilder* builder,
+ int32_t deviceId);
+
+/**
+ * Request a sample rate in Hertz.
+ *
+ * The default, if you do not call this function, is AAUDIO_UNSPECIFIED.
+ * An optimal value will then be chosen when the stream is opened.
+ * After opening a stream with an unspecified value, the application must
+ * query for the actual value, which may vary by device.
+ *
+ * If an exact value is specified then an opened stream will use that value.
+ * If a stream cannot be opened with the specified value then the open will fail.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param sampleRate frames per second. Common rates include 44100 and 48000 Hz.
+ */
+AAUDIO_API void AAudioStreamBuilder_setSampleRate(AAudioStreamBuilder* builder,
+ int32_t sampleRate);
+
+/**
+ * Request a number of channels for the stream.
+ *
+ * The default, if you do not call this function, is AAUDIO_UNSPECIFIED.
+ * An optimal value will then be chosen when the stream is opened.
+ * After opening a stream with an unspecified value, the application must
+ * query for the actual value, which may vary by device.
+ *
+ * If an exact value is specified then an opened stream will use that value.
+ * If a stream cannot be opened with the specified value then the open will fail.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param channelCount Number of channels desired.
+ */
+AAUDIO_API void AAudioStreamBuilder_setChannelCount(AAudioStreamBuilder* builder,
+ int32_t channelCount);
+
+/**
+ *
+ * @deprecated use AAudioStreamBuilder_setChannelCount()
+ */
+// TODO remove as soon as the NDK and OS are in sync, before RC1
+AAUDIO_API void AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder* builder,
+ int32_t samplesPerFrame);
+
+/**
+ * Request a sample data format, for example AAUDIO_FORMAT_PCM_I16.
+ *
+ * The default, if you do not call this function, is AAUDIO_UNSPECIFIED.
+ * An optimal value will then be chosen when the stream is opened.
+ * After opening a stream with an unspecified value, the application must
+ * query for the actual value, which may vary by device.
+ *
+ * If an exact value is specified then an opened stream will use that value.
+ * If a stream cannot be opened with the specified value then the open will fail.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param format common formats are AAUDIO_FORMAT_PCM_FLOAT and AAUDIO_FORMAT_PCM_I16.
+ */
+AAUDIO_API void AAudioStreamBuilder_setFormat(AAudioStreamBuilder* builder,
+ aaudio_format_t format);
+
+/**
+ * Request a mode for sharing the device.
+ *
+ * The default, if you do not call this function, is AAUDIO_SHARING_MODE_SHARED.
+ *
+ * The requested sharing mode may not be available.
+ * The application can query for the actual mode after the stream is opened.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param sharingMode AAUDIO_SHARING_MODE_SHARED or AAUDIO_SHARING_MODE_EXCLUSIVE
+ */
+AAUDIO_API void AAudioStreamBuilder_setSharingMode(AAudioStreamBuilder* builder,
+ aaudio_sharing_mode_t sharingMode);
+
+/**
+ * Request the direction for a stream.
+ *
+ * The default, if you do not call this function, is AAUDIO_DIRECTION_OUTPUT.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param direction AAUDIO_DIRECTION_OUTPUT or AAUDIO_DIRECTION_INPUT
+ */
+AAUDIO_API void AAudioStreamBuilder_setDirection(AAudioStreamBuilder* builder,
+ aaudio_direction_t direction);
+
+/**
+ * Set the requested buffer capacity in frames.
+ * The final AAudioStream capacity may differ, but will probably be at least this big.
+ *
+ * The default, if you do not call this function, is AAUDIO_UNSPECIFIED.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param numFrames the desired buffer capacity in frames or AAUDIO_UNSPECIFIED
+ */
+AAUDIO_API void AAudioStreamBuilder_setBufferCapacityInFrames(AAudioStreamBuilder* builder,
+ int32_t numFrames);
+
+/**
+ * Set the requested performance mode.
+ *
+ * The default, if you do not call this function, is AAUDIO_PERFORMANCE_MODE_NONE.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param mode the desired performance mode, eg. AAUDIO_PERFORMANCE_MODE_LOW_LATENCY
+ */
+AAUDIO_API void AAudioStreamBuilder_setPerformanceMode(AAudioStreamBuilder* builder,
+ aaudio_performance_mode_t mode);
+
+/**
+ * Return one of these values from the data callback function.
+ */
+enum {
+
+ /**
+ * Continue calling the callback.
+ */
+ AAUDIO_CALLBACK_RESULT_CONTINUE = 0,
+
+ /**
+ * Stop calling the callback.
+ *
+ * The application will still need to call AAudioStream_requestPause()
+ * or AAudioStream_requestStop().
+ */
+ AAUDIO_CALLBACK_RESULT_STOP,
+
+};
+typedef int32_t aaudio_data_callback_result_t;
+
+/**
+ * Prototype for the data function that is passed to AAudioStreamBuilder_setDataCallback().
+ *
+ * For an output stream, this function should render and write numFrames of data
+ * in the streams current data format to the audioData buffer.
+ *
+ * For an input stream, this function should read and process numFrames of data
+ * from the audioData buffer.
+ *
+ * Note that this callback function should be considered a "real-time" function.
+ * It must not do anything that could cause an unbounded delay because that can cause the
+ * audio to glitch or pop.
+ *
+ * These are things the function should NOT do:
+ * <ul>
+ * <li>allocate memory using, for example, malloc() or new</li>
+ * <li>any file operations such as opening, closing, reading or writing</li>
+ * <li>any network operations such as streaming</li>
+ * <li>use any mutexes or other synchronization primitives</li>
+ * <li>sleep</li>
+ * </ul>
+ *
+ * If you need to move data, eg. MIDI commands, in or out of the callback function then
+ * we recommend the use of non-blocking techniques such as an atomic FIFO.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @param userData the same address that was passed to AAudioStreamBuilder_setCallback()
+ * @param audioData a pointer to the audio data
+ * @param numFrames the number of frames to be processed
+ * @return AAUDIO_CALLBACK_RESULT_*
+ */
+typedef aaudio_data_callback_result_t (*AAudioStream_dataCallback)(
+ AAudioStream *stream,
+ void *userData,
+ void *audioData,
+ int32_t numFrames);
+
+/**
+ * Request that AAudio call this functions when the stream is running.
+ *
+ * Note that when using this callback, the audio data will be passed in or out
+ * of the function as an argument.
+ * So you cannot call AAudioStream_write() or AAudioStream_read() on the same stream
+ * that has an active data callback.
+ *
+ * The callback function will start being called after AAudioStream_requestStart() is called.
+ * It will stop being called after AAudioStream_requestPause() or
+ * AAudioStream_requestStop() is called.
+ *
+ * This callback function will be called on a real-time thread owned by AAudio. See
+ * {@link AAudioStream_dataCallback} for more information.
+ *
+ * Note that the AAudio callbacks will never be called simultaneously from multiple threads.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param callback pointer to a function that will process audio data.
+ * @param userData pointer to an application data structure that will be passed
+ * to the callback functions.
+ */
+AAUDIO_API void AAudioStreamBuilder_setDataCallback(AAudioStreamBuilder* builder,
+ AAudioStream_dataCallback callback,
+ void *userData);
+
+/**
+ * Set the requested data callback buffer size in frames.
+ * See {@link AAudioStream_dataCallback}.
+ *
+ * The default, if you do not call this function, is AAUDIO_UNSPECIFIED.
+ *
+ * For the lowest possible latency, do not call this function. AAudio will then
+ * call the dataProc callback function with whatever size is optimal.
+ * That size may vary from one callback to another.
+ *
+ * Only use this function if the application requires a specific number of frames for processing.
+ * The application might, for example, be using an FFT that requires
+ * a specific power-of-two sized buffer.
+ *
+ * AAudio may need to add additional buffering in order to adapt between the internal
+ * buffer size and the requested buffer size.
+ *
+ * If you do call this function then the requested size should be less than
+ * half the buffer capacity, to allow double buffering.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param numFrames the desired buffer size in frames or AAUDIO_UNSPECIFIED
+ */
+AAUDIO_API void AAudioStreamBuilder_setFramesPerDataCallback(AAudioStreamBuilder* builder,
+ int32_t numFrames);
+
+/**
+ * Prototype for the callback function that is passed to
+ * AAudioStreamBuilder_setErrorCallback().
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @param userData the same address that was passed to AAudioStreamBuilder_setErrorCallback()
+ * @param error an AAUDIO_ERROR_* value.
+ */
+typedef void (*AAudioStream_errorCallback)(
+ AAudioStream *stream,
+ void *userData,
+ aaudio_result_t error);
+
+/**
+ * Request that AAudio call this functions if any error occurs on a callback thread.
+ *
+ * It will be called, for example, if a headset or a USB device is unplugged causing the stream's
+ * device to be unavailable.
+ * In response, this function could signal or launch another thread to reopen a
+ * stream on another device. Do not reopen the stream in this callback.
+ *
+ * This will not be called because of actions by the application, such as stopping
+ * or closing a stream.
+ *
+ * Another possible cause of error would be a timeout or an unanticipated internal error.
+ *
+ * Note that the AAudio callbacks will never be called simultaneously from multiple threads.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param callback pointer to a function that will be called if an error occurs.
+ * @param userData pointer to an application data structure that will be passed
+ * to the callback functions.
+ */
+AAUDIO_API void AAudioStreamBuilder_setErrorCallback(AAudioStreamBuilder* builder,
+ AAudioStream_errorCallback callback,
+ void *userData);
+
+/**
+ * Open a stream based on the options in the StreamBuilder.
+ *
+ * AAudioStream_close must be called when finished with the stream to recover
+ * the memory and to free the associated resources.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param stream pointer to a variable to receive the new stream reference
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_openStream(AAudioStreamBuilder* builder,
+ AAudioStream** stream);
+
+/**
+ * Delete the resources associated with the StreamBuilder.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_delete(AAudioStreamBuilder* builder);
+
+// ============================================================
+// Stream Control
+// ============================================================
+
+/**
+ * Free the resources associated with a stream created by AAudioStreamBuilder_openStream()
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_close(AAudioStream* stream);
+
+/**
+ * Asynchronously request to start playing the stream. For output streams, one should
+ * write to the stream to fill the buffer before starting.
+ * Otherwise it will underflow.
+ * After this call the state will be in AAUDIO_STREAM_STATE_STARTING or AAUDIO_STREAM_STATE_STARTED.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_requestStart(AAudioStream* stream);
+
+/**
+ * Asynchronous request for the stream to pause.
+ * Pausing a stream will freeze the data flow but not flush any buffers.
+ * Use AAudioStream_requestStart() to resume playback after a pause.
+ * After this call the state will be in AAUDIO_STREAM_STATE_PAUSING or AAUDIO_STREAM_STATE_PAUSED.
+ *
+ * This will return AAUDIO_ERROR_UNIMPLEMENTED for input streams.
+ * For input streams use AAudioStream_requestStop().
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_requestPause(AAudioStream* stream);
+
+/**
+ * Asynchronous request for the stream to flush.
+ * Flushing will discard any pending data.
+ * This call only works if the stream is pausing or paused. TODO review
+ * Frame counters are not reset by a flush. They may be advanced.
+ * After this call the state will be in AAUDIO_STREAM_STATE_FLUSHING or AAUDIO_STREAM_STATE_FLUSHED.
+ *
+ * This will return AAUDIO_ERROR_UNIMPLEMENTED for input streams.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_requestFlush(AAudioStream* stream);
+
+/**
+ * Asynchronous request for the stream to stop.
+ * The stream will stop after all of the data currently buffered has been played.
+ * After this call the state will be in AAUDIO_STREAM_STATE_STOPPING or AAUDIO_STREAM_STATE_STOPPED.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_requestStop(AAudioStream* stream);
+
+/**
+ * Query the current state of the client, e.g. AAUDIO_STREAM_STATE_PAUSING
+ *
+ * This function will immediately return the state without updating the state.
+ * If you want to update the client state based on the server state then
+ * call AAudioStream_waitForStateChange() with currentState
+ * set to AAUDIO_STREAM_STATE_UNKNOWN and a zero timeout.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ */
+AAUDIO_API aaudio_stream_state_t AAudioStream_getState(AAudioStream* stream);
+
+/**
+ * Wait until the current state no longer matches the input state.
+ *
+ * This will update the current client state.
+ *
+ * <pre><code>
+ * aaudio_stream_state_t currentState = AAudioStream_getState(stream);
+ * aaudio_result_t result = AAUDIO_OK;
+ * while (result == AAUDIO_OK && currentState != AAUDIO_STREAM_STATE_PAUSING) {
+ *     result = AAudioStream_waitForStateChange(
+ *                                   stream, currentState, &currentState, MY_TIMEOUT_NANOS);
+ * }
+ * </code></pre>
+ *
+ * @param stream A reference provided by AAudioStreamBuilder_openStream()
+ * @param inputState The state we want to avoid.
+ * @param nextState Pointer to a variable that will be set to the new state.
+ * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
+ * @return AAUDIO_OK or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_waitForStateChange(AAudioStream* stream,
+ aaudio_stream_state_t inputState,
+ aaudio_stream_state_t *nextState,
+ int64_t timeoutNanoseconds);
+
+// ============================================================
+// Stream I/O
+// ============================================================
+
+/**
+ * Read data from the stream.
+ *
+ * The call will wait until the read is complete or until it runs out of time.
+ * If timeoutNanoseconds is zero then this call will not wait.
+ *
+ * Note that timeoutNanoseconds is a relative duration in wall clock time.
+ * Time will not stop if the thread is asleep.
+ * So it will be implemented using CLOCK_BOOTTIME.
+ *
+ * This call is "strong non-blocking" unless it has to wait for data.
+ *
+ * @param stream A stream created using AAudioStreamBuilder_openStream().
+ * @param buffer The address of the first sample.
+ * @param numFrames Number of frames to read. Only complete frames will be written.
+ * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
+ * @return The number of frames actually read or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_read(AAudioStream* stream,
+ void *buffer,
+ int32_t numFrames,
+ int64_t timeoutNanoseconds);
+
+/**
+ * Write data to the stream.
+ *
+ * The call will wait until the write is complete or until it runs out of time.
+ * If timeoutNanoseconds is zero then this call will not wait.
+ *
+ * Note that timeoutNanoseconds is a relative duration in wall clock time.
+ * Time will not stop if the thread is asleep.
+ * So it will be implemented using CLOCK_BOOTTIME.
+ *
+ * This call is "strong non-blocking" unless it has to wait for room in the buffer.
+ *
+ * @param stream A stream created using AAudioStreamBuilder_openStream().
+ * @param buffer The address of the first sample.
+ * @param numFrames Number of frames to write. Only complete frames will be written.
+ * @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
+ * @return The number of frames actually written or a negative error.
+ */
+AAUDIO_API aaudio_result_t AAudioStream_write(AAudioStream* stream,
+ const void *buffer,
+ int32_t numFrames,
+ int64_t timeoutNanoseconds);
+
+// ============================================================
+// Stream - queries
+// ============================================================
+
+/**
+ * This can be used to adjust the latency of the buffer by changing
+ * the threshold where blocking will occur.
+ * By combining this with AAudioStream_getXRunCount(), the latency can be tuned
+ * at run-time for each device.
+ *
+ * This cannot be set higher than AAudioStream_getBufferCapacityInFrames().
+ *
+ * Note that you will probably not get the exact size you request.
+ * Call AAudioStream_getBufferSizeInFrames() to see what the actual final size is.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @param numFrames requested number of frames that can be filled without blocking
+ * @return actual buffer size in frames or a negative error
+ */
+AAUDIO_API aaudio_result_t AAudioStream_setBufferSizeInFrames(AAudioStream* stream,
+ int32_t numFrames);
+
+/**
+ * Query the maximum number of frames that can be filled without blocking.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return buffer size in frames.
+ */
+AAUDIO_API int32_t AAudioStream_getBufferSizeInFrames(AAudioStream* stream);
+
+/**
+ * Query the number of frames that the application should read or write at
+ * one time for optimal performance. It is OK if an application writes
+ * a different number of frames. But the buffer size may need to be larger
+ * in order to avoid underruns or overruns.
+ *
+ * Note that this may or may not match the actual device burst size.
+ * For some endpoints, the burst size can vary dynamically.
+ * But these tend to be devices with high latency.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return burst size
+ */
+AAUDIO_API int32_t AAudioStream_getFramesPerBurst(AAudioStream* stream);
+
+/**
+ * Query maximum buffer capacity in frames.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return buffer capacity in frames
+ */
+AAUDIO_API int32_t AAudioStream_getBufferCapacityInFrames(AAudioStream* stream);
+
+/**
+ * Query the size of the buffer that will be passed to the dataProc callback
+ * in the numFrames parameter.
+ *
+ * This call can be used if the application needs to know the value of numFrames before
+ * the stream is started. This is not normally necessary.
+ *
+ * If a specific size was requested by calling AAudioStreamBuilder_setCallbackSizeInFrames()
+ * then this will be the same size.
+ *
+ * If AAudioStreamBuilder_setCallbackSizeInFrames() was not called then this will
+ * return the size chosen by AAudio, or AAUDIO_UNSPECIFIED.
+ *
+ * AAUDIO_UNSPECIFIED indicates that the callback buffer size for this stream
+ * may vary from one dataProc callback to the next.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return callback buffer size in frames or AAUDIO_UNSPECIFIED
+ */
+AAUDIO_API int32_t AAudioStream_getFramesPerDataCallback(AAudioStream* stream);
+
+/**
+ * An XRun is an Underrun or an Overrun.
+ * During playing, an underrun will occur if the stream is not written in time
+ * and the system runs out of valid data.
+ * During recording, an overrun will occur if the stream is not read in time
+ * and there is no place to put the incoming data so it is discarded.
+ *
+ * An underrun or overrun can cause an audible "pop" or "glitch".
+ *
+ * Note that some INPUT devices may not support this function.
+ * In that case a 0 will always be returned.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return the underrun or overrun count
+ */
+AAUDIO_API int32_t AAudioStream_getXRunCount(AAudioStream* stream);
+
+/**
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return actual sample rate
+ */
+AAUDIO_API int32_t AAudioStream_getSampleRate(AAudioStream* stream);
+
+/**
+ * A stream has one or more channels of data.
+ * A frame will contain one sample for each channel.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return actual number of channels
+ */
+AAUDIO_API int32_t AAudioStream_getChannelCount(AAudioStream* stream);
+
+/**
+ * The samplesPerFrame is also known as channelCount.
+ *
+ * @deprecated use AAudioStream_getChannelCount()
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return actual samples per frame
+ */
+AAUDIO_API int32_t AAudioStream_getSamplesPerFrame(AAudioStream* stream);
+
+/**
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return actual device ID
+ */
+AAUDIO_API int32_t AAudioStream_getDeviceId(AAudioStream* stream);
+
+/**
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return actual data format
+ */
+AAUDIO_API aaudio_format_t AAudioStream_getFormat(AAudioStream* stream);
+
+/**
+ * Provide actual sharing mode.
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return actual sharing mode
+ */
+AAUDIO_API aaudio_sharing_mode_t AAudioStream_getSharingMode(AAudioStream* stream);
+
+/**
+ * Get the performance mode used by the stream.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ */
+AAUDIO_API aaudio_performance_mode_t AAudioStream_getPerformanceMode(AAudioStream* stream);
+
+/**
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return direction
+ */
+AAUDIO_API aaudio_direction_t AAudioStream_getDirection(AAudioStream* stream);
+
+/**
+ * Passes back the number of frames that have been written since the stream was created.
+ * For an output stream, this will be advanced by the application calling write().
+ * For an input stream, this will be advanced by the endpoint.
+ *
+ * The frame position is monotonically increasing.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return frames written
+ */
+AAUDIO_API int64_t AAudioStream_getFramesWritten(AAudioStream* stream);
+
+/**
+ * Passes back the number of frames that have been read since the stream was created.
+ * For an output stream, this will be advanced by the endpoint.
+ * For an input stream, this will be advanced by the application calling read().
+ *
+ * The frame position is monotonically increasing.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return frames read
+ */
+AAUDIO_API int64_t AAudioStream_getFramesRead(AAudioStream* stream);
+
+/**
+ * Passes back the time at which a particular frame was presented.
+ * This can be used to synchronize audio with video or MIDI.
+ * It can also be used to align a recorded stream with a playback stream.
+ *
+ * Timestamps are only valid when the stream is in AAUDIO_STREAM_STATE_STARTED.
+ * AAUDIO_ERROR_INVALID_STATE will be returned if the stream is not started.
+ * Note that because requestStart() is asynchronous, timestamps will not be valid until
+ * a short time after calling requestStart().
+ * So AAUDIO_ERROR_INVALID_STATE should not be considered a fatal error.
+ * Just try calling again later.
+ *
+ * If an error occurs, then the position and time will not be modified.
+ *
+ * The position and time passed back are monotonically increasing.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @param clockid AAUDIO_CLOCK_MONOTONIC or AAUDIO_CLOCK_BOOTTIME
+ * @param framePosition pointer to a variable to receive the position
+ * @param timeNanoseconds pointer to a variable to receive the time
+ * @return AAUDIO_OK or a negative error
+ */
+AAUDIO_API aaudio_result_t AAudioStream_getTimestamp(AAudioStream* stream,
+ clockid_t clockid,
+ int64_t *framePosition,
+ int64_t *timeNanoseconds);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //AAUDIO_AAUDIO_H
+
+/** @} */
diff --git a/media/libaaudio/include/aaudio/AAudioTesting.h b/media/libaaudio/include/aaudio/AAudioTesting.h
new file mode 100644
index 0000000..02ec411
--- /dev/null
+++ b/media/libaaudio/include/aaudio/AAudioTesting.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This is test support for AAudio.
+ */
+#ifndef AAUDIO_AAUDIO_TESTING_H
+#define AAUDIO_AAUDIO_TESTING_H
+
+#include <aaudio/AAudio.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/************************************************************************************
+ * The definitions below are only for testing. Do not use them in an application.
+ * They may change or be removed at any time.
+ ************************************************************************************/
+
+enum {
+ /**
+ * Related feature is disabled and never used.
+ */
+ AAUDIO_POLICY_NEVER = 1,
+
+ /**
+ * If related feature works then use it. Otherwise fall back to something else.
+ */
+ AAUDIO_POLICY_AUTO,
+
+ /**
+ * Related feature must be used. If not available then fail.
+ */
+ AAUDIO_POLICY_ALWAYS
+};
+typedef int32_t aaudio_policy_t;
+
+/**
+ * Control whether AAudioStreamBuilder_openStream() will use the new MMAP data path
+ * or the older "Legacy" data path.
+ *
+ * This will only affect the current process.
+ *
+ * If unspecified then the policy will be based on system properties or configuration.
+ *
+ * @note This is only for testing. Do not use this in an application.
+ * It may change or be removed at any time.
+ *
+ * @param policy AAUDIO_UNSPECIFIED, AAUDIO_POLICY_NEVER, AAUDIO_POLICY_AUTO, or AAUDIO_POLICY_ALWAYS
+ * @return AAUDIO_OK or a negative error
+ */
+AAUDIO_API aaudio_result_t AAudio_setMMapPolicy(aaudio_policy_t policy);
+
+/**
+ * Get the current MMAP policy set by AAudio_setMMapPolicy().
+ *
+ * @note This is only for testing. Do not use this in an application.
+ * It may change or be removed at any time.
+ *
+ * @return current policy
+ */
+AAUDIO_API aaudio_policy_t AAudio_getMMapPolicy();
+
+/**
+ * Return true if the stream uses the MMAP data path versus the legacy path.
+ *
+ * @note This is only for testing. Do not use this in an application.
+ * It may change or be removed at any time.
+ *
+ * @return true if the stream uses the MMAP data path
+ */
+AAUDIO_API bool AAudioStream_isMMapUsed(AAudioStream* stream);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //AAUDIO_AAUDIO_TESTING_H
+
+/** @} */
diff --git a/media/libaaudio/include/aaudio/NOTICE b/media/libaaudio/include/aaudio/NOTICE
new file mode 100644
index 0000000..d6c0922
--- /dev/null
+++ b/media/libaaudio/include/aaudio/NOTICE
@@ -0,0 +1,13 @@
+Copyright (C) 2016 The Android Open Source Project
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/media/libaaudio/include/aaudio/README.md b/media/libaaudio/include/aaudio/README.md
new file mode 100644
index 0000000..8c4ae51
--- /dev/null
+++ b/media/libaaudio/include/aaudio/README.md
@@ -0,0 +1,4 @@
+AAudio Audio headers
+
+This folder contains the public header files.
+
diff --git a/media/libaaudio/libaaudio.map.txt b/media/libaaudio/libaaudio.map.txt
new file mode 100644
index 0000000..2ba5250
--- /dev/null
+++ b/media/libaaudio/libaaudio.map.txt
@@ -0,0 +1,51 @@
+LIBAAUDIO {
+ global:
+ AAudio_convertResultToText;
+ AAudio_convertStreamStateToText;
+ AAudio_createStreamBuilder;
+ AAudio_getMMapPolicy;
+ AAudio_setMMapPolicy;
+ AAudioStreamBuilder_setPerformanceMode;
+ AAudioStreamBuilder_setDeviceId;
+ AAudioStreamBuilder_setDataCallback;
+ AAudioStreamBuilder_setErrorCallback;
+ AAudioStreamBuilder_setFramesPerDataCallback;
+ AAudioStreamBuilder_setSampleRate;
+ AAudioStreamBuilder_setSamplesPerFrame;
+ AAudioStreamBuilder_setChannelCount;
+ AAudioStreamBuilder_setFormat;
+ AAudioStreamBuilder_setSharingMode;
+ AAudioStreamBuilder_setDirection;
+ AAudioStreamBuilder_setBufferCapacityInFrames;
+ AAudioStreamBuilder_openStream;
+ AAudioStreamBuilder_delete;
+ AAudioStream_close;
+ AAudioStream_requestStart;
+ AAudioStream_requestPause;
+ AAudioStream_requestFlush;
+ AAudioStream_requestStop;
+ AAudioStream_getState;
+ AAudioStream_waitForStateChange;
+ AAudioStream_read;
+ AAudioStream_write;
+ AAudioStream_setBufferSizeInFrames;
+ AAudioStream_getBufferSizeInFrames;
+ AAudioStream_getFramesPerDataCallback;
+ AAudioStream_getFramesPerBurst;
+ AAudioStream_getBufferCapacityInFrames;
+ AAudioStream_getXRunCount;
+ AAudioStream_getSampleRate;
+ AAudioStream_getSamplesPerFrame;
+ AAudioStream_getChannelCount;
+ AAudioStream_getPerformanceMode;
+ AAudioStream_getDeviceId;
+ AAudioStream_getFormat;
+ AAudioStream_getSharingMode;
+ AAudioStream_getDirection;
+ AAudioStream_getFramesWritten;
+ AAudioStream_getFramesRead;
+ AAudioStream_getTimestamp;
+ AAudioStream_isMMapUsed;
+ local:
+ *;
+};
diff --git a/media/libaaudio/scripts/convert_oboe_aaudio.sh b/media/libaaudio/scripts/convert_oboe_aaudio.sh
new file mode 100755
index 0000000..2bf025a
--- /dev/null
+++ b/media/libaaudio/scripts/convert_oboe_aaudio.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+# Use SED to convert the Oboe API to the AAudio API
+
+echo "Convert Oboe names to AAudio names"
+
+echo "Top is ${ANDROID_BUILD_TOP}"
+LIBOBOE_DIR=${ANDROID_BUILD_TOP}/frameworks/av/media/liboboe
+echo "LIBOBOE_DIR is ${LIBOBOE_DIR}"
+OBOESERVICE_DIR=${ANDROID_BUILD_TOP}/frameworks/av/services/oboeservice
+echo "OBOESERVICE_DIR is ${OBOESERVICE_DIR}"
+OBOETEST_DIR=${ANDROID_BUILD_TOP}/cts/tests/tests/nativemedia/aaudio/src/
+echo "OBOETEST_DIR is ${OBOETEST_DIR}"
+
+function convertPathPattern {
+ path=$1
+ pattern=$2
+ find $path -type f -name $pattern -exec sed -i -f ${LIBOBOE_DIR}/scripts/oboe_to_aaudio.sed {} \;
+}
+
+function convertPath {
+ path=$1
+ convertPathPattern $1 '*.cpp'
+ convertPathPattern $1 '*.h'
+ # the mk match does not work!
+ convertPathPattern $1 '*.mk'
+ convertPathPattern $1 '*.md'
+ convertPathPattern $1 '*.bp'
+}
+
+#convertPath ${LIBOBOE_DIR}/examples
+#convertPath ${LIBOBOE_DIR}/include
+#convertPath ${LIBOBOE_DIR}/src
+#convertPath ${LIBOBOE_DIR}/tests
+convertPath ${LIBOBOE_DIR}
+convertPathPattern ${LIBOBOE_DIR} Android.mk
+convertPathPattern ${LIBOBOE_DIR} liboboe.map.txt
+
+convertPath ${OBOESERVICE_DIR}
+convertPathPattern ${OBOESERVICE_DIR} Android.mk
+
+convertPathPattern ${OBOETEST_DIR} test_aaudio.cpp
+
+mv ${LIBOBOE_DIR}/include/oboe ${LIBOBOE_DIR}/include/aaudio
+mv ${LIBOBOE_DIR}/include/aaudio/OboeAudio.h ${LIBOBOE_DIR}/include/aaudio/AAudio.h
+mv ${OBOESERVICE_DIR}/OboeService.h ${OBOESERVICE_DIR}/AAudioServiceDefinitions.h
+mv ${LIBOBOE_DIR}/tests/test_oboe_api.cpp ${LIBOBOE_DIR}/tests/test_aaudio_api.cpp
+
+# Rename files with Oboe in the name.
+find -name "*OboeAudioService*.cpp" | rename -v "s/OboeAudioService/AAudioService/g"
+find -name "*OboeAudioService*.h" | rename -v "s/OboeAudioService/AAudioService/g"
+find -name "*Oboe*.cpp" | rename -v "s/Oboe/AAudio/g"
+find -name "*Oboe*.h" | rename -v "s/Oboe/AAudio/g"
diff --git a/media/libaaudio/scripts/convert_typedefs_int32.sh b/media/libaaudio/scripts/convert_typedefs_int32.sh
new file mode 100755
index 0000000..7bdbe3a
--- /dev/null
+++ b/media/libaaudio/scripts/convert_typedefs_int32.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+echo "Use SED to convert typedefs in AAudio API"
+
+echo "Top is ${ANDROID_BUILD_TOP}"
+LIBAAUDIO_DIR=${ANDROID_BUILD_TOP}/frameworks/av/media/libaaudio
+echo "LIBAAUDIO_DIR is ${LIBAAUDIO_DIR}"
+OBOESERVICE_DIR=${ANDROID_BUILD_TOP}/frameworks/av/services/oboeservice
+echo "OBOESERVICE_DIR is ${OBOESERVICE_DIR}"
+OBOETEST_DIR=${ANDROID_BUILD_TOP}/cts/tests/tests/nativemedia/aaudio/src/
+echo "OBOETEST_DIR is ${OBOETEST_DIR}"
+
+function convertPathPattern {
+ path=$1
+ pattern=$2
+ find $path -type f -name $pattern -exec sed -i -f ${LIBAAUDIO_DIR}/scripts/typedefs_to_int32.sed {} \;
+}
+
+function convertPath {
+ path=$1
+ convertPathPattern $1 '*.cpp'
+ convertPathPattern $1 '*.h'
+}
+
+convertPath ${LIBAAUDIO_DIR}
+convertPath ${OBOESERVICE_DIR}
+convertPathPattern ${OBOETEST_DIR} test_aaudio.cpp
+
diff --git a/media/libaaudio/scripts/oboe_to_aaudio.sed b/media/libaaudio/scripts/oboe_to_aaudio.sed
new file mode 100644
index 0000000..7da85a0
--- /dev/null
+++ b/media/libaaudio/scripts/oboe_to_aaudio.sed
@@ -0,0 +1,16 @@
+s/liboboe/libclarinet/g
+s/oboeservice/clarinetservice/g
+
+s/OboeAudio\.h/AAudio\.h/g
+s/OboeService\.h/AAudioServiceDefinitions\.h/g
+s/OboeAudioService/AAudioService/g
+s/LOG_TAG "OboeAudio"/LOG_TAG "AAudio"/g
+s/OBOE_AUDIO_FORMAT/AAUDIO_FORMAT/g
+s/OBOEAUDIO/AAUDIO/g
+
+s/oboe/aaudio/g
+s/Oboe/AAudio/g
+s/OBOE/AAUDIO/g
+
+s/libclarinet/liboboe/g
+s/clarinetservice/oboeservice/g
diff --git a/media/libaaudio/scripts/revert_all_aaudio.sh b/media/libaaudio/scripts/revert_all_aaudio.sh
new file mode 100755
index 0000000..19c7f81
--- /dev/null
+++ b/media/libaaudio/scripts/revert_all_aaudio.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+echo "Revert typedefs"
+
+echo "Top is ${ANDROID_BUILD_TOP}"
+LIBAAUDIO_DIR=${ANDROID_BUILD_TOP}/frameworks/av/media/libaaudio
+echo "LIBAAUDIO_DIR is ${LIBAAUDIO_DIR}"
+OBOESERVICE_DIR=${ANDROID_BUILD_TOP}/frameworks/av/services/oboeservice
+echo "OBOESERVICE_DIR is ${OBOESERVICE_DIR}"
+OBOETEST_DIR=${ANDROID_BUILD_TOP}/cts/tests/tests/nativemedia/aaudio/src/
+echo "OBOETEST_DIR is ${OBOETEST_DIR}"
+
+git checkout -- ${LIBAAUDIO_DIR}/examples
+git checkout -- ${LIBAAUDIO_DIR}/include
+git checkout -- ${LIBAAUDIO_DIR}/src
+git checkout -- ${LIBAAUDIO_DIR}/tests
+git checkout -- ${OBOESERVICE_DIR}
+
diff --git a/media/libaaudio/scripts/typedefs_to_int32.sed b/media/libaaudio/scripts/typedefs_to_int32.sed
new file mode 100644
index 0000000..392c9a0
--- /dev/null
+++ b/media/libaaudio/scripts/typedefs_to_int32.sed
@@ -0,0 +1,8 @@
+s/aaudio_device_id_t/int32_t/g
+s/aaudio_sample_rate_t/int32_t/g
+s/aaudio_size_frames_t/int32_t/g
+s/aaudio_size_bytes_t/int32_t/g
+s/aaudio_sample_rate_t/int32_t/g
+
+s/aaudio_position_frames_t/int64_t/g
+s/aaudio_nanoseconds_t/int64_t/g
diff --git a/media/libaaudio/src/Android.mk b/media/libaaudio/src/Android.mk
new file mode 100644
index 0000000..28c4d7f
--- /dev/null
+++ b/media/libaaudio/src/Android.mk
@@ -0,0 +1,126 @@
+LOCAL_PATH:= $(call my-dir)
+
+# ======================= STATIC LIBRARY ==========================
+# This is being built because it makes AAudio testing very easy with a complete executable.
+# TODO Remove this target later, when not needed.
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := libaaudio
+LOCAL_MODULE_TAGS := optional
+
+LIBAAUDIO_DIR := $(TOP)/frameworks/av/media/libaaudio
+LIBAAUDIO_SRC_DIR := $(LIBAAUDIO_DIR)/src
+
+LOCAL_C_INCLUDES := \
+ $(call include-path-for, audio-utils) \
+ frameworks/native/include \
+ system/core/base/include \
+ frameworks/native/media/libaaudio/include/include \
+ frameworks/av/media/libaaudio/include \
+ frameworks/native/include \
+ frameworks/av/media/libaudioclient/include \
+ $(LOCAL_PATH) \
+ $(LOCAL_PATH)/binding \
+ $(LOCAL_PATH)/client \
+ $(LOCAL_PATH)/core \
+ $(LOCAL_PATH)/fifo \
+ $(LOCAL_PATH)/legacy \
+ $(LOCAL_PATH)/utility
+
+# If you add a file here then also add it below in the SHARED target
+LOCAL_SRC_FILES = \
+ core/AudioStream.cpp \
+ core/AudioStreamBuilder.cpp \
+ core/AAudioAudio.cpp \
+ legacy/AudioStreamLegacy.cpp \
+ legacy/AudioStreamRecord.cpp \
+ legacy/AudioStreamTrack.cpp \
+ utility/HandleTracker.cpp \
+ utility/AAudioUtilities.cpp \
+ utility/FixedBlockAdapter.cpp \
+ utility/FixedBlockReader.cpp \
+ utility/FixedBlockWriter.cpp \
+ utility/LinearRamp.cpp \
+ fifo/FifoBuffer.cpp \
+ fifo/FifoControllerBase.cpp \
+ client/AudioEndpoint.cpp \
+ client/AudioStreamInternal.cpp \
+ client/AudioStreamInternalCapture.cpp \
+ client/AudioStreamInternalPlay.cpp \
+ client/IsochronousClockModel.cpp \
+ binding/AudioEndpointParcelable.cpp \
+ binding/AAudioBinderClient.cpp \
+ binding/AAudioStreamRequest.cpp \
+ binding/AAudioStreamConfiguration.cpp \
+ binding/IAAudioService.cpp \
+ binding/RingBufferParcelable.cpp \
+ binding/SharedMemoryParcelable.cpp \
+ binding/SharedRegionParcelable.cpp
+
+LOCAL_CFLAGS += -Wno-unused-parameter -Wall -Werror
+
+# By default, all symbols are hidden.
+# LOCAL_CFLAGS += -fvisibility=hidden
+# AAUDIO_API is used to explicitly export a function or a variable as a visible symbol.
+LOCAL_CFLAGS += -DAAUDIO_API='__attribute__((visibility("default")))'
+
+include $(BUILD_STATIC_LIBRARY)
+
+# ======================= SHARED LIBRARY ==========================
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := libaaudio
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_C_INCLUDES := \
+ $(call include-path-for, audio-utils) \
+ frameworks/native/include \
+ system/core/base/include \
+ frameworks/native/media/libaaudio/include/include \
+ frameworks/av/media/libaaudio/include \
+ $(LOCAL_PATH) \
+ $(LOCAL_PATH)/binding \
+ $(LOCAL_PATH)/client \
+ $(LOCAL_PATH)/core \
+ $(LOCAL_PATH)/fifo \
+ $(LOCAL_PATH)/legacy \
+ $(LOCAL_PATH)/utility
+
+LOCAL_SRC_FILES = core/AudioStream.cpp \
+ core/AudioStreamBuilder.cpp \
+ core/AAudioAudio.cpp \
+ legacy/AudioStreamLegacy.cpp \
+ legacy/AudioStreamRecord.cpp \
+ legacy/AudioStreamTrack.cpp \
+ utility/HandleTracker.cpp \
+ utility/AAudioUtilities.cpp \
+ utility/FixedBlockAdapter.cpp \
+ utility/FixedBlockReader.cpp \
+ utility/FixedBlockWriter.cpp \
+ utility/LinearRamp.cpp \
+ fifo/FifoBuffer.cpp \
+ fifo/FifoControllerBase.cpp \
+ client/AudioEndpoint.cpp \
+ client/AudioStreamInternal.cpp \
+ client/AudioStreamInternalCapture.cpp \
+ client/AudioStreamInternalPlay.cpp \
+ client/IsochronousClockModel.cpp \
+ binding/AudioEndpointParcelable.cpp \
+ binding/AAudioBinderClient.cpp \
+ binding/AAudioStreamRequest.cpp \
+ binding/AAudioStreamConfiguration.cpp \
+ binding/IAAudioService.cpp \
+ binding/RingBufferParcelable.cpp \
+ binding/SharedMemoryParcelable.cpp \
+ binding/SharedRegionParcelable.cpp
+
+LOCAL_CFLAGS += -Wno-unused-parameter -Wall -Werror
+
+# By default, all symbols are hidden.
+# LOCAL_CFLAGS += -fvisibility=hidden
+# AAUDIO_API is used to explicitly export a function or a variable as a visible symbol.
+LOCAL_CFLAGS += -DAAUDIO_API='__attribute__((visibility("default")))'
+
+LOCAL_SHARED_LIBRARIES := libaudioclient liblog libcutils libutils libbinder libaudiomanager
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libaaudio/src/binding/AAudioBinderClient.cpp b/media/libaaudio/src/binding/AAudioBinderClient.cpp
new file mode 100644
index 0000000..435b30f
--- /dev/null
+++ b/media/libaaudio/src/binding/AAudioBinderClient.cpp
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <binder/IServiceManager.h>
+#include <utils/Mutex.h>
+#include <utils/RefBase.h>
+#include <utils/Singleton.h>
+
+#include <aaudio/AAudio.h>
+
+#include "AudioEndpointParcelable.h"
+#include "binding/AAudioStreamRequest.h"
+#include "binding/AAudioStreamConfiguration.h"
+#include "binding/IAAudioService.h"
+#include "binding/AAudioServiceMessage.h"
+
+#include "AAudioBinderClient.h"
+#include "AAudioServiceInterface.h"
+
+using android::String16;
+using android::IServiceManager;
+using android::defaultServiceManager;
+using android::interface_cast;
+using android::IAAudioService;
+using android::Mutex;
+using android::sp;
+
+using namespace aaudio;
+
+static android::Mutex gServiceLock;
+static sp<IAAudioService> gAAudioService;
+
+ANDROID_SINGLETON_STATIC_INSTANCE(AAudioBinderClient);
+
+// TODO Share code with other service clients.
+// Helper function to get access to the "AAudioService" service.
+// This code was modeled after frameworks/av/media/libaudioclient/AudioSystem.cpp
+static const sp<IAAudioService> getAAudioService() {
+ sp<IBinder> binder;
+ Mutex::Autolock _l(gServiceLock);
+ if (gAAudioService == 0) {
+ sp<IServiceManager> sm = defaultServiceManager();
+ // Try several times to get the service.
+ int retries = 4;
+ do {
+ binder = sm->getService(String16(AAUDIO_SERVICE_NAME)); // This will wait a while.
+ if (binder != 0) {
+ break;
+ }
+ } while (retries-- > 0);
+
+ if (binder != 0) {
+ // TODO Add linkToDeath() like in frameworks/av/media/libaudioclient/AudioSystem.cpp
+ // TODO Create a DeathRecipient that disconnects all active streams.
+ gAAudioService = interface_cast<IAAudioService>(binder);
+ } else {
+ ALOGE("AudioStreamInternal could not get %s", AAUDIO_SERVICE_NAME);
+ }
+ }
+ return gAAudioService;
+}
+
+static void dropAAudioService() {
+ Mutex::Autolock _l(gServiceLock);
+ gAAudioService.clear(); // force a reconnect
+}
+
+AAudioBinderClient::AAudioBinderClient()
+ : AAudioServiceInterface()
+ , Singleton<AAudioBinderClient>() {}
+
+AAudioBinderClient::~AAudioBinderClient() {}
+
+/**
+* @param request info needed to create the stream
+* @param configuration contains information about the created stream
+* @return handle to the stream or a negative error
+*/
+aaudio_handle_t AAudioBinderClient::openStream(const AAudioStreamRequest &request,
+ AAudioStreamConfiguration &configurationOutput) {
+ aaudio_handle_t stream;
+ for (int i = 0; i < 2; i++) {
+ const sp<IAAudioService> &service = getAAudioService();
+ if (service == 0) {
+ return AAUDIO_ERROR_NO_SERVICE;
+ }
+
+ stream = service->openStream(request, configurationOutput);
+
+ if (stream == AAUDIO_ERROR_NO_SERVICE) {
+ ALOGE("AAudioBinderClient: lost connection to AAudioService.");
+ dropAAudioService(); // force a reconnect
+ } else {
+ break;
+ }
+ }
+ return stream;
+}
+
+aaudio_result_t AAudioBinderClient::closeStream(aaudio_handle_t streamHandle) {
+ const sp<IAAudioService> &service = getAAudioService();
+ if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+ return service->closeStream(streamHandle);
+}
+
+/* Get an immutable description of the in-memory queues
+* used to communicate with the underlying HAL or Service.
+*/
+aaudio_result_t AAudioBinderClient::getStreamDescription(aaudio_handle_t streamHandle,
+ AudioEndpointParcelable &parcelable) {
+ const sp<IAAudioService> &service = getAAudioService();
+ if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+ return service->getStreamDescription(streamHandle, parcelable);
+}
+
+aaudio_result_t AAudioBinderClient::startStream(aaudio_handle_t streamHandle) {
+ const sp<IAAudioService> &service = getAAudioService();
+ if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+ return service->startStream(streamHandle);
+}
+
+aaudio_result_t AAudioBinderClient::pauseStream(aaudio_handle_t streamHandle) {
+ const sp<IAAudioService> &service = getAAudioService();
+ if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+ return service->pauseStream(streamHandle);
+}
+
+aaudio_result_t AAudioBinderClient::stopStream(aaudio_handle_t streamHandle) {
+ const sp<IAAudioService> &service = getAAudioService();
+ if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+ return service->stopStream(streamHandle);
+}
+
+aaudio_result_t AAudioBinderClient::flushStream(aaudio_handle_t streamHandle) {
+ const sp<IAAudioService> &service = getAAudioService();
+ if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+ return service->flushStream(streamHandle);
+}
+
+/**
+* Manage the specified thread as a low latency audio thread.
+*/
+aaudio_result_t AAudioBinderClient::registerAudioThread(aaudio_handle_t streamHandle,
+ pid_t clientProcessId,
+ pid_t clientThreadId,
+ int64_t periodNanoseconds) {
+ const sp<IAAudioService> &service = getAAudioService();
+ if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+ return service->registerAudioThread(streamHandle,
+ clientProcessId,
+ clientThreadId,
+ periodNanoseconds);
+}
+
+aaudio_result_t AAudioBinderClient::unregisterAudioThread(aaudio_handle_t streamHandle,
+ pid_t clientProcessId,
+ pid_t clientThreadId) {
+ const sp<IAAudioService> &service = getAAudioService();
+ if (service == 0) return AAUDIO_ERROR_NO_SERVICE;
+ return service->unregisterAudioThread(streamHandle,
+ clientProcessId,
+ clientThreadId);
+}
diff --git a/media/libaaudio/src/binding/AAudioBinderClient.h b/media/libaaudio/src/binding/AAudioBinderClient.h
new file mode 100644
index 0000000..e223376
--- /dev/null
+++ b/media/libaaudio/src/binding/AAudioBinderClient.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AAUDIO_AAUDIO_BINDER_CLIENT_H
+#define ANDROID_AAUDIO_AAUDIO_BINDER_CLIENT_H
+
+#include <utils/Singleton.h>
+
+#include <aaudio/AAudio.h>
+#include "AAudioServiceDefinitions.h"
+#include "AAudioServiceInterface.h"
+#include "binding/AAudioStreamRequest.h"
+#include "binding/AAudioStreamConfiguration.h"
+#include "binding/AudioEndpointParcelable.h"
+
+/**
+ * Implements the AAudioServiceInterface by talking to the actual service through Binder.
+ */
+
+namespace aaudio {
+
+class AAudioBinderClient : public AAudioServiceInterface
+ , public android::Singleton<AAudioBinderClient> {
+
+public:
+
+ AAudioBinderClient();
+
+ virtual ~AAudioBinderClient();
+
+ /**
+ * @param request info needed to create the stream
+ * @param configuration contains resulting information about the created stream
+ * @return handle to the stream or a negative error
+ */
+ aaudio_handle_t openStream(const AAudioStreamRequest &request,
+ AAudioStreamConfiguration &configurationOutput) override;
+
+ aaudio_result_t closeStream(aaudio_handle_t streamHandle) override;
+
+ /* Get an immutable description of the in-memory queues
+ * used to communicate with the underlying HAL or Service.
+ */
+ aaudio_result_t getStreamDescription(aaudio_handle_t streamHandle,
+ AudioEndpointParcelable &parcelable) override;
+
+ /**
+ * Start the flow of data.
+ * This is asynchronous. When complete, the service will send a STARTED event.
+ */
+ aaudio_result_t startStream(aaudio_handle_t streamHandle) override;
+
+ /**
+ * Stop the flow of data such that start() can resume without loss of data.
+ * This is asynchronous. When complete, the service will send a PAUSED event.
+ */
+ aaudio_result_t pauseStream(aaudio_handle_t streamHandle) override;
+
+ aaudio_result_t stopStream(aaudio_handle_t streamHandle) override;
+
+ /**
+ * Discard any data held by the underlying HAL or Service.
+ * This is asynchronous. When complete, the service will send a FLUSHED event.
+ */
+ aaudio_result_t flushStream(aaudio_handle_t streamHandle) override;
+
+ /**
+ * Manage the specified thread as a low latency audio thread.
+ * TODO Consider passing this information as part of the startStream() call.
+ */
+ aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle,
+ pid_t clientProcessId,
+ pid_t clientThreadId,
+ int64_t periodNanoseconds) override;
+
+ aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle,
+ pid_t clientProcessId,
+ pid_t clientThreadId) override;
+};
+
+
+} /* namespace aaudio */
+
+#endif //ANDROID_AAUDIO_AAUDIO_BINDER_CLIENT_H
diff --git a/media/libaaudio/src/binding/AAudioServiceDefinitions.h b/media/libaaudio/src/binding/AAudioServiceDefinitions.h
new file mode 100644
index 0000000..638544e
--- /dev/null
+++ b/media/libaaudio/src/binding/AAudioServiceDefinitions.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BINDING_AAUDIOSERVICEDEFINITIONS_H
+#define BINDING_AAUDIOSERVICEDEFINITIONS_H
+
+#include <stdint.h>
+#include <utils/RefBase.h>
+#include <binder/TextOutput.h>
+#include <binder/IInterface.h>
+
+#include <aaudio/AAudio.h>
+
+using android::NO_ERROR;
+using android::IBinder;
+
+namespace android {
+
+enum aaudio_commands_t {
+ OPEN_STREAM = IBinder::FIRST_CALL_TRANSACTION,
+ CLOSE_STREAM,
+ GET_STREAM_DESCRIPTION,
+ START_STREAM,
+ PAUSE_STREAM,
+ STOP_STREAM,
+ FLUSH_STREAM,
+ REGISTER_AUDIO_THREAD,
+ UNREGISTER_AUDIO_THREAD
+};
+
+} // namespace android
+
+namespace aaudio {
+
+typedef int32_t aaudio_handle_t;
+
+#define AAUDIO_HANDLE_INVALID ((aaudio_handle_t) -1)
+
+// This must be a fixed width so it can be in shared memory.
+enum RingbufferFlags : uint32_t {
+ NONE = 0,
+ RATE_ISOCHRONOUS = 0x0001,
+ RATE_ASYNCHRONOUS = 0x0002,
+ COHERENCY_DMA = 0x0004,
+ COHERENCY_ACQUIRE_RELEASE = 0x0008,
+ COHERENCY_AUTO = 0x0010,
+};
+
+// This is not passed through Binder.
+// Client side code will convert Binder data and fill this descriptor.
+typedef struct RingBufferDescriptor_s {
+ uint8_t* dataAddress; // offset from read or write block
+ int64_t* writeCounterAddress;
+ int64_t* readCounterAddress;
+ int32_t bytesPerFrame; // index is in frames
+ int32_t framesPerBurst; // for ISOCHRONOUS queues
+ int32_t capacityInFrames; // zero if unused
+ RingbufferFlags flags;
+} RingBufferDescriptor;
+
+// This is not passed through Binder.
+// Client side code will convert Binder data and fill this descriptor.
+typedef struct EndpointDescriptor_s {
+ // Set capacityInFrames to zero if Queue is unused.
+ RingBufferDescriptor upMessageQueueDescriptor; // server to client
+ RingBufferDescriptor downMessageQueueDescriptor; // client to server
+ RingBufferDescriptor dataQueueDescriptor; // playback or capture
+} EndpointDescriptor;
+
+} // namespace aaudio
+
+#endif //BINDING_AAUDIOSERVICEDEFINITIONS_H
diff --git a/media/libaaudio/src/binding/AAudioServiceInterface.h b/media/libaaudio/src/binding/AAudioServiceInterface.h
new file mode 100644
index 0000000..824e5bc
--- /dev/null
+++ b/media/libaaudio/src/binding/AAudioServiceInterface.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AAUDIO_BINDING_AAUDIO_SERVICE_INTERFACE_H
+#define ANDROID_AAUDIO_BINDING_AAUDIO_SERVICE_INTERFACE_H
+
+#include "binding/AAudioServiceDefinitions.h"
+#include "binding/AAudioStreamRequest.h"
+#include "binding/AAudioStreamConfiguration.h"
+#include "binding/AudioEndpointParcelable.h"
+
+/**
+ * This has the same methods as IAAudioService but without the Binder features.
+ *
+ * It allows us to abstract the Binder interface and use an AudioStreamInternal
+ * both in the client and in the service.
+ */
+namespace aaudio {
+
+class AAudioServiceInterface {
+public:
+
+ AAudioServiceInterface() {};
+ virtual ~AAudioServiceInterface() = default;
+
+ /**
+ * @param request info needed to create the stream
+ * @param configuration contains information about the created stream
+ * @return handle to the stream or a negative error
+ */
+ virtual aaudio_handle_t openStream(const AAudioStreamRequest &request,
+ AAudioStreamConfiguration &configuration) = 0;
+
+ virtual aaudio_result_t closeStream(aaudio_handle_t streamHandle) = 0;
+
+ /* Get an immutable description of the in-memory queues
+ * used to communicate with the underlying HAL or Service.
+ */
+ virtual aaudio_result_t getStreamDescription(aaudio_handle_t streamHandle,
+ AudioEndpointParcelable &parcelable) = 0;
+
+ /**
+ * Start the flow of data.
+ */
+ virtual aaudio_result_t startStream(aaudio_handle_t streamHandle) = 0;
+
+ /**
+ * Stop the flow of data such that start() can resume without loss of data.
+ */
+ virtual aaudio_result_t pauseStream(aaudio_handle_t streamHandle) = 0;
+
+ /**
+ * Stop the flow of data after data currently in the buffer has played.
+ */
+ virtual aaudio_result_t stopStream(aaudio_handle_t streamHandle) = 0;
+
+ /**
+ * Discard any data held by the underlying HAL or Service.
+ */
+ virtual aaudio_result_t flushStream(aaudio_handle_t streamHandle) = 0;
+
+ /**
+ * Manage the specified thread as a low latency audio thread.
+ */
+ virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle,
+ pid_t clientProcessId,
+ pid_t clientThreadId,
+ int64_t periodNanoseconds) = 0;
+
+ virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle,
+ pid_t clientProcessId,
+ pid_t clientThreadId) = 0;
+};
+
+} /* namespace aaudio */
+
+#endif //ANDROID_AAUDIO_BINDING_AAUDIO_SERVICE_INTERFACE_H
diff --git a/media/libaaudio/src/binding/AAudioServiceMessage.h b/media/libaaudio/src/binding/AAudioServiceMessage.h
new file mode 100644
index 0000000..b4377fb
--- /dev/null
+++ b/media/libaaudio/src/binding/AAudioServiceMessage.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AAUDIO_AAUDIO_SERVICE_MESSAGE_H
+#define ANDROID_AAUDIO_AAUDIO_SERVICE_MESSAGE_H
+
+#include <stdint.h>
+
+#include <aaudio/AAudio.h>
+
+namespace aaudio {
+
+// TODO move this to an "include" folder for the service.
+
+// Used to send information about the HAL to the client.
+struct AAudioMessageTimestamp {
+ int64_t position; // number of frames transferred so far
+ int64_t deviceOffset; // add to client position to get device position
+ int64_t timestamp; // time when that position was reached
+};
+
+typedef enum aaudio_service_event_e : uint32_t {
+ AAUDIO_SERVICE_EVENT_STARTED,
+ AAUDIO_SERVICE_EVENT_PAUSED,
+ AAUDIO_SERVICE_EVENT_STOPPED,
+ AAUDIO_SERVICE_EVENT_FLUSHED,
+ AAUDIO_SERVICE_EVENT_CLOSED,
+ AAUDIO_SERVICE_EVENT_DISCONNECTED,
+ AAUDIO_SERVICE_EVENT_VOLUME
+} aaudio_service_event_t;
+
+struct AAudioMessageEvent {
+ aaudio_service_event_t event;
+ double dataDouble;
+ int64_t dataLong;
+};
+
+typedef struct AAudioServiceMessage_s {
+ enum class code : uint32_t {
+ NOTHING,
+ TIMESTAMP,
+ EVENT,
+ };
+
+ code what;
+ union {
+ AAudioMessageTimestamp timestamp; // what == TIMESTAMP
+ AAudioMessageEvent event; // what == EVENT
+ };
+} AAudioServiceMessage;
+
+} /* namespace aaudio */
+
+#endif //ANDROID_AAUDIO_AAUDIO_SERVICE_MESSAGE_H
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
new file mode 100644
index 0000000..44edb1d
--- /dev/null
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <aaudio/AAudio.h>
+
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+
+#include "binding/AAudioStreamConfiguration.h"
+
+using android::NO_ERROR;
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+using namespace aaudio;
+
+AAudioStreamConfiguration::AAudioStreamConfiguration() {}
+AAudioStreamConfiguration::~AAudioStreamConfiguration() {}
+
+status_t AAudioStreamConfiguration::writeToParcel(Parcel* parcel) const {
+ status_t status;
+ status = parcel->writeInt32(mDeviceId);
+ if (status != NO_ERROR) goto error;
+ status = parcel->writeInt32(mSampleRate);
+ if (status != NO_ERROR) goto error;
+ status = parcel->writeInt32(mSamplesPerFrame);
+ if (status != NO_ERROR) goto error;
+ status = parcel->writeInt32((int32_t) mSharingMode);
+ if (status != NO_ERROR) goto error;
+ status = parcel->writeInt32((int32_t) mAudioFormat);
+ if (status != NO_ERROR) goto error;
+ status = parcel->writeInt32(mBufferCapacity);
+ if (status != NO_ERROR) goto error;
+ return NO_ERROR;
+error:
+ ALOGE("AAudioStreamConfiguration.writeToParcel(): write failed = %d", status);
+ return status;
+}
+
+status_t AAudioStreamConfiguration::readFromParcel(const Parcel* parcel) {
+ status_t status = parcel->readInt32(&mDeviceId);
+ if (status != NO_ERROR) goto error;
+ status = parcel->readInt32(&mSampleRate);
+ if (status != NO_ERROR) goto error;
+ status = parcel->readInt32(&mSamplesPerFrame);
+ if (status != NO_ERROR) goto error;
+ status = parcel->readInt32(&mSharingMode);
+ if (status != NO_ERROR) goto error;
+ status = parcel->readInt32(&mAudioFormat);
+ if (status != NO_ERROR) goto error;
+ status = parcel->readInt32(&mBufferCapacity);
+ if (status != NO_ERROR) goto error;
+ return NO_ERROR;
+error:
+ ALOGE("AAudioStreamConfiguration.readFromParcel(): read failed = %d", status);
+ return status;
+}
+
+aaudio_result_t AAudioStreamConfiguration::validate() const {
+ // Validate results of the open.
+ if (mSampleRate < 0 || mSampleRate >= 8 * 48000) { // TODO review limits
+ ALOGE("AAudioStreamConfiguration.validate(): invalid sampleRate = %d", mSampleRate);
+ return AAUDIO_ERROR_INTERNAL;
+ }
+
+ if (mSamplesPerFrame < 1 || mSamplesPerFrame >= 32) { // TODO review limits
+ ALOGE("AAudioStreamConfiguration.validate() invalid samplesPerFrame = %d", mSamplesPerFrame);
+ return AAUDIO_ERROR_INTERNAL;
+ }
+
+ switch (mAudioFormat) {
+ case AAUDIO_FORMAT_PCM_I16:
+ case AAUDIO_FORMAT_PCM_FLOAT:
+ break;
+ default:
+ ALOGE("AAudioStreamConfiguration.validate() invalid audioFormat = %d", mAudioFormat);
+ return AAUDIO_ERROR_INTERNAL;
+ }
+
+ if (mBufferCapacity < 0) {
+ ALOGE("AAudioStreamConfiguration.validate() invalid mBufferCapacity = %d", mBufferCapacity);
+ return AAUDIO_ERROR_INTERNAL;
+ }
+ return AAUDIO_OK;
+}
+
+void AAudioStreamConfiguration::dump() const {
+ ALOGD("AAudioStreamConfiguration mDeviceId = %d", mDeviceId);
+ ALOGD("AAudioStreamConfiguration mSampleRate = %d", mSampleRate);
+ ALOGD("AAudioStreamConfiguration mSamplesPerFrame = %d", mSamplesPerFrame);
+ ALOGD("AAudioStreamConfiguration mSharingMode = %d", (int)mSharingMode);
+ ALOGD("AAudioStreamConfiguration mAudioFormat = %d", (int)mAudioFormat);
+ ALOGD("AAudioStreamConfiguration mBufferCapacity = %d", mBufferCapacity);
+}
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.h b/media/libaaudio/src/binding/AAudioStreamConfiguration.h
new file mode 100644
index 0000000..144595a
--- /dev/null
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_BINDING_AAUDIO_STREAM_CONFIGURATION_H
+#define ANDROID_BINDING_AAUDIO_STREAM_CONFIGURATION_H
+
+#include <stdint.h>
+
+#include <aaudio/AAudio.h>
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+namespace aaudio {
+
+class AAudioStreamConfiguration : public Parcelable {
+public:
+ AAudioStreamConfiguration();
+ virtual ~AAudioStreamConfiguration();
+
+ int32_t getDeviceId() const {
+ return mDeviceId;
+ }
+
+ void setDeviceId(int32_t deviceId) {
+ mDeviceId = deviceId;
+ }
+
+ int32_t getSampleRate() const {
+ return mSampleRate;
+ }
+
+ void setSampleRate(int32_t sampleRate) {
+ mSampleRate = sampleRate;
+ }
+
+ int32_t getSamplesPerFrame() const {
+ return mSamplesPerFrame;
+ }
+
+ void setSamplesPerFrame(int32_t samplesPerFrame) {
+ mSamplesPerFrame = samplesPerFrame;
+ }
+
+ aaudio_format_t getAudioFormat() const {
+ return mAudioFormat;
+ }
+
+ void setAudioFormat(aaudio_format_t audioFormat) {
+ mAudioFormat = audioFormat;
+ }
+
+ aaudio_sharing_mode_t getSharingMode() const {
+ return mSharingMode;
+ }
+
+ void setSharingMode(aaudio_sharing_mode_t sharingMode) {
+ mSharingMode = sharingMode;
+ }
+
+ int32_t getBufferCapacity() const {
+ return mBufferCapacity;
+ }
+
+ void setBufferCapacity(int32_t frames) {
+ mBufferCapacity = frames;
+ }
+
+ virtual status_t writeToParcel(Parcel* parcel) const override;
+
+ virtual status_t readFromParcel(const Parcel* parcel) override;
+
+ aaudio_result_t validate() const;
+
+ void dump() const;
+
+private:
+ int32_t mDeviceId = AAUDIO_UNSPECIFIED;
+ int32_t mSampleRate = AAUDIO_UNSPECIFIED;
+ int32_t mSamplesPerFrame = AAUDIO_UNSPECIFIED;
+ aaudio_sharing_mode_t mSharingMode = AAUDIO_SHARING_MODE_SHARED;
+ aaudio_format_t mAudioFormat = AAUDIO_FORMAT_UNSPECIFIED;
+ int32_t mBufferCapacity = AAUDIO_UNSPECIFIED;
+};
+
+} /* namespace aaudio */
+
+#endif //ANDROID_BINDING_AAUDIO_STREAM_CONFIGURATION_H
diff --git a/media/libaaudio/src/binding/AAudioStreamRequest.cpp b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
new file mode 100644
index 0000000..a5c27b9
--- /dev/null
+++ b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+
+#include <aaudio/AAudio.h>
+
+#include "binding/AAudioStreamConfiguration.h"
+#include "binding/AAudioStreamRequest.h"
+
+using android::NO_ERROR;
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+using namespace aaudio;
+
+AAudioStreamRequest::AAudioStreamRequest()
+ : mConfiguration()
+ {}
+
+AAudioStreamRequest::~AAudioStreamRequest() {}
+
+status_t AAudioStreamRequest::writeToParcel(Parcel* parcel) const {
+ status_t status = parcel->writeInt32((int32_t) mUserId);
+ if (status != NO_ERROR) goto error;
+ status = parcel->writeInt32((int32_t) mProcessId);
+ if (status != NO_ERROR) goto error;
+ status = parcel->writeInt32((int32_t) mDirection);
+ if (status != NO_ERROR) goto error;
+
+ status = parcel->writeBool(mSharingModeMatchRequired);
+ if (status != NO_ERROR) goto error;
+
+ status = mConfiguration.writeToParcel(parcel);
+ if (status != NO_ERROR) goto error;
+ return NO_ERROR;
+
+error:
+ ALOGE("AAudioStreamRequest.writeToParcel(): write failed = %d", status);
+ return status;
+}
+
+status_t AAudioStreamRequest::readFromParcel(const Parcel* parcel) {
+ int32_t temp;
+ status_t status = parcel->readInt32(&temp);
+ if (status != NO_ERROR) goto error;
+ mUserId = (uid_t) temp;
+
+ status = parcel->readInt32(&temp);
+ if (status != NO_ERROR) goto error;
+ mProcessId = (pid_t) temp;
+
+ status = parcel->readInt32(&temp);
+ if (status != NO_ERROR) goto error;
+ mDirection = (aaudio_direction_t) temp;
+
+ status = parcel->readBool(&mSharingModeMatchRequired);
+ if (status != NO_ERROR) goto error;
+
+ status = mConfiguration.readFromParcel(parcel);
+ if (status != NO_ERROR) goto error;
+ return NO_ERROR;
+
+error:
+ ALOGE("AAudioStreamRequest.readFromParcel(): read failed = %d", status);
+ return status;
+}
+
+aaudio_result_t AAudioStreamRequest::validate() const {
+ return mConfiguration.validate();
+}
+
+void AAudioStreamRequest::dump() const {
+ ALOGD("AAudioStreamRequest mUserId = %d", mUserId);
+ ALOGD("AAudioStreamRequest mProcessId = %d", mProcessId);
+ ALOGD("AAudioStreamRequest mDirection = %d", mDirection);
+ mConfiguration.dump();
+}
diff --git a/media/libaaudio/src/binding/AAudioStreamRequest.h b/media/libaaudio/src/binding/AAudioStreamRequest.h
new file mode 100644
index 0000000..77138da
--- /dev/null
+++ b/media/libaaudio/src/binding/AAudioStreamRequest.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_BINDING_AAUDIO_STREAM_REQUEST_H
+#define ANDROID_BINDING_AAUDIO_STREAM_REQUEST_H
+
+#include <stdint.h>
+
+#include <aaudio/AAudio.h>
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+
+#include "binding/AAudioStreamConfiguration.h"
+
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+namespace aaudio {
+
+class AAudioStreamRequest : public Parcelable {
+public:
+ AAudioStreamRequest();
+ virtual ~AAudioStreamRequest();
+
+ uid_t getUserId() const {
+ return mUserId;
+ }
+
+ void setUserId(uid_t userId) {
+ mUserId = userId;
+ }
+
+ pid_t getProcessId() const {
+ return mProcessId;
+ }
+
+ void setProcessId(pid_t processId) {
+ mProcessId = processId;
+ }
+
+ aaudio_direction_t getDirection() const {
+ return mDirection;
+ }
+
+ void setDirection(aaudio_direction_t direction) {
+ mDirection = direction;
+ }
+
+ bool isSharingModeMatchRequired() const {
+ return mSharingModeMatchRequired;
+ }
+
+ void setSharingModeMatchRequired(bool required) {
+ mSharingModeMatchRequired = required;
+ }
+
+
+ const AAudioStreamConfiguration &getConstantConfiguration() const {
+ return mConfiguration;
+ }
+
+ AAudioStreamConfiguration &getConfiguration() {
+ return mConfiguration;
+ }
+
+ virtual status_t writeToParcel(Parcel* parcel) const override;
+
+ virtual status_t readFromParcel(const Parcel* parcel) override;
+
+ aaudio_result_t validate() const;
+
+ void dump() const;
+
+protected:
+ AAudioStreamConfiguration mConfiguration;
+ uid_t mUserId;
+ pid_t mProcessId;
+ aaudio_direction_t mDirection;
+ bool mSharingModeMatchRequired = false;
+};
+
+} /* namespace aaudio */
+
+#endif //ANDROID_BINDING_AAUDIO_STREAM_REQUEST_H
diff --git a/media/libaaudio/src/binding/AudioEndpointParcelable.cpp b/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
new file mode 100644
index 0000000..d05abb0
--- /dev/null
+++ b/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+#include <utility/AAudioUtilities.h>
+
+#include "binding/AAudioServiceDefinitions.h"
+#include "binding/RingBufferParcelable.h"
+#include "binding/AudioEndpointParcelable.h"
+
+using android::NO_ERROR;
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+using namespace aaudio;
+
+/**
+ * Container for information about the message queues plus
+ * general stream information needed by AAudio clients.
+ * It contains no addresses, just sizes, offsets and file descriptors for
+ * shared memory that can be passed through Binder.
+ */
+AudioEndpointParcelable::AudioEndpointParcelable() {}
+
+AudioEndpointParcelable::~AudioEndpointParcelable() {}
+
+/**
+ * Add the file descriptor to the table.
+ * @return index in table or negative error
+ */
+int32_t AudioEndpointParcelable::addFileDescriptor(int fd, int32_t sizeInBytes) {
+ if (mNumSharedMemories >= MAX_SHARED_MEMORIES) {
+ return AAUDIO_ERROR_OUT_OF_RANGE;
+ }
+ int32_t index = mNumSharedMemories++;
+ mSharedMemories[index].setup(fd, sizeInBytes);
+ return index;
+}
+
+/**
+ * The read and write must be symmetric.
+ */
+status_t AudioEndpointParcelable::writeToParcel(Parcel* parcel) const {
+ parcel->writeInt32(mNumSharedMemories);
+ for (int i = 0; i < mNumSharedMemories; i++) {
+ mSharedMemories[i].writeToParcel(parcel);
+ }
+ mUpMessageQueueParcelable.writeToParcel(parcel);
+ mDownMessageQueueParcelable.writeToParcel(parcel);
+ mUpDataQueueParcelable.writeToParcel(parcel);
+ mDownDataQueueParcelable.writeToParcel(parcel);
+ return NO_ERROR; // TODO check for errors above
+}
+
+status_t AudioEndpointParcelable::readFromParcel(const Parcel* parcel) {
+ parcel->readInt32(&mNumSharedMemories);
+ for (int i = 0; i < mNumSharedMemories; i++) {
+ mSharedMemories[i].readFromParcel(parcel);
+ }
+ mUpMessageQueueParcelable.readFromParcel(parcel);
+ mDownMessageQueueParcelable.readFromParcel(parcel);
+ mUpDataQueueParcelable.readFromParcel(parcel);
+ mDownDataQueueParcelable.readFromParcel(parcel);
+ return NO_ERROR; // TODO check for errors above
+}
+
+aaudio_result_t AudioEndpointParcelable::resolve(EndpointDescriptor *descriptor) {
+ aaudio_result_t result = mUpMessageQueueParcelable.resolve(mSharedMemories,
+ &descriptor->upMessageQueueDescriptor);
+ if (result != AAUDIO_OK) return result;
+ result = mDownMessageQueueParcelable.resolve(mSharedMemories,
+ &descriptor->downMessageQueueDescriptor);
+ if (result != AAUDIO_OK) return result;
+
+ result = mDownDataQueueParcelable.resolve(mSharedMemories,
+ &descriptor->dataQueueDescriptor);
+ return result;
+}
+
+aaudio_result_t AudioEndpointParcelable::close() {
+ int err = 0;
+ for (int i = 0; i < mNumSharedMemories; i++) {
+ int lastErr = mSharedMemories[i].close();
+ if (lastErr < 0) err = lastErr;
+ }
+ return AAudioConvert_androidToAAudioResult(err);
+}
+
+aaudio_result_t AudioEndpointParcelable::validate() {
+ aaudio_result_t result;
+ if (mNumSharedMemories < 0 || mNumSharedMemories >= MAX_SHARED_MEMORIES) {
+ ALOGE("AudioEndpointParcelable invalid mNumSharedMemories = %d", mNumSharedMemories);
+ return AAUDIO_ERROR_INTERNAL;
+ }
+ for (int i = 0; i < mNumSharedMemories; i++) {
+ result = mSharedMemories[i].validate();
+ if (result != AAUDIO_OK) {
+ ALOGE("AudioEndpointParcelable invalid mSharedMemories[%d] = %d", i, result);
+ return result;
+ }
+ }
+ if ((result = mUpMessageQueueParcelable.validate()) != AAUDIO_OK) {
+ ALOGE("AudioEndpointParcelable invalid mUpMessageQueueParcelable = %d", result);
+ return result;
+ }
+ if ((result = mDownMessageQueueParcelable.validate()) != AAUDIO_OK) {
+ ALOGE("AudioEndpointParcelable invalid mDownMessageQueueParcelable = %d", result);
+ return result;
+ }
+ if ((result = mUpDataQueueParcelable.validate()) != AAUDIO_OK) {
+ ALOGE("AudioEndpointParcelable invalid mUpDataQueueParcelable = %d", result);
+ return result;
+ }
+ if ((result = mDownDataQueueParcelable.validate()) != AAUDIO_OK) {
+ ALOGE("AudioEndpointParcelable invalid mDownDataQueueParcelable = %d", result);
+ return result;
+ }
+ return AAUDIO_OK;
+}
+
+void AudioEndpointParcelable::dump() {
+ ALOGD("AudioEndpointParcelable ======================================= BEGIN");
+ ALOGD("AudioEndpointParcelable mNumSharedMemories = %d", mNumSharedMemories);
+ for (int i = 0; i < mNumSharedMemories; i++) {
+ mSharedMemories[i].dump();
+ }
+ ALOGD("AudioEndpointParcelable mUpMessageQueueParcelable =========");
+ mUpMessageQueueParcelable.dump();
+ ALOGD("AudioEndpointParcelable mDownMessageQueueParcelable =======");
+ mDownMessageQueueParcelable.dump();
+ ALOGD("AudioEndpointParcelable mUpDataQueueParcelable ============");
+ mUpDataQueueParcelable.dump();
+ ALOGD("AudioEndpointParcelable mDownDataQueueParcelable ==========");
+ mDownDataQueueParcelable.dump();
+ ALOGD("AudioEndpointParcelable ======================================= END");
+}
+
diff --git a/media/libaaudio/src/binding/AudioEndpointParcelable.h b/media/libaaudio/src/binding/AudioEndpointParcelable.h
new file mode 100644
index 0000000..993075c
--- /dev/null
+++ b/media/libaaudio/src/binding/AudioEndpointParcelable.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_BINDING_AUDIO_ENDPOINT_PARCELABLE_H
+#define ANDROID_BINDING_AUDIO_ENDPOINT_PARCELABLE_H
+
+#include <stdint.h>
+
+//#include <sys/mman.h>
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+
+#include "binding/AAudioServiceDefinitions.h"
+#include "binding/RingBufferParcelable.h"
+
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+namespace aaudio {
+
+/**
+ * Container for information about the message queues plus
+ * general stream information needed by AAudio clients.
+ * It contains no addresses, just sizes, offsets and file descriptors for
+ * shared memory that can be passed through Binder.
+ */
+class AudioEndpointParcelable : public Parcelable {
+public:
+ AudioEndpointParcelable();
+ virtual ~AudioEndpointParcelable();
+
+ /**
+ * Add the file descriptor to the table.
+ * @return index in table or negative error
+ */
+ int32_t addFileDescriptor(int fd, int32_t sizeInBytes);
+
+ virtual status_t writeToParcel(Parcel* parcel) const override;
+
+ virtual status_t readFromParcel(const Parcel* parcel) override;
+
+ aaudio_result_t resolve(EndpointDescriptor *descriptor);
+
+ aaudio_result_t validate();
+
+ aaudio_result_t close();
+
+ void dump();
+
+public: // TODO add getters
+ // Set capacityInFrames to zero if Queue is unused.
+ RingBufferParcelable mUpMessageQueueParcelable; // server to client
+ RingBufferParcelable mDownMessageQueueParcelable; // to server
+ RingBufferParcelable mUpDataQueueParcelable; // eg. record, could share same queue
+ RingBufferParcelable mDownDataQueueParcelable; // eg. playback
+
+private:
+ int32_t mNumSharedMemories = 0;
+ SharedMemoryParcelable mSharedMemories[MAX_SHARED_MEMORIES];
+};
+
+} /* namespace aaudio */
+
+#endif //ANDROID_BINDING_AUDIO_ENDPOINT_PARCELABLE_H
diff --git a/media/libaaudio/src/binding/IAAudioService.cpp b/media/libaaudio/src/binding/IAAudioService.cpp
new file mode 100644
index 0000000..b8ef611
--- /dev/null
+++ b/media/libaaudio/src/binding/IAAudioService.cpp
@@ -0,0 +1,356 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <aaudio/AAudio.h>
+
+#include "binding/AudioEndpointParcelable.h"
+#include "binding/AAudioStreamRequest.h"
+#include "binding/AAudioServiceDefinitions.h"
+#include "binding/AAudioStreamConfiguration.h"
+#include "binding/IAAudioService.h"
+#include "utility/AAudioUtilities.h"
+
+namespace android {
+
+using aaudio::aaudio_handle_t;
+
+/**
+ * This is used by the AAudio Client to talk to the AAudio Service.
+ *
+ * The order of parameters in the Parcels must match with code in AAudioService.cpp.
+ */
+class BpAAudioService : public BpInterface<IAAudioService>
+{
+public:
+ explicit BpAAudioService(const sp<IBinder>& impl)
+ : BpInterface<IAAudioService>(impl)
+ {
+ }
+
+ virtual aaudio_handle_t openStream(const aaudio::AAudioStreamRequest &request,
+ aaudio::AAudioStreamConfiguration &configurationOutput) override {
+ Parcel data, reply;
+ // send command
+ data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+ ALOGV("BpAAudioService::client openStream --------------------");
+ // request.dump();
+ request.writeToParcel(&data);
+ status_t err = remote()->transact(OPEN_STREAM, data, &reply);
+ ALOGV("BpAAudioService::client openStream returned %d", err);
+ if (err != NO_ERROR) {
+ ALOGE("BpAAudioService::client openStream transact failed %d", err);
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+ // parse reply
+ aaudio_handle_t stream;
+ err = reply.readInt32(&stream);
+ if (err != NO_ERROR) {
+ ALOGE("BpAAudioService::client transact(OPEN_STREAM) readInt %d", err);
+ return AAudioConvert_androidToAAudioResult(err);
+ } else if (stream < 0) {
+ ALOGE("BpAAudioService::client OPEN_STREAM passed stream %d", stream);
+ return stream;
+ }
+ err = configurationOutput.readFromParcel(&reply);
+ if (err != NO_ERROR) {
+ ALOGE("BpAAudioService::client openStream readFromParcel failed %d", err);
+ closeStream(stream);
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+ return stream;
+ }
+
+ virtual aaudio_result_t closeStream(aaudio_handle_t streamHandle) override {
+ Parcel data, reply;
+ // send command
+ data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+ data.writeInt32(streamHandle);
+ status_t err = remote()->transact(CLOSE_STREAM, data, &reply);
+ if (err != NO_ERROR) {
+ ALOGE("BpAAudioService::client closeStream transact failed %d", err);
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+ // parse reply
+ aaudio_result_t res;
+ reply.readInt32(&res);
+ return res;
+ }
+
+ virtual aaudio_result_t getStreamDescription(aaudio_handle_t streamHandle,
+ aaudio::AudioEndpointParcelable &parcelable) {
+ Parcel data, reply;
+ // send command
+ data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+ data.writeInt32(streamHandle);
+ status_t err = remote()->transact(GET_STREAM_DESCRIPTION, data, &reply);
+ if (err != NO_ERROR) {
+ ALOGE("BpAAudioService::client transact(GET_STREAM_DESCRIPTION) returns %d", err);
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+ // parse reply
+ aaudio_result_t result;
+ err = reply.readInt32(&result);
+ if (err != NO_ERROR) {
+ ALOGE("BpAAudioService::client transact(GET_STREAM_DESCRIPTION) readInt %d", err);
+ return AAudioConvert_androidToAAudioResult(err);
+ } else if (result != AAUDIO_OK) {
+ ALOGE("BpAAudioService::client GET_STREAM_DESCRIPTION passed result %d", result);
+ return result;
+ }
+        err = parcelable.readFromParcel(&reply);
+ if (err != NO_ERROR) {
+ ALOGE("BpAAudioService::client transact(GET_STREAM_DESCRIPTION) read endpoint %d", err);
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+ //parcelable.dump();
+ result = parcelable.validate();
+ if (result != AAUDIO_OK) {
+ ALOGE("BpAAudioService::client GET_STREAM_DESCRIPTION validation fails %d", result);
+ return result;
+ }
+ return result;
+ }
+
+ // TODO should we wait for a reply?
+ virtual aaudio_result_t startStream(aaudio_handle_t streamHandle) override {
+ Parcel data, reply;
+ // send command
+ data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+ data.writeInt32(streamHandle);
+ status_t err = remote()->transact(START_STREAM, data, &reply);
+ if (err != NO_ERROR) {
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+ // parse reply
+ aaudio_result_t res;
+ reply.readInt32(&res);
+ return res;
+ }
+
+ virtual aaudio_result_t pauseStream(aaudio_handle_t streamHandle) override {
+ Parcel data, reply;
+ // send command
+ data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+ data.writeInt32(streamHandle);
+ status_t err = remote()->transact(PAUSE_STREAM, data, &reply);
+ if (err != NO_ERROR) {
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+ // parse reply
+ aaudio_result_t res;
+ reply.readInt32(&res);
+ return res;
+ }
+
+ virtual aaudio_result_t stopStream(aaudio_handle_t streamHandle) override {
+ Parcel data, reply;
+ // send command
+ data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+ data.writeInt32(streamHandle);
+ status_t err = remote()->transact(STOP_STREAM, data, &reply);
+ if (err != NO_ERROR) {
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+ // parse reply
+ aaudio_result_t res;
+ reply.readInt32(&res);
+ return res;
+ }
+
+ virtual aaudio_result_t flushStream(aaudio_handle_t streamHandle) override {
+ Parcel data, reply;
+ // send command
+ data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+ data.writeInt32(streamHandle);
+ status_t err = remote()->transact(FLUSH_STREAM, data, &reply);
+ if (err != NO_ERROR) {
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+ // parse reply
+ aaudio_result_t res;
+ reply.readInt32(&res);
+ return res;
+ }
+
+ virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle,
+ pid_t clientProcessId,
+ pid_t clientThreadId,
+ int64_t periodNanoseconds)
+ override {
+ Parcel data, reply;
+ // send command
+ data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+ data.writeInt32(streamHandle);
+ data.writeInt32((int32_t) clientProcessId);
+ data.writeInt32((int32_t) clientThreadId);
+ data.writeInt64(periodNanoseconds);
+ status_t err = remote()->transact(REGISTER_AUDIO_THREAD, data, &reply);
+ if (err != NO_ERROR) {
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+ // parse reply
+ aaudio_result_t res;
+ reply.readInt32(&res);
+ return res;
+ }
+
+ virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle,
+ pid_t clientProcessId,
+ pid_t clientThreadId)
+ override {
+ Parcel data, reply;
+ // send command
+ data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
+ data.writeInt32(streamHandle);
+ data.writeInt32((int32_t) clientProcessId);
+ data.writeInt32((int32_t) clientThreadId);
+ status_t err = remote()->transact(UNREGISTER_AUDIO_THREAD, data, &reply);
+ if (err != NO_ERROR) {
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+ // parse reply
+ aaudio_result_t res;
+ reply.readInt32(&res);
+ return res;
+ }
+
+};
+
+// Implement an interface to the service.
+// This is here so that you don't have to link with libaaudio static library.
+IMPLEMENT_META_INTERFACE(AAudioService, "IAAudioService");
+
+// The order of parameters in the Parcels must match with code in BpAAudioService
+
+status_t BnAAudioService::onTransact(uint32_t code, const Parcel& data,
+ Parcel* reply, uint32_t flags) {
+ aaudio_handle_t stream;
+ aaudio::AAudioStreamRequest request;
+ aaudio::AAudioStreamConfiguration configuration;
+ pid_t pid;
+ pid_t tid;
+ int64_t nanoseconds;
+ aaudio_result_t result;
+ ALOGV("BnAAudioService::onTransact(%i) %i", code, flags);
+ data.checkInterface(this);
+
+ switch(code) {
+ case OPEN_STREAM: {
+ request.readFromParcel(&data);
+
+ //ALOGD("BnAAudioService::client openStream request dump --------------------");
+ //request.dump();
+
+ stream = openStream(request, configuration);
+ //ALOGD("BnAAudioService::onTransact OPEN_STREAM server handle = 0x%08X", stream);
+ reply->writeInt32(stream);
+ configuration.writeToParcel(reply);
+ return NO_ERROR;
+ } break;
+
+ case CLOSE_STREAM: {
+ data.readInt32(&stream);
+ result = closeStream(stream);
+ //ALOGD("BnAAudioService::onTransact CLOSE_STREAM 0x%08X, result = %d",
+ // stream, result);
+ reply->writeInt32(result);
+ return NO_ERROR;
+ } break;
+
+ case GET_STREAM_DESCRIPTION: {
+ data.readInt32(&stream);
+ aaudio::AudioEndpointParcelable parcelable;
+ result = getStreamDescription(stream, parcelable);
+ if (result != AAUDIO_OK) {
+ return AAudioConvert_aaudioToAndroidStatus(result);
+ }
+ result = parcelable.validate();
+ if (result != AAUDIO_OK) {
+ ALOGE("BnAAudioService::onTransact getStreamDescription() returns %d", result);
+ parcelable.dump();
+ return AAudioConvert_aaudioToAndroidStatus(result);
+ }
+ reply->writeInt32(result);
+ parcelable.writeToParcel(reply);
+ return NO_ERROR;
+ } break;
+
+ case START_STREAM: {
+ data.readInt32(&stream);
+ result = startStream(stream);
+ ALOGV("BnAAudioService::onTransact START_STREAM 0x%08X, result = %d",
+ stream, result);
+ reply->writeInt32(result);
+ return NO_ERROR;
+ } break;
+
+ case PAUSE_STREAM: {
+ data.readInt32(&stream);
+ result = pauseStream(stream);
+ ALOGV("BnAAudioService::onTransact PAUSE_STREAM 0x%08X, result = %d",
+ stream, result);
+ reply->writeInt32(result);
+ return NO_ERROR;
+ } break;
+
+ case STOP_STREAM: {
+ data.readInt32(&stream);
+ result = stopStream(stream);
+ ALOGV("BnAAudioService::onTransact STOP_STREAM 0x%08X, result = %d",
+ stream, result);
+ reply->writeInt32(result);
+ return NO_ERROR;
+ } break;
+
+ case FLUSH_STREAM: {
+ data.readInt32(&stream);
+ result = flushStream(stream);
+ ALOGV("BnAAudioService::onTransact FLUSH_STREAM 0x%08X, result = %d",
+ stream, result);
+ reply->writeInt32(result);
+ return NO_ERROR;
+ } break;
+
+ case REGISTER_AUDIO_THREAD: {
+ data.readInt32(&stream);
+ data.readInt32(&pid);
+ data.readInt32(&tid);
+ data.readInt64(&nanoseconds);
+ result = registerAudioThread(stream, pid, tid, nanoseconds);
+ ALOGV("BnAAudioService::onTransact REGISTER_AUDIO_THREAD 0x%08X, result = %d",
+ stream, result);
+ reply->writeInt32(result);
+ return NO_ERROR;
+ } break;
+
+ case UNREGISTER_AUDIO_THREAD: {
+ data.readInt32(&stream);
+ data.readInt32(&pid);
+ data.readInt32(&tid);
+ result = unregisterAudioThread(stream, pid, tid);
+ ALOGV("BnAAudioService::onTransact UNREGISTER_AUDIO_THREAD 0x%08X, result = %d",
+ stream, result);
+ reply->writeInt32(result);
+ return NO_ERROR;
+ } break;
+
+ default:
+ // ALOGW("BnAAudioService::onTransact not handled %u", code);
+ return BBinder::onTransact(code, data, reply, flags);
+ }
+}
+
+} /* namespace android */
diff --git a/media/libaaudio/src/binding/IAAudioService.h b/media/libaaudio/src/binding/IAAudioService.h
new file mode 100644
index 0000000..44a5e12
--- /dev/null
+++ b/media/libaaudio/src/binding/IAAudioService.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AAUDIO_IAAUDIO_SERVICE_H
+#define ANDROID_AAUDIO_IAAUDIO_SERVICE_H
+
+#include <stdint.h>
+#include <utils/RefBase.h>
+#include <binder/TextOutput.h>
+#include <binder/IInterface.h>
+
+#include <aaudio/AAudio.h>
+
+#include "binding/AAudioServiceDefinitions.h"
+#include "binding/AudioEndpointParcelable.h"
+#include "binding/AAudioStreamRequest.h"
+#include "binding/AAudioStreamConfiguration.h"
+#include "utility/HandleTracker.h"
+
+namespace android {
+
+#define AAUDIO_SERVICE_NAME "media.aaudio"
+
+// Interface (our AIDL) - Shared by server and client
+class IAAudioService : public IInterface {
+public:
+
+ DECLARE_META_INTERFACE(AAudioService);
+
+ /**
+ * @param request info needed to create the stream
+ * @param configuration contains information about the created stream
+ * @return handle to the stream or a negative error
+ */
+ virtual aaudio_handle_t openStream(const aaudio::AAudioStreamRequest &request,
+ aaudio::AAudioStreamConfiguration &configurationOutput) = 0;
+
+ virtual aaudio_result_t closeStream(aaudio::aaudio_handle_t streamHandle) = 0;
+
+ /* Get an immutable description of the in-memory queues
+ * used to communicate with the underlying HAL or Service.
+ */
+ virtual aaudio_result_t getStreamDescription(aaudio::aaudio_handle_t streamHandle,
+ aaudio::AudioEndpointParcelable &parcelable) = 0;
+
+ /**
+ * Start the flow of data.
+ * This is asynchronous. When complete, the service will send a STARTED event.
+ */
+ virtual aaudio_result_t startStream(aaudio::aaudio_handle_t streamHandle) = 0;
+
+ /**
+ * Stop the flow of data such that start() can resume without loss of data.
+ * This is asynchronous. When complete, the service will send a PAUSED event.
+ */
+ virtual aaudio_result_t pauseStream(aaudio::aaudio_handle_t streamHandle) = 0;
+
+ /**
+ * Stop the flow of data such that the data currently in the buffer is played.
+ * This is asynchronous. When complete, the service will send a STOPPED event.
+ */
+ virtual aaudio_result_t stopStream(aaudio::aaudio_handle_t streamHandle) = 0;
+
+ /**
+ * Discard any data held by the underlying HAL or Service.
+ * This is asynchronous. When complete, the service will send a FLUSHED event.
+ */
+ virtual aaudio_result_t flushStream(aaudio::aaudio_handle_t streamHandle) = 0;
+
+ /**
+ * Manage the specified thread as a low latency audio thread.
+ * TODO Consider passing this information as part of the startStream() call.
+ */
+ virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle,
+ pid_t clientProcessId,
+ pid_t clientThreadId,
+ int64_t periodNanoseconds) = 0;
+
+ virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle,
+ pid_t clientProcessId,
+ pid_t clientThreadId) = 0;
+};
+
+class BnAAudioService : public BnInterface<IAAudioService> {
+public:
+ virtual status_t onTransact(uint32_t code, const Parcel& data,
+ Parcel* reply, uint32_t flags = 0);
+
+};
+
+} /* namespace android */
+
+#endif //ANDROID_AAUDIO_IAAUDIO_SERVICE_H
diff --git a/media/libaaudio/src/binding/RingBufferParcelable.cpp b/media/libaaudio/src/binding/RingBufferParcelable.cpp
new file mode 100644
index 0000000..6b74b21
--- /dev/null
+++ b/media/libaaudio/src/binding/RingBufferParcelable.cpp
@@ -0,0 +1,194 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+
+#include <binder/Parcelable.h>
+
+#include "binding/AAudioServiceDefinitions.h"
+#include "binding/SharedRegionParcelable.h"
+#include "binding/RingBufferParcelable.h"
+
+using namespace aaudio;
+
+RingBufferParcelable::RingBufferParcelable() {}
+RingBufferParcelable::~RingBufferParcelable() {}
+
+// TODO This assumes that all three use the same SharedMemoryParcelable
+void RingBufferParcelable::setupMemory(int32_t sharedMemoryIndex,
+ int32_t dataMemoryOffset,
+ int32_t dataSizeInBytes,
+ int32_t readCounterOffset,
+ int32_t writeCounterOffset,
+ int32_t counterSizeBytes) {
+ mReadCounterParcelable.setup(sharedMemoryIndex, readCounterOffset, counterSizeBytes);
+ mWriteCounterParcelable.setup(sharedMemoryIndex, writeCounterOffset, counterSizeBytes);
+ mDataParcelable.setup(sharedMemoryIndex, dataMemoryOffset, dataSizeInBytes);
+}
+
+void RingBufferParcelable::setupMemory(int32_t sharedMemoryIndex,
+ int32_t dataMemoryOffset,
+ int32_t dataSizeInBytes) {
+ mReadCounterParcelable.setup(sharedMemoryIndex, 0, 0);
+ mWriteCounterParcelable.setup(sharedMemoryIndex, 0, 0);
+ mDataParcelable.setup(sharedMemoryIndex, dataMemoryOffset, dataSizeInBytes);
+}
+
+int32_t RingBufferParcelable::getBytesPerFrame() {
+ return mBytesPerFrame;
+}
+
+void RingBufferParcelable::setBytesPerFrame(int32_t bytesPerFrame) {
+ mBytesPerFrame = bytesPerFrame;
+}
+
+int32_t RingBufferParcelable::getFramesPerBurst() {
+ return mFramesPerBurst;
+}
+
+void RingBufferParcelable::setFramesPerBurst(int32_t framesPerBurst) {
+ mFramesPerBurst = framesPerBurst;
+}
+
+int32_t RingBufferParcelable::getCapacityInFrames() {
+ return mCapacityInFrames;
+}
+
+void RingBufferParcelable::setCapacityInFrames(int32_t capacityInFrames) {
+ mCapacityInFrames = capacityInFrames;
+}
+
+/**
+ * The read and write must be symmetric.
+ */
+status_t RingBufferParcelable::writeToParcel(Parcel* parcel) const {
+ status_t status = parcel->writeInt32(mCapacityInFrames);
+ if (status != NO_ERROR) goto error;
+ if (mCapacityInFrames > 0) {
+ status = parcel->writeInt32(mBytesPerFrame);
+ if (status != NO_ERROR) goto error;
+ status = parcel->writeInt32(mFramesPerBurst);
+ if (status != NO_ERROR) goto error;
+ status = parcel->writeInt32(mFlags);
+ if (status != NO_ERROR) goto error;
+ status = mReadCounterParcelable.writeToParcel(parcel);
+ if (status != NO_ERROR) goto error;
+ status = mWriteCounterParcelable.writeToParcel(parcel);
+ if (status != NO_ERROR) goto error;
+ status = mDataParcelable.writeToParcel(parcel);
+ if (status != NO_ERROR) goto error;
+ }
+ return NO_ERROR;
+error:
+ ALOGE("RingBufferParcelable::writeToParcel() error = %d", status);
+ return status;
+}
+
+status_t RingBufferParcelable::readFromParcel(const Parcel* parcel) {
+ status_t status = parcel->readInt32(&mCapacityInFrames);
+ if (status != NO_ERROR) goto error;
+ if (mCapacityInFrames > 0) {
+ status = parcel->readInt32(&mBytesPerFrame);
+ if (status != NO_ERROR) goto error;
+ status = parcel->readInt32(&mFramesPerBurst);
+ if (status != NO_ERROR) goto error;
+ status = parcel->readInt32((int32_t *)&mFlags);
+ if (status != NO_ERROR) goto error;
+ status = mReadCounterParcelable.readFromParcel(parcel);
+ if (status != NO_ERROR) goto error;
+ status = mWriteCounterParcelable.readFromParcel(parcel);
+ if (status != NO_ERROR) goto error;
+ status = mDataParcelable.readFromParcel(parcel);
+ if (status != NO_ERROR) goto error;
+ }
+ return NO_ERROR;
+error:
+ ALOGE("RingBufferParcelable::readFromParcel() error = %d", status);
+ return status;
+}
+
+aaudio_result_t RingBufferParcelable::resolve(SharedMemoryParcelable *memoryParcels, RingBufferDescriptor *descriptor) {
+ aaudio_result_t result;
+
+ result = mReadCounterParcelable.resolve(memoryParcels,
+ (void **) &descriptor->readCounterAddress);
+ if (result != AAUDIO_OK) {
+ return result;
+ }
+
+ result = mWriteCounterParcelable.resolve(memoryParcels,
+ (void **) &descriptor->writeCounterAddress);
+ if (result != AAUDIO_OK) {
+ return result;
+ }
+
+ result = mDataParcelable.resolve(memoryParcels, (void **) &descriptor->dataAddress);
+ if (result != AAUDIO_OK) {
+ return result;
+ }
+
+ descriptor->bytesPerFrame = mBytesPerFrame;
+ descriptor->framesPerBurst = mFramesPerBurst;
+ descriptor->capacityInFrames = mCapacityInFrames;
+ descriptor->flags = mFlags;
+ return AAUDIO_OK;
+}
+
+aaudio_result_t RingBufferParcelable::validate() {
+ aaudio_result_t result;
+ if (mCapacityInFrames < 0 || mCapacityInFrames >= 32 * 1024) {
+ ALOGE("RingBufferParcelable invalid mCapacityInFrames = %d", mCapacityInFrames);
+ return AAUDIO_ERROR_INTERNAL;
+ }
+ if (mBytesPerFrame < 0 || mBytesPerFrame >= 256) {
+ ALOGE("RingBufferParcelable invalid mBytesPerFrame = %d", mBytesPerFrame);
+ return AAUDIO_ERROR_INTERNAL;
+ }
+ if (mFramesPerBurst < 0 || mFramesPerBurst >= 16 * 1024) {
+ ALOGE("RingBufferParcelable invalid mFramesPerBurst = %d", mFramesPerBurst);
+ return AAUDIO_ERROR_INTERNAL;
+ }
+ if ((result = mReadCounterParcelable.validate()) != AAUDIO_OK) {
+ ALOGE("RingBufferParcelable invalid mReadCounterParcelable = %d", result);
+ return result;
+ }
+ if ((result = mWriteCounterParcelable.validate()) != AAUDIO_OK) {
+ ALOGE("RingBufferParcelable invalid mWriteCounterParcelable = %d", result);
+ return result;
+ }
+ if ((result = mDataParcelable.validate()) != AAUDIO_OK) {
+ ALOGE("RingBufferParcelable invalid mDataParcelable = %d", result);
+ return result;
+ }
+ return AAUDIO_OK;
+}
+
+
+void RingBufferParcelable::dump() {
+ ALOGD("RingBufferParcelable mCapacityInFrames = %d ---------", mCapacityInFrames);
+ if (mCapacityInFrames > 0) {
+ ALOGD("RingBufferParcelable mBytesPerFrame = %d", mBytesPerFrame);
+ ALOGD("RingBufferParcelable mFramesPerBurst = %d", mFramesPerBurst);
+ ALOGD("RingBufferParcelable mFlags = %u", mFlags);
+ mReadCounterParcelable.dump();
+ mWriteCounterParcelable.dump();
+ mDataParcelable.dump();
+ }
+}
diff --git a/media/libaaudio/src/binding/RingBufferParcelable.h b/media/libaaudio/src/binding/RingBufferParcelable.h
new file mode 100644
index 0000000..bd562f2
--- /dev/null
+++ b/media/libaaudio/src/binding/RingBufferParcelable.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AAUDIO_RINGBUFFER_PARCELABLE_H
+#define ANDROID_AAUDIO_RINGBUFFER_PARCELABLE_H
+
+#include <stdint.h>
+
+#include <binder/Parcelable.h>
+
+#include "binding/AAudioServiceDefinitions.h"
+#include "binding/SharedRegionParcelable.h"
+
+namespace aaudio {
+
+class RingBufferParcelable : public Parcelable {
+public:
+ RingBufferParcelable();
+ virtual ~RingBufferParcelable();
+
+ // TODO This assumes that all three use the same SharedMemoryParcelable
+ void setupMemory(int32_t sharedMemoryIndex,
+ int32_t dataMemoryOffset,
+ int32_t dataSizeInBytes,
+ int32_t readCounterOffset,
+ int32_t writeCounterOffset,
+ int32_t counterSizeBytes);
+
+ void setupMemory(int32_t sharedMemoryIndex,
+ int32_t dataMemoryOffset,
+ int32_t dataSizeInBytes);
+
+ int32_t getBytesPerFrame();
+
+ void setBytesPerFrame(int32_t bytesPerFrame);
+
+ int32_t getFramesPerBurst();
+
+ void setFramesPerBurst(int32_t framesPerBurst);
+
+ int32_t getCapacityInFrames();
+
+ void setCapacityInFrames(int32_t capacityInFrames);
+
+ bool isFileDescriptorSafe(SharedMemoryParcelable *memoryParcels);
+
+ /**
+ * The read and write must be symmetric.
+ */
+ virtual status_t writeToParcel(Parcel* parcel) const override;
+
+ virtual status_t readFromParcel(const Parcel* parcel) override;
+
+ aaudio_result_t resolve(SharedMemoryParcelable *memoryParcels, RingBufferDescriptor *descriptor);
+
+ aaudio_result_t validate();
+
+ void dump();
+
+private:
+ SharedRegionParcelable mReadCounterParcelable;
+ SharedRegionParcelable mWriteCounterParcelable;
+ SharedRegionParcelable mDataParcelable;
+ int32_t mBytesPerFrame = 0; // index is in frames
+ int32_t mFramesPerBurst = 0; // for ISOCHRONOUS queues
+ int32_t mCapacityInFrames = 0; // zero if unused
+ RingbufferFlags mFlags = RingbufferFlags::NONE;
+};
+
+} /* namespace aaudio */
+
+#endif //ANDROID_AAUDIO_RINGBUFFER_PARCELABLE_H
diff --git a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
new file mode 100644
index 0000000..899eb04
--- /dev/null
+++ b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <stdio.h>
+
+#include <sys/mman.h>
+#include <aaudio/AAudio.h>
+
+#include <binder/Parcelable.h>
+#include <utility/AAudioUtilities.h>
+
+#include "binding/SharedMemoryParcelable.h"
+
+using android::NO_ERROR;
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+using namespace aaudio;
+
+SharedMemoryParcelable::SharedMemoryParcelable() {}
+SharedMemoryParcelable::~SharedMemoryParcelable() {}
+
+void SharedMemoryParcelable::setup(int fd, int32_t sizeInBytes) {
+ mFd = fd;
+ mSizeInBytes = sizeInBytes;
+
+}
+
+status_t SharedMemoryParcelable::writeToParcel(Parcel* parcel) const {
+ status_t status = parcel->writeInt32(mSizeInBytes);
+ if (status != NO_ERROR) return status;
+ if (mSizeInBytes > 0) {
+ status = parcel->writeDupFileDescriptor(mFd);
+ ALOGE_IF(status != NO_ERROR, "SharedMemoryParcelable writeDupFileDescriptor failed : %d",
+ status);
+ }
+ return status;
+}
+
+status_t SharedMemoryParcelable::readFromParcel(const Parcel* parcel) {
+ status_t status = parcel->readInt32(&mSizeInBytes);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ if (mSizeInBytes > 0) {
+ // Keep the original FD until you are done with the mFd.
+ // If you close it in here then it will prevent mFd from working.
+ mOriginalFd = parcel->readFileDescriptor();
+ ALOGV("SharedMemoryParcelable::readFromParcel() LEAK? mOriginalFd = %d\n", mOriginalFd);
+ mFd = fcntl(mOriginalFd, F_DUPFD_CLOEXEC, 0);
+ ALOGV("SharedMemoryParcelable::readFromParcel() LEAK? mFd = %d\n", mFd);
+ if (mFd == -1) {
+ status = -errno;
+ ALOGE("SharedMemoryParcelable readFromParcel fcntl() failed : %d", status);
+ }
+ }
+ return status;
+}
+
+aaudio_result_t SharedMemoryParcelable::close() {
+ if (mResolvedAddress != MMAP_UNRESOLVED_ADDRESS) {
+ int err = munmap(mResolvedAddress, mSizeInBytes);
+ if (err < 0) {
+ ALOGE("SharedMemoryParcelable::close() munmap() failed %d", err);
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+ mResolvedAddress = MMAP_UNRESOLVED_ADDRESS;
+ }
+ if (mFd != -1) {
+ ALOGV("SharedMemoryParcelable::close() LEAK? mFd = %d\n", mFd);
+ ::close(mFd);
+ mFd = -1;
+ }
+ if (mOriginalFd != -1) {
+ ALOGV("SharedMemoryParcelable::close() LEAK? mOriginalFd = %d\n", mOriginalFd);
+ ::close(mOriginalFd);
+ mOriginalFd = -1;
+ }
+ return AAUDIO_OK;
+}
+
+aaudio_result_t SharedMemoryParcelable::resolve(int32_t offsetInBytes, int32_t sizeInBytes,
+ void **regionAddressPtr) {
+
+ if (offsetInBytes < 0) {
+ ALOGE("SharedMemoryParcelable illegal offsetInBytes = %d", offsetInBytes);
+ return AAUDIO_ERROR_OUT_OF_RANGE;
+ } else if ((offsetInBytes + sizeInBytes) > mSizeInBytes) {
+ ALOGE("SharedMemoryParcelable out of range, offsetInBytes = %d, "
+ "sizeInBytes = %d, mSizeInBytes = %d",
+ offsetInBytes, sizeInBytes, mSizeInBytes);
+ return AAUDIO_ERROR_OUT_OF_RANGE;
+ }
+ if (mResolvedAddress == MMAP_UNRESOLVED_ADDRESS) {
+ mResolvedAddress = (uint8_t *) mmap(0, mSizeInBytes, PROT_READ|PROT_WRITE,
+ MAP_SHARED, mFd, 0);
+ if (mResolvedAddress == MMAP_UNRESOLVED_ADDRESS) {
+ ALOGE("SharedMemoryParcelable mmap failed for fd = %d, errno = %s",
+ mFd, strerror(errno));
+ return AAUDIO_ERROR_INTERNAL;
+ }
+ }
+ *regionAddressPtr = mResolvedAddress + offsetInBytes;
+ ALOGV("SharedMemoryParcelable mResolvedAddress = %p", mResolvedAddress);
+ ALOGV("SharedMemoryParcelable offset by %d, *regionAddressPtr = %p",
+ offsetInBytes, *regionAddressPtr);
+ return AAUDIO_OK;
+}
+
+int32_t SharedMemoryParcelable::getSizeInBytes() {
+ return mSizeInBytes;
+}
+
+aaudio_result_t SharedMemoryParcelable::validate() {
+ if (mSizeInBytes < 0 || mSizeInBytes >= MAX_MMAP_SIZE_BYTES) {
+ ALOGE("SharedMemoryParcelable invalid mSizeInBytes = %d", mSizeInBytes);
+ return AAUDIO_ERROR_OUT_OF_RANGE;
+ }
+ if (mSizeInBytes > 0) {
+ if (mFd == -1) {
+ ALOGE("SharedMemoryParcelable uninitialized mFd = %d", mFd);
+ return AAUDIO_ERROR_INTERNAL;
+ }
+ }
+ return AAUDIO_OK;
+}
+
+void SharedMemoryParcelable::dump() {
+ ALOGD("SharedMemoryParcelable mFd = %d", mFd);
+ ALOGD("SharedMemoryParcelable mSizeInBytes = %d", mSizeInBytes);
+ ALOGD("SharedMemoryParcelable mResolvedAddress = %p", mResolvedAddress);
+}
diff --git a/media/libaaudio/src/binding/SharedMemoryParcelable.h b/media/libaaudio/src/binding/SharedMemoryParcelable.h
new file mode 100644
index 0000000..4b94b46
--- /dev/null
+++ b/media/libaaudio/src/binding/SharedMemoryParcelable.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AAUDIO_SHARED_MEMORY_PARCELABLE_H
+#define ANDROID_AAUDIO_SHARED_MEMORY_PARCELABLE_H
+
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+namespace aaudio {
+
+// Arbitrary limits for sanity checks. TODO remove after debugging.
+#define MAX_SHARED_MEMORIES (32)
+#define MAX_MMAP_OFFSET_BYTES (32 * 1024 * 8)
+#define MAX_MMAP_SIZE_BYTES (32 * 1024 * 8)
+
+/**
+ * This is a parcelable description of a shared memory referenced by a file descriptor.
+ * It may be divided into several regions.
+ */
+class SharedMemoryParcelable : public Parcelable {
+public:
+ SharedMemoryParcelable();
+ virtual ~SharedMemoryParcelable();
+
+ void setup(int fd, int32_t sizeInBytes);
+
+ virtual status_t writeToParcel(Parcel* parcel) const override;
+
+ virtual status_t readFromParcel(const Parcel* parcel) override;
+
+ // mmap() shared memory
+ aaudio_result_t resolve(int32_t offsetInBytes, int32_t sizeInBytes, void **regionAddressPtr);
+
+ // munmap() any mapped memory
+ aaudio_result_t close();
+
+ bool isFileDescriptorSafe();
+
+ int32_t getSizeInBytes();
+
+ aaudio_result_t validate();
+
+ void dump();
+
+protected:
+
+#define MMAP_UNRESOLVED_ADDRESS reinterpret_cast<uint8_t*>(MAP_FAILED)
+
+ int mFd = -1;
+ int mOriginalFd = -1;
+ int32_t mSizeInBytes = 0;
+ uint8_t *mResolvedAddress = MMAP_UNRESOLVED_ADDRESS;
+};
+
+} /* namespace aaudio */
+
+#endif //ANDROID_AAUDIO_SHARED_MEMORY_PARCELABLE_H
diff --git a/media/libaaudio/src/binding/SharedRegionParcelable.cpp b/media/libaaudio/src/binding/SharedRegionParcelable.cpp
new file mode 100644
index 0000000..7381dcb
--- /dev/null
+++ b/media/libaaudio/src/binding/SharedRegionParcelable.cpp
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <binder/Parcelable.h>
+
+#include <aaudio/AAudio.h>
+
+#include "binding/SharedMemoryParcelable.h"
+#include "binding/SharedRegionParcelable.h"
+
+using android::NO_ERROR;
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+using namespace aaudio;
+
+SharedRegionParcelable::SharedRegionParcelable() {}
+SharedRegionParcelable::~SharedRegionParcelable() {}
+
+// Record which shared memory this region lives in, and where inside it.
+// No range checking happens here — that is deferred to validate().
+void SharedRegionParcelable::setup(int32_t sharedMemoryIndex,
+                                   int32_t offsetInBytes,
+                                   int32_t sizeInBytes) {
+    mSharedMemoryIndex = sharedMemoryIndex;
+    mOffsetInBytes = offsetInBytes;
+    mSizeInBytes = sizeInBytes;
+}
+
+// Serialize: size first, then index/offset only when the region is non-empty.
+status_t SharedRegionParcelable::writeToParcel(Parcel* parcel) const {
+    parcel->writeInt32(mSizeInBytes);
+    if (mSizeInBytes > 0) {
+        parcel->writeInt32(mSharedMemoryIndex);
+        parcel->writeInt32(mOffsetInBytes);
+    }
+    return NO_ERROR; // TODO check for errors above
+}
+
+// Deserialize; must mirror writeToParcel() field-for-field.
+// NOTE(review): values read from the Parcel are untrusted here — callers are
+// expected to call validate() before using them. Confirm all callers do.
+status_t SharedRegionParcelable::readFromParcel(const Parcel* parcel) {
+    parcel->readInt32(&mSizeInBytes);
+    if (mSizeInBytes > 0) {
+        parcel->readInt32(&mSharedMemoryIndex);
+        parcel->readInt32(&mOffsetInBytes);
+    }
+    return NO_ERROR; // TODO check for errors above
+}
+
+// Resolve this region to a local address by mapping the referenced shared
+// memory. memoryParcels is an array indexed by mSharedMemoryIndex.
+// NOTE(review): only the lower bound of the index is checked here; the upper
+// bound (MAX_SHARED_MEMORIES) is enforced by validate() — confirm validate()
+// always runs before resolve() on parceled data.
+aaudio_result_t SharedRegionParcelable::resolve(SharedMemoryParcelable *memoryParcels,
+                                                void **regionAddressPtr) {
+    if (mSizeInBytes == 0) {
+        // An empty region resolves to no address, by design.
+        *regionAddressPtr = nullptr;
+        return AAUDIO_OK;
+    }
+    if (mSharedMemoryIndex < 0) {
+        ALOGE("SharedRegionParcelable invalid mSharedMemoryIndex = %d", mSharedMemoryIndex);
+        return AAUDIO_ERROR_INTERNAL;
+    }
+    SharedMemoryParcelable *memoryParcel = &memoryParcels[mSharedMemoryIndex];
+    return memoryParcel->resolve(mOffsetInBytes, mSizeInBytes, regionAddressPtr);
+}
+
+// Range-check fields that arrived over Binder against the arbitrary MAX_*
+// limits. Index/offset are only meaningful for non-empty regions.
+aaudio_result_t SharedRegionParcelable::validate() {
+    if (mSizeInBytes < 0 || mSizeInBytes >= MAX_MMAP_SIZE_BYTES) {
+        ALOGE("SharedRegionParcelable invalid mSizeInBytes = %d", mSizeInBytes);
+        return AAUDIO_ERROR_OUT_OF_RANGE;
+    }
+    if (mSizeInBytes > 0) {
+        if (mOffsetInBytes < 0 || mOffsetInBytes >= MAX_MMAP_OFFSET_BYTES) {
+            ALOGE("SharedRegionParcelable invalid mOffsetInBytes = %d", mOffsetInBytes);
+            return AAUDIO_ERROR_OUT_OF_RANGE;
+        }
+        if (mSharedMemoryIndex < 0 || mSharedMemoryIndex >= MAX_SHARED_MEMORIES) {
+            ALOGE("SharedRegionParcelable invalid mSharedMemoryIndex = %d", mSharedMemoryIndex);
+            return AAUDIO_ERROR_INTERNAL;
+        }
+    }
+    return AAUDIO_OK;
+}
+
+// Log the region's fields for debugging.
+void SharedRegionParcelable::dump() {
+    ALOGD("SharedRegionParcelable mSizeInBytes = %d -----", mSizeInBytes);
+    if (mSizeInBytes > 0) {
+        ALOGD("SharedRegionParcelable mSharedMemoryIndex = %d", mSharedMemoryIndex);
+        ALOGD("SharedRegionParcelable mOffsetInBytes = %d", mOffsetInBytes);
+    }
+}
diff --git a/media/libaaudio/src/binding/SharedRegionParcelable.h b/media/libaaudio/src/binding/SharedRegionParcelable.h
new file mode 100644
index 0000000..f6babfd
--- /dev/null
+++ b/media/libaaudio/src/binding/SharedRegionParcelable.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AAUDIO_SHARED_REGION_PARCELABLE_H
+#define ANDROID_AAUDIO_SHARED_REGION_PARCELABLE_H
+
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <binder/Parcelable.h>
+
+#include <aaudio/AAudio.h>
+
+#include "binding/SharedMemoryParcelable.h"
+
+using android::status_t;
+using android::Parcel;
+using android::Parcelable;
+
+namespace aaudio {
+
+/**
+ * Parcelable description of one region inside a shared memory: which memory
+ * (by index into an array of SharedMemoryParcelable), plus offset and size.
+ */
+class SharedRegionParcelable : public Parcelable {
+public:
+    SharedRegionParcelable();
+    virtual ~SharedRegionParcelable();
+
+    // Record location of the region; validated later by validate().
+    void setup(int32_t sharedMemoryIndex, int32_t offsetInBytes, int32_t sizeInBytes);
+
+    virtual status_t writeToParcel(Parcel* parcel) const override;
+
+    virtual status_t readFromParcel(const Parcel* parcel) override;
+
+    // Map the referenced memory and return this region's local address.
+    aaudio_result_t resolve(SharedMemoryParcelable *memoryParcels, void **regionAddressPtr);
+
+    bool isFileDescriptorSafe(SharedMemoryParcelable *memoryParcels);
+
+    // Range-check the parceled fields against the MAX_* limits.
+    aaudio_result_t validate();
+
+    void dump();
+
+protected:
+    int32_t mSharedMemoryIndex = -1; // index into the memoryParcels array
+    int32_t mOffsetInBytes = 0;      // byte offset of this region in that memory
+    int32_t mSizeInBytes = 0;        // 0 means "no region"
+};
+
+} /* namespace aaudio */
+
+#endif //ANDROID_AAUDIO_SHARED_REGION_PARCELABLE_H
diff --git a/media/libaaudio/src/client/AudioEndpoint.cpp b/media/libaaudio/src/client/AudioEndpoint.cpp
new file mode 100644
index 0000000..5cb642b
--- /dev/null
+++ b/media/libaaudio/src/client/AudioEndpoint.cpp
@@ -0,0 +1,248 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <cassert>
+#include <aaudio/AAudio.h>
+
+#include "AudioEndpointParcelable.h"
+#include "AudioEndpoint.h"
+#include "AAudioServiceMessage.h"
+
+using namespace android;
+using namespace aaudio;
+
+#define RIDICULOUSLY_LARGE_BUFFER_CAPACITY (256 * 1024)
+#define RIDICULOUSLY_LARGE_FRAME_SIZE 4096
+
+// Queues are created later in configure(); counters start at zero and are
+// only used when the endpoint is free-running.
+AudioEndpoint::AudioEndpoint()
+        : mFreeRunning(false)
+        , mDataReadCounter(0)
+        , mDataWriteCounter(0)
+{
+}
+
+AudioEndpoint::~AudioEndpoint()
+{
+}
+
+// Sanity-check one ring-buffer descriptor received from the service and
+// probe its mapped addresses by reading and writing them. The probes will
+// crash the process (rather than corrupt silently) if the mmap failed.
+// "type" is only used for log messages.
+static aaudio_result_t AudioEndpoint_validateQueueDescriptor(const char *type,
+                                                  const RingBufferDescriptor *descriptor) {
+    if (descriptor == nullptr) {
+        ALOGE("AudioEndpoint_validateQueueDescriptor() NULL descriptor");
+        return AAUDIO_ERROR_NULL;
+    }
+
+    if (descriptor->capacityInFrames < 1
+        || descriptor->capacityInFrames > RIDICULOUSLY_LARGE_BUFFER_CAPACITY) {
+        ALOGE("AudioEndpoint_validateQueueDescriptor() bad capacityInFrames = %d",
+              descriptor->capacityInFrames);
+        return AAUDIO_ERROR_OUT_OF_RANGE;
+    }
+
+    // Reject extreme values to catch bugs and prevent numeric overflows.
+    if (descriptor->bytesPerFrame < 1
+        || descriptor->bytesPerFrame > RIDICULOUSLY_LARGE_FRAME_SIZE) {
+        ALOGE("AudioEndpoint_validateQueueDescriptor() bad bytesPerFrame = %d",
+              descriptor->bytesPerFrame);
+        return AAUDIO_ERROR_OUT_OF_RANGE;
+    }
+
+    if (descriptor->dataAddress == nullptr) {
+        ALOGE("AudioEndpoint_validateQueueDescriptor() NULL dataAddress");
+        return AAUDIO_ERROR_NULL;
+    }
+    ALOGV("AudioEndpoint_validateQueueDescriptor %s, dataAddress at %p ====================",
+          type,
+          descriptor->dataAddress);
+    ALOGV("AudioEndpoint_validateQueueDescriptor readCounter at %p, writeCounter at %p",
+          descriptor->readCounterAddress,
+          descriptor->writeCounterAddress);
+
+    // Try to READ from the data area.
+    // This code will crash if the mmap failed.
+    uint8_t value = descriptor->dataAddress[0];
+    ALOGV("AudioEndpoint_validateQueueDescriptor() dataAddress[0] = %d, then try to write",
+          (int) value);
+    // Try to WRITE to the data area.
+    // NOTE(review): writes value * 3 and does not restore the original byte —
+    // presumably harmless before the stream starts; confirm.
+    descriptor->dataAddress[0] = value * 3;
+    ALOGV("AudioEndpoint_validateQueueDescriptor() wrote successfully");
+
+    // Counter addresses may legitimately be null (free-running case);
+    // probe them with a benign write-back only when present.
+    if (descriptor->readCounterAddress) {
+        fifo_counter_t counter = *descriptor->readCounterAddress;
+        ALOGV("AudioEndpoint_validateQueueDescriptor() *readCounterAddress = %d, now write",
+              (int) counter);
+        *descriptor->readCounterAddress = counter;
+        ALOGV("AudioEndpoint_validateQueueDescriptor() wrote readCounterAddress successfully");
+    }
+
+    if (descriptor->writeCounterAddress) {
+        fifo_counter_t counter = *descriptor->writeCounterAddress;
+        ALOGV("AudioEndpoint_validateQueueDescriptor() *writeCounterAddress = %d, now write",
+              (int) counter);
+        *descriptor->writeCounterAddress = counter;
+        ALOGV("AudioEndpoint_validateQueueDescriptor() wrote writeCounterAddress successfully");
+    }
+
+    return AAUDIO_OK;
+}
+
+// Validate both queues of an endpoint; stops at the first failure.
+aaudio_result_t AudioEndpoint_validateDescriptor(const EndpointDescriptor *pEndpointDescriptor) {
+    aaudio_result_t result = AudioEndpoint_validateQueueDescriptor("messages",
+                                    &pEndpointDescriptor->upMessageQueueDescriptor);
+    if (result == AAUDIO_OK) {
+        result = AudioEndpoint_validateQueueDescriptor("data",
+                                    &pEndpointDescriptor->dataQueueDescriptor);
+    }
+    return result;
+}
+
+// Build the up-message FIFO and data FIFO from a validated descriptor.
+// Detects free-running mode (no read counter from the service) and
+// substitutes local counters in that case.
+aaudio_result_t AudioEndpoint::configure(const EndpointDescriptor *pEndpointDescriptor)
+{
+    aaudio_result_t result = AudioEndpoint_validateDescriptor(pEndpointDescriptor);
+    if (result != AAUDIO_OK) {
+        ALOGE("AudioEndpoint_validateQueueDescriptor returned %d %s",
+              result, AAudio_convertResultToText(result));
+        return result;
+    }
+
+    // ============================ up message queue =============================
+    const RingBufferDescriptor *descriptor = &pEndpointDescriptor->upMessageQueueDescriptor;
+    // Message-queue frames must be exactly one service message each.
+    if(descriptor->bytesPerFrame != sizeof(AAudioServiceMessage)) {
+        ALOGE("AudioEndpoint::configure() bytesPerFrame != sizeof(AAudioServiceMessage) = %d",
+              descriptor->bytesPerFrame);
+        return AAUDIO_ERROR_INTERNAL;
+    }
+
+    // The command queue requires both counters; it is never free-running.
+    if(descriptor->readCounterAddress == nullptr || descriptor->writeCounterAddress == nullptr) {
+        ALOGE("AudioEndpoint_validateQueueDescriptor() NULL counter address");
+        return AAUDIO_ERROR_NULL;
+    }
+
+    mUpCommandQueue = new FifoBuffer(
+            descriptor->bytesPerFrame,
+            descriptor->capacityInFrames,
+            descriptor->readCounterAddress,
+            descriptor->writeCounterAddress,
+            descriptor->dataAddress
+    );
+
+    // ============================ down data queue =============================
+    descriptor = &pEndpointDescriptor->dataQueueDescriptor;
+    ALOGV("AudioEndpoint::configure() data framesPerBurst = %d", descriptor->framesPerBurst);
+    ALOGV("AudioEndpoint::configure() data readCounterAddress = %p", descriptor->readCounterAddress);
+    // A missing read counter means the consumer (eg. DMA) does not report
+    // its position: free-running mode.
+    mFreeRunning = descriptor->readCounterAddress == nullptr;
+    ALOGV("AudioEndpoint::configure() mFreeRunning = %d", mFreeRunning ? 1 : 0);
+    // Fall back to local counters when the service did not supply shared ones.
+    int64_t *readCounterAddress = (descriptor->readCounterAddress == nullptr)
+                                  ? &mDataReadCounter
+                                  : descriptor->readCounterAddress;
+    int64_t *writeCounterAddress = (descriptor->writeCounterAddress == nullptr)
+                                  ? &mDataWriteCounter
+                                  : descriptor->writeCounterAddress;
+
+    mDataQueue = new FifoBuffer(
+            descriptor->bytesPerFrame,
+            descriptor->capacityInFrames,
+            readCounterAddress,
+            writeCounterAddress,
+            descriptor->dataAddress
+    );
+    // Default the effective buffer size to half the total capacity.
+    uint32_t threshold = descriptor->capacityInFrames / 2;
+    mDataQueue->setThreshold(threshold);
+    return result;
+}
+
+// Non-blocking read of one command from the service; returns frames read.
+aaudio_result_t AudioEndpoint::readUpCommand(AAudioServiceMessage *commandPtr)
+{
+    return mUpCommandQueue->read(commandPtr, 1);
+}
+
+// Non-blocking write of audio frames into the data queue.
+aaudio_result_t AudioEndpoint::writeDataNow(const void *buffer, int32_t numFrames)
+{
+    return mDataQueue->write(buffer, numFrames);
+}
+
+void AudioEndpoint::getEmptyFramesAvailable(WrappingBuffer *wrappingBuffer) {
+    mDataQueue->getEmptyRoomAvailable(wrappingBuffer);
+}
+
+int32_t AudioEndpoint::getEmptyFramesAvailable()
+{
+    return mDataQueue->getFifoControllerBase()->getEmptyFramesAvailable();
+}
+
+void AudioEndpoint::getFullFramesAvailable(WrappingBuffer *wrappingBuffer)
+{
+    return mDataQueue->getFullDataAvailable(wrappingBuffer);
+}
+
+int32_t AudioEndpoint::getFullFramesAvailable()
+{
+    return mDataQueue->getFifoControllerBase()->getFullFramesAvailable();
+}
+
+void AudioEndpoint::advanceWriteIndex(int32_t deltaFrames) {
+    mDataQueue->getFifoControllerBase()->advanceWriteIndex(deltaFrames);
+}
+
+void AudioEndpoint::advanceReadIndex(int32_t deltaFrames) {
+    mDataQueue->getFifoControllerBase()->advanceReadIndex(deltaFrames);
+}
+
+// Force the read counter; used when the consumer does not update it itself.
+void AudioEndpoint::setDataReadCounter(fifo_counter_t framesRead)
+{
+    mDataQueue->setReadCounter(framesRead);
+}
+
+fifo_counter_t AudioEndpoint::getDataReadCounter()
+{
+    return mDataQueue->getReadCounter();
+}
+
+void AudioEndpoint::setDataWriteCounter(fifo_counter_t framesRead)
+{
+    mDataQueue->setWriteCounter(framesRead);
+}
+
+fifo_counter_t AudioEndpoint::getDataWriteCounter()
+{
+    return mDataQueue->getWriteCounter();
+}
+
+// Clamp the request to the minimum, apply it as the FIFO threshold, and
+// report the value actually in effect through *actualFrames.
+// NOTE(review): no upper clamp here — presumably FifoBuffer::setThreshold
+// limits to capacity; confirm.
+int32_t AudioEndpoint::setBufferSizeInFrames(int32_t requestedFrames,
+                                            int32_t *actualFrames)
+{
+    if (requestedFrames < ENDPOINT_DATA_QUEUE_SIZE_MIN) {
+        requestedFrames = ENDPOINT_DATA_QUEUE_SIZE_MIN;
+    }
+    mDataQueue->setThreshold(requestedFrames);
+    *actualFrames = mDataQueue->getThreshold();
+    return AAUDIO_OK;
+}
+
+int32_t AudioEndpoint::getBufferSizeInFrames() const
+{
+    return mDataQueue->getThreshold();
+}
+
+int32_t AudioEndpoint::getBufferCapacityInFrames() const
+{
+    return (int32_t)mDataQueue->getBufferCapacityInFrames();
+}
+
diff --git a/media/libaaudio/src/client/AudioEndpoint.h b/media/libaaudio/src/client/AudioEndpoint.h
new file mode 100644
index 0000000..53ba033
--- /dev/null
+++ b/media/libaaudio/src/client/AudioEndpoint.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AAUDIO_AUDIO_ENDPOINT_H
+#define ANDROID_AAUDIO_AUDIO_ENDPOINT_H
+
+#include <aaudio/AAudio.h>
+
+#include "binding/AAudioServiceMessage.h"
+#include "binding/AudioEndpointParcelable.h"
+#include "fifo/FifoBuffer.h"
+
+namespace aaudio {
+
+#define ENDPOINT_DATA_QUEUE_SIZE_MIN 48
+
+/**
+ * A sink for audio.
+ * Used by the client code.
+ */
+/**
+ * A sink for audio.
+ * Used by the client code.
+ * Wraps the two shared FIFOs of a stream: the up-message queue (service to
+ * client commands) and the data queue (audio frames).
+ */
+class AudioEndpoint {
+
+public:
+    AudioEndpoint();
+    virtual ~AudioEndpoint();
+
+    /**
+     * Configure based on the EndPointDescriptor_t.
+     */
+    aaudio_result_t configure(const EndpointDescriptor *pEndpointDescriptor);
+
+    /**
+     * Read from a command passed up from the Server.
+     * @return 1 if command received, 0 for no command, or negative error.
+     */
+    aaudio_result_t readUpCommand(AAudioServiceMessage *commandPtr);
+
+    /**
+     * Non-blocking write.
+     * @return framesWritten or a negative error code.
+     */
+    aaudio_result_t writeDataNow(const void *buffer, int32_t numFrames);
+
+    // Get the writable region(s) of the data queue without copying.
+    void getEmptyFramesAvailable(android::WrappingBuffer *wrappingBuffer);
+
+    int32_t getEmptyFramesAvailable();
+
+    // Get the readable region(s) of the data queue without copying.
+    void getFullFramesAvailable(android::WrappingBuffer *wrappingBuffer);
+
+    int32_t getFullFramesAvailable();
+
+    void advanceReadIndex(int32_t deltaFrames);
+
+    void advanceWriteIndex(int32_t deltaFrames);
+
+    /**
+     * Set the read index in the downData queue.
+     * This is needed if the reader is not updating the index itself.
+     */
+    void setDataReadCounter(android::fifo_counter_t framesRead);
+
+    android::fifo_counter_t getDataReadCounter();
+
+    void setDataWriteCounter(android::fifo_counter_t framesWritten);
+
+    android::fifo_counter_t getDataWriteCounter();
+
+    /**
+     * The result is not valid until after configure() is called.
+     *
+     * @return true if the output buffer read position is not updated, eg. DMA
+     */
+    bool isFreeRunning() const { return mFreeRunning; }
+
+    // Set the effective buffer size (FIFO threshold); reports the applied
+    // value through actualFrames.
+    int32_t setBufferSizeInFrames(int32_t requestedFrames,
+                                  int32_t *actualFrames);
+    int32_t getBufferSizeInFrames() const;
+
+    int32_t getBufferCapacityInFrames() const;
+
+private:
+    android::FifoBuffer   *mUpCommandQueue;  // service-to-client messages
+    android::FifoBuffer   *mDataQueue;       // audio frames
+    bool                   mFreeRunning;
+    android::fifo_counter_t mDataReadCounter;  // only used if free-running
+    android::fifo_counter_t mDataWriteCounter; // only used if free-running
+};
+
+} // namespace aaudio
+
+#endif //ANDROID_AAUDIO_AUDIO_ENDPOINT_H
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
new file mode 100644
index 0000000..3a827f0
--- /dev/null
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -0,0 +1,590 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file is used in both client and server processes.
+// This is needed to make sense of the logs more easily.
+#define LOG_TAG (mInService ? "AAudioService" : "AAudio")
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#define ATRACE_TAG ATRACE_TAG_AUDIO
+
+#include <stdint.h>
+#include <assert.h>
+
+#include <binder/IServiceManager.h>
+
+#include <aaudio/AAudio.h>
+#include <utils/String16.h>
+#include <utils/Trace.h>
+
+#include "AudioClock.h"
+#include "AudioEndpointParcelable.h"
+#include "binding/AAudioStreamRequest.h"
+#include "binding/AAudioStreamConfiguration.h"
+#include "binding/IAAudioService.h"
+#include "binding/AAudioServiceMessage.h"
+#include "core/AudioStreamBuilder.h"
+#include "fifo/FifoBuffer.h"
+#include "utility/LinearRamp.h"
+
+#include "AudioStreamInternal.h"
+
+using android::String16;
+using android::Mutex;
+using android::WrappingBuffer;
+
+using namespace aaudio;
+
+#define MIN_TIMEOUT_NANOS (1000 * AAUDIO_NANOS_PER_MILLISECOND)
+
+// Wait at least this many times longer than the operation should take.
+#define MIN_TIMEOUT_OPERATIONS 4
+
+#define LOG_TIMESTAMPS 0
+
+// inService distinguishes the copy of this class that runs inside the
+// AAudio service from the client-side copy (used for the LOG_TAG above).
+AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface &serviceInterface, bool inService)
+        : AudioStream()
+        , mClockModel()
+        , mAudioEndpoint()
+        , mServiceStreamHandle(AAUDIO_HANDLE_INVALID)
+        , mFramesPerBurst(16)
+        , mServiceInterface(serviceInterface)
+        , mInService(inService) {
+}
+
+AudioStreamInternal::~AudioStreamInternal() {
+}
+
+// Open a stream through the AAudio service: send an open request, validate
+// the returned configuration, resolve the shared-memory endpoint, and set up
+// the optional data-callback buffer.
+aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {
+
+    aaudio_result_t result = AAUDIO_OK;
+    AAudioStreamRequest request;
+    AAudioStreamConfiguration configuration;
+
+    result = AudioStream::open(builder);
+    if (result < 0) {
+        return result;
+    }
+
+    // We have to do volume scaling. So we prefer FLOAT format.
+    if (getFormat() == AAUDIO_FORMAT_UNSPECIFIED) {
+        setFormat(AAUDIO_FORMAT_PCM_FLOAT);
+    }
+    // Request FLOAT for the shared mixer.
+    request.getConfiguration().setAudioFormat(AAUDIO_FORMAT_PCM_FLOAT);
+
+    // Build the request to send to the server.
+    request.setUserId(getuid());
+    request.setProcessId(getpid());
+    request.setDirection(getDirection());
+    request.setSharingModeMatchRequired(isSharingModeMatchRequired());
+
+    request.getConfiguration().setDeviceId(getDeviceId());
+    request.getConfiguration().setSampleRate(getSampleRate());
+    request.getConfiguration().setSamplesPerFrame(getSamplesPerFrame());
+    request.getConfiguration().setSharingMode(getSharingMode());
+
+    request.getConfiguration().setBufferCapacity(builder.getBufferCapacity());
+
+    mServiceStreamHandle = mServiceInterface.openStream(request, configuration);
+    if (mServiceStreamHandle < 0) {
+        // A negative handle is the error code from the service.
+        result = mServiceStreamHandle;
+        ALOGE("AudioStreamInternal.open(): openStream() returned %d", result);
+    } else {
+        result = configuration.validate();
+        if (result != AAUDIO_OK) {
+            close();
+            return result;
+        }
+        // Save results of the open.
+        setSampleRate(configuration.getSampleRate());
+        setSamplesPerFrame(configuration.getSamplesPerFrame());
+        setDeviceId(configuration.getDeviceId());
+
+        // Save device format so we can do format conversion and volume scaling together.
+        mDeviceFormat = configuration.getAudioFormat();
+
+        result = mServiceInterface.getStreamDescription(mServiceStreamHandle, mEndPointParcelable);
+        if (result != AAUDIO_OK) {
+            mServiceInterface.closeStream(mServiceStreamHandle);
+            return result;
+        }
+
+        // resolve parcelable into a descriptor
+        result = mEndPointParcelable.resolve(&mEndpointDescriptor);
+        if (result != AAUDIO_OK) {
+            mServiceInterface.closeStream(mServiceStreamHandle);
+            return result;
+        }
+
+        // Configure endpoint based on descriptor.
+        mAudioEndpoint.configure(&mEndpointDescriptor);
+
+        mFramesPerBurst = mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
+        int32_t capacity = mEndpointDescriptor.dataQueueDescriptor.capacityInFrames;
+
+        // Validate result from server.
+        // NOTE(review): these two early returns do NOT call
+        // mServiceInterface.closeStream() like the error paths above —
+        // looks like a service-stream leak; confirm and fix separately.
+        if (mFramesPerBurst < 16 || mFramesPerBurst > 16 * 1024) {
+            ALOGE("AudioStream::open(): framesPerBurst out of range = %d", mFramesPerBurst);
+            return AAUDIO_ERROR_OUT_OF_RANGE;
+        }
+        if (capacity < mFramesPerBurst || capacity > 32 * 1024) {
+            ALOGE("AudioStream::open(): bufferCapacity out of range = %d", capacity);
+            return AAUDIO_ERROR_OUT_OF_RANGE;
+        }
+
+        mClockModel.setSampleRate(getSampleRate());
+        mClockModel.setFramesPerBurst(mFramesPerBurst);
+
+        if (getDataCallbackProc()) {
+            mCallbackFrames = builder.getFramesPerDataCallback();
+            // Reject a callback size larger than half the buffer, or negative.
+            if (mCallbackFrames > getBufferCapacity() / 2) {
+                ALOGE("AudioStreamInternal.open(): framesPerCallback too large = %d, capacity = %d",
+                      mCallbackFrames, getBufferCapacity());
+                mServiceInterface.closeStream(mServiceStreamHandle);
+                return AAUDIO_ERROR_OUT_OF_RANGE;
+
+            } else if (mCallbackFrames < 0) {
+                ALOGE("AudioStreamInternal.open(): framesPerCallback negative");
+                mServiceInterface.closeStream(mServiceStreamHandle);
+                return AAUDIO_ERROR_OUT_OF_RANGE;
+
+            }
+            if (mCallbackFrames == AAUDIO_UNSPECIFIED) {
+                // Default to one burst per callback.
+                mCallbackFrames = mFramesPerBurst;
+            }
+
+            int32_t bytesPerFrame = getSamplesPerFrame()
+                                    * AAudioConvert_formatToSizeInBytes(getFormat());
+            int32_t callbackBufferSize = mCallbackFrames * bytesPerFrame;
+            mCallbackBuffer = new uint8_t[callbackBufferSize];
+        }
+
+        setState(AAUDIO_STREAM_STATE_OPEN);
+    }
+    return result;
+}
+
+// Stop the stream if needed, release the service handle, free the callback
+// buffer, and unmap the endpoint shared memory.
+aaudio_result_t AudioStreamInternal::close() {
+    ALOGD("AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X",
+             mServiceStreamHandle);
+    if (mServiceStreamHandle != AAUDIO_HANDLE_INVALID) {
+        // Don't close a stream while it is running.
+        aaudio_stream_state_t currentState = getState();
+        if (isActive()) {
+            requestStop();
+            aaudio_stream_state_t nextState;
+            int64_t timeoutNanoseconds = MIN_TIMEOUT_NANOS;
+            aaudio_result_t result = waitForStateChange(currentState, &nextState,
+                                                       timeoutNanoseconds);
+            // Continue closing even if the wait failed; just log it.
+            if (result != AAUDIO_OK) {
+                ALOGE("AudioStreamInternal::close() waitForStateChange() returned %d %s",
+                result, AAudio_convertResultToText(result));
+            }
+        }
+        // Invalidate the handle first so a reentrant call sees it closed.
+        aaudio_handle_t serviceStreamHandle = mServiceStreamHandle;
+        mServiceStreamHandle = AAUDIO_HANDLE_INVALID;
+
+        mServiceInterface.closeStream(serviceStreamHandle);
+        delete[] mCallbackBuffer;
+        mCallbackBuffer = nullptr;
+        return mEndPointParcelable.close();
+    } else {
+        return AAUDIO_ERROR_INVALID_HANDLE;
+    }
+}
+
+
+// Thread entry point: trampoline from the C thread API into the stream's
+// callbackLoop(). context is the AudioStreamInternal that spawned the thread.
+static void *aaudio_callback_thread_proc(void *context)
+{
+    AudioStreamInternal *stream = (AudioStreamInternal *)context;
+    //LOGD("AudioStreamInternal(): oboe_callback_thread, stream = %p", stream);
+    if (stream != NULL) {
+        return stream->callbackLoop();
+    } else {
+        return NULL;
+    }
+}
+
+// Start the clock model, ask the service to start the stream, and (when a
+// data callback is registered) launch the callback thread.
+aaudio_result_t AudioStreamInternal::requestStart()
+{
+    int64_t startTime;
+    ALOGD("AudioStreamInternal(): start()");
+    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+
+    startTime = AudioClock::getNanoseconds();
+    mClockModel.start(startTime);
+    setState(AAUDIO_STREAM_STATE_STARTING);
+    aaudio_result_t result = mServiceInterface.startStream(mServiceStreamHandle);;
+
+    if (result == AAUDIO_OK && getDataCallbackProc() != nullptr) {
+        // Launch the callback loop thread.
+        // Period = time for mCallbackFrames at the stream's sample rate.
+        int64_t periodNanos = mCallbackFrames
+                              * AAUDIO_NANOS_PER_SECOND
+                              / getSampleRate();
+        mCallbackEnabled.store(true);
+        result = createThread(periodNanos, aaudio_callback_thread_proc, this);
+    }
+    return result;
+}
+
+// Scale a frame count into a generous timeout: MIN_TIMEOUT_OPERATIONS times
+// the nominal duration of the operation, floored at MIN_TIMEOUT_NANOS.
+int64_t AudioStreamInternal::calculateReasonableTimeout(int32_t framesPerOperation) {
+
+    // Wait for at least a second or some number of callbacks to join the thread.
+    int64_t timeoutNanoseconds = (MIN_TIMEOUT_OPERATIONS
+                                  * framesPerOperation
+                                  * AAUDIO_NANOS_PER_SECOND)
+                                 / getSampleRate();
+    if (timeoutNanoseconds < MIN_TIMEOUT_NANOS) { // arbitrary number of seconds
+        timeoutNanoseconds = MIN_TIMEOUT_NANOS;
+    }
+    return timeoutNanoseconds;
+}
+
+// Convenience overload based on one burst.
+int64_t AudioStreamInternal::calculateReasonableTimeout() {
+    return calculateReasonableTimeout(getFramesPerBurst());
+}
+
+// Signal the callback loop to exit and join its thread; a no-op when no
+// callback thread is running.
+aaudio_result_t AudioStreamInternal::stopCallback()
+{
+    if (isDataCallbackActive()) {
+        mCallbackEnabled.store(false);
+        return joinThread(NULL);
+    } else {
+        return AAUDIO_OK;
+    }
+}
+
+// Pause without touching the callback thread: stop the clock model and
+// forward the pause to the service.
+aaudio_result_t AudioStreamInternal::requestPauseInternal()
+{
+    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
+        ALOGE("AudioStreamInternal(): requestPauseInternal() mServiceStreamHandle invalid = 0x%08X",
+              mServiceStreamHandle);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+
+    mClockModel.stop(AudioClock::getNanoseconds());
+    setState(AAUDIO_STREAM_STATE_PAUSING);
+    return mServiceInterface.pauseStream(mServiceStreamHandle);
+}
+
+// Public pause: stop the callback thread first, then pause the service stream.
+aaudio_result_t AudioStreamInternal::requestPause()
+{
+    aaudio_result_t result = stopCallback();
+    if (result != AAUDIO_OK) {
+        return result;
+    }
+    result = requestPauseInternal();
+    return result;
+}
+
+// Ask the service to discard queued data; completion arrives later as an
+// AAUDIO_SERVICE_EVENT_FLUSHED event (see onEventFromServer()).
+aaudio_result_t AudioStreamInternal::requestFlush() {
+    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
+        ALOGE("AudioStreamInternal(): requestFlush() mServiceStreamHandle invalid = 0x%08X",
+              mServiceStreamHandle);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+
+    setState(AAUDIO_STREAM_STATE_FLUSHING);
+    return mServiceInterface.flushStream(mServiceStreamHandle);
+}
+
+// TODO for Play only
+// TODO for Play only
+// Handle the service's FLUSHED event: account for the discarded frames and
+// collapse the data queue by snapping the write counter back to the read
+// counter (the hardware read position cannot be moved).
+void AudioStreamInternal::onFlushFromServer() {
+    ALOGD("AudioStreamInternal(): onFlushFromServer()");
+    int64_t readCounter = mAudioEndpoint.getDataReadCounter();
+    int64_t writeCounter = mAudioEndpoint.getDataWriteCounter();
+
+    // Bump offset so caller does not see the retrograde motion in getFramesRead().
+    int64_t framesFlushed = writeCounter - readCounter;
+    mFramesOffsetFromService += framesFlushed;
+
+    // Flush written frames by forcing writeCounter to readCounter.
+    // This is because we cannot move the read counter in the hardware.
+    mAudioEndpoint.setDataWriteCounter(readCounter);
+}
+
+// Stop without touching the callback thread: halt the clock model and
+// forward the stop to the service. Mirrors requestPauseInternal().
+aaudio_result_t AudioStreamInternal::requestStopInternal()
+{
+    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
+        ALOGE("AudioStreamInternal(): requestStopInternal() mServiceStreamHandle invalid = 0x%08X",
+              mServiceStreamHandle);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+
+    mClockModel.stop(AudioClock::getNanoseconds());
+    setState(AAUDIO_STREAM_STATE_STOPPING);
+    return mServiceInterface.stopStream(mServiceStreamHandle);
+}
+
+// Public stop: stop the callback thread first, then stop the service stream.
+aaudio_result_t AudioStreamInternal::requestStop()
+{
+    aaudio_result_t result = stopCallback();
+    if (result != AAUDIO_OK) {
+        return result;
+    }
+    result = requestStopInternal();
+    return result;
+}
+
+// Tell the service which client thread is doing the audio work (eg. so it
+// can be given appropriate scheduling).
+aaudio_result_t AudioStreamInternal::registerThread() {
+    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    return mServiceInterface.registerAudioThread(mServiceStreamHandle,
+                                              getpid(),
+                                              gettid(),
+                                              getPeriodNanoseconds());
+}
+
+// Undo registerThread() for the current thread.
+aaudio_result_t AudioStreamInternal::unregisterThread() {
+    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    return mServiceInterface.unregisterAudioThread(mServiceStreamHandle, getpid(), gettid());
+}
+
+// Estimate a (framePosition, time) pair from the local clock model.
+// clockId is currently unused; the service-provided timestamp is a TODO.
+aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
+                           int64_t *framePosition,
+                           int64_t *timeNanoseconds) {
+    // TODO Generate in server and pass to client. Return latest.
+    int64_t time = AudioClock::getNanoseconds();
+    *framePosition = mClockModel.convertTimeToPosition(time);
+    // TODO Get a more accurate timestamp from the service. This code just adds a fudge factor.
+    *timeNanoseconds = time + (6 * AAUDIO_NANOS_PER_MILLISECOND);
+    return AAUDIO_OK;
+}
+
+// Drain service commands while a caller is blocked in waitForStateChange(),
+// unless the callback thread is already doing so via its read/write calls.
+aaudio_result_t AudioStreamInternal::updateStateWhileWaiting() {
+    if (isDataCallbackActive()) {
+        return AAUDIO_OK; // state is getting updated by the callback thread read/write call
+    }
+    return processCommands();
+}
+
+#if LOG_TIMESTAMPS
+// Debug-only: log each timestamp and the rate measured between consecutive
+// timestamps. Uses function-local statics, so it is not thread-safe.
+static void AudioStreamInternal_logTimestamp(AAudioServiceMessage &command) {
+    static int64_t oldPosition = 0;
+    static int64_t oldTime = 0;
+    int64_t framePosition = command.timestamp.position;
+    int64_t nanoTime = command.timestamp.timestamp;
+    ALOGD("AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %lld",
+          (long long) framePosition,
+          (long long) nanoTime);
+    int64_t nanosDelta = nanoTime - oldTime;
+    if (nanosDelta > 0 && oldTime > 0) {
+        int64_t framesDelta = framePosition - oldPosition;
+        int64_t rate = (framesDelta * AAUDIO_NANOS_PER_SECOND) / nanosDelta;
+        ALOGD("AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta);
+        ALOGD("AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta);
+        ALOGD("AudioStreamInternal() - measured rate = %lld", (long long) rate);
+    }
+    oldPosition = framePosition;
+    oldTime = nanoTime;
+}
+#endif
+
+// Feed a timestamp message from the service into the clock model.
+aaudio_result_t AudioStreamInternal::onTimestampFromServer(AAudioServiceMessage *message) {
+#if LOG_TIMESTAMPS
+    AudioStreamInternal_logTimestamp(*message);
+#endif
+    processTimestamp(message->timestamp.position, message->timestamp.timestamp);
+    return AAUDIO_OK;
+}
+
+// Apply a service event to the stream state machine. Each transition is
+// guarded on the matching *ING state so stale events cannot move the stream
+// backwards. Only DISCONNECTED produces a non-OK result.
+aaudio_result_t AudioStreamInternal::onEventFromServer(AAudioServiceMessage *message) {
+    aaudio_result_t result = AAUDIO_OK;
+    switch (message->event.event) {
+        case AAUDIO_SERVICE_EVENT_STARTED:
+            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_STARTED");
+            if (getState() == AAUDIO_STREAM_STATE_STARTING) {
+                setState(AAUDIO_STREAM_STATE_STARTED);
+            }
+            break;
+        case AAUDIO_SERVICE_EVENT_PAUSED:
+            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_PAUSED");
+            if (getState() == AAUDIO_STREAM_STATE_PAUSING) {
+                setState(AAUDIO_STREAM_STATE_PAUSED);
+            }
+            break;
+        case AAUDIO_SERVICE_EVENT_STOPPED:
+            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_STOPPED");
+            if (getState() == AAUDIO_STREAM_STATE_STOPPING) {
+                setState(AAUDIO_STREAM_STATE_STOPPED);
+            }
+            break;
+        case AAUDIO_SERVICE_EVENT_FLUSHED:
+            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_FLUSHED");
+            if (getState() == AAUDIO_STREAM_STATE_FLUSHING) {
+                setState(AAUDIO_STREAM_STATE_FLUSHED);
+                // Reconcile local counters with the flushed queue.
+                onFlushFromServer();
+            }
+            break;
+        case AAUDIO_SERVICE_EVENT_CLOSED:
+            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_CLOSED");
+            setState(AAUDIO_STREAM_STATE_CLOSED);
+            break;
+        case AAUDIO_SERVICE_EVENT_DISCONNECTED:
+            result = AAUDIO_ERROR_DISCONNECTED;
+            setState(AAUDIO_STREAM_STATE_DISCONNECTED);
+            ALOGW("WARNING - processCommands() AAUDIO_SERVICE_EVENT_DISCONNECTED");
+            break;
+        case AAUDIO_SERVICE_EVENT_VOLUME:
+            // Volume changes ramp toward the target to avoid clicks.
+            mVolumeRamp.setTarget((float) message->event.dataDouble);
+            ALOGD("processCommands() AAUDIO_SERVICE_EVENT_VOLUME %lf",
+                     message->event.dataDouble);
+            break;
+        default:
+            ALOGW("WARNING - processCommands() Unrecognized event = %d",
+                 (int) message->event.event);
+            break;
+    }
+    return result;
+}
+
+// Process all the commands coming from the server.
+aaudio_result_t AudioStreamInternal::processCommands() {
+ aaudio_result_t result = AAUDIO_OK;
+
+ while (result == AAUDIO_OK) {
+ //ALOGD("AudioStreamInternal::processCommands() - looping, %d", result);
+ AAudioServiceMessage message;
+ if (mAudioEndpoint.readUpCommand(&message) != 1) {
+ break; // no command this time, no problem
+ }
+ switch (message.what) {
+ case AAudioServiceMessage::code::TIMESTAMP:
+ result = onTimestampFromServer(&message);
+ break;
+
+ case AAudioServiceMessage::code::EVENT:
+ result = onEventFromServer(&message);
+ break;
+
+ default:
+ ALOGE("WARNING - AudioStreamInternal::processCommands() Unrecognized what = %d",
+ (int) message.what);
+ result = AAUDIO_ERROR_INTERNAL;
+ break;
+ }
+ }
+ return result;
+}
+
+// Read or write the data, block if needed and timeoutMillis > 0
+aaudio_result_t AudioStreamInternal::processData(void *buffer, int32_t numFrames,
+ int64_t timeoutNanoseconds)
+{
+ const char * traceName = (mInService) ? "aaWrtS" : "aaWrtC";
+ ATRACE_BEGIN(traceName);
+ aaudio_result_t result = AAUDIO_OK;
+ int32_t loopCount = 0;
+ uint8_t* audioData = (uint8_t*)buffer;
+ int64_t currentTimeNanos = AudioClock::getNanoseconds();
+ int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
+ int32_t framesLeft = numFrames;
+
+ int32_t fullFrames = mAudioEndpoint.getFullFramesAvailable();
+ if (ATRACE_ENABLED()) {
+ const char * traceName = (mInService) ? "aaFullS" : "aaFullC";
+ ATRACE_INT(traceName, fullFrames);
+ }
+
+ // Loop until all the data has been processed or until a timeout occurs.
+ while (framesLeft > 0) {
+ // The call to processDataNow() will not block. It will just read as much as it can.
+ int64_t wakeTimeNanos = 0;
+ aaudio_result_t framesProcessed = processDataNow(audioData, framesLeft,
+ currentTimeNanos, &wakeTimeNanos);
+ if (framesProcessed < 0) {
+ ALOGE("AudioStreamInternal::processData() loop: framesProcessed = %d", framesProcessed);
+ result = framesProcessed;
+ break;
+ }
+ framesLeft -= (int32_t) framesProcessed;
+ audioData += framesProcessed * getBytesPerFrame();
+
+ // Should we block?
+ if (timeoutNanoseconds == 0) {
+ break; // don't block
+ } else if (framesLeft > 0) {
+ // clip the wake time to something reasonable
+ if (wakeTimeNanos < currentTimeNanos) {
+ wakeTimeNanos = currentTimeNanos;
+ }
+ if (wakeTimeNanos > deadlineNanos) {
+ // If we time out, just return the framesWritten so far.
+ // TODO remove after we fix the deadline bug
+ ALOGE("AudioStreamInternal::processData(): timed out after %lld nanos",
+ (long long) timeoutNanoseconds);
+ ALOGE("AudioStreamInternal::processData(): wakeTime = %lld, deadline = %lld nanos",
+ (long long) wakeTimeNanos, (long long) deadlineNanos);
+ ALOGE("AudioStreamInternal::processData(): past deadline by %d micros",
+ (int)((wakeTimeNanos - deadlineNanos) / AAUDIO_NANOS_PER_MICROSECOND));
+ break;
+ }
+
+ int64_t sleepForNanos = wakeTimeNanos - currentTimeNanos;
+ AudioClock::sleepForNanos(sleepForNanos);
+ currentTimeNanos = AudioClock::getNanoseconds();
+ }
+ }
+
+ // return error or framesProcessed
+ (void) loopCount;
+ ATRACE_END();
+ return (result < 0) ? result : numFrames - framesLeft;
+}
+
+void AudioStreamInternal::processTimestamp(uint64_t position, int64_t time) {
+ mClockModel.processTimestamp(position, time);
+}
+
+aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {
+ int32_t actualFrames = 0;
+ // Round to the next highest burst size.
+ if (getFramesPerBurst() > 0) {
+ int32_t numBursts = (requestedFrames + getFramesPerBurst() - 1) / getFramesPerBurst();
+ requestedFrames = numBursts * getFramesPerBurst();
+ }
+
+ aaudio_result_t result = mAudioEndpoint.setBufferSizeInFrames(requestedFrames, &actualFrames);
+ ALOGD("AudioStreamInternal::setBufferSize() req = %d => %d", requestedFrames, actualFrames);
+ if (result < 0) {
+ return result;
+ } else {
+ return (aaudio_result_t) actualFrames;
+ }
+}
+
+int32_t AudioStreamInternal::getBufferSize() const {
+ return mAudioEndpoint.getBufferSizeInFrames();
+}
+
+int32_t AudioStreamInternal::getBufferCapacity() const {
+ return mAudioEndpoint.getBufferCapacityInFrames();
+}
+
+int32_t AudioStreamInternal::getFramesPerBurst() const {
+ return mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
+}
+
+aaudio_result_t AudioStreamInternal::joinThread(void** returnArg) {
+ return AudioStream::joinThread(returnArg, calculateReasonableTimeout(getFramesPerBurst()));
+}
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
new file mode 100644
index 0000000..a11f309
--- /dev/null
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AAUDIO_AUDIO_STREAM_INTERNAL_H
+#define ANDROID_AAUDIO_AUDIO_STREAM_INTERNAL_H
+
+#include <stdint.h>
+#include <aaudio/AAudio.h>
+
+#include "binding/IAAudioService.h"
+#include "binding/AudioEndpointParcelable.h"
+#include "binding/AAudioServiceInterface.h"
+#include "client/IsochronousClockModel.h"
+#include "client/AudioEndpoint.h"
+#include "core/AudioStream.h"
+#include "utility/LinearRamp.h"
+
+using android::sp;
+using android::IAAudioService;
+
+namespace aaudio {
+
+// A stream that talks to the AAudioService or directly to a HAL.
+class AudioStreamInternal : public AudioStream {
+
+public:
+ AudioStreamInternal(AAudioServiceInterface &serviceInterface, bool inService);
+ virtual ~AudioStreamInternal();
+
+ // =========== Begin ABSTRACT methods ===========================
+ aaudio_result_t requestStart() override;
+
+ aaudio_result_t requestPause() override;
+
+ aaudio_result_t requestFlush() override;
+
+ aaudio_result_t requestStop() override;
+
+ aaudio_result_t getTimestamp(clockid_t clockId,
+ int64_t *framePosition,
+ int64_t *timeNanoseconds) override;
+
+ virtual aaudio_result_t updateStateWhileWaiting() override;
+ // =========== End ABSTRACT methods ===========================
+
+ aaudio_result_t open(const AudioStreamBuilder &builder) override;
+
+ aaudio_result_t close() override;
+
+ aaudio_result_t setBufferSize(int32_t requestedFrames) override;
+
+ int32_t getBufferSize() const override;
+
+ int32_t getBufferCapacity() const override;
+
+ int32_t getFramesPerBurst() const override;
+
+ int32_t getXRunCount() const override {
+ return mXRunCount;
+ }
+
+ aaudio_result_t registerThread() override;
+
+ aaudio_result_t unregisterThread() override;
+
+ aaudio_result_t joinThread(void** returnArg);
+
+ // Called internally from 'C'
+ virtual void *callbackLoop() = 0;
+
+
+ bool isMMap() override {
+ return true;
+ }
+
+ // Calculate timeout based on framesPerBurst
+ int64_t calculateReasonableTimeout();
+
+protected:
+
+ aaudio_result_t processData(void *buffer,
+ int32_t numFrames,
+ int64_t timeoutNanoseconds);
+
+/**
+ * Low level data processing that will not block. It will just read or write as much as it can.
+ *
+ * It passes back a recommended time to wake up if wakeTimePtr is not NULL.
+ *
+ * @return the number of frames processed or a negative error code.
+ */
+ virtual aaudio_result_t processDataNow(void *buffer,
+ int32_t numFrames,
+ int64_t currentTimeNanos,
+ int64_t *wakeTimePtr) = 0;
+
+ aaudio_result_t processCommands();
+
+ aaudio_result_t requestPauseInternal();
+ aaudio_result_t requestStopInternal();
+
+ aaudio_result_t stopCallback();
+
+
+ void onFlushFromServer();
+
+ aaudio_result_t onEventFromServer(AAudioServiceMessage *message);
+
+ aaudio_result_t onTimestampFromServer(AAudioServiceMessage *message);
+
+ // Calculate timeout for an operation involving framesPerOperation.
+ int64_t calculateReasonableTimeout(int32_t framesPerOperation);
+
+ aaudio_format_t mDeviceFormat = AAUDIO_FORMAT_UNSPECIFIED;
+
+ IsochronousClockModel mClockModel; // timing model for chasing the HAL
+
+ AudioEndpoint mAudioEndpoint; // source for reads or sink for writes
+ aaudio_handle_t mServiceStreamHandle; // opaque handle returned from service
+
+ int32_t mFramesPerBurst; // frames per HAL transfer
+ int32_t mXRunCount = 0; // how many underrun events?
+
+ LinearRamp mVolumeRamp;
+
+ // Offset from underlying frame position.
+ int64_t mFramesOffsetFromService = 0; // offset for timestamps
+
+ uint8_t *mCallbackBuffer = nullptr;
+ int32_t mCallbackFrames = 0;
+
+private:
+ /*
+ * Asynchronous write with data conversion.
+ * @param buffer
+ * @param numFrames
+ * @return frames written or negative error
+ */
+ aaudio_result_t writeNowWithConversion(const void *buffer,
+ int32_t numFrames);
+
+ // Adjust timing model based on timestamp from service.
+ void processTimestamp(uint64_t position, int64_t time);
+
+ AudioEndpointParcelable mEndPointParcelable; // description of the buffers filled by service
+ EndpointDescriptor mEndpointDescriptor; // buffer description with resolved addresses
+ AAudioServiceInterface &mServiceInterface; // abstract interface to the service
+
+ // The service uses this for SHARED mode.
+ bool mInService = false; // Is this running in the client or the service?
+};
+
+} /* namespace aaudio */
+
+#endif //ANDROID_AAUDIO_AUDIO_STREAM_INTERNAL_H
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
new file mode 100644
index 0000000..93693bd
--- /dev/null
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <aaudio/AAudio.h>
+
+#include "client/AudioStreamInternalCapture.h"
+#include "utility/AudioClock.h"
+
+using android::WrappingBuffer;
+
+using namespace aaudio;
+
+AudioStreamInternalCapture::AudioStreamInternalCapture(AAudioServiceInterface &serviceInterface,
+ bool inService)
+ : AudioStreamInternal(serviceInterface, inService) {
+
+}
+
+AudioStreamInternalCapture::~AudioStreamInternalCapture() {}
+
+
+// Write the data, block if needed and timeoutMillis > 0
+aaudio_result_t AudioStreamInternalCapture::read(void *buffer, int32_t numFrames,
+ int64_t timeoutNanoseconds)
+{
+ return processData(buffer, numFrames, timeoutNanoseconds);
+}
+
+// Read as much data as we can without blocking.
+aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t numFrames,
+ int64_t currentNanoTime, int64_t *wakeTimePtr) {
+ aaudio_result_t result = processCommands();
+ if (result != AAUDIO_OK) {
+ return result;
+ }
+
+ if (mAudioEndpoint.isFreeRunning()) {
+ //ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
+ // Update data queue based on the timing model.
+ int64_t estimatedRemoteCounter = mClockModel.convertTimeToPosition(currentNanoTime);
+ // TODO refactor, maybe use setRemoteCounter()
+ mAudioEndpoint.setDataWriteCounter(estimatedRemoteCounter);
+ }
+
+ // If the write index passed the read index then consider it an overrun.
+ if (mAudioEndpoint.getEmptyFramesAvailable() < 0) {
+ mXRunCount++;
+ }
+
+ // Read some data from the buffer.
+ //ALOGD("AudioStreamInternalCapture::processDataNow() - readNowWithConversion(%d)", numFrames);
+ int32_t framesProcessed = readNowWithConversion(buffer, numFrames);
+ //ALOGD("AudioStreamInternalCapture::processDataNow() - tried to read %d frames, read %d",
+ // numFrames, framesProcessed);
+
+ // Calculate an ideal time to wake up.
+ if (wakeTimePtr != nullptr && framesProcessed >= 0) {
+ // By default wake up a few milliseconds from now. // TODO review
+ int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
+ aaudio_stream_state_t state = getState();
+ //ALOGD("AudioStreamInternalCapture::processDataNow() - wakeTime based on %s",
+ // AAudio_convertStreamStateToText(state));
+ switch (state) {
+ case AAUDIO_STREAM_STATE_OPEN:
+ case AAUDIO_STREAM_STATE_STARTING:
+ break;
+ case AAUDIO_STREAM_STATE_STARTED: // When do we expect the next read burst to occur?
+ {
+ uint32_t burstSize = mFramesPerBurst;
+ if (burstSize < 32) {
+ burstSize = 32; // TODO review
+ }
+
+ uint64_t nextReadPosition = mAudioEndpoint.getDataWriteCounter() + burstSize;
+ wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
+ }
+ break;
+ default:
+ break;
+ }
+ *wakeTimePtr = wakeTime;
+
+ }
+// ALOGD("AudioStreamInternalCapture::readNow finished: now = %llu, read# = %llu, wrote# = %llu",
+// (unsigned long long)currentNanoTime,
+// (unsigned long long)mAudioEndpoint.getDataReadCounter(),
+// (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
+ return framesProcessed;
+}
+
+aaudio_result_t AudioStreamInternalCapture::readNowWithConversion(void *buffer,
+ int32_t numFrames) {
+ // ALOGD("AudioStreamInternalCapture::readNowWithConversion(%p, %d)",
+ // buffer, numFrames);
+ WrappingBuffer wrappingBuffer;
+ uint8_t *destination = (uint8_t *) buffer;
+ int32_t framesLeft = numFrames;
+
+ mAudioEndpoint.getFullFramesAvailable(&wrappingBuffer);
+
+ // Read data in one or two parts.
+ for (int partIndex = 0; framesLeft > 0 && partIndex < WrappingBuffer::SIZE; partIndex++) {
+ int32_t framesToProcess = framesLeft;
+ int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
+ if (framesAvailable <= 0) break;
+
+ if (framesToProcess > framesAvailable) {
+ framesToProcess = framesAvailable;
+ }
+
+ int32_t numBytes = getBytesPerFrame() * framesToProcess;
+ int32_t numSamples = framesToProcess * getSamplesPerFrame();
+
+ // TODO factor this out into a utility function
+ if (mDeviceFormat == getFormat()) {
+ memcpy(destination, wrappingBuffer.data[partIndex], numBytes);
+ } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16
+ && getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
+ AAudioConvert_pcm16ToFloat(
+ (const int16_t *) wrappingBuffer.data[partIndex],
+ (float *) destination,
+ numSamples,
+ 1.0f);
+ } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT
+ && getFormat() == AAUDIO_FORMAT_PCM_I16) {
+ AAudioConvert_floatToPcm16(
+ (const float *) wrappingBuffer.data[partIndex],
+ (int16_t *) destination,
+ numSamples,
+ 1.0f);
+ } else {
+ ALOGE("Format conversion not supported!");
+ return AAUDIO_ERROR_INVALID_FORMAT;
+ }
+ destination += numBytes;
+ framesLeft -= framesToProcess;
+ }
+
+ int32_t framesProcessed = numFrames - framesLeft;
+ mAudioEndpoint.advanceReadIndex(framesProcessed);
+ incrementFramesRead(framesProcessed);
+
+ //ALOGD("AudioStreamInternalCapture::readNowWithConversion() returns %d", framesProcessed);
+ return framesProcessed;
+}
+
+int64_t AudioStreamInternalCapture::getFramesWritten()
+{
+ int64_t frames =
+ mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
+ + mFramesOffsetFromService;
+ // Prevent retrograde motion.
+ if (frames < mLastFramesWritten) {
+ frames = mLastFramesWritten;
+ } else {
+ mLastFramesWritten = frames;
+ }
+ //ALOGD("AudioStreamInternalCapture::getFramesWritten() returns %lld", (long long)frames);
+ return frames;
+}
+
+int64_t AudioStreamInternalCapture::getFramesRead()
+{
+ int64_t frames = mAudioEndpoint.getDataWriteCounter()
+ + mFramesOffsetFromService;
+ //ALOGD("AudioStreamInternalCapture::getFramesRead() returns %lld", (long long)frames);
+ return frames;
+}
+
+// Read data from the stream and pass it to the callback for processing.
+void *AudioStreamInternalCapture::callbackLoop() {
+ aaudio_result_t result = AAUDIO_OK;
+ aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
+ AAudioStream_dataCallback appCallback = getDataCallbackProc();
+ if (appCallback == nullptr) return NULL;
+
+ // result might be a frame count
+ while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
+
+ // Read audio data from stream.
+ int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);
+
+ // This is a BLOCKING READ!
+ result = read(mCallbackBuffer, mCallbackFrames, timeoutNanos);
+ if ((result != mCallbackFrames)) {
+ ALOGE("AudioStreamInternalCapture(): callbackLoop: read() returned %d", result);
+ if (result >= 0) {
+ // Only read some of the frames requested. Must have timed out.
+ result = AAUDIO_ERROR_TIMEOUT;
+ }
+ AAudioStream_errorCallback errorCallback = getErrorCallbackProc();
+ if (errorCallback != nullptr) {
+ (*errorCallback)(
+ (AAudioStream *) this,
+ getErrorCallbackUserData(),
+ result);
+ }
+ break;
+ }
+
+ // Call application using the AAudio callback interface.
+ callbackResult = (*appCallback)(
+ (AAudioStream *) this,
+ getDataCallbackUserData(),
+ mCallbackBuffer,
+ mCallbackFrames);
+
+ if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
+ ALOGD("AudioStreamInternalCapture(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
+ break;
+ }
+ }
+
+ ALOGD("AudioStreamInternalCapture(): callbackLoop() exiting, result = %d, isActive() = %d",
+ result, (int) isActive());
+ return NULL;
+}
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.h b/media/libaaudio/src/client/AudioStreamInternalCapture.h
new file mode 100644
index 0000000..17f37e8
--- /dev/null
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ANDROID_AAUDIO_AUDIO_STREAM_INTERNAL_CAPTURE_H
+#define ANDROID_AAUDIO_AUDIO_STREAM_INTERNAL_CAPTURE_H
+
+#include <stdint.h>
+#include <aaudio/AAudio.h>
+
+#include "binding/AAudioServiceInterface.h"
+#include "client/AudioStreamInternal.h"
+
+using android::sp;
+using android::IAAudioService;
+
+namespace aaudio {
+
+class AudioStreamInternalCapture : public AudioStreamInternal {
+public:
+ AudioStreamInternalCapture(AAudioServiceInterface &serviceInterface, bool inService = false);
+ virtual ~AudioStreamInternalCapture();
+
+ aaudio_result_t read(void *buffer,
+ int32_t numFrames,
+ int64_t timeoutNanoseconds) override;
+
+ int64_t getFramesRead() override;
+ int64_t getFramesWritten() override;
+
+ void *callbackLoop() override;
+
+ aaudio_direction_t getDirection() const override {
+ return AAUDIO_DIRECTION_INPUT;
+ }
+protected:
+
+/**
+ * Low level data processing that will not block. It will just read as much as it can.
+ *
+ * It passes back a recommended time to wake up if wakeTimePtr is not NULL.
+ *
+ * @return the number of frames processed or a negative error code.
+ */
+ aaudio_result_t processDataNow(void *buffer,
+ int32_t numFrames,
+ int64_t currentTimeNanos,
+ int64_t *wakeTimePtr) override;
+
+private:
+ /*
+ * Asynchronous read with data conversion.
+ * @param buffer
+ * @param numFrames
+ * @return frames read or negative error
+ */
+ aaudio_result_t readNowWithConversion(void *buffer, int32_t numFrames);
+
+ int64_t mLastFramesWritten = 0; // used to prevent retrograde motion
+};
+
+} /* namespace aaudio */
+
+#endif //ANDROID_AAUDIO_AUDIO_STREAM_INTERNAL_CAPTURE_H
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
new file mode 100644
index 0000000..fc9766f
--- /dev/null
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -0,0 +1,282 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include "client/AudioStreamInternalPlay.h"
+#include "utility/AudioClock.h"
+
+using android::WrappingBuffer;
+
+using namespace aaudio;
+
+AudioStreamInternalPlay::AudioStreamInternalPlay(AAudioServiceInterface &serviceInterface,
+ bool inService)
+ : AudioStreamInternal(serviceInterface, inService) {
+
+}
+
+AudioStreamInternalPlay::~AudioStreamInternalPlay() {}
+
+
+// Write the data, block if needed and timeoutMillis > 0
+aaudio_result_t AudioStreamInternalPlay::write(const void *buffer, int32_t numFrames,
+ int64_t timeoutNanoseconds)
+
+{
+ return processData((void *)buffer, numFrames, timeoutNanoseconds);
+}
+
+// Write as much data as we can without blocking.
+aaudio_result_t AudioStreamInternalPlay::processDataNow(void *buffer, int32_t numFrames,
+ int64_t currentNanoTime, int64_t *wakeTimePtr) {
+ aaudio_result_t result = processCommands();
+ if (result != AAUDIO_OK) {
+ return result;
+ }
+
+ if (mAudioEndpoint.isFreeRunning()) {
+ //ALOGD("AudioStreamInternal::processDataNow() - update read counter");
+ // Update data queue based on the timing model.
+ int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
+ mAudioEndpoint.setDataReadCounter(estimatedReadCounter);
+ }
+ // TODO else query from endpoint cuz set by actual reader, maybe
+
+ // If the read index passed the write index then consider it an underrun.
+ if (mAudioEndpoint.getFullFramesAvailable() < 0) {
+ mXRunCount++;
+ }
+
+ // Write some data to the buffer.
+ //ALOGD("AudioStreamInternal::processDataNow() - writeNowWithConversion(%d)", numFrames);
+ int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
+ //ALOGD("AudioStreamInternal::processDataNow() - tried to write %d frames, wrote %d",
+ // numFrames, framesWritten);
+
+ // Calculate an ideal time to wake up.
+ if (wakeTimePtr != nullptr && framesWritten >= 0) {
+ // By default wake up a few milliseconds from now. // TODO review
+ int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
+ aaudio_stream_state_t state = getState();
+ //ALOGD("AudioStreamInternal::processDataNow() - wakeTime based on %s",
+ // AAudio_convertStreamStateToText(state));
+ switch (state) {
+ case AAUDIO_STREAM_STATE_OPEN:
+ case AAUDIO_STREAM_STATE_STARTING:
+ if (framesWritten != 0) {
+ // Don't wait to write more data. Just prime the buffer.
+ wakeTime = currentNanoTime;
+ }
+ break;
+ case AAUDIO_STREAM_STATE_STARTED: // When do we expect the next read burst to occur?
+ {
+ uint32_t burstSize = mFramesPerBurst;
+ if (burstSize < 32) {
+ burstSize = 32; // TODO review
+ }
+
+ uint64_t nextReadPosition = mAudioEndpoint.getDataReadCounter() + burstSize;
+ wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
+ }
+ break;
+ default:
+ break;
+ }
+ *wakeTimePtr = wakeTime;
+
+ }
+// ALOGD("AudioStreamInternal::processDataNow finished: now = %llu, read# = %llu, wrote# = %llu",
+// (unsigned long long)currentNanoTime,
+// (unsigned long long)mAudioEndpoint.getDataReadCounter(),
+// (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
+ return framesWritten;
+}
+
+
+aaudio_result_t AudioStreamInternalPlay::writeNowWithConversion(const void *buffer,
+ int32_t numFrames) {
+ // ALOGD("AudioStreamInternal::writeNowWithConversion(%p, %d)",
+ // buffer, numFrames);
+ WrappingBuffer wrappingBuffer;
+ uint8_t *source = (uint8_t *) buffer;
+ int32_t framesLeft = numFrames;
+
+ mAudioEndpoint.getEmptyFramesAvailable(&wrappingBuffer);
+
+ // Read data in one or two parts.
+ int partIndex = 0;
+ while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
+ int32_t framesToWrite = framesLeft;
+ int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
+ if (framesAvailable > 0) {
+ if (framesToWrite > framesAvailable) {
+ framesToWrite = framesAvailable;
+ }
+ int32_t numBytes = getBytesPerFrame() * framesToWrite;
+ int32_t numSamples = framesToWrite * getSamplesPerFrame();
+ // Data conversion.
+ float levelFrom;
+ float levelTo;
+ bool ramping = mVolumeRamp.nextSegment(framesToWrite * getSamplesPerFrame(),
+ &levelFrom, &levelTo);
+ // The formats are validated when the stream is opened so we do not have to
+ // check for illegal combinations here.
+ // TODO factor this out into a utility function
+ if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
+ if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+ AAudio_linearRamp(
+ (const float *) source,
+ (float *) wrappingBuffer.data[partIndex],
+ framesToWrite,
+ getSamplesPerFrame(),
+ levelFrom,
+ levelTo);
+ } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
+ if (ramping) {
+ AAudioConvert_floatToPcm16(
+ (const float *) source,
+ (int16_t *) wrappingBuffer.data[partIndex],
+ framesToWrite,
+ getSamplesPerFrame(),
+ levelFrom,
+ levelTo);
+ } else {
+ AAudioConvert_floatToPcm16(
+ (const float *) source,
+ (int16_t *) wrappingBuffer.data[partIndex],
+ numSamples,
+ levelTo);
+ }
+ }
+ } else if (getFormat() == AAUDIO_FORMAT_PCM_I16) {
+ if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+ if (ramping) {
+ AAudioConvert_pcm16ToFloat(
+ (const int16_t *) source,
+ (float *) wrappingBuffer.data[partIndex],
+ framesToWrite,
+ getSamplesPerFrame(),
+ levelFrom,
+ levelTo);
+ } else {
+ AAudioConvert_pcm16ToFloat(
+ (const int16_t *) source,
+ (float *) wrappingBuffer.data[partIndex],
+ numSamples,
+ levelTo);
+ }
+ } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
+ AAudio_linearRamp(
+ (const int16_t *) source,
+ (int16_t *) wrappingBuffer.data[partIndex],
+ framesToWrite,
+ getSamplesPerFrame(),
+ levelFrom,
+ levelTo);
+ }
+ }
+ source += numBytes;
+ framesLeft -= framesToWrite;
+ } else {
+ break;
+ }
+ partIndex++;
+ }
+ int32_t framesWritten = numFrames - framesLeft;
+ mAudioEndpoint.advanceWriteIndex(framesWritten);
+
+ if (framesWritten > 0) {
+ incrementFramesWritten(framesWritten);
+ }
+ // ALOGD("AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
+ return framesWritten;
+}
+
+
+int64_t AudioStreamInternalPlay::getFramesRead()
+{
+ int64_t framesRead =
+ mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
+ + mFramesOffsetFromService;
+ // Prevent retrograde motion.
+ if (framesRead < mLastFramesRead) {
+ framesRead = mLastFramesRead;
+ } else {
+ mLastFramesRead = framesRead;
+ }
+ ALOGD("AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
+ return framesRead;
+}
+
+int64_t AudioStreamInternalPlay::getFramesWritten()
+{
+ int64_t getFramesWritten = mAudioEndpoint.getDataWriteCounter()
+ + mFramesOffsetFromService;
+ ALOGD("AudioStreamInternal::getFramesWritten() returns %lld", (long long)getFramesWritten);
+ return getFramesWritten;
+}
+
+
+// Render audio in the application callback and then write the data to the stream.
+void *AudioStreamInternalPlay::callbackLoop() {
+ aaudio_result_t result = AAUDIO_OK;
+ aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
+ AAudioStream_dataCallback appCallback = getDataCallbackProc();
+ if (appCallback == nullptr) return NULL;
+
+ // result might be a frame count
+ while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
+ // Call application using the AAudio callback interface.
+ callbackResult = (*appCallback)(
+ (AAudioStream *) this,
+ getDataCallbackUserData(),
+ mCallbackBuffer,
+ mCallbackFrames);
+
+ if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
+ // Write audio data to stream.
+ int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);
+
+ // This is a BLOCKING WRITE!
+ result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos);
+ if ((result != mCallbackFrames)) {
+ ALOGE("AudioStreamInternalPlay(): callbackLoop: write() returned %d", result);
+ if (result >= 0) {
+ // Only wrote some of the frames requested. Must have timed out.
+ result = AAUDIO_ERROR_TIMEOUT;
+ }
+ AAudioStream_errorCallback errorCallback = getErrorCallbackProc();
+ if (errorCallback != nullptr) {
+ (*errorCallback)(
+ (AAudioStream *) this,
+ getErrorCallbackUserData(),
+ result);
+ }
+ break;
+ }
+ } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
+ ALOGD("AudioStreamInternalPlay(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
+ break;
+ }
+ }
+
+ ALOGD("AudioStreamInternalPlay(): callbackLoop() exiting, result = %d, isActive() = %d",
+ result, (int) isActive());
+ return NULL;
+}
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.h b/media/libaaudio/src/client/AudioStreamInternalPlay.h
new file mode 100644
index 0000000..b043f67
--- /dev/null
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AAUDIO_AUDIO_STREAM_INTERNAL_PLAY_H
+#define ANDROID_AAUDIO_AUDIO_STREAM_INTERNAL_PLAY_H
+
+#include <stdint.h>
+#include <aaudio/AAudio.h>
+
+#include "binding/AAudioServiceInterface.h"
+#include "client/AudioStreamInternal.h"
+
+using android::sp;
+using android::IAAudioService;
+
+namespace aaudio {
+
+class AudioStreamInternalPlay : public AudioStreamInternal {
+public:
+ AudioStreamInternalPlay(AAudioServiceInterface &serviceInterface, bool inService = false);
+ virtual ~AudioStreamInternalPlay();
+
+ aaudio_result_t write(const void *buffer,
+ int32_t numFrames,
+ int64_t timeoutNanoseconds) override;
+
+ int64_t getFramesRead() override;
+ int64_t getFramesWritten() override;
+
+ void *callbackLoop() override;
+
+ aaudio_direction_t getDirection() const override {
+ return AAUDIO_DIRECTION_OUTPUT;
+ }
+
+protected:
+/**
+ * Low level write that will not block. It will just write as much as it can.
+ *
+ * It passes back a recommended time to wake up if wakeTimePtr is not NULL.
+ *
+ * @return the number of frames written or a negative error code.
+ */
+ aaudio_result_t processDataNow(void *buffer,
+ int32_t numFrames,
+ int64_t currentTimeNanos,
+ int64_t *wakeTimePtr) override;
+private:
+ /*
+ * Asynchronous write with data conversion.
+ * @param buffer
+ * @param numFrames
+ * @return frames written or negative error
+ */
+ aaudio_result_t writeNowWithConversion(const void *buffer,
+ int32_t numFrames);
+
+ int64_t mLastFramesRead = 0; // used to prevent retrograde motion
+};
+
+} /* namespace aaudio */
+
+#endif //ANDROID_AAUDIO_AUDIO_STREAM_INTERNAL_PLAY_H
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
new file mode 100644
index 0000000..edf6e97
--- /dev/null
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <log/log.h>
+
+#include <stdint.h>
+
+#include "utility/AudioClock.h"
+#include "IsochronousClockModel.h"
+
+#define MIN_LATENESS_NANOS (10 * AAUDIO_NANOS_PER_MICROSECOND)
+
+using namespace aaudio;
+
+IsochronousClockModel::IsochronousClockModel()
+ : mMarkerFramePosition(0)
+ , mMarkerNanoTime(0)
+ , mSampleRate(48000)
+ , mFramesPerBurst(64)
+ , mMaxLatenessInNanos(0)
+ , mState(STATE_STOPPED)
+{
+}
+
+IsochronousClockModel::~IsochronousClockModel() {
+}
+
+void IsochronousClockModel::start(int64_t nanoTime) {
+ ALOGD("IsochronousClockModel::start(nanos = %lld)\n", (long long) nanoTime);
+ mMarkerNanoTime = nanoTime;
+ mState = STATE_STARTING;
+}
+
+void IsochronousClockModel::stop(int64_t nanoTime) {
+ ALOGD("IsochronousClockModel::stop(nanos = %lld)\n", (long long) nanoTime);
+ mMarkerNanoTime = nanoTime;
+ mMarkerFramePosition = convertTimeToPosition(nanoTime); // TODO should we do this?
+ mState = STATE_STOPPED;
+}
+
+void IsochronousClockModel::processTimestamp(int64_t framePosition, int64_t nanoTime) {
+ int64_t framesDelta = framePosition - mMarkerFramePosition;
+ int64_t nanosDelta = nanoTime - mMarkerNanoTime;
+ if (nanosDelta < 1000) {
+ return;
+ }
+
+// ALOGD("processTimestamp() - mMarkerFramePosition = %lld at mMarkerNanoTime %llu",
+// (long long)mMarkerFramePosition,
+// (long long)mMarkerNanoTime);
+// ALOGD("processTimestamp() - framePosition = %lld at nanoTime %llu",
+// (long long)framePosition,
+// (long long)nanoTime);
+
+ int64_t expectedNanosDelta = convertDeltaPositionToTime(framesDelta);
+// ALOGD("processTimestamp() - expectedNanosDelta = %lld, nanosDelta = %llu",
+// (long long)expectedNanosDelta,
+// (long long)nanosDelta);
+
+// ALOGD("processTimestamp() - mSampleRate = %d", mSampleRate);
+// ALOGD("processTimestamp() - mState = %d", mState);
+ switch (mState) {
+ case STATE_STOPPED:
+ break;
+ case STATE_STARTING:
+ mMarkerFramePosition = framePosition;
+ mMarkerNanoTime = nanoTime;
+ mState = STATE_SYNCING;
+ break;
+ case STATE_SYNCING:
+ // This will handle a burst of rapid transfer at the beginning.
+ if (nanosDelta < expectedNanosDelta) {
+ mMarkerFramePosition = framePosition;
+ mMarkerNanoTime = nanoTime;
+ } else {
+// ALOGD("processTimestamp() - advance to STATE_RUNNING");
+ mState = STATE_RUNNING;
+ }
+ break;
+ case STATE_RUNNING:
+ if (nanosDelta < expectedNanosDelta) {
+ // Earlier than expected timestamp.
+ // This data is probably more accurate so use it.
+ // or we may be drifting due to a slow HW clock.
+ mMarkerFramePosition = framePosition;
+ mMarkerNanoTime = nanoTime;
+// ALOGD("processTimestamp() - STATE_RUNNING - %d < %d micros - EARLY",
+// (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000));
+ } else if (nanosDelta > (expectedNanosDelta + mMaxLatenessInNanos)) {
+ // Later than expected timestamp.
+ mMarkerFramePosition = framePosition;
+ mMarkerNanoTime = nanoTime - mMaxLatenessInNanos;
+// ALOGD("processTimestamp() - STATE_RUNNING - %d > %d + %d micros - LATE",
+// (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000),
+// (int) (mMaxLatenessInNanos / 1000));
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+void IsochronousClockModel::setSampleRate(int32_t sampleRate) {
+ mSampleRate = sampleRate;
+ update();
+}
+
+void IsochronousClockModel::setFramesPerBurst(int32_t framesPerBurst) {
+ mFramesPerBurst = framesPerBurst;
+ update();
+}
+
+void IsochronousClockModel::update() {
+ int64_t nanosLate = convertDeltaPositionToTime(mFramesPerBurst); // uses mSampleRate
+ mMaxLatenessInNanos = (nanosLate > MIN_LATENESS_NANOS) ? nanosLate : MIN_LATENESS_NANOS;
+}
+
+int64_t IsochronousClockModel::convertDeltaPositionToTime(
+ int64_t framesDelta) const {
+ return (AAUDIO_NANOS_PER_SECOND * framesDelta) / mSampleRate;
+}
+
+int64_t IsochronousClockModel::convertDeltaTimeToPosition(int64_t nanosDelta) const {
+ return (mSampleRate * nanosDelta) / AAUDIO_NANOS_PER_SECOND;
+}
+
+int64_t IsochronousClockModel::convertPositionToTime(int64_t framePosition) const {
+ if (mState == STATE_STOPPED) {
+ return mMarkerNanoTime;
+ }
+ int64_t nextBurstIndex = (framePosition + mFramesPerBurst - 1) / mFramesPerBurst;
+ int64_t nextBurstPosition = mFramesPerBurst * nextBurstIndex;
+ int64_t framesDelta = nextBurstPosition - mMarkerFramePosition;
+ int64_t nanosDelta = convertDeltaPositionToTime(framesDelta);
+ int64_t time = (int64_t) (mMarkerNanoTime + nanosDelta);
+// ALOGD("IsochronousClockModel::convertPositionToTime: pos = %llu --> time = %llu",
+// (unsigned long long)framePosition,
+// (unsigned long long)time);
+ return time;
+}
+
+int64_t IsochronousClockModel::convertTimeToPosition(int64_t nanoTime) const {
+ if (mState == STATE_STOPPED) {
+ return mMarkerFramePosition;
+ }
+ int64_t nanosDelta = nanoTime - mMarkerNanoTime;
+ int64_t framesDelta = convertDeltaTimeToPosition(nanosDelta);
+ int64_t nextBurstPosition = mMarkerFramePosition + framesDelta;
+ int64_t nextBurstIndex = nextBurstPosition / mFramesPerBurst;
+ int64_t position = nextBurstIndex * mFramesPerBurst;
+// ALOGD("IsochronousClockModel::convertTimeToPosition: time = %llu --> pos = %llu",
+// (unsigned long long)nanoTime,
+// (unsigned long long)position);
+// ALOGD("IsochronousClockModel::convertTimeToPosition: framesDelta = %llu, mFramesPerBurst = %d",
+// (long long) framesDelta, mFramesPerBurst);
+ return position;
+}
diff --git a/media/libaaudio/src/client/IsochronousClockModel.h b/media/libaaudio/src/client/IsochronousClockModel.h
new file mode 100644
index 0000000..0314f55
--- /dev/null
+++ b/media/libaaudio/src/client/IsochronousClockModel.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AAUDIO_ISOCHRONOUS_CLOCK_MODEL_H
+#define ANDROID_AAUDIO_ISOCHRONOUS_CLOCK_MODEL_H
+
+#include <stdint.h>
+
+namespace aaudio {
+
+/**
+ * Model an isochronous data stream using occasional timestamps as input.
+ * This can be used to predict the position of the stream at a given time.
+ *
+ * This class is not thread safe and should only be called from one thread.
+ */
+class IsochronousClockModel {
+
+public:
+ IsochronousClockModel();
+ virtual ~IsochronousClockModel();
+
+ void start(int64_t nanoTime);
+ void stop(int64_t nanoTime);
+
+ void processTimestamp(int64_t framePosition, int64_t nanoTime);
+
+ /**
+ * @param sampleRate rate of the stream in frames per second
+ */
+ void setSampleRate(int32_t sampleRate);
+
+ int32_t getSampleRate() const {
+ return mSampleRate;
+ }
+
+ /**
+ * This must be set accurately in order to track the isochronous stream.
+ *
+ * @param framesPerBurst number of frames that the stream advances at one time.
+ */
+ void setFramesPerBurst(int32_t framesPerBurst);
+
+ int32_t getFramesPerBurst() const {
+ return mFramesPerBurst;
+ }
+
+ /**
+ * Calculate an estimated time when the stream will be at that position.
+ *
+ * @param framePosition position of the stream in frames
+ * @return time in nanoseconds
+ */
+ int64_t convertPositionToTime(int64_t framePosition) const;
+
+ /**
+ * Calculate an estimated position where the stream will be at the specified time.
+ *
+ * @param nanoTime time of interest
+ * @return position in frames
+ */
+ int64_t convertTimeToPosition(int64_t nanoTime) const;
+
+ /**
+ * @param framesDelta difference in frames
+ * @return duration in nanoseconds
+ */
+ int64_t convertDeltaPositionToTime(int64_t framesDelta) const;
+
+ /**
+ * @param nanosDelta duration in nanoseconds
+ * @return frames that stream will advance in that time
+ */
+ int64_t convertDeltaTimeToPosition(int64_t nanosDelta) const;
+
+private:
+ enum clock_model_state_t {
+ STATE_STOPPED,
+ STATE_STARTING,
+ STATE_SYNCING,
+ STATE_RUNNING
+ };
+
+ int64_t mMarkerFramePosition;
+ int64_t mMarkerNanoTime;
+ int32_t mSampleRate;
+ int32_t mFramesPerBurst;
+ int32_t mMaxLatenessInNanos;
+ clock_model_state_t mState;
+
+ void update();
+};
+
+} /* namespace aaudio */
+
+#endif //ANDROID_AAUDIO_ISOCHRONOUS_CLOCK_MODEL_H
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
new file mode 100644
index 0000000..76f98fa
--- /dev/null
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -0,0 +1,490 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <time.h>
+#include <pthread.h>
+
+#include <aaudio/AAudio.h>
+#include <aaudio/AAudioTesting.h>
+
+#include "AudioStreamBuilder.h"
+#include "AudioStream.h"
+#include "AudioClock.h"
+#include "client/AudioStreamInternal.h"
+#include "HandleTracker.h"
+
+using namespace aaudio;
+
+
+// Macros for common code that includes a return.
+// TODO Consider using do{}while(0) construct. I tried but it hung AndroidStudio
+#define CONVERT_BUILDER_HANDLE_OR_RETURN() \
+ convertAAudioBuilderToStreamBuilder(builder);
+
+#define COMMON_GET_FROM_BUILDER_OR_RETURN(resultPtr) \
+ CONVERT_BUILDER_HANDLE_OR_RETURN() \
+ if ((resultPtr) == nullptr) { \
+ return AAUDIO_ERROR_NULL; \
+ }
+
+#define AAUDIO_CASE_ENUM(name) case name: return #name
+
+AAUDIO_API const char * AAudio_convertResultToText(aaudio_result_t returnCode) {
+ switch (returnCode) {
+ AAUDIO_CASE_ENUM(AAUDIO_OK);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_DISCONNECTED);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_ILLEGAL_ARGUMENT);
+ // reserved
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_INTERNAL);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_STATE);
+ // reserved
+ // reserved
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_HANDLE);
+ // reserved
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_UNIMPLEMENTED);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_UNAVAILABLE);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_NO_FREE_HANDLES);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_NO_MEMORY);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_NULL);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_TIMEOUT);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_WOULD_BLOCK);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_FORMAT);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_OUT_OF_RANGE);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_NO_SERVICE);
+ AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_RATE);
+ }
+ return "Unrecognized AAudio error.";
+}
+
+AAUDIO_API const char * AAudio_convertStreamStateToText(aaudio_stream_state_t state) {
+ switch (state) {
+ AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_UNINITIALIZED);
+ AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_UNKNOWN);
+ AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_OPEN);
+ AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STARTING);
+ AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STARTED);
+ AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_PAUSING);
+ AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_PAUSED);
+ AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_FLUSHING);
+ AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_FLUSHED);
+ AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STOPPING);
+ AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STOPPED);
+ AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_DISCONNECTED);
+ AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_CLOSING);
+ AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_CLOSED);
+ }
+ return "Unrecognized AAudio state.";
+}
+
+#undef AAUDIO_CASE_ENUM
+
+
+/******************************************
+ * Static globals.
+ */
+static aaudio_policy_t s_MMapPolicy = AAUDIO_UNSPECIFIED;
+
+
+static AudioStream *convertAAudioStreamToAudioStream(AAudioStream* stream)
+{
+ return (AudioStream*) stream;
+}
+
+static AudioStreamBuilder *convertAAudioBuilderToStreamBuilder(AAudioStreamBuilder* builder)
+{
+ return (AudioStreamBuilder*) builder;
+}
+
+AAUDIO_API aaudio_result_t AAudio_createStreamBuilder(AAudioStreamBuilder** builder)
+{
+ AudioStreamBuilder *audioStreamBuilder = new(std::nothrow) AudioStreamBuilder();
+ if (audioStreamBuilder == nullptr) {
+ return AAUDIO_ERROR_NO_MEMORY;
+ }
+ *builder = (AAudioStreamBuilder*) audioStreamBuilder;
+ return AAUDIO_OK;
+}
+
+AAUDIO_API void AAudioStreamBuilder_setPerformanceMode(AAudioStreamBuilder* builder,
+ aaudio_performance_mode_t mode)
+{
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ streamBuilder->setPerformanceMode(mode);
+}
+
+AAUDIO_API void AAudioStreamBuilder_setDeviceId(AAudioStreamBuilder* builder,
+ int32_t deviceId)
+{
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ streamBuilder->setDeviceId(deviceId);
+}
+
+AAUDIO_API void AAudioStreamBuilder_setSampleRate(AAudioStreamBuilder* builder,
+ int32_t sampleRate)
+{
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ streamBuilder->setSampleRate(sampleRate);
+}
+
+AAUDIO_API void AAudioStreamBuilder_setChannelCount(AAudioStreamBuilder* builder,
+ int32_t channelCount)
+{
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ streamBuilder->setSamplesPerFrame(channelCount);
+}
+
+AAUDIO_API void AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder* builder,
+ int32_t samplesPerFrame)
+{
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ streamBuilder->setSamplesPerFrame(samplesPerFrame);
+}
+
+AAUDIO_API void AAudioStreamBuilder_setDirection(AAudioStreamBuilder* builder,
+ aaudio_direction_t direction)
+{
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ streamBuilder->setDirection(direction);
+}
+
+AAUDIO_API void AAudioStreamBuilder_setFormat(AAudioStreamBuilder* builder,
+ aaudio_format_t format)
+{
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ streamBuilder->setFormat(format);
+}
+
+AAUDIO_API void AAudioStreamBuilder_setSharingMode(AAudioStreamBuilder* builder,
+ aaudio_sharing_mode_t sharingMode)
+{
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ streamBuilder->setSharingMode(sharingMode);
+}
+
+AAUDIO_API void AAudioStreamBuilder_setBufferCapacityInFrames(AAudioStreamBuilder* builder,
+ int32_t frames)
+{
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ streamBuilder->setBufferCapacity(frames);
+}
+
+AAUDIO_API void AAudioStreamBuilder_setDataCallback(AAudioStreamBuilder* builder,
+ AAudioStream_dataCallback callback,
+ void *userData)
+{
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ streamBuilder->setDataCallbackProc(callback);
+ streamBuilder->setDataCallbackUserData(userData);
+}
+
+AAUDIO_API void AAudioStreamBuilder_setErrorCallback(AAudioStreamBuilder* builder,
+ AAudioStream_errorCallback callback,
+ void *userData)
+{
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ streamBuilder->setErrorCallbackProc(callback);
+ streamBuilder->setErrorCallbackUserData(userData);
+}
+
+AAUDIO_API void AAudioStreamBuilder_setFramesPerDataCallback(AAudioStreamBuilder* builder,
+ int32_t frames)
+{
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ streamBuilder->setFramesPerDataCallback(frames);
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_openStream(AAudioStreamBuilder* builder,
+ AAudioStream** streamPtr)
+{
+ AudioStream *audioStream = nullptr;
+ // Please leave these logs because they are very helpful when debugging.
+ ALOGD("AAudioStreamBuilder_openStream() called ----------------------------------------");
+ AudioStreamBuilder *streamBuilder = COMMON_GET_FROM_BUILDER_OR_RETURN(streamPtr);
+ aaudio_result_t result = streamBuilder->build(&audioStream);
+ ALOGD("AAudioStreamBuilder_openStream() returns %d = %s for (%p) ----------------",
+ result, AAudio_convertResultToText(result), audioStream);
+ if (result == AAUDIO_OK) {
+ *streamPtr = (AAudioStream*) audioStream;
+ } else {
+ *streamPtr = nullptr;
+ }
+ return result;
+}
+
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_delete(AAudioStreamBuilder* builder)
+{
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ if (streamBuilder != nullptr) {
+ delete streamBuilder;
+ return AAUDIO_OK;
+ }
+ return AAUDIO_ERROR_NULL;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_close(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ ALOGD("AAudioStream_close(%p)", stream);
+ if (audioStream != nullptr) {
+ audioStream->close();
+ delete audioStream;
+ return AAUDIO_OK;
+ }
+ return AAUDIO_ERROR_NULL;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_requestStart(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ ALOGD("AAudioStream_requestStart(%p)", stream);
+ return audioStream->requestStart();
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_requestPause(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ ALOGD("AAudioStream_requestPause(%p)", stream);
+ return audioStream->requestPause();
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_requestFlush(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ ALOGD("AAudioStream_requestFlush(%p)", stream);
+ return audioStream->requestFlush();
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_requestStop(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ ALOGD("AAudioStream_requestStop(%p)", stream);
+ return audioStream->requestStop();
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_waitForStateChange(AAudioStream* stream,
+ aaudio_stream_state_t inputState,
+ aaudio_stream_state_t *nextState,
+ int64_t timeoutNanoseconds)
+{
+
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->waitForStateChange(inputState, nextState, timeoutNanoseconds);
+}
+
+// ============================================================
+// Stream - non-blocking I/O
+// ============================================================
+
+AAUDIO_API aaudio_result_t AAudioStream_read(AAudioStream* stream,
+ void *buffer,
+ int32_t numFrames,
+ int64_t timeoutNanoseconds)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ if (buffer == nullptr) {
+ return AAUDIO_ERROR_NULL;
+ }
+ if (numFrames < 0) {
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ } else if (numFrames == 0) {
+ return 0;
+ }
+
+ aaudio_result_t result = audioStream->read(buffer, numFrames, timeoutNanoseconds);
+
+ return result;
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_write(AAudioStream* stream,
+ const void *buffer,
+ int32_t numFrames,
+ int64_t timeoutNanoseconds)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ if (buffer == nullptr) {
+ return AAUDIO_ERROR_NULL;
+ }
+
+ // Don't allow writes when playing with a callback.
+ if (audioStream->getDataCallbackProc() != nullptr && audioStream->isActive()) {
+ ALOGE("Cannot write to a callback stream when running.");
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+
+ if (numFrames < 0) {
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ } else if (numFrames == 0) {
+ return 0;
+ }
+
+ aaudio_result_t result = audioStream->write(buffer, numFrames, timeoutNanoseconds);
+
+ return result;
+}
+
+// ============================================================
+// Stream - queries
+// ============================================================
+
+AAUDIO_API int32_t AAudioStream_getSampleRate(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->getSampleRate();
+}
+
+AAUDIO_API int32_t AAudioStream_getChannelCount(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->getSamplesPerFrame();
+}
+
+AAUDIO_API int32_t AAudioStream_getSamplesPerFrame(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->getSamplesPerFrame();
+}
+
+AAUDIO_API aaudio_stream_state_t AAudioStream_getState(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->getState();
+}
+
+AAUDIO_API aaudio_format_t AAudioStream_getFormat(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->getFormat();
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_setBufferSizeInFrames(AAudioStream* stream,
+ int32_t requestedFrames)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->setBufferSize(requestedFrames);
+}
+
+AAUDIO_API int32_t AAudioStream_getBufferSizeInFrames(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->getBufferSize();
+}
+
+AAUDIO_API aaudio_direction_t AAudioStream_getDirection(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->getDirection();
+}
+
+AAUDIO_API int32_t AAudioStream_getFramesPerBurst(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->getFramesPerBurst();
+}
+
+AAUDIO_API int32_t AAudioStream_getFramesPerDataCallback(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->getFramesPerDataCallback();
+}
+
+AAUDIO_API int32_t AAudioStream_getBufferCapacityInFrames(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->getBufferCapacity();
+}
+
+AAUDIO_API int32_t AAudioStream_getXRunCount(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->getXRunCount();
+}
+
+AAUDIO_API aaudio_performance_mode_t AAudioStream_getPerformanceMode(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->getPerformanceMode();
+}
+
+AAUDIO_API int32_t AAudioStream_getDeviceId(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->getDeviceId();
+}
+
+AAUDIO_API aaudio_sharing_mode_t AAudioStream_getSharingMode(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->getSharingMode();
+}
+
+AAUDIO_API int64_t AAudioStream_getFramesWritten(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->getFramesWritten();
+}
+
+AAUDIO_API int64_t AAudioStream_getFramesRead(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->getFramesRead();
+}
+
+AAUDIO_API aaudio_result_t AAudioStream_getTimestamp(AAudioStream* stream,
+ clockid_t clockid,
+ int64_t *framePosition,
+ int64_t *timeNanoseconds)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ if (framePosition == nullptr) {
+ return AAUDIO_ERROR_NULL;
+ } else if (timeNanoseconds == nullptr) {
+ return AAUDIO_ERROR_NULL;
+ } else if (clockid != CLOCK_MONOTONIC && clockid != CLOCK_BOOTTIME) {
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ }
+
+ return audioStream->getTimestamp(clockid, framePosition, timeNanoseconds);
+}
+
+AAUDIO_API aaudio_policy_t AAudio_getMMapPolicy() {
+ return s_MMapPolicy;
+}
+
+AAUDIO_API aaudio_result_t AAudio_setMMapPolicy(aaudio_policy_t policy) {
+ aaudio_result_t result = AAUDIO_OK;
+ switch(policy) {
+ case AAUDIO_UNSPECIFIED:
+ case AAUDIO_POLICY_NEVER:
+ case AAUDIO_POLICY_AUTO:
+ case AAUDIO_POLICY_ALWAYS:
+ s_MMapPolicy = policy;
+ break;
+ default:
+ result = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ break;
+ }
+ return result;
+}
+
+AAUDIO_API bool AAudioStream_isMMapUsed(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->isMMap();
+}
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
new file mode 100644
index 0000000..e1e3c55
--- /dev/null
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <atomic>
+#include <stdint.h>
+#include <aaudio/AAudio.h>
+
+#include "AudioStreamBuilder.h"
+#include "AudioStream.h"
+#include "AudioClock.h"
+
+using namespace aaudio;
+
+AudioStream::AudioStream()
+ : mCallbackEnabled(false)
+{
+ // mThread is a pthread_t of unknown size so we need memset.
+ memset(&mThread, 0, sizeof(mThread));
+ setPeriodNanoseconds(0);
+}
+
+aaudio_result_t AudioStream::open(const AudioStreamBuilder& builder)
+{
+ // Copy parameters from the Builder because the Builder may be deleted after this call.
+ mSamplesPerFrame = builder.getSamplesPerFrame();
+ mSampleRate = builder.getSampleRate();
+ mDeviceId = builder.getDeviceId();
+ mFormat = builder.getFormat();
+ mSharingMode = builder.getSharingMode();
+ mSharingModeMatchRequired = builder.isSharingModeMatchRequired();
+
+ mPerformanceMode = builder.getPerformanceMode();
+
+ // callbacks
+ mFramesPerDataCallback = builder.getFramesPerDataCallback();
+ mDataCallbackProc = builder.getDataCallbackProc();
+ mErrorCallbackProc = builder.getErrorCallbackProc();
+ mDataCallbackUserData = builder.getDataCallbackUserData();
+ mErrorCallbackUserData = builder.getErrorCallbackUserData();
+
+ // This is very helpful for debugging in the future. Please leave it in.
+ ALOGI("AudioStream::open() rate = %d, channels = %d, format = %d, sharing = %d, dir = %s",
+ mSampleRate, mSamplesPerFrame, mFormat, mSharingMode,
+ (getDirection() == AAUDIO_DIRECTION_OUTPUT) ? "OUTPUT" : "INPUT");
+ ALOGI("AudioStream::open() device = %d, perfMode = %d, callbackFrames = %d",
+ mDeviceId, mPerformanceMode, mFramesPerDataCallback);
+
+ // Check for values that are ridiculously out of range to prevent math overflow exploits.
+ // The service will do a better check.
+ if (mSamplesPerFrame < 0 || mSamplesPerFrame > 128) {
+ ALOGE("AudioStream::open(): samplesPerFrame out of range = %d", mSamplesPerFrame);
+ return AAUDIO_ERROR_OUT_OF_RANGE;
+ }
+
+ switch(mFormat) {
+ case AAUDIO_FORMAT_UNSPECIFIED:
+ case AAUDIO_FORMAT_PCM_I16:
+ case AAUDIO_FORMAT_PCM_FLOAT:
+ break; // valid
+ default:
+ ALOGE("AudioStream::open(): audioFormat not valid = %d", mFormat);
+ return AAUDIO_ERROR_INVALID_FORMAT;
+ // break;
+ }
+
+ if (mSampleRate != AAUDIO_UNSPECIFIED && (mSampleRate < 8000 || mSampleRate > 1000000)) {
+ ALOGE("AudioStream::open(): mSampleRate out of range = %d", mSampleRate);
+ return AAUDIO_ERROR_INVALID_RATE;
+ }
+
+ switch(mPerformanceMode) {
+ case AAUDIO_PERFORMANCE_MODE_NONE:
+ case AAUDIO_PERFORMANCE_MODE_POWER_SAVING:
+ case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY:
+ break;
+ default:
+ ALOGE("AudioStream::open(): illegal performanceMode %d", mPerformanceMode);
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ }
+
+ return AAUDIO_OK;
+}
+
+AudioStream::~AudioStream() {
+ close();
+}
+
+aaudio_result_t AudioStream::waitForStateChange(aaudio_stream_state_t currentState,
+ aaudio_stream_state_t *nextState,
+ int64_t timeoutNanoseconds)
+{
+ aaudio_result_t result = updateStateWhileWaiting();
+ if (result != AAUDIO_OK) {
+ return result;
+ }
+
+ int64_t durationNanos = 20 * AAUDIO_NANOS_PER_MILLISECOND; // arbitrary
+ aaudio_stream_state_t state = getState();
+ while (state == currentState && timeoutNanoseconds > 0) {
+ if (durationNanos > timeoutNanoseconds) {
+ durationNanos = timeoutNanoseconds;
+ }
+ AudioClock::sleepForNanos(durationNanos);
+ timeoutNanoseconds -= durationNanos;
+
+ aaudio_result_t result = updateStateWhileWaiting();
+ if (result != AAUDIO_OK) {
+ return result;
+ }
+
+ state = getState();
+ }
+ if (nextState != nullptr) {
+ *nextState = state;
+ }
+ return (state == currentState) ? AAUDIO_ERROR_TIMEOUT : AAUDIO_OK;
+}
+
+// This registers the callback thread with the server before
+// passing control to the app. This gives the server an opportunity to boost
+// the thread's performance characteristics.
+void* AudioStream::wrapUserThread() {
+ void* procResult = nullptr;
+ mThreadRegistrationResult = registerThread();
+ if (mThreadRegistrationResult == AAUDIO_OK) {
+ // Run callback loop. This may take a very long time.
+ procResult = mThreadProc(mThreadArg);
+ mThreadRegistrationResult = unregisterThread();
+ }
+ return procResult;
+}
+
+// This is the entry point for the new thread created by createThread().
+// It converts the 'C' function call to a C++ method call.
+static void* AudioStream_internalThreadProc(void* threadArg) {
+ AudioStream *audioStream = (AudioStream *) threadArg;
+ return audioStream->wrapUserThread();
+}
+
+// This is not exposed in the API.
+// But it is still used internally to implement callbacks for MMAP mode.
+aaudio_result_t AudioStream::createThread(int64_t periodNanoseconds,
+ aaudio_audio_thread_proc_t threadProc,
+ void* threadArg)
+{
+ if (mHasThread) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ if (threadProc == nullptr) {
+ return AAUDIO_ERROR_NULL;
+ }
+ // Pass input parameters to the background thread.
+ mThreadProc = threadProc;
+ mThreadArg = threadArg;
+ setPeriodNanoseconds(periodNanoseconds);
+ int err = pthread_create(&mThread, nullptr, AudioStream_internalThreadProc, this);
+ if (err != 0) {
+ return AAudioConvert_androidToAAudioResult(-errno);
+ } else {
+ mHasThread = true;
+ return AAUDIO_OK;
+ }
+}
+
+aaudio_result_t AudioStream::joinThread(void** returnArg, int64_t timeoutNanoseconds)
+{
+ if (!mHasThread) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+#if 0
+ // TODO implement equivalent of pthread_timedjoin_np()
+ struct timespec abstime;
+ int err = pthread_timedjoin_np(mThread, returnArg, &abstime);
+#else
+ int err = pthread_join(mThread, returnArg);
+#endif
+ mHasThread = false;
+ return err ? AAudioConvert_androidToAAudioResult(-errno) : mThreadRegistrationResult;
+}
+
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
new file mode 100644
index 0000000..39c9f9c
--- /dev/null
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -0,0 +1,332 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AUDIOSTREAM_H
+#define AAUDIO_AUDIOSTREAM_H
+
+#include <atomic>
+#include <mutex>
+#include <stdint.h>
+#include <aaudio/AAudio.h>
+
+#include "utility/AAudioUtilities.h"
+#include "utility/MonotonicCounter.h"
+
+namespace aaudio {
+
+typedef void *(*aaudio_audio_thread_proc_t)(void *);
+
+class AudioStreamBuilder;
+
+/**
+ * AAudio audio stream.
+ */
+class AudioStream {
+public:
+
+    AudioStream();
+
+    virtual ~AudioStream();
+
+
+    // =========== Begin ABSTRACT methods ===========================
+
+    /* Asynchronous requests.
+     * Use waitForStateChange() to wait for completion.
+     */
+    virtual aaudio_result_t requestStart() = 0;
+    virtual aaudio_result_t requestPause() = 0;
+    virtual aaudio_result_t requestFlush() = 0;
+    virtual aaudio_result_t requestStop() = 0;
+
+    virtual aaudio_result_t getTimestamp(clockid_t clockId,
+                                       int64_t *framePosition,
+                                       int64_t *timeNanoseconds) = 0;
+
+
+    /**
+     * Update state while in the middle of waitForStateChange()
+     * @return AAUDIO_OK or a negative error code
+     */
+    virtual aaudio_result_t updateStateWhileWaiting() = 0;
+
+
+    // =========== End ABSTRACT methods ===========================
+
+    virtual aaudio_result_t waitForStateChange(aaudio_stream_state_t currentState,
+                                          aaudio_stream_state_t *nextState,
+                                          int64_t timeoutNanoseconds);
+
+    /**
+     * Open the stream using the parameters in the builder.
+     * Allocate the necessary resources.
+     */
+    virtual aaudio_result_t open(const AudioStreamBuilder& builder);
+
+    /**
+     * Close the stream and deallocate any resources from the open() call.
+     * It is safe to call close() multiple times.
+     */
+    virtual aaudio_result_t close() {
+        return AAUDIO_OK;
+    }
+
+    virtual aaudio_result_t setBufferSize(int32_t requestedFrames) {
+        return AAUDIO_ERROR_UNIMPLEMENTED;
+    }
+
+    virtual aaudio_result_t createThread(int64_t periodNanoseconds,
+                                       aaudio_audio_thread_proc_t threadProc,
+                                       void *threadArg);
+
+    aaudio_result_t joinThread(void **returnArg, int64_t timeoutNanoseconds);
+
+    virtual aaudio_result_t registerThread() {
+        return AAUDIO_OK;
+    }
+
+    virtual aaudio_result_t unregisterThread() {
+        return AAUDIO_OK;
+    }
+
+    /**
+     * Internal function used to call the audio thread passed by the user.
+     * It is unfortunately public because it needs to be called by a static 'C' function.
+     */
+    void* wrapUserThread();
+
+    // ============== Queries ===========================
+
+    aaudio_stream_state_t getState() const {
+        return mState;
+    }
+
+    virtual int32_t getBufferSize() const {
+        return AAUDIO_ERROR_UNIMPLEMENTED; // negative error code as int32_t unless overridden
+    }
+
+    virtual int32_t getBufferCapacity() const {
+        return AAUDIO_ERROR_UNIMPLEMENTED;
+    }
+
+    virtual int32_t getFramesPerBurst() const {
+        return AAUDIO_ERROR_UNIMPLEMENTED;
+    }
+
+    virtual int32_t getXRunCount() const {
+        return AAUDIO_ERROR_UNIMPLEMENTED;
+    }
+
+    bool isActive() const {
+        return mState == AAUDIO_STREAM_STATE_STARTING || mState == AAUDIO_STREAM_STATE_STARTED;
+    }
+
+    virtual bool isMMap() {
+        return false;
+    }
+
+    int32_t getSampleRate() const { // was aaudio_result_t: this is a rate, not a result code
+        return mSampleRate;
+    }
+
+    aaudio_format_t getFormat()   const {
+        return mFormat;
+    }
+
+    int32_t getSamplesPerFrame() const { // was aaudio_result_t: this is a channel count, not a result code
+        return mSamplesPerFrame;
+    }
+
+    virtual int32_t getPerformanceMode() const {
+        return mPerformanceMode;
+    }
+
+    void setPerformanceMode(aaudio_performance_mode_t performanceMode) {
+        mPerformanceMode = performanceMode;
+    }
+
+    int32_t getDeviceId() const {
+        return mDeviceId;
+    }
+
+    aaudio_sharing_mode_t getSharingMode() const {
+        return mSharingMode;
+    }
+
+    bool isSharingModeMatchRequired() const {
+        return mSharingModeMatchRequired;
+    }
+
+    virtual aaudio_direction_t getDirection() const = 0;
+
+    /**
+     * This is only valid after setSamplesPerFrame() and setFormat() have been called.
+     */
+    int32_t getBytesPerFrame() const {
+        return mSamplesPerFrame * getBytesPerSample();
+    }
+
+    /**
+     * This is only valid after setFormat() has been called.
+     */
+    int32_t getBytesPerSample() const {
+        return AAudioConvert_formatToSizeInBytes(mFormat);
+    }
+
+    virtual int64_t getFramesWritten() {
+        return mFramesWritten.get();
+    }
+
+    virtual int64_t getFramesRead() {
+        return mFramesRead.get();
+    }
+
+    AAudioStream_dataCallback getDataCallbackProc() const {
+        return mDataCallbackProc;
+    }
+    AAudioStream_errorCallback getErrorCallbackProc() const {
+        return mErrorCallbackProc;
+    }
+
+    void *getDataCallbackUserData() const {
+        return mDataCallbackUserData;
+    }
+    void *getErrorCallbackUserData() const {
+        return mErrorCallbackUserData;
+    }
+
+    int32_t getFramesPerDataCallback() const {
+        return mFramesPerDataCallback;
+    }
+
+    bool isDataCallbackActive() {
+        return (mDataCallbackProc != nullptr) && isActive();
+    }
+
+    // ============== I/O ===========================
+    // A Stream will only implement read() or write() depending on its direction.
+    virtual aaudio_result_t write(const void *buffer,
+                             int32_t numFrames,
+                             int64_t timeoutNanoseconds) {
+        return AAUDIO_ERROR_UNIMPLEMENTED;
+    }
+
+    virtual aaudio_result_t read(void *buffer,
+                            int32_t numFrames,
+                            int64_t timeoutNanoseconds) {
+        return AAUDIO_ERROR_UNIMPLEMENTED;
+    }
+
+protected:
+
+    virtual int64_t incrementFramesWritten(int32_t frames) {
+        return mFramesWritten.increment(frames);
+    }
+
+    virtual int64_t incrementFramesRead(int32_t frames) {
+        return mFramesRead.increment(frames);
+    }
+
+    /**
+     * This should not be called after the open() call.
+     */
+    void setSampleRate(int32_t sampleRate) {
+        mSampleRate = sampleRate;
+    }
+
+    /**
+     * This should not be called after the open() call.
+     */
+    void setSamplesPerFrame(int32_t samplesPerFrame) {
+        mSamplesPerFrame = samplesPerFrame;
+    }
+
+    /**
+     * This should not be called after the open() call.
+     */
+    void setSharingMode(aaudio_sharing_mode_t sharingMode) {
+        mSharingMode = sharingMode;
+    }
+
+    /**
+     * This should not be called after the open() call.
+     */
+    void setFormat(aaudio_format_t format) {
+        mFormat = format;
+    }
+
+    void setState(aaudio_stream_state_t state) {
+        mState = state;
+    }
+
+    void setDeviceId(int32_t deviceId) {
+        mDeviceId = deviceId;
+    }
+
+    std::mutex           mStreamMutex;
+
+    std::atomic<bool>    mCallbackEnabled;
+
+protected:
+    MonotonicCounter     mFramesWritten;
+    MonotonicCounter     mFramesRead;
+
+    void setPeriodNanoseconds(int64_t periodNanoseconds) {
+        mPeriodNanoseconds.store(periodNanoseconds, std::memory_order_release);
+    }
+
+    int64_t getPeriodNanoseconds() {
+        return mPeriodNanoseconds.load(std::memory_order_acquire);
+    }
+
+private:
+    // These do not change after open().
+    int32_t                mSamplesPerFrame = AAUDIO_UNSPECIFIED;
+    int32_t                mSampleRate = AAUDIO_UNSPECIFIED;
+    int32_t                mDeviceId = AAUDIO_UNSPECIFIED;
+    aaudio_sharing_mode_t  mSharingMode = AAUDIO_SHARING_MODE_SHARED;
+    bool                   mSharingModeMatchRequired = false; // must match sharing mode requested
+    aaudio_format_t        mFormat = AAUDIO_FORMAT_UNSPECIFIED;
+    aaudio_stream_state_t  mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
+
+    aaudio_performance_mode_t mPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
+
+    // callback ----------------------------------
+
+    AAudioStream_dataCallback   mDataCallbackProc = nullptr;  // external callback functions
+    void                       *mDataCallbackUserData = nullptr;
+    int32_t                     mFramesPerDataCallback = AAUDIO_UNSPECIFIED; // frames
+
+    AAudioStream_errorCallback  mErrorCallbackProc = nullptr;
+    void                       *mErrorCallbackUserData = nullptr;
+
+    // background thread ----------------------------------
+    bool                   mHasThread = false;
+    pthread_t              mThread; // initialized in constructor
+
+    // These are set by the application thread and then read by the audio pthread.
+    std::atomic<int64_t>   mPeriodNanoseconds; // for tuning SCHED_FIFO threads
+    // TODO make atomic?
+    aaudio_audio_thread_proc_t mThreadProc = nullptr;
+    void*                  mThreadArg = nullptr;
+    aaudio_result_t        mThreadRegistrationResult = AAUDIO_OK;
+
+
+};
+
+} /* namespace aaudio */
+
+#endif /* AAUDIO_AUDIOSTREAM_H */
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
new file mode 100644
index 0000000..4262f27
--- /dev/null
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <new>
+#include <stdint.h>
+
+#include <aaudio/AAudio.h>
+#include <aaudio/AAudioTesting.h>
+
+#include "binding/AAudioBinderClient.h"
+#include "client/AudioStreamInternalCapture.h"
+#include "client/AudioStreamInternalPlay.h"
+#include "core/AudioStream.h"
+#include "core/AudioStreamBuilder.h"
+#include "legacy/AudioStreamRecord.h"
+#include "legacy/AudioStreamTrack.h"
+
+using namespace aaudio;
+
+#define AAUDIO_MMAP_POLICY_DEFAULT AAUDIO_POLICY_NEVER
+#define AAUDIO_MMAP_EXCLUSIVE_POLICY_DEFAULT AAUDIO_POLICY_NEVER
+
+/*
+ * AudioStreamBuilder
+ */
+AudioStreamBuilder::AudioStreamBuilder() { // all members use in-class default initializers (see AudioStreamBuilder.h)
+}
+
+AudioStreamBuilder::~AudioStreamBuilder() { // builder owns no resources; nothing to release
+}
+
+// Allocate (but do not open) a stream object of the requested direction,
+// choosing the MMAP-internal or legacy implementation.
+static aaudio_result_t builder_createStream(aaudio_direction_t direction,
+                                         aaudio_sharing_mode_t sharingMode, // NOTE(review): currently unused here -- confirm intended
+                                         bool tryMMap,
+                                         AudioStream **audioStreamPtr) {
+    *audioStreamPtr = nullptr; // out-param stays null unless creation succeeds
+    aaudio_result_t result = AAUDIO_OK;
+
+    switch (direction) {
+
+        case AAUDIO_DIRECTION_INPUT:
+            if (tryMMap) {
+                *audioStreamPtr = new AudioStreamInternalCapture(AAudioBinderClient::getInstance(),
+                                                                 false);
+            } else {
+                *audioStreamPtr = new AudioStreamRecord();
+            }
+            break;
+
+        case AAUDIO_DIRECTION_OUTPUT:
+            if (tryMMap) {
+                *audioStreamPtr = new AudioStreamInternalPlay(AAudioBinderClient::getInstance(),
+                                                              false);
+            } else {
+                *audioStreamPtr = new AudioStreamTrack();
+            }
+            break;
+
+        default:
+            ALOGE("AudioStreamBuilder(): bad direction = %d", direction);
+            result = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+    }
+    return result;
+}
+
+// Try to open using MMAP path if that is allowed.
+// Fall back to Legacy path if MMAP not available.
+// Exact behavior is controlled by MMapPolicy.
+aaudio_result_t AudioStreamBuilder::build(AudioStream** streamPtr) {
+    AudioStream *audioStream = nullptr;
+    *streamPtr = nullptr; // out-param stays null unless build succeeds
+
+    // The API setting is the highest priority.
+    aaudio_policy_t mmapPolicy = AAudio_getMMapPolicy();
+    // If not specified then get from a system property.
+    if (mmapPolicy == AAUDIO_UNSPECIFIED) {
+        mmapPolicy = AAudioProperty_getMMapPolicy();
+    }
+    // If still not specified then use the default.
+    if (mmapPolicy == AAUDIO_UNSPECIFIED) {
+        mmapPolicy = AAUDIO_MMAP_POLICY_DEFAULT;
+    }
+
+    int32_t mapExclusivePolicy = AAudioProperty_getMMapExclusivePolicy();
+    if (mapExclusivePolicy == AAUDIO_UNSPECIFIED) {
+        mapExclusivePolicy = AAUDIO_MMAP_EXCLUSIVE_POLICY_DEFAULT;
+    }
+    ALOGD("AudioStreamBuilder(): mmapPolicy = %d, mapExclusivePolicy = %d",
+          mmapPolicy, mapExclusivePolicy);
+
+    aaudio_sharing_mode_t sharingMode = getSharingMode();
+    if ((sharingMode == AAUDIO_SHARING_MODE_EXCLUSIVE)
+        && (mapExclusivePolicy == AAUDIO_POLICY_NEVER)) {
+        ALOGW("AudioStreamBuilder(): EXCLUSIVE sharing mode not supported. Use SHARED.");
+        sharingMode = AAUDIO_SHARING_MODE_SHARED;
+        setSharingMode(sharingMode); // downgrade is recorded in the builder so callers can observe it
+    }
+
+    bool allowMMap = mmapPolicy != AAUDIO_POLICY_NEVER;
+    bool allowLegacy = mmapPolicy != AAUDIO_POLICY_ALWAYS;
+
+    aaudio_result_t result = builder_createStream(getDirection(), sharingMode,
+                                                  allowMMap, &audioStream);
+    if (result == AAUDIO_OK) {
+        // Open the stream using the parameters from the builder.
+        result = audioStream->open(*this);
+        if (result == AAUDIO_OK) {
+            *streamPtr = audioStream;
+        } else {
+            bool isMMap = audioStream->isMMap(); // remember the flavor before deleting the stream
+            delete audioStream;
+            audioStream = nullptr;
+
+            if (isMMap && allowLegacy) {
+                ALOGD("AudioStreamBuilder.build() MMAP stream did not open so try Legacy path");
+                // If MMAP stream failed to open then TRY using a legacy stream.
+                result = builder_createStream(getDirection(), sharingMode,
+                                              false, &audioStream);
+                if (result == AAUDIO_OK) {
+                    result = audioStream->open(*this);
+                    if (result == AAUDIO_OK) {
+                        *streamPtr = audioStream;
+                    } else {
+                        delete audioStream; // discard the failed legacy stream; result keeps the open() error
+                    }
+                }
+            }
+        }
+    }
+
+    return result;
+}
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.h b/media/libaaudio/src/core/AudioStreamBuilder.h
new file mode 100644
index 0000000..fd416c4
--- /dev/null
+++ b/media/libaaudio/src/core/AudioStreamBuilder.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AUDIO_STREAM_BUILDER_H
+#define AAUDIO_AUDIO_STREAM_BUILDER_H
+
+#include <stdint.h>
+
+#include <aaudio/AAudio.h>
+
+#include "AudioStream.h"
+
+namespace aaudio {
+
+/**
+ * Factory class for an AudioStream. Setters return 'this' so calls can be chained.
+ */
+class AudioStreamBuilder {
+public:
+    AudioStreamBuilder();
+
+    ~AudioStreamBuilder();
+
+    int getSamplesPerFrame() const {
+        return mSamplesPerFrame;
+    }
+
+    /**
+     * This is also known as channelCount.
+     */
+    AudioStreamBuilder* setSamplesPerFrame(int samplesPerFrame) {
+        mSamplesPerFrame = samplesPerFrame;
+        return this;
+    }
+
+    aaudio_direction_t getDirection() const {
+        return mDirection;
+    }
+
+    AudioStreamBuilder* setDirection(aaudio_direction_t direction) {
+        mDirection = direction;
+        return this;
+    }
+
+    int32_t getSampleRate() const {
+        return mSampleRate;
+    }
+
+    AudioStreamBuilder* setSampleRate(int32_t sampleRate) {
+        mSampleRate = sampleRate;
+        return this;
+    }
+
+    aaudio_format_t getFormat() const {
+        return mFormat;
+    }
+
+    AudioStreamBuilder *setFormat(aaudio_format_t format) {
+        mFormat = format;
+        return this;
+    }
+
+    aaudio_sharing_mode_t getSharingMode() const {
+        return mSharingMode;
+    }
+
+    AudioStreamBuilder* setSharingMode(aaudio_sharing_mode_t sharingMode) {
+        mSharingMode = sharingMode;
+        return this;
+    }
+
+    bool isSharingModeMatchRequired() const {
+        return mSharingModeMatchRequired;
+    }
+
+    AudioStreamBuilder* setSharingModeMatchRequired(bool required) {
+        mSharingModeMatchRequired = required;
+        return this;
+    }
+
+    int32_t getBufferCapacity() const {
+        return mBufferCapacity;
+    }
+
+    AudioStreamBuilder* setBufferCapacity(int32_t frames) {
+        mBufferCapacity = frames;
+        return this;
+    }
+
+    int32_t getPerformanceMode() const {
+        return mPerformanceMode;
+    }
+
+    AudioStreamBuilder* setPerformanceMode(aaudio_performance_mode_t performanceMode) {
+        mPerformanceMode = performanceMode;
+        return this;
+    }
+
+    int32_t getDeviceId() const {
+        return mDeviceId;
+    }
+
+    AudioStreamBuilder* setDeviceId(int32_t deviceId) {
+        mDeviceId = deviceId;
+        return this;
+    }
+
+    AAudioStream_dataCallback getDataCallbackProc() const {
+        return mDataCallbackProc;
+    }
+
+    AudioStreamBuilder* setDataCallbackProc(AAudioStream_dataCallback proc) {
+        mDataCallbackProc = proc;
+        return this;
+    }
+
+    void *getDataCallbackUserData() const {
+        return mDataCallbackUserData;
+    }
+
+    AudioStreamBuilder* setDataCallbackUserData(void *userData) {
+        mDataCallbackUserData = userData;
+        return this;
+    }
+
+    AAudioStream_errorCallback getErrorCallbackProc() const {
+        return mErrorCallbackProc;
+    }
+
+    AudioStreamBuilder* setErrorCallbackProc(AAudioStream_errorCallback proc) {
+        mErrorCallbackProc = proc;
+        return this;
+    }
+
+    AudioStreamBuilder* setErrorCallbackUserData(void *userData) {
+        mErrorCallbackUserData = userData;
+        return this;
+    }
+
+    void *getErrorCallbackUserData() const {
+        return mErrorCallbackUserData;
+    }
+
+    int32_t getFramesPerDataCallback() const {
+        return mFramesPerDataCallback;
+    }
+
+    AudioStreamBuilder* setFramesPerDataCallback(int32_t sizeInFrames) {
+        mFramesPerDataCallback = sizeInFrames;
+        return this;
+    }
+
+    aaudio_result_t build(AudioStream **streamPtr); // creates AND opens the stream; *streamPtr is null on failure
+
+private:
+    int32_t                    mSamplesPerFrame = AAUDIO_UNSPECIFIED;
+    int32_t                    mSampleRate = AAUDIO_UNSPECIFIED;
+    int32_t                    mDeviceId = AAUDIO_UNSPECIFIED;
+    aaudio_sharing_mode_t      mSharingMode = AAUDIO_SHARING_MODE_SHARED;
+    bool                       mSharingModeMatchRequired = false; // must match sharing mode requested
+    aaudio_format_t            mFormat = AAUDIO_FORMAT_UNSPECIFIED;
+    aaudio_direction_t         mDirection = AAUDIO_DIRECTION_OUTPUT;
+    int32_t                    mBufferCapacity = AAUDIO_UNSPECIFIED;
+    aaudio_performance_mode_t  mPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
+
+    AAudioStream_dataCallback  mDataCallbackProc = nullptr;  // external callback functions
+    void                      *mDataCallbackUserData = nullptr;
+    int32_t                    mFramesPerDataCallback = AAUDIO_UNSPECIFIED; // frames
+
+    AAudioStream_errorCallback mErrorCallbackProc = nullptr;
+    void                      *mErrorCallbackUserData = nullptr;
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_AUDIO_STREAM_BUILDER_H
diff --git a/media/libaaudio/src/core/README.md b/media/libaaudio/src/core/README.md
new file mode 100644
index 0000000..5ce41f3
--- /dev/null
+++ b/media/libaaudio/src/core/README.md
@@ -0,0 +1,2 @@
+The core folder contains the essential AAudio files common to all implementations.
+The file AAudioAudio.cpp contains the implementation of the 'C' API.
diff --git a/media/libaaudio/src/core/VersionExperiment.txt b/media/libaaudio/src/core/VersionExperiment.txt
new file mode 100644
index 0000000..071239b
--- /dev/null
+++ b/media/libaaudio/src/core/VersionExperiment.txt
@@ -0,0 +1,55 @@
+
+// TODO Experiment with versioning. This may be removed or changed dramatically.
+// Please ignore for now. Do not review.
+#define OBOE_VERSION_EXPERIMENT 0
+#if OBOE_VERSION_EXPERIMENT
+
+#define OBOE_EARLIEST_SUPPORTED_VERSION 1
+#define OBOE_CURRENT_VERSION 2
+
+typedef struct OboeInterface_s {
+ int32_t size; // do not use size_t because its size can vary
+ int32_t version;
+ int32_t reserved1;
+ void * reserved2;
+ oboe_result_t (*createStreamBuilder)(OboeStreamBuilder *);
+} OboeInterface_t;
+
+OboeInterface_t s_oboe_template = {
+ .size = sizeof(OboeInterface_t),
+ .version = OBOE_CURRENT_VERSION,
+ .reserved1 = 0,
+ .reserved2 = NULL,
+ .createStreamBuilder = Oboe_createStreamBuilder
+};
+
+oboe_result_t Oboe_Unimplemented(OboeInterface_t *oboe) {
+ (void) oboe;
+ return OBOE_ERROR_UNIMPLEMENTED;
+}
+
+typedef oboe_result_t (*OboeFunction_t)(OboeInterface_t *oboe);
+
+int32_t Oboe_Initialize(OboeInterface_t *oboe, uint32_t flags) {
+ if (oboe->version < OBOE_EARLIEST_SUPPORTED_VERSION) {
+ return OBOE_ERROR_INCOMPATIBLE;
+ }
+ // Fill in callers vector table.
+ uint8_t *start = (uint8_t*)&oboe->reserved1;
+ uint8_t *end;
+ if (oboe->size <= s_oboe_template.size) {
+ end = ((uint8_t *)oboe) + oboe->size;
+ } else {
+ end = ((uint8_t *)oboe) + s_oboe_template.size;
+ // Assume the rest of the structure is vectors.
+ // Point them all to OboeInternal_Unimplemented()
+ // Point to first vector past end of the known structure.
+ OboeFunction_t *next = (OboeFunction_t*)end;
+ while ((((uint8_t *)next) - ((uint8_t *)oboe)) < oboe->size) {
+ *next++ = Oboe_Unimplemented;
+ }
+ }
+ memcpy(&oboe->reserved1, &s_oboe_template.reserved1, end - start);
+ return OBOE_OK;
+}
+#endif /* OBOE_VERSION_EXPERIMENT -------------------------- */
diff --git a/media/libaaudio/src/fifo/FifoBuffer.cpp b/media/libaaudio/src/fifo/FifoBuffer.cpp
new file mode 100644
index 0000000..6b4a772
--- /dev/null
+++ b/media/libaaudio/src/fifo/FifoBuffer.cpp
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstring>
+#include <unistd.h>
+
+
+#define LOG_TAG "FifoBuffer"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include "FifoControllerBase.h"
+#include "FifoController.h"
+#include "FifoControllerIndirect.h"
+#include "FifoBuffer.h"
+
+using namespace android; // TODO just import names needed
+
+// Constructor that allocates and owns the data storage and the counters.
+FifoBuffer::FifoBuffer(int32_t bytesPerFrame, fifo_frames_t capacityInFrames)
+        : mFrameCapacity(capacityInFrames)
+        , mBytesPerFrame(bytesPerFrame)
+        , mStorage(nullptr)
+        , mFramesReadCount(0)
+        , mFramesUnderrunCount(0)
+        , mUnderrunCount(0)
+{
+    // TODO Handle possible failures to allocate. Move out of constructor?
+    mFifo = new FifoController(capacityInFrames, capacityInFrames); // threshold == capacity: whole buffer usable
+    // allocate buffer
+    int32_t bytesPerBuffer = bytesPerFrame * capacityInFrames; // NOTE(review): could overflow int32_t for huge buffers -- confirm callers limit capacity
+    mStorage = new uint8_t[bytesPerBuffer];
+    mStorageOwned = true;
+    ALOGD("FifoBuffer: capacityInFrames = %d, bytesPerFrame = %d",
+          capacityInFrames, bytesPerFrame);
+}
+
+// Constructor for a FIFO whose counters and data storage live elsewhere
+// (e.g. in shared memory provided by the caller).
+FifoBuffer::FifoBuffer( int32_t   bytesPerFrame,
+                        fifo_frames_t   capacityInFrames,
+                        fifo_counter_t *  readIndexAddress,
+                        fifo_counter_t *  writeIndexAddress,
+                        void *  dataStorageAddress
+                        )
+        : mFrameCapacity(capacityInFrames)
+        , mBytesPerFrame(bytesPerFrame)
+        , mStorage(static_cast<uint8_t *>(dataStorageAddress))
+        , mFramesReadCount(0)
+        , mFramesUnderrunCount(0)
+        , mUnderrunCount(0)
+{
+    mFifo = new FifoControllerIndirect(capacityInFrames,
+                                       capacityInFrames,
+                                       readIndexAddress,
+                                       writeIndexAddress);
+    mStorageOwned = false; // storage and counters are owned by the caller
+}
+
+FifoBuffer::~FifoBuffer() {
+    if (mStorageOwned) {
+        delete[] mStorage; // only free storage allocated by the capacity-only constructor
+    }
+    delete mFifo;
+}
+
+
+int32_t FifoBuffer::convertFramesToBytes(fifo_frames_t frames) {
+    return frames * mBytesPerFrame; // NOTE(review): no overflow check -- assumes frames fits within the buffer
+}
+
+// Describe the region of mStorage starting at startIndex containing framesAvailable
+// frames. If the region crosses the end of the circular buffer it is split into
+// two parts; unused parts are set to {nullptr, 0}.
+void FifoBuffer::fillWrappingBuffer(WrappingBuffer *wrappingBuffer,
+                                    int32_t framesAvailable,
+                                    int32_t startIndex) {
+    wrappingBuffer->data[1] = nullptr;
+    wrappingBuffer->numFrames[1] = 0;
+    if (framesAvailable > 0) {
+
+        uint8_t *source = &mStorage[convertFramesToBytes(startIndex)];
+        // Does the available data cross the end of the FIFO?
+        if ((startIndex + framesAvailable) > mFrameCapacity) {
+            wrappingBuffer->data[0] = source;
+            wrappingBuffer->numFrames[0] = mFrameCapacity - startIndex; // frames up to the end of the buffer
+            wrappingBuffer->data[1] = &mStorage[0];
+            wrappingBuffer->numFrames[1] = startIndex + framesAvailable - mFrameCapacity; // wrapped remainder (was mFrameCapacity - startIndex, a copy/paste bug)
+
+        } else {
+            wrappingBuffer->data[0] = source;
+            wrappingBuffer->numFrames[0] = framesAvailable;
+        }
+    } else {
+        wrappingBuffer->data[0] = nullptr;
+        wrappingBuffer->numFrames[0] = 0;
+    }
+
+}
+
+void FifoBuffer::getFullDataAvailable(WrappingBuffer *wrappingBuffer) {
+    fifo_frames_t framesAvailable = mFifo->getFullFramesAvailable(); // region(s) the reader may consume
+    fifo_frames_t startIndex = mFifo->getReadIndex();
+    fillWrappingBuffer(wrappingBuffer, framesAvailable, startIndex);
+}
+
+void FifoBuffer::getEmptyRoomAvailable(WrappingBuffer *wrappingBuffer) {
+    fifo_frames_t framesAvailable = mFifo->getEmptyFramesAvailable(); // region(s) the writer may fill
+    fifo_frames_t startIndex = mFifo->getWriteIndex();
+    fillWrappingBuffer(wrappingBuffer, framesAvailable, startIndex);
+}
+
+// Copy up to numFrames from the FIFO into buffer. Returns the number of frames
+// actually read, which may be less than numFrames if the FIFO runs dry.
+fifo_frames_t FifoBuffer::read(void *buffer, fifo_frames_t numFrames) {
+    WrappingBuffer wrappingBuffer;
+    uint8_t *destination = (uint8_t *) buffer;
+    fifo_frames_t framesLeft = numFrames;
+
+    getFullDataAvailable(&wrappingBuffer); // snapshot of the readable region(s)
+
+    // Read data in one or two parts.
+    int partIndex = 0;
+    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
+        fifo_frames_t framesToRead = framesLeft;
+        fifo_frames_t framesAvailable = wrappingBuffer.numFrames[partIndex];
+        if (framesAvailable > 0) {
+            if (framesToRead > framesAvailable) {
+                framesToRead = framesAvailable;
+            }
+            int32_t numBytes = convertFramesToBytes(framesToRead);
+            memcpy(destination, wrappingBuffer.data[partIndex], numBytes);
+
+            destination += numBytes;
+            framesLeft -= framesToRead;
+        } else {
+            break;
+        }
+        partIndex++;
+    }
+    fifo_frames_t framesRead = numFrames - framesLeft;
+    mFifo->advanceReadIndex(framesRead); // publish the consumption to the writer
+    return framesRead;
+}
+
+// Copy up to numFrames from buffer into the FIFO. Returns the number of frames
+// actually written, which may be less than numFrames if the FIFO is nearly full.
+fifo_frames_t FifoBuffer::write(const void *buffer, fifo_frames_t numFrames) {
+    WrappingBuffer wrappingBuffer;
+    const uint8_t *source = (const uint8_t *) buffer; // keep const; was casting constness away
+    fifo_frames_t framesLeft = numFrames;
+
+    getEmptyRoomAvailable(&wrappingBuffer); // snapshot of the writable region(s)
+
+    // Write data in one or two parts.
+    int partIndex = 0;
+    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
+        fifo_frames_t framesToWrite = framesLeft;
+        fifo_frames_t framesAvailable = wrappingBuffer.numFrames[partIndex];
+        if (framesAvailable > 0) {
+            if (framesToWrite > framesAvailable) {
+                framesToWrite = framesAvailable;
+            }
+            int32_t numBytes = convertFramesToBytes(framesToWrite);
+            memcpy(wrappingBuffer.data[partIndex], source, numBytes);
+
+            source += numBytes;
+            framesLeft -= framesToWrite;
+        } else {
+            break;
+        }
+        partIndex++;
+    }
+    fifo_frames_t framesWritten = numFrames - framesLeft;
+    mFifo->advanceWriteIndex(framesWritten); // publish the new data to the reader
+    return framesWritten;
+}
+
+// Non-blocking read of exactly numFrames worth of buffer: whatever the FIFO
+// cannot supply is zero-filled and counted as an underrun.
+fifo_frames_t FifoBuffer::readNow(void *buffer, fifo_frames_t numFrames) {
+    mLastReadSize = numFrames;
+    fifo_frames_t framesLeft = numFrames;
+    fifo_frames_t framesRead = read(buffer, numFrames);
+    framesLeft -= framesRead;
+    mFramesReadCount += framesRead;
+    mFramesUnderrunCount += framesLeft;
+    // Zero out any samples we could not set.
+    if (framesLeft > 0) {
+        mUnderrunCount++;
+        int32_t bytesToZero = convertFramesToBytes(framesLeft);
+        memset(buffer, 0, bytesToZero); // NOTE(review): zeroes the START of buffer, not the unfilled tail -- confirm intended
+    }
+
+    return framesRead;
+}
+
+fifo_frames_t FifoBuffer::getThreshold() {
+    return mFifo->getThreshold(); // maximum fill level allowed by the controller
+}
+
+void FifoBuffer::setThreshold(fifo_frames_t threshold) {
+    mFifo->setThreshold(threshold); // limits how full the FIFO may get; see FifoControllerBase
+}
+
+fifo_frames_t FifoBuffer::getBufferCapacityInFrames() {
+    return mFifo->getCapacity(); // fixed at construction time
+}
+
diff --git a/media/libaaudio/src/fifo/FifoBuffer.h b/media/libaaudio/src/fifo/FifoBuffer.h
new file mode 100644
index 0000000..2b262a1
--- /dev/null
+++ b/media/libaaudio/src/fifo/FifoBuffer.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FIFO_FIFO_BUFFER_H
+#define FIFO_FIFO_BUFFER_H
+
+#include <stdint.h>
+
+#include "FifoControllerBase.h"
+
+namespace android {
+
+/**
+ * Structure that represents a region in a circular buffer that might be at the
+ * end of the array and split in two.
+ */
+struct WrappingBuffer {
+    enum {
+        SIZE = 2
+    };
+    void *data[SIZE];       // part pointer is nullptr when that part is unused
+    int32_t numFrames[SIZE]; // frame count per part; 0 when that part is unused
+};
+
+// Circular FIFO of fixed-size frames. Storage and counters may be owned by this
+// object or supplied externally (e.g. shared memory); see the two constructors.
+class FifoBuffer {
+public:
+    FifoBuffer(int32_t bytesPerFrame, fifo_frames_t capacityInFrames);
+
+    FifoBuffer(int32_t   bytesPerFrame,
+               fifo_frames_t   capacityInFrames,
+               fifo_counter_t * readCounterAddress,
+               fifo_counter_t * writeCounterAddress,
+               void * dataStorageAddress);
+
+    ~FifoBuffer();
+
+    int32_t convertFramesToBytes(fifo_frames_t frames);
+
+    fifo_frames_t read(void *destination, fifo_frames_t framesToRead);
+
+    fifo_frames_t write(const void *source, fifo_frames_t framesToWrite);
+
+    fifo_frames_t getThreshold();
+
+    void setThreshold(fifo_frames_t threshold);
+
+    fifo_frames_t getBufferCapacityInFrames();
+
+    /**
+     * Return pointer to available full frames in data1 and set size in numFrames1.
+     * if the data is split across the end of the FIFO then set data2 and numFrames2.
+     * Otherwise set them to null
+     * @param wrappingBuffer
+     */
+    void getFullDataAvailable(WrappingBuffer *wrappingBuffer);
+
+    /**
+     * Return pointer to available empty frames in data1 and set size in numFrames1.
+     * if the room is split across the end of the FIFO then set data2 and numFrames2.
+     * Otherwise set them to null
+     * @param wrappingBuffer
+     */
+    void getEmptyRoomAvailable(WrappingBuffer *wrappingBuffer);
+
+    /**
+     * Copy data from the FIFO into the buffer, zero-filling any shortfall.
+     * @param buffer destination, always filled with numFrames frames of data or silence
+     * @param numFrames frames requested
+     * @return frames actually read from the FIFO
+     */
+    fifo_frames_t readNow(void *buffer, fifo_frames_t numFrames);
+
+    int64_t getNextReadTime(int32_t frameRate); // NOTE(review): no definition in FifoBuffer.cpp -- confirm before calling
+
+    int32_t getUnderrunCount() const { return mUnderrunCount; }
+
+    FifoControllerBase *getFifoControllerBase() { return mFifo; }
+
+    int32_t getBytesPerFrame() {
+        return mBytesPerFrame;
+    }
+
+    fifo_counter_t getReadCounter() {
+        return mFifo->getReadCounter();
+    }
+
+    void setReadCounter(fifo_counter_t n) {
+        mFifo->setReadCounter(n);
+    }
+
+    fifo_counter_t getWriteCounter() {
+        return mFifo->getWriteCounter();
+    }
+
+    void setWriteCounter(fifo_counter_t n) {
+        mFifo->setWriteCounter(n);
+    }
+
+private:
+
+    void fillWrappingBuffer(WrappingBuffer *wrappingBuffer,
+                            int32_t framesAvailable, int32_t startIndex);
+
+    const fifo_frames_t mFrameCapacity;
+    const int32_t       mBytesPerFrame;
+    uint8_t            *mStorage;
+    bool                mStorageOwned; // did this object allocate the storage?
+    FifoControllerBase *mFifo;
+    fifo_counter_t      mFramesReadCount;
+    fifo_counter_t      mFramesUnderrunCount;
+    int32_t             mUnderrunCount; // need? just use frames
+    int32_t             mLastReadSize; // frames requested by the most recent readNow()
+};
+
+} // android
+
+#endif //FIFO_FIFO_BUFFER_H
diff --git a/media/libaaudio/src/fifo/FifoController.h b/media/libaaudio/src/fifo/FifoController.h
new file mode 100644
index 0000000..79d98a1
--- /dev/null
+++ b/media/libaaudio/src/fifo/FifoController.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FIFO_FIFO_CONTROLLER_H
+#define FIFO_FIFO_CONTROLLER_H
+
+#include <stdint.h>
+#include <atomic>
+
+#include "FifoControllerBase.h"
+
+namespace android {
+
+/**
+ * A FIFO with counters contained in the class.
+ */
+class FifoController : public FifoControllerBase
+{
+public:
+    FifoController(fifo_counter_t bufferSize, fifo_counter_t threshold)
+            : FifoControllerBase(bufferSize, threshold)
+            , mReadCounter(0)
+            , mWriteCounter(0)
+    {}
+
+    virtual ~FifoController() {}
+
+    // TODO review use of memory barriers, probably incorrect
+    virtual fifo_counter_t getReadCounter() override {
+        return mReadCounter.load(std::memory_order_acquire);
+    }
+    virtual void setReadCounter(fifo_counter_t n) override {
+        mReadCounter.store(n, std::memory_order_release);
+    }
+    virtual fifo_counter_t getWriteCounter() override {
+        return mWriteCounter.load(std::memory_order_acquire);
+    }
+    virtual void setWriteCounter(fifo_counter_t n) override {
+        mWriteCounter.store(n, std::memory_order_release);
+    }
+
+private:
+    // Monotonic counters; FifoControllerBase derives buffer indices via % capacity.
+    std::atomic<fifo_counter_t> mReadCounter;
+    std::atomic<fifo_counter_t> mWriteCounter;
+};
+
+} // android
+
+#endif //FIFO_FIFO_CONTROLLER_H
diff --git a/media/libaaudio/src/fifo/FifoControllerBase.cpp b/media/libaaudio/src/fifo/FifoControllerBase.cpp
new file mode 100644
index 0000000..14a2be1
--- /dev/null
+++ b/media/libaaudio/src/fifo/FifoControllerBase.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "FifoControllerBase"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include "FifoControllerBase.h"
+
+using namespace android; // TODO just import names needed
+
+FifoControllerBase::FifoControllerBase(fifo_frames_t capacity, fifo_frames_t threshold)
+ : mCapacity(capacity)
+ , mThreshold(threshold)
+{
+}
+
+FifoControllerBase::~FifoControllerBase() {
+}
+
+fifo_frames_t FifoControllerBase::getFullFramesAvailable() {
+ return (fifo_frames_t) (getWriteCounter() - getReadCounter());
+}
+
+fifo_frames_t FifoControllerBase::getReadIndex() {
+ // % works with non-power of two sizes
+ return (fifo_frames_t) (getReadCounter() % mCapacity);
+}
+
+void FifoControllerBase::advanceReadIndex(fifo_frames_t numFrames) {
+ setReadCounter(getReadCounter() + numFrames);
+}
+
+fifo_frames_t FifoControllerBase::getEmptyFramesAvailable() {
+ return (int32_t)(mThreshold - getFullFramesAvailable());
+}
+
+fifo_frames_t FifoControllerBase::getWriteIndex() {
+ // % works with non-power of two sizes
+ return (fifo_frames_t) (getWriteCounter() % mCapacity);
+}
+
+void FifoControllerBase::advanceWriteIndex(fifo_frames_t numFrames) {
+ setWriteCounter(getWriteCounter() + numFrames);
+}
+
+void FifoControllerBase::setThreshold(fifo_frames_t threshold) {
+ mThreshold = threshold;
+}
diff --git a/media/libaaudio/src/fifo/FifoControllerBase.h b/media/libaaudio/src/fifo/FifoControllerBase.h
new file mode 100644
index 0000000..64af777
--- /dev/null
+++ b/media/libaaudio/src/fifo/FifoControllerBase.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FIFO_FIFO_CONTROLLER_BASE_H
+#define FIFO_FIFO_CONTROLLER_BASE_H
+
+#include <stdint.h>
+
+namespace android {
+
+typedef int64_t fifo_counter_t;
+typedef int32_t fifo_frames_t;
+
+/**
+ * Manage the read/write indices of a circular buffer.
+ *
+ * The caller is responsible for reading and writing the actual data.
+ * Note that the span of available frames may not be contiguous. They
+ * may wrap around from the end to the beginning of the buffer. In that
+ * case the data must be read or written in at least two blocks of frames.
+ *
+ */
+class FifoControllerBase {
+
+public:
+ /**
+ * Constructor for FifoControllerBase
+ * @param capacity Total size of the circular buffer in frames.
+ * @param threshold Number of frames to fill. Must be less than capacity.
+ */
+ FifoControllerBase(fifo_frames_t capacity, fifo_frames_t threshold);
+
+ virtual ~FifoControllerBase();
+
+ // Abstract methods to be implemented in subclasses.
+ /**
+ * @return Counter used by the reader of the FIFO.
+ */
+ virtual fifo_counter_t getReadCounter() = 0;
+
+ /**
+ * This is normally only used internally.
+ * @param count Number of frames that have been read.
+ */
+ virtual void setReadCounter(fifo_counter_t count) = 0;
+
+ /**
+     * @return Counter used by the writer of the FIFO.
+ */
+ virtual fifo_counter_t getWriteCounter() = 0;
+
+ /**
+ * This is normally only used internally.
+     * @param count Number of frames that have been written.
+ */
+ virtual void setWriteCounter(fifo_counter_t count) = 0;
+
+ /**
+ * This may be negative if an unthrottled reader has read beyond the available data.
+ * @return number of valid frames available to read. Never read more than this.
+ */
+ fifo_frames_t getFullFramesAvailable();
+
+ /**
+ * The index in a circular buffer of the next frame to read.
+ */
+ fifo_frames_t getReadIndex();
+
+ /**
+ * @param numFrames number of frames to advance the read index
+ */
+ void advanceReadIndex(fifo_frames_t numFrames);
+
+ /**
+ * @return number of frames that can be written. Never write more than this.
+ */
+ fifo_frames_t getEmptyFramesAvailable();
+
+ /**
+ * The index in a circular buffer of the next frame to write.
+ */
+ fifo_frames_t getWriteIndex();
+
+ /**
+ * @param numFrames number of frames to advance the write index
+ */
+ void advanceWriteIndex(fifo_frames_t numFrames);
+
+ /**
+ * You can request that the buffer not be filled above a maximum
+ * number of frames.
+ * @param threshold effective size of the buffer
+ */
+ void setThreshold(fifo_frames_t threshold);
+
+ fifo_frames_t getThreshold() const {
+ return mThreshold;
+ }
+
+ fifo_frames_t getCapacity() const {
+ return mCapacity;
+ }
+
+
+private:
+ fifo_frames_t mCapacity;
+ fifo_frames_t mThreshold;
+};
+
+} // android
+
+#endif // FIFO_FIFO_CONTROLLER_BASE_H
diff --git a/media/libaaudio/src/fifo/FifoControllerIndirect.h b/media/libaaudio/src/fifo/FifoControllerIndirect.h
new file mode 100644
index 0000000..5832d9c
--- /dev/null
+++ b/media/libaaudio/src/fifo/FifoControllerIndirect.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FIFO_FIFO_CONTROLLER_INDIRECT_H
+#define FIFO_FIFO_CONTROLLER_INDIRECT_H
+
+#include <stdint.h>
+#include <atomic>
+
+#include "FifoControllerBase.h"
+
+namespace android {
+
+/**
+ * A FifoControllerBase with counters external to the class.
+ *
+ * The actual counters may be stored in separate regions of shared memory
+ * with different access rights.
+ */
+class FifoControllerIndirect : public FifoControllerBase {
+
+public:
+ FifoControllerIndirect(fifo_frames_t capacity,
+ fifo_frames_t threshold,
+ fifo_counter_t * readCounterAddress,
+ fifo_counter_t * writeCounterAddress)
+ : FifoControllerBase(capacity, threshold)
+ , mReadCounterAddress((std::atomic<fifo_counter_t> *) readCounterAddress)
+ , mWriteCounterAddress((std::atomic<fifo_counter_t> *) writeCounterAddress)
+ {
+ setReadCounter(0);
+ setWriteCounter(0);
+ }
+ virtual ~FifoControllerIndirect() {};
+
+ // TODO review use of memory barriers, probably incorrect
+ virtual fifo_counter_t getReadCounter() override {
+ return mReadCounterAddress->load(std::memory_order_acquire);
+ }
+
+ virtual void setReadCounter(fifo_counter_t count) override {
+ mReadCounterAddress->store(count, std::memory_order_release);
+ }
+
+ virtual fifo_counter_t getWriteCounter() override {
+ return mWriteCounterAddress->load(std::memory_order_acquire);
+ }
+
+ virtual void setWriteCounter(fifo_counter_t count) override {
+ mWriteCounterAddress->store(count, std::memory_order_release);
+ }
+
+private:
+ std::atomic<fifo_counter_t> * mReadCounterAddress;
+ std::atomic<fifo_counter_t> * mWriteCounterAddress;
+};
+
+} // android
+
+#endif //FIFO_FIFO_CONTROLLER_INDIRECT_H
diff --git a/media/libaaudio/src/fifo/README.md b/media/libaaudio/src/fifo/README.md
new file mode 100644
index 0000000..5d0c471
--- /dev/null
+++ b/media/libaaudio/src/fifo/README.md
@@ -0,0 +1,9 @@
+Simple atomic FIFO for passing data between threads or processes.
+This does not require mutexes.
+
+One thread modifies the readCounter and the other thread modifies the writeCounter.
+
+TODO The internal low-level implementation might be merged in some form with audio_utils fifo
+and/or FMQ [after confirming that requirements are met].
+The higher-level parts related to AAudio use of the FIFO such as API, fds, relative
+location of indices and data buffer, mapping, allocation of memory will probably be kept as-is.
diff --git a/media/libaaudio/src/legacy/AAudioLegacy.h b/media/libaaudio/src/legacy/AAudioLegacy.h
new file mode 100644
index 0000000..2ceb7d4
--- /dev/null
+++ b/media/libaaudio/src/legacy/AAudioLegacy.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_LEGACY_H
+#define AAUDIO_LEGACY_H
+
+#include <stdint.h>
+#include <aaudio/AAudio.h>
+
+/**
+ * Common code for legacy classes.
+ */
+
+/* AudioTrack uses a 32-bit frame counter that can wrap around in about a day. */
+typedef uint32_t aaudio_wrapping_frames_t;
+
+#endif /* AAUDIO_LEGACY_H */
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
new file mode 100644
index 0000000..dd5e3c0
--- /dev/null
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioStreamLegacy"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <utils/String16.h>
+#include <media/AudioTrack.h>
+#include <aaudio/AAudio.h>
+
+#include "core/AudioStream.h"
+#include "legacy/AudioStreamLegacy.h"
+
+using namespace android;
+using namespace aaudio;
+
+AudioStreamLegacy::AudioStreamLegacy()
+ : AudioStream(), mDeviceCallback(new StreamDeviceCallback(this)) {
+}
+
+AudioStreamLegacy::~AudioStreamLegacy() {
+}
+
+// Called from AudioTrack.cpp or AudioRecord.cpp
+static void AudioStreamLegacy_callback(int event, void* userData, void *info) {
+ AudioStreamLegacy *streamLegacy = (AudioStreamLegacy *) userData;
+ streamLegacy->processCallback(event, info);
+}
+
+aaudio_legacy_callback_t AudioStreamLegacy::getLegacyCallback() {
+ return AudioStreamLegacy_callback;
+}
+
+// Implement FixedBlockProcessor
+int32_t AudioStreamLegacy::onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) {
+ int32_t frameCount = numBytes / getBytesPerFrame();
+ // Call using the AAudio callback interface.
+ AAudioStream_dataCallback appCallback = getDataCallbackProc();
+ return (*appCallback)(
+ (AAudioStream *) this,
+ getDataCallbackUserData(),
+ buffer,
+ frameCount);
+}
+
+void AudioStreamLegacy::processCallbackCommon(aaudio_callback_operation_t opcode, void *info) {
+ aaudio_data_callback_result_t callbackResult;
+
+ if (!mCallbackEnabled.load()) {
+ return;
+ }
+
+ switch (opcode) {
+ case AAUDIO_CALLBACK_OPERATION_PROCESS_DATA: {
+ if (getState() != AAUDIO_STREAM_STATE_DISCONNECTED) {
+ // Note that this code assumes an AudioTrack::Buffer is the same as
+ // AudioRecord::Buffer
+ // TODO define our own AudioBuffer and pass it from the subclasses.
+ AudioTrack::Buffer *audioBuffer = static_cast<AudioTrack::Buffer *>(info);
+ if (audioBuffer->frameCount == 0) return;
+
+ // If the caller specified an exact size then use a block size adapter.
+ if (mBlockAdapter != nullptr) {
+ int32_t byteCount = audioBuffer->frameCount * getBytesPerFrame();
+ callbackResult = mBlockAdapter->processVariableBlock(
+ (uint8_t *) audioBuffer->raw, byteCount);
+ } else {
+ // Call using the AAudio callback interface.
+ callbackResult = (*getDataCallbackProc())(
+ (AAudioStream *) this,
+ getDataCallbackUserData(),
+ audioBuffer->raw,
+ audioBuffer->frameCount
+ );
+ }
+ if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
+ audioBuffer->size = audioBuffer->frameCount * getBytesPerFrame();
+ incrementClientFrameCounter(audioBuffer->frameCount);
+ } else {
+ audioBuffer->size = 0;
+ }
+ break;
+ }
+ }
+ /// FALL THROUGH
+
+ // Stream got rerouted so we disconnect.
+ case AAUDIO_CALLBACK_OPERATION_DISCONNECTED: {
+ setState(AAUDIO_STREAM_STATE_DISCONNECTED);
+ ALOGD("processCallbackCommon() stream disconnected");
+ if (getErrorCallbackProc() != nullptr) {
+ (*getErrorCallbackProc())(
+ (AAudioStream *) this,
+ getErrorCallbackUserData(),
+ AAUDIO_ERROR_DISCONNECTED
+ );
+ }
+ mCallbackEnabled.store(false);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+aaudio_result_t AudioStreamLegacy::getBestTimestamp(clockid_t clockId,
+ int64_t *framePosition,
+ int64_t *timeNanoseconds,
+ ExtendedTimestamp *extendedTimestamp) {
+ int timebase;
+ switch (clockId) {
+ case CLOCK_BOOTTIME:
+ timebase = ExtendedTimestamp::TIMEBASE_BOOTTIME;
+ break;
+ case CLOCK_MONOTONIC:
+ timebase = ExtendedTimestamp::TIMEBASE_MONOTONIC;
+ break;
+ default:
+ ALOGE("getTimestamp() - Unrecognized clock type %d", (int) clockId);
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ break;
+ }
+ status_t status = extendedTimestamp->getBestTimestamp(framePosition, timeNanoseconds, timebase);
+ return AAudioConvert_androidToAAudioResult(status);
+}
+
+void AudioStreamLegacy::onAudioDeviceUpdate(audio_port_handle_t deviceId)
+{
+ ALOGD("onAudioDeviceUpdate() deviceId %d", (int)deviceId);
+ if (getDeviceId() != AAUDIO_UNSPECIFIED && getDeviceId() != deviceId &&
+ getState() != AAUDIO_STREAM_STATE_DISCONNECTED) {
+ setState(AAUDIO_STREAM_STATE_DISCONNECTED);
+ // if we have a data callback and the stream is active, send the error callback from
+ // data callback thread when it sees the DISCONNECTED state
+ if (!isDataCallbackActive() && getErrorCallbackProc() != nullptr) {
+ (*getErrorCallbackProc())(
+ (AAudioStream *) this,
+ getErrorCallbackUserData(),
+ AAUDIO_ERROR_DISCONNECTED
+ );
+ }
+ }
+ setDeviceId(deviceId);
+}
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.h b/media/libaaudio/src/legacy/AudioStreamLegacy.h
new file mode 100644
index 0000000..0ded8e1
--- /dev/null
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LEGACY_AUDIO_STREAM_LEGACY_H
+#define LEGACY_AUDIO_STREAM_LEGACY_H
+
+#include <media/AudioTimestamp.h>
+#include <media/AudioSystem.h>
+
+#include <aaudio/AAudio.h>
+
+#include "AudioStream.h"
+#include "AAudioLegacy.h"
+#include "utility/FixedBlockAdapter.h"
+
+namespace aaudio {
+
+
+typedef void (*aaudio_legacy_callback_t)(int event, void* user, void *info);
+
+enum {
+ /**
+ * Request that the callback function should fill the data buffer of an output stream,
+ * or process the data of an input stream.
+ * The address parameter passed to the callback function will point to a data buffer.
+ * For an input stream, the data is read-only.
+ * The value1 parameter will be the number of frames.
+ * The value2 parameter is reserved and will be set to zero.
+ * The callback should return AAUDIO_CALLBACK_RESULT_CONTINUE or AAUDIO_CALLBACK_RESULT_STOP.
+ */
+ AAUDIO_CALLBACK_OPERATION_PROCESS_DATA,
+
+ /**
+ * Inform the callback function that the stream was disconnected.
+ * The address parameter passed to the callback function will be NULL.
+ * The value1 will be an error code or AAUDIO_OK.
+ * The value2 parameter is reserved and will be set to zero.
+ * The callback return value will be ignored.
+ */
+ AAUDIO_CALLBACK_OPERATION_DISCONNECTED,
+};
+typedef int32_t aaudio_callback_operation_t;
+
+
+class AudioStreamLegacy : public AudioStream, public FixedBlockProcessor {
+public:
+ AudioStreamLegacy();
+
+ virtual ~AudioStreamLegacy();
+
+ aaudio_legacy_callback_t getLegacyCallback();
+
+ // This is public so it can be called from the C callback function.
+ // This is called from the AudioTrack/AudioRecord client.
+ virtual void processCallback(int event, void *info) = 0;
+
+ void processCallbackCommon(aaudio_callback_operation_t opcode, void *info);
+
+ // Implement FixedBlockProcessor
+ int32_t onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) override;
+
+ virtual int64_t incrementClientFrameCounter(int32_t frames) = 0;
+
+protected:
+
+ class StreamDeviceCallback : public android::AudioSystem::AudioDeviceCallback
+ {
+ public:
+
+ StreamDeviceCallback(AudioStreamLegacy *parent) : mParent(parent) {}
+ virtual ~StreamDeviceCallback() {}
+
+ virtual void onAudioDeviceUpdate(audio_io_handle_t audioIo __unused,
+ audio_port_handle_t deviceId) {
+ if (mParent != nullptr) {
+ mParent->onAudioDeviceUpdate(deviceId);
+ }
+ }
+
+ AudioStreamLegacy *mParent;
+ };
+
+ aaudio_result_t getBestTimestamp(clockid_t clockId,
+ int64_t *framePosition,
+ int64_t *timeNanoseconds,
+ android::ExtendedTimestamp *extendedTimestamp);
+
+ void onAudioDeviceUpdate(audio_port_handle_t deviceId);
+
+ void onStart() { mCallbackEnabled.store(true); }
+ void onStop() { mCallbackEnabled.store(false); }
+
+ FixedBlockAdapter *mBlockAdapter = nullptr;
+ aaudio_wrapping_frames_t mPositionWhenStarting = 0;
+ int32_t mCallbackBufferSize = 0;
+ const android::sp<StreamDeviceCallback> mDeviceCallback;
+};
+
+} /* namespace aaudio */
+
+#endif //LEGACY_AUDIO_STREAM_LEGACY_H
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
new file mode 100644
index 0000000..156e83d
--- /dev/null
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -0,0 +1,344 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <utils/String16.h>
+#include <media/AudioRecord.h>
+#include <aaudio/AAudio.h>
+
+#include "AudioClock.h"
+#include "legacy/AudioStreamLegacy.h"
+#include "legacy/AudioStreamRecord.h"
+#include "utility/FixedBlockWriter.h"
+
+using namespace android;
+using namespace aaudio;
+
+AudioStreamRecord::AudioStreamRecord()
+ : AudioStreamLegacy()
+ , mFixedBlockWriter(*this)
+{
+}
+
+AudioStreamRecord::~AudioStreamRecord()
+{
+ const aaudio_stream_state_t state = getState();
+ bool bad = !(state == AAUDIO_STREAM_STATE_UNINITIALIZED || state == AAUDIO_STREAM_STATE_CLOSED);
+ ALOGE_IF(bad, "stream not closed, in state %d", state);
+}
+
+aaudio_result_t AudioStreamRecord::open(const AudioStreamBuilder& builder)
+{
+ aaudio_result_t result = AAUDIO_OK;
+
+ result = AudioStream::open(builder);
+ if (result != AAUDIO_OK) {
+ return result;
+ }
+
+ // Try to create an AudioRecord
+
+ // TODO Support UNSPECIFIED in AudioTrack. For now, use stereo if unspecified.
+ int32_t samplesPerFrame = (getSamplesPerFrame() == AAUDIO_UNSPECIFIED)
+ ? 2 : getSamplesPerFrame();
+ audio_channel_mask_t channelMask = audio_channel_in_mask_from_count(samplesPerFrame);
+
+ size_t frameCount = (builder.getBufferCapacity() == AAUDIO_UNSPECIFIED) ? 0
+ : builder.getBufferCapacity();
+
+ // TODO implement an unspecified Android format then use that.
+ audio_format_t format = (getFormat() == AAUDIO_FORMAT_UNSPECIFIED)
+ ? AUDIO_FORMAT_PCM_FLOAT
+ : AAudioConvert_aaudioToAndroidDataFormat(getFormat());
+
+ audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE;
+ aaudio_performance_mode_t perfMode = getPerformanceMode();
+ switch (perfMode) {
+ case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY:
+ flags = (audio_input_flags_t) (AUDIO_INPUT_FLAG_FAST | AUDIO_INPUT_FLAG_RAW);
+ break;
+
+ case AAUDIO_PERFORMANCE_MODE_POWER_SAVING:
+ case AAUDIO_PERFORMANCE_MODE_NONE:
+ default:
+ // No flags.
+ break;
+ }
+
+ uint32_t notificationFrames = 0;
+
+ // Setup the callback if there is one.
+ AudioRecord::callback_t callback = nullptr;
+ void *callbackData = nullptr;
+ AudioRecord::transfer_type streamTransferType = AudioRecord::transfer_type::TRANSFER_SYNC;
+ if (builder.getDataCallbackProc() != nullptr) {
+ streamTransferType = AudioRecord::transfer_type::TRANSFER_CALLBACK;
+ callback = getLegacyCallback();
+ callbackData = this;
+ notificationFrames = builder.getFramesPerDataCallback();
+ }
+ mCallbackBufferSize = builder.getFramesPerDataCallback();
+
+ ALOGD("AudioStreamRecord::open(), request notificationFrames = %u, frameCount = %u",
+ notificationFrames, (uint)frameCount);
+ mAudioRecord = new AudioRecord(
+ mOpPackageName // const String16& opPackageName TODO does not compile
+ );
+ if (getDeviceId() != AAUDIO_UNSPECIFIED) {
+ mAudioRecord->setInputDevice(getDeviceId());
+ }
+ mAudioRecord->set(
+ AUDIO_SOURCE_VOICE_RECOGNITION,
+ getSampleRate(),
+ format,
+ channelMask,
+ frameCount,
+ callback,
+ callbackData,
+ notificationFrames,
+ false /*threadCanCallJava*/,
+ AUDIO_SESSION_ALLOCATE,
+ streamTransferType,
+ flags
+ // int uid = -1,
+ // pid_t pid = -1,
+ // const audio_attributes_t* pAttributes = nullptr
+ );
+
+ // Did we get a valid track?
+ status_t status = mAudioRecord->initCheck();
+ if (status != OK) {
+ close();
+ ALOGE("AudioStreamRecord::open(), initCheck() returned %d", status);
+ return AAudioConvert_androidToAAudioResult(status);
+ }
+
+ // Get the actual rate.
+ setSampleRate(mAudioRecord->getSampleRate());
+ setFormat(AAudioConvert_androidToAAudioDataFormat(mAudioRecord->format()));
+
+ int32_t actualSampleRate = mAudioRecord->getSampleRate();
+ ALOGW_IF(actualSampleRate != getSampleRate(),
+ "AudioStreamRecord::open() sampleRate changed from %d to %d",
+ getSampleRate(), actualSampleRate);
+ setSampleRate(actualSampleRate);
+
+ // We may need to pass the data through a block size adapter to guarantee constant size.
+ if (mCallbackBufferSize != AAUDIO_UNSPECIFIED) {
+ int callbackSizeBytes = getBytesPerFrame() * mCallbackBufferSize;
+ mFixedBlockWriter.open(callbackSizeBytes);
+ mBlockAdapter = &mFixedBlockWriter;
+ } else {
+ mBlockAdapter = nullptr;
+ }
+
+ // Update performance mode based on the actual stream.
+ // For example, if the sample rate does not match native then you won't get a FAST track.
+ audio_input_flags_t actualFlags = mAudioRecord->getFlags();
+ aaudio_performance_mode_t actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
+ // FIXME Some platforms do not advertise RAW mode for low latency inputs.
+ if ((actualFlags & (AUDIO_INPUT_FLAG_FAST))
+ == (AUDIO_INPUT_FLAG_FAST)) {
+ actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
+ }
+ setPerformanceMode(actualPerformanceMode);
+ // Log warning if we did not get what we asked for.
+ ALOGW_IF(actualFlags != flags,
+ "AudioStreamRecord::open() flags changed from 0x%08X to 0x%08X",
+ flags, actualFlags);
+ ALOGW_IF(actualPerformanceMode != perfMode,
+ "AudioStreamRecord::open() perfMode changed from %d to %d",
+ perfMode, actualPerformanceMode);
+
+ setState(AAUDIO_STREAM_STATE_OPEN);
+ setDeviceId(mAudioRecord->getRoutedDeviceId());
+ mAudioRecord->addAudioDeviceCallback(mDeviceCallback);
+
+ return AAUDIO_OK;
+}
+
+aaudio_result_t AudioStreamRecord::close()
+{
+ // TODO add close() or release() to AudioRecord API then call it from here
+ if (getState() != AAUDIO_STREAM_STATE_CLOSED) {
+ mAudioRecord.clear();
+ setState(AAUDIO_STREAM_STATE_CLOSED);
+ }
+ mFixedBlockWriter.close();
+ return AAUDIO_OK;
+}
+
+void AudioStreamRecord::processCallback(int event, void *info) {
+ switch (event) {
+ case AudioRecord::EVENT_MORE_DATA:
+ processCallbackCommon(AAUDIO_CALLBACK_OPERATION_PROCESS_DATA, info);
+ break;
+
+ // Stream got rerouted so we disconnect.
+ case AudioRecord::EVENT_NEW_IAUDIORECORD:
+ processCallbackCommon(AAUDIO_CALLBACK_OPERATION_DISCONNECTED, info);
+ break;
+
+ default:
+ break;
+ }
+ return;
+}
+
+aaudio_result_t AudioStreamRecord::requestStart()
+{
+ if (mAudioRecord.get() == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ // Get current position so we can detect when the track is playing.
+ status_t err = mAudioRecord->getPosition(&mPositionWhenStarting);
+ if (err != OK) {
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+
+ err = mAudioRecord->start();
+ if (err != OK) {
+ return AAudioConvert_androidToAAudioResult(err);
+ } else {
+ onStart();
+ setState(AAUDIO_STREAM_STATE_STARTING);
+ }
+ return AAUDIO_OK;
+}
+
+aaudio_result_t AudioStreamRecord::requestPause()
+{
+ // This does not make sense for an input stream.
+ // There is no real difference between pause() and stop().
+ return AAUDIO_ERROR_UNIMPLEMENTED;
+}
+
+aaudio_result_t AudioStreamRecord::requestFlush() {
+ // This does not make sense for an input stream.
+ return AAUDIO_ERROR_UNIMPLEMENTED;
+}
+
+aaudio_result_t AudioStreamRecord::requestStop() {
+ if (mAudioRecord.get() == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ onStop();
+ setState(AAUDIO_STREAM_STATE_STOPPING);
+ incrementFramesWritten(getFramesRead() - getFramesWritten()); // TODO review
+ mAudioRecord->stop();
+ mFramesRead.reset32();
+ return AAUDIO_OK;
+}
+
+aaudio_result_t AudioStreamRecord::updateStateWhileWaiting()
+{
+ aaudio_result_t result = AAUDIO_OK;
+ aaudio_wrapping_frames_t position;
+ status_t err;
+ switch (getState()) {
+ // TODO add better state visibility to AudioRecord
+ case AAUDIO_STREAM_STATE_STARTING:
+ err = mAudioRecord->getPosition(&position);
+ if (err != OK) {
+ result = AAudioConvert_androidToAAudioResult(err);
+ } else if (position != mPositionWhenStarting) {
+ setState(AAUDIO_STREAM_STATE_STARTED);
+ }
+ break;
+ case AAUDIO_STREAM_STATE_STOPPING:
+ if (mAudioRecord->stopped()) {
+ setState(AAUDIO_STREAM_STATE_STOPPED);
+ }
+ break;
+ default:
+ break;
+ }
+ return result;
+}
+
+aaudio_result_t AudioStreamRecord::read(void *buffer,
+ int32_t numFrames,
+ int64_t timeoutNanoseconds)
+{
+ int32_t bytesPerFrame = getBytesPerFrame();
+ int32_t numBytes;
+ aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerFrame, &numBytes);
+ if (result != AAUDIO_OK) {
+ return result;
+ }
+
+ if (getState() == AAUDIO_STREAM_STATE_DISCONNECTED) {
+ return AAUDIO_ERROR_DISCONNECTED;
+ }
+
+ // TODO add timeout to AudioRecord
+ bool blocking = (timeoutNanoseconds > 0);
+ ssize_t bytesRead = mAudioRecord->read(buffer, numBytes, blocking);
+ if (bytesRead == WOULD_BLOCK) {
+ return 0;
+ } else if (bytesRead < 0) {
+ // in this context, a DEAD_OBJECT is more likely to be a disconnect notification due to
+ // AudioRecord invalidation
+ if (bytesRead == DEAD_OBJECT) {
+ setState(AAUDIO_STREAM_STATE_DISCONNECTED);
+ return AAUDIO_ERROR_DISCONNECTED;
+ }
+ return AAudioConvert_androidToAAudioResult(bytesRead);
+ }
+ int32_t framesRead = (int32_t)(bytesRead / bytesPerFrame);
+ incrementFramesRead(framesRead);
+ return (aaudio_result_t) framesRead;
+}
+
+aaudio_result_t AudioStreamRecord::setBufferSize(int32_t requestedFrames)
+{
+ return getBufferSize();
+}
+
+int32_t AudioStreamRecord::getBufferSize() const
+{
+ return getBufferCapacity(); // TODO implement in AudioRecord?
+}
+
+int32_t AudioStreamRecord::getBufferCapacity() const
+{
+ return static_cast<int32_t>(mAudioRecord->frameCount());
+}
+
+int32_t AudioStreamRecord::getXRunCount() const
+{
+ return 0; // TODO implement when AudioRecord supports it
+}
+
+int32_t AudioStreamRecord::getFramesPerBurst() const
+{
+ return static_cast<int32_t>(mAudioRecord->getNotificationPeriodInFrames());
+}
+
+aaudio_result_t AudioStreamRecord::getTimestamp(clockid_t clockId,
+ int64_t *framePosition,
+ int64_t *timeNanoseconds) {
+ ExtendedTimestamp extendedTimestamp;
+ status_t status = mAudioRecord->getTimestamp(&extendedTimestamp);
+ if (status != NO_ERROR) {
+ return AAudioConvert_androidToAAudioResult(status);
+ }
+ return getBestTimestamp(clockId, framePosition, timeNanoseconds, &extendedTimestamp);
+}
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.h b/media/libaaudio/src/legacy/AudioStreamRecord.h
new file mode 100644
index 0000000..90000fc
--- /dev/null
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LEGACY_AUDIO_STREAM_RECORD_H
+#define LEGACY_AUDIO_STREAM_RECORD_H
+
+#include <media/AudioRecord.h>
+#include <aaudio/AAudio.h>
+
+#include "AudioStreamBuilder.h"
+#include "AudioStream.h"
+#include "AAudioLegacy.h"
+#include "legacy/AudioStreamLegacy.h"
+#include "utility/FixedBlockWriter.h"
+
+namespace aaudio {
+
+/**
+ * Internal stream that uses the legacy AudioRecord path.
+ */
+class AudioStreamRecord : public AudioStreamLegacy {
+public:
+ AudioStreamRecord();
+
+ virtual ~AudioStreamRecord();
+
+ aaudio_result_t open(const AudioStreamBuilder & builder) override;
+ aaudio_result_t close() override;
+
+ aaudio_result_t requestStart() override;
+ aaudio_result_t requestPause() override;
+ aaudio_result_t requestFlush() override;
+ aaudio_result_t requestStop() override;
+
+ virtual aaudio_result_t getTimestamp(clockid_t clockId,
+ int64_t *framePosition,
+ int64_t *timeNanoseconds) override;
+
+ aaudio_result_t read(void *buffer,
+ int32_t numFrames,
+ int64_t timeoutNanoseconds) override;
+
+ aaudio_result_t setBufferSize(int32_t requestedFrames) override;
+
+ int32_t getBufferSize() const override;
+
+ int32_t getBufferCapacity() const override;
+
+ int32_t getXRunCount() const override;
+
+ int32_t getFramesPerBurst() const override;
+
+ aaudio_result_t updateStateWhileWaiting() override;
+
+ aaudio_direction_t getDirection() const override {
+ return AAUDIO_DIRECTION_INPUT;
+ }
+
+ // This is public so it can be called from the C callback function.
+ void processCallback(int event, void *info) override;
+
+ int64_t incrementClientFrameCounter(int32_t frames) override {
+ return incrementFramesRead(frames);
+ }
+
+private:
+ android::sp<android::AudioRecord> mAudioRecord;
+ // adapts between variable sized blocks and fixed size blocks
+ FixedBlockWriter mFixedBlockWriter;
+
+ // TODO add 64-bit position reporting to AudioRecord and use it.
+ android::String16 mOpPackageName;
+};
+
+} /* namespace aaudio */
+
+#endif /* LEGACY_AUDIO_STREAM_RECORD_H */
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
new file mode 100644
index 0000000..7e39908
--- /dev/null
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -0,0 +1,438 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <media/AudioTrack.h>
+
+#include <aaudio/AAudio.h>
+#include "utility/AudioClock.h"
+#include "legacy/AudioStreamLegacy.h"
+#include "legacy/AudioStreamTrack.h"
+#include "utility/FixedBlockReader.h"
+
+using namespace android;
+using namespace aaudio;
+
+// Arbitrary and somewhat generous number of bursts.
+#define DEFAULT_BURSTS_PER_BUFFER_CAPACITY 8
+
+/*
+ * Create a stream that uses the AudioTrack.
+ */
+AudioStreamTrack::AudioStreamTrack()
+ : AudioStreamLegacy()
+ , mFixedBlockReader(*this)
+{
+}
+
+AudioStreamTrack::~AudioStreamTrack()
+{
+ const aaudio_stream_state_t state = getState();
+ bool bad = !(state == AAUDIO_STREAM_STATE_UNINITIALIZED || state == AAUDIO_STREAM_STATE_CLOSED);
+ ALOGE_IF(bad, "stream not closed, in state %d", state);
+}
+
+aaudio_result_t AudioStreamTrack::open(const AudioStreamBuilder& builder)
+{
+ aaudio_result_t result = AAUDIO_OK;
+
+ result = AudioStream::open(builder);
+ if (result != OK) {
+ return result;
+ }
+
+ // Try to create an AudioTrack
+ // Use stereo if unspecified.
+ int32_t samplesPerFrame = (getSamplesPerFrame() == AAUDIO_UNSPECIFIED)
+ ? 2 : getSamplesPerFrame();
+ audio_channel_mask_t channelMask = audio_channel_out_mask_from_count(samplesPerFrame);
+
+ audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE;
+ aaudio_performance_mode_t perfMode = getPerformanceMode();
+ switch(perfMode) {
+ case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY:
+ // Bypass the normal mixer and go straight to the FAST mixer.
+ flags = (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_RAW);
+ break;
+
+ case AAUDIO_PERFORMANCE_MODE_POWER_SAVING:
+ // This uses a mixer that wakes up less often than the FAST mixer.
+ flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
+ break;
+
+ case AAUDIO_PERFORMANCE_MODE_NONE:
+ default:
+ // No flags. Use a normal mixer in front of the FAST mixer.
+ break;
+ }
+
+ size_t frameCount = (size_t)builder.getBufferCapacity();
+
+ int32_t notificationFrames = 0;
+
+ audio_format_t format = (getFormat() == AAUDIO_FORMAT_UNSPECIFIED)
+ ? AUDIO_FORMAT_PCM_FLOAT
+ : AAudioConvert_aaudioToAndroidDataFormat(getFormat());
+
+ // Setup the callback if there is one.
+ AudioTrack::callback_t callback = nullptr;
+ void *callbackData = nullptr;
+ // Note that TRANSFER_SYNC does not allow FAST track
+ AudioTrack::transfer_type streamTransferType = AudioTrack::transfer_type::TRANSFER_SYNC;
+ if (builder.getDataCallbackProc() != nullptr) {
+ streamTransferType = AudioTrack::transfer_type::TRANSFER_CALLBACK;
+ callback = getLegacyCallback();
+ callbackData = this;
+
+ // If the total buffer size is unspecified then base the size on the burst size.
+ if (frameCount == 0
+ && ((flags & AUDIO_OUTPUT_FLAG_FAST) != 0)) {
+ // Take advantage of a special trick that allows us to create a buffer
+ // that is some multiple of the burst size.
+ notificationFrames = 0 - DEFAULT_BURSTS_PER_BUFFER_CAPACITY;
+ } else {
+ notificationFrames = builder.getFramesPerDataCallback();
+ }
+ }
+ mCallbackBufferSize = builder.getFramesPerDataCallback();
+
+ ALOGD("AudioStreamTrack::open(), request notificationFrames = %d, frameCount = %u",
+ notificationFrames, (uint)frameCount);
+ mAudioTrack = new AudioTrack();
+ if (getDeviceId() != AAUDIO_UNSPECIFIED) {
+ mAudioTrack->setOutputDevice(getDeviceId());
+ }
+ mAudioTrack->set(
+ (audio_stream_type_t) AUDIO_STREAM_MUSIC,
+ getSampleRate(),
+ format,
+ channelMask,
+ frameCount,
+ flags,
+ callback,
+ callbackData,
+ notificationFrames,
+ 0 /*sharedBuffer*/,
+ false /*threadCanCallJava*/,
+ AUDIO_SESSION_ALLOCATE,
+ streamTransferType
+ );
+
+ // Did we get a valid track?
+ status_t status = mAudioTrack->initCheck();
+ if (status != NO_ERROR) {
+ close();
+ ALOGE("AudioStreamTrack::open(), initCheck() returned %d", status);
+ return AAudioConvert_androidToAAudioResult(status);
+ }
+
+ //TrackPlayerBase init
+ init(mAudioTrack.get(), PLAYER_TYPE_AAUDIO, AUDIO_USAGE_MEDIA);
+
+ // Get the actual values from the AudioTrack.
+ setSamplesPerFrame(mAudioTrack->channelCount());
+ aaudio_format_t aaudioFormat =
+ AAudioConvert_androidToAAudioDataFormat(mAudioTrack->format());
+ setFormat(aaudioFormat);
+
+ int32_t actualSampleRate = mAudioTrack->getSampleRate();
+ ALOGW_IF(actualSampleRate != getSampleRate(),
+ "AudioStreamTrack::open() sampleRate changed from %d to %d",
+ getSampleRate(), actualSampleRate);
+ setSampleRate(actualSampleRate);
+
+ // We may need to pass the data through a block size adapter to guarantee constant size.
+ if (mCallbackBufferSize != AAUDIO_UNSPECIFIED) {
+ int callbackSizeBytes = getBytesPerFrame() * mCallbackBufferSize;
+ mFixedBlockReader.open(callbackSizeBytes);
+ mBlockAdapter = &mFixedBlockReader;
+ } else {
+ mBlockAdapter = nullptr;
+ }
+
+ setState(AAUDIO_STREAM_STATE_OPEN);
+ setDeviceId(mAudioTrack->getRoutedDeviceId());
+ mAudioTrack->addAudioDeviceCallback(mDeviceCallback);
+
+ // Update performance mode based on the actual stream.
+ // For example, if the sample rate is not allowed then you won't get a FAST track.
+ audio_output_flags_t actualFlags = mAudioTrack->getFlags();
+ aaudio_performance_mode_t actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
+ if ((actualFlags & (AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_RAW))
+ == (AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_RAW)) {
+ actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
+
+ } else if ((actualFlags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) {
+ actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_POWER_SAVING;
+ }
+ setPerformanceMode(actualPerformanceMode);
+ // Log warning if we did not get what we asked for.
+ ALOGW_IF(actualFlags != flags,
+ "AudioStreamTrack::open() flags changed from 0x%08X to 0x%08X",
+ flags, actualFlags);
+ ALOGW_IF(actualPerformanceMode != perfMode,
+ "AudioStreamTrack::open() perfMode changed from %d to %d",
+ perfMode, actualPerformanceMode);
+
+ return AAUDIO_OK;
+}
+
+aaudio_result_t AudioStreamTrack::close()
+{
+ if (getState() != AAUDIO_STREAM_STATE_CLOSED) {
+ destroy();
+ setState(AAUDIO_STREAM_STATE_CLOSED);
+ }
+ mFixedBlockReader.close();
+ return AAUDIO_OK;
+}
+
+void AudioStreamTrack::processCallback(int event, void *info) {
+
+ switch (event) {
+ case AudioTrack::EVENT_MORE_DATA:
+ processCallbackCommon(AAUDIO_CALLBACK_OPERATION_PROCESS_DATA, info);
+ break;
+
+ // Stream got rerouted so we disconnect.
+ case AudioTrack::EVENT_NEW_IAUDIOTRACK:
+ processCallbackCommon(AAUDIO_CALLBACK_OPERATION_DISCONNECTED, info);
+ break;
+
+ default:
+ break;
+ }
+ return;
+}
+
+aaudio_result_t AudioStreamTrack::requestStart()
+{
+ std::lock_guard<std::mutex> lock(mStreamMutex);
+
+ if (mAudioTrack.get() == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ // Get current position so we can detect when the track is playing.
+ status_t err = mAudioTrack->getPosition(&mPositionWhenStarting);
+ if (err != OK) {
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+
+ err = startWithStatus();
+ if (err != OK) {
+ return AAudioConvert_androidToAAudioResult(err);
+ } else {
+ onStart();
+ setState(AAUDIO_STREAM_STATE_STARTING);
+ }
+ return AAUDIO_OK;
+}
+
+aaudio_result_t AudioStreamTrack::requestPause()
+{
+ std::lock_guard<std::mutex> lock(mStreamMutex);
+
+ if (mAudioTrack.get() == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ } else if (getState() != AAUDIO_STREAM_STATE_STARTING
+ && getState() != AAUDIO_STREAM_STATE_STARTED) {
+ ALOGE("requestPause(), called when state is %s",
+ AAudio_convertStreamStateToText(getState()));
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ onStop();
+ setState(AAUDIO_STREAM_STATE_PAUSING);
+ pause();
+ status_t err = mAudioTrack->getPosition(&mPositionWhenPausing);
+ if (err != OK) {
+ return AAudioConvert_androidToAAudioResult(err);
+ }
+ return AAUDIO_OK;
+}
+
+aaudio_result_t AudioStreamTrack::requestFlush() {
+ std::lock_guard<std::mutex> lock(mStreamMutex);
+
+ if (mAudioTrack.get() == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ } else if (getState() != AAUDIO_STREAM_STATE_PAUSED) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ setState(AAUDIO_STREAM_STATE_FLUSHING);
+ incrementFramesRead(getFramesWritten() - getFramesRead());
+ mAudioTrack->flush();
+ mFramesWritten.reset32();
+ return AAUDIO_OK;
+}
+
+aaudio_result_t AudioStreamTrack::requestStop() {
+ std::lock_guard<std::mutex> lock(mStreamMutex);
+
+ if (mAudioTrack.get() == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ onStop();
+ setState(AAUDIO_STREAM_STATE_STOPPING);
+ incrementFramesRead(getFramesWritten() - getFramesRead()); // TODO review
+ stop();
+ mFramesWritten.reset32();
+ return AAUDIO_OK;
+}
+
+aaudio_result_t AudioStreamTrack::updateStateWhileWaiting()
+{
+ status_t err;
+ aaudio_wrapping_frames_t position;
+ switch (getState()) {
+ // TODO add better state visibility to AudioTrack
+ case AAUDIO_STREAM_STATE_STARTING:
+ if (mAudioTrack->hasStarted()) {
+ setState(AAUDIO_STREAM_STATE_STARTED);
+ }
+ break;
+ case AAUDIO_STREAM_STATE_PAUSING:
+ if (mAudioTrack->stopped()) {
+ err = mAudioTrack->getPosition(&position);
+ if (err != OK) {
+ return AAudioConvert_androidToAAudioResult(err);
+ } else if (position == mPositionWhenPausing) {
+ // Has stream really stopped advancing?
+ setState(AAUDIO_STREAM_STATE_PAUSED);
+ }
+ mPositionWhenPausing = position;
+ }
+ break;
+ case AAUDIO_STREAM_STATE_FLUSHING:
+ {
+ err = mAudioTrack->getPosition(&position);
+ if (err != OK) {
+ return AAudioConvert_androidToAAudioResult(err);
+ } else if (position == 0) {
+ // TODO Advance frames read to match written.
+ setState(AAUDIO_STREAM_STATE_FLUSHED);
+ }
+ }
+ break;
+ case AAUDIO_STREAM_STATE_STOPPING:
+ if (mAudioTrack->stopped()) {
+ setState(AAUDIO_STREAM_STATE_STOPPED);
+ }
+ break;
+ default:
+ break;
+ }
+ return AAUDIO_OK;
+}
+
+aaudio_result_t AudioStreamTrack::write(const void *buffer,
+ int32_t numFrames,
+ int64_t timeoutNanoseconds)
+{
+ int32_t bytesPerFrame = getBytesPerFrame();
+ int32_t numBytes;
+ aaudio_result_t result = AAudioConvert_framesToBytes(numFrames, bytesPerFrame, &numBytes);
+ if (result != AAUDIO_OK) {
+ return result;
+ }
+
+ if (getState() == AAUDIO_STREAM_STATE_DISCONNECTED) {
+ return AAUDIO_ERROR_DISCONNECTED;
+ }
+
+ // TODO add timeout to AudioTrack
+ bool blocking = timeoutNanoseconds > 0;
+ ssize_t bytesWritten = mAudioTrack->write(buffer, numBytes, blocking);
+ if (bytesWritten == WOULD_BLOCK) {
+ return 0;
+ } else if (bytesWritten < 0) {
+ ALOGE("invalid write, returned %d", (int)bytesWritten);
+ // in this context, a DEAD_OBJECT is more likely to be a disconnect notification due to
+ // AudioTrack invalidation
+ if (bytesWritten == DEAD_OBJECT) {
+ setState(AAUDIO_STREAM_STATE_DISCONNECTED);
+ return AAUDIO_ERROR_DISCONNECTED;
+ }
+ return AAudioConvert_androidToAAudioResult(bytesWritten);
+ }
+ int32_t framesWritten = (int32_t)(bytesWritten / bytesPerFrame);
+ incrementFramesWritten(framesWritten);
+ return framesWritten;
+}
+
+aaudio_result_t AudioStreamTrack::setBufferSize(int32_t requestedFrames)
+{
+ ssize_t result = mAudioTrack->setBufferSizeInFrames(requestedFrames);
+ if (result < 0) {
+ return AAudioConvert_androidToAAudioResult(result);
+ } else {
+ return result;
+ }
+}
+
+int32_t AudioStreamTrack::getBufferSize() const
+{
+ return static_cast<int32_t>(mAudioTrack->getBufferSizeInFrames());
+}
+
+int32_t AudioStreamTrack::getBufferCapacity() const
+{
+ return static_cast<int32_t>(mAudioTrack->frameCount());
+}
+
+int32_t AudioStreamTrack::getXRunCount() const
+{
+ return static_cast<int32_t>(mAudioTrack->getUnderrunCount());
+}
+
+int32_t AudioStreamTrack::getFramesPerBurst() const
+{
+ return static_cast<int32_t>(mAudioTrack->getNotificationPeriodInFrames());
+}
+
+int64_t AudioStreamTrack::getFramesRead() {
+ aaudio_wrapping_frames_t position;
+ status_t result;
+ switch (getState()) {
+ case AAUDIO_STREAM_STATE_STARTING:
+ case AAUDIO_STREAM_STATE_STARTED:
+ case AAUDIO_STREAM_STATE_STOPPING:
+ case AAUDIO_STREAM_STATE_PAUSING:
+ case AAUDIO_STREAM_STATE_PAUSED:
+ result = mAudioTrack->getPosition(&position);
+ if (result == OK) {
+ mFramesRead.update32(position);
+ }
+ break;
+ default:
+ break;
+ }
+ return AudioStream::getFramesRead();
+}
+
+aaudio_result_t AudioStreamTrack::getTimestamp(clockid_t clockId,
+ int64_t *framePosition,
+ int64_t *timeNanoseconds) {
+ ExtendedTimestamp extendedTimestamp;
+ status_t status = mAudioTrack->getTimestamp(&extendedTimestamp);
+ if (status != NO_ERROR) {
+ return AAudioConvert_androidToAAudioResult(status);
+ }
+ return getBestTimestamp(clockId, framePosition, timeNanoseconds, &extendedTimestamp);
+}
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.h b/media/libaaudio/src/legacy/AudioStreamTrack.h
new file mode 100644
index 0000000..ff429ea
--- /dev/null
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LEGACY_AUDIO_STREAM_TRACK_H
+#define LEGACY_AUDIO_STREAM_TRACK_H
+
+#include <math.h>
+#include <media/TrackPlayerBase.h>
+#include <aaudio/AAudio.h>
+
+#include "AudioStreamBuilder.h"
+#include "AudioStream.h"
+#include "legacy/AAudioLegacy.h"
+#include "legacy/AudioStreamLegacy.h"
+#include "utility/FixedBlockReader.h"
+
+namespace aaudio {
+
+/**
+ * Internal stream that uses the legacy AudioTrack path.
+ */
+class AudioStreamTrack : public AudioStreamLegacy, public android::TrackPlayerBase {
+public:
+ AudioStreamTrack();
+
+ virtual ~AudioStreamTrack();
+
+
+ aaudio_result_t open(const AudioStreamBuilder & builder) override;
+ aaudio_result_t close() override;
+
+ aaudio_result_t requestStart() override;
+ aaudio_result_t requestPause() override;
+ aaudio_result_t requestFlush() override;
+ aaudio_result_t requestStop() override;
+
+ aaudio_result_t getTimestamp(clockid_t clockId,
+ int64_t *framePosition,
+ int64_t *timeNanoseconds) override;
+
+ aaudio_result_t write(const void *buffer,
+ int32_t numFrames,
+ int64_t timeoutNanoseconds) override;
+
+ aaudio_result_t setBufferSize(int32_t requestedFrames) override;
+ int32_t getBufferSize() const override;
+ int32_t getBufferCapacity() const override;
+ int32_t getFramesPerBurst()const override;
+ int32_t getXRunCount() const override;
+
+ int64_t getFramesRead() override;
+
+ aaudio_direction_t getDirection() const override {
+ return AAUDIO_DIRECTION_OUTPUT;
+ }
+
+ aaudio_result_t updateStateWhileWaiting() override;
+
+ // This is public so it can be called from the C callback function.
+ void processCallback(int event, void *info) override;
+
+ int64_t incrementClientFrameCounter(int32_t frames) override {
+ return incrementFramesWritten(frames);
+ }
+
+private:
+
+ // adapts between variable sized blocks and fixed size blocks
+ FixedBlockReader mFixedBlockReader;
+
+    // TODO add 64-bit position reporting to AudioTrack and use it.
+ aaudio_wrapping_frames_t mPositionWhenStarting = 0;
+ aaudio_wrapping_frames_t mPositionWhenPausing = 0;
+};
+
+} /* namespace aaudio */
+
+#endif /* LEGACY_AUDIO_STREAM_TRACK_H */
diff --git a/media/libaaudio/src/legacy/README.md b/media/libaaudio/src/legacy/README.md
new file mode 100644
index 0000000..8805915
--- /dev/null
+++ b/media/libaaudio/src/legacy/README.md
@@ -0,0 +1,2 @@
+The legacy folder contains the classes that implement AAudio AudioStream on top of
+Android AudioTrack and AudioRecord.
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
new file mode 100644
index 0000000..164784d
--- /dev/null
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -0,0 +1,373 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <cutils/properties.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <utils/Errors.h>
+
+#include "aaudio/AAudio.h"
+#include <aaudio/AAudioTesting.h>
+
+#include "utility/AAudioUtilities.h"
+
+using namespace android;
+
+// This is 3 dB, (10^(3/20)), to match the maximum headroom in AudioTrack for float data.
+// It is designed to allow occasional transient peaks.
+#define MAX_HEADROOM (1.41253754f)
+#define MIN_HEADROOM (0 - MAX_HEADROOM)
+
+int32_t AAudioConvert_formatToSizeInBytes(aaudio_format_t format) {
+ int32_t size = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ switch (format) {
+ case AAUDIO_FORMAT_PCM_I16:
+ size = sizeof(int16_t);
+ break;
+ case AAUDIO_FORMAT_PCM_FLOAT:
+ size = sizeof(float);
+ break;
+ default:
+ break;
+ }
+ return size;
+}
+
+
+// TODO expose and call clamp16_from_float function in primitives.h
+static inline int16_t clamp16_from_float(float f) {
+ /* Offset is used to expand the valid range of [-1.0, 1.0) into the 16 lsbs of the
+ * floating point significand. The normal shift is 3<<22, but the -15 offset
+ * is used to multiply by 32768.
+ */
+ static const float offset = (float)(3 << (22 - 15));
+ /* zero = (0x10f << 22) = 0x43c00000 (not directly used) */
+ static const int32_t limneg = (0x10f << 22) /*zero*/ - 32768; /* 0x43bf8000 */
+ static const int32_t limpos = (0x10f << 22) /*zero*/ + 32767; /* 0x43c07fff */
+
+ union {
+ float f;
+ int32_t i;
+ } u;
+
+ u.f = f + offset; /* recenter valid range */
+ /* Now the valid range is represented as integers between [limneg, limpos].
+ * Clamp using the fact that float representation (as an integer) is an ordered set.
+ */
+ if (u.i < limneg)
+ u.i = -32768;
+ else if (u.i > limpos)
+ u.i = 32767;
+ return u.i; /* Return lower 16 bits, the part of interest in the significand. */
+}
+
+// Same but without clipping.
+// Convert -1.0f to +1.0f to -32768 to +32767
+static inline int16_t floatToInt16(float f) {
+ static const float offset = (float)(3 << (22 - 15));
+ union {
+ float f;
+ int32_t i;
+ } u;
+ u.f = f + offset; /* recenter valid range */
+ return u.i; /* Return lower 16 bits, the part of interest in the significand. */
+}
+
+static float clipAndClampFloatToPcm16(float sample, float scaler) {
+ // Clip to valid range of a float sample to prevent excessive volume.
+ if (sample > MAX_HEADROOM) sample = MAX_HEADROOM;
+ else if (sample < MIN_HEADROOM) sample = MIN_HEADROOM;
+
+ // Scale and convert to a short.
+ float fval = sample * scaler;
+ return clamp16_from_float(fval);
+}
+
+void AAudioConvert_floatToPcm16(const float *source,
+ int16_t *destination,
+ int32_t numSamples,
+ float amplitude) {
+ float scaler = amplitude;
+ for (int i = 0; i < numSamples; i++) {
+ float sample = *source++;
+ *destination++ = clipAndClampFloatToPcm16(sample, scaler);
+ }
+}
+
+void AAudioConvert_floatToPcm16(const float *source,
+ int16_t *destination,
+ int32_t numFrames,
+ int32_t samplesPerFrame,
+ float amplitude1,
+ float amplitude2) {
+ float scaler = amplitude1;
+ // divide by numFrames so that we almost reach amplitude2
+ float delta = (amplitude2 - amplitude1) / numFrames;
+ for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
+ for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
+ float sample = *source++;
+ *destination++ = clipAndClampFloatToPcm16(sample, scaler);
+ }
+ scaler += delta;
+ }
+}
+
+#define SHORT_SCALE 32768
+
+void AAudioConvert_pcm16ToFloat(const int16_t *source,
+ float *destination,
+ int32_t numSamples,
+ float amplitude) {
+ float scaler = amplitude / SHORT_SCALE;
+ for (int i = 0; i < numSamples; i++) {
+ destination[i] = source[i] * scaler;
+ }
+}
+
+// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
+void AAudioConvert_pcm16ToFloat(const int16_t *source,
+ float *destination,
+ int32_t numFrames,
+ int32_t samplesPerFrame,
+ float amplitude1,
+ float amplitude2) {
+ float scaler = amplitude1 / SHORT_SCALE;
+ float delta = (amplitude2 - amplitude1) / (SHORT_SCALE * (float) numFrames);
+ for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
+ for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
+ *destination++ = *source++ * scaler;
+ }
+ scaler += delta;
+ }
+}
+
+// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
+void AAudio_linearRamp(const float *source,
+ float *destination,
+ int32_t numFrames,
+ int32_t samplesPerFrame,
+ float amplitude1,
+ float amplitude2) {
+ float scaler = amplitude1;
+ float delta = (amplitude2 - amplitude1) / numFrames;
+ for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
+ for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
+ float sample = *source++;
+
+ // Clip to valid range of a float sample to prevent excessive volume.
+ if (sample > MAX_HEADROOM) sample = MAX_HEADROOM;
+ else if (sample < MIN_HEADROOM) sample = MIN_HEADROOM;
+
+ *destination++ = sample * scaler;
+ }
+ scaler += delta;
+ }
+}
+
+// This code assumes amplitude1 and amplitude2 are between 0.0 and 1.0
+void AAudio_linearRamp(const int16_t *source,
+ int16_t *destination,
+ int32_t numFrames,
+ int32_t samplesPerFrame,
+ float amplitude1,
+ float amplitude2) {
+ float scaler = amplitude1 / SHORT_SCALE;
+ float delta = (amplitude2 - amplitude1) / (SHORT_SCALE * (float) numFrames);
+ for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
+ for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
+ // No need to clip because int16_t range is inherently limited.
+ float sample = *source++ * scaler;
+ *destination++ = floatToInt16(sample);
+ }
+ scaler += delta;
+ }
+}
+
+status_t AAudioConvert_aaudioToAndroidStatus(aaudio_result_t result) {
+ // This covers the case for AAUDIO_OK and for positive results.
+ if (result >= 0) {
+ return result;
+ }
+ status_t status;
+ switch (result) {
+ case AAUDIO_ERROR_DISCONNECTED:
+ case AAUDIO_ERROR_INVALID_HANDLE:
+ status = DEAD_OBJECT;
+ break;
+ case AAUDIO_ERROR_INVALID_STATE:
+ status = INVALID_OPERATION;
+ break;
+ case AAUDIO_ERROR_INVALID_RATE:
+ case AAUDIO_ERROR_INVALID_FORMAT:
+ case AAUDIO_ERROR_ILLEGAL_ARGUMENT:
+ case AAUDIO_ERROR_OUT_OF_RANGE:
+ status = BAD_VALUE;
+ break;
+ case AAUDIO_ERROR_WOULD_BLOCK:
+ status = WOULD_BLOCK;
+ break;
+ case AAUDIO_ERROR_NULL:
+ status = UNEXPECTED_NULL;
+ break;
+ // TODO translate these result codes
+ case AAUDIO_ERROR_INTERNAL:
+ case AAUDIO_ERROR_UNIMPLEMENTED:
+ case AAUDIO_ERROR_UNAVAILABLE:
+ case AAUDIO_ERROR_NO_FREE_HANDLES:
+ case AAUDIO_ERROR_NO_MEMORY:
+ case AAUDIO_ERROR_TIMEOUT:
+ case AAUDIO_ERROR_NO_SERVICE:
+ default:
+ status = UNKNOWN_ERROR;
+ break;
+ }
+ return status;
+}
+
+aaudio_result_t AAudioConvert_androidToAAudioResult(status_t status) {
+ // This covers the case for OK and for positive result.
+ if (status >= 0) {
+ return status;
+ }
+ aaudio_result_t result;
+ switch (status) {
+ case BAD_TYPE:
+ result = AAUDIO_ERROR_INVALID_HANDLE;
+ break;
+ case DEAD_OBJECT:
+ result = AAUDIO_ERROR_NO_SERVICE;
+ break;
+ case INVALID_OPERATION:
+ result = AAUDIO_ERROR_INVALID_STATE;
+ break;
+ case UNEXPECTED_NULL:
+ result = AAUDIO_ERROR_NULL;
+ break;
+ case BAD_VALUE:
+ result = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ break;
+ case WOULD_BLOCK:
+ result = AAUDIO_ERROR_WOULD_BLOCK;
+ break;
+ default:
+ result = AAUDIO_ERROR_INTERNAL;
+ break;
+ }
+ return result;
+}
+
+audio_format_t AAudioConvert_aaudioToAndroidDataFormat(aaudio_format_t aaudioFormat) {
+ audio_format_t androidFormat;
+ switch (aaudioFormat) {
+ case AAUDIO_FORMAT_PCM_I16:
+ androidFormat = AUDIO_FORMAT_PCM_16_BIT;
+ break;
+ case AAUDIO_FORMAT_PCM_FLOAT:
+ androidFormat = AUDIO_FORMAT_PCM_FLOAT;
+ break;
+ default:
+ androidFormat = AUDIO_FORMAT_DEFAULT;
+ ALOGE("AAudioConvert_aaudioToAndroidDataFormat 0x%08X unrecognized", aaudioFormat);
+ break;
+ }
+ return androidFormat;
+}
+
+aaudio_format_t AAudioConvert_androidToAAudioDataFormat(audio_format_t androidFormat) {
+ aaudio_format_t aaudioFormat = AAUDIO_FORMAT_INVALID;
+ switch (androidFormat) {
+ case AUDIO_FORMAT_PCM_16_BIT:
+ aaudioFormat = AAUDIO_FORMAT_PCM_I16;
+ break;
+ case AUDIO_FORMAT_PCM_FLOAT:
+ aaudioFormat = AAUDIO_FORMAT_PCM_FLOAT;
+ break;
+ default:
+ aaudioFormat = AAUDIO_FORMAT_INVALID;
+ ALOGE("AAudioConvert_androidToAAudioDataFormat 0x%08X unrecognized", androidFormat);
+ break;
+ }
+ return aaudioFormat;
+}
+
+int32_t AAudioConvert_framesToBytes(int32_t numFrames,
+ int32_t bytesPerFrame,
+ int32_t *sizeInBytes) {
+ // TODO implement more elegantly
+ const int32_t maxChannels = 256; // ridiculously large
+ const int32_t maxBytesPerFrame = maxChannels * sizeof(float);
+ // Prevent overflow by limiting multiplicands.
+ if (bytesPerFrame > maxBytesPerFrame || numFrames > (0x3FFFFFFF / maxBytesPerFrame)) {
+ ALOGE("size overflow, numFrames = %d, frameSize = %zd", numFrames, bytesPerFrame);
+ return AAUDIO_ERROR_OUT_OF_RANGE;
+ }
+ *sizeInBytes = numFrames * bytesPerFrame;
+ return AAUDIO_OK;
+}
+
+static int32_t AAudioProperty_getMMapProperty(const char *propName,
+ int32_t defaultValue,
+ const char * caller) {
+ int32_t prop = property_get_int32(propName, defaultValue);
+ switch (prop) {
+ case AAUDIO_UNSPECIFIED:
+ case AAUDIO_POLICY_NEVER:
+ case AAUDIO_POLICY_ALWAYS:
+ case AAUDIO_POLICY_AUTO:
+ break;
+ default:
+ ALOGE("%s: invalid = %d", caller, prop);
+ prop = defaultValue;
+ break;
+ }
+ return prop;
+}
+
+int32_t AAudioProperty_getMMapPolicy() {
+ return AAudioProperty_getMMapProperty(AAUDIO_PROP_MMAP_POLICY,
+ AAUDIO_UNSPECIFIED, __func__);
+}
+
+int32_t AAudioProperty_getMMapExclusivePolicy() {
+ return AAudioProperty_getMMapProperty(AAUDIO_PROP_MMAP_EXCLUSIVE_POLICY,
+ AAUDIO_UNSPECIFIED, __func__);
+}
+
+int32_t AAudioProperty_getMixerBursts() {
+ const int32_t defaultBursts = 2; // arbitrary, use 2 for double buffered
+ const int32_t maxBursts = 1024; // arbitrary
+ int32_t prop = property_get_int32(AAUDIO_PROP_MIXER_BURSTS, defaultBursts);
+ if (prop < 1 || prop > maxBursts) {
+ ALOGE("AAudioProperty_getMixerBursts: invalid = %d", prop);
+ prop = defaultBursts;
+ }
+ return prop;
+}
+
+int32_t AAudioProperty_getHardwareBurstMinMicros() {
+ const int32_t defaultMicros = 1000; // arbitrary
+ const int32_t maxMicros = 1000 * 1000; // arbitrary
+ int32_t prop = property_get_int32(AAUDIO_PROP_HW_BURST_MIN_USEC, defaultMicros);
+ if (prop < 1 || prop > maxMicros) {
+ ALOGE("AAudioProperty_getHardwareBurstMinMicros: invalid = %d", prop);
+ prop = defaultMicros;
+ }
+ return prop;
+}
diff --git a/media/libaaudio/src/utility/AAudioUtilities.h b/media/libaaudio/src/utility/AAudioUtilities.h
new file mode 100644
index 0000000..f894bc0
--- /dev/null
+++ b/media/libaaudio/src/utility/AAudioUtilities.h
@@ -0,0 +1,214 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef UTILITY_AAUDIO_UTILITIES_H
+#define UTILITY_AAUDIO_UTILITIES_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <utils/Errors.h>
+#include <hardware/audio.h>
+
+#include "aaudio/AAudio.h"
+
+/**
+ * Convert an AAudio result into the closest matching Android status.
+ */
+android::status_t AAudioConvert_aaudioToAndroidStatus(aaudio_result_t result);
+
+/**
+ * Convert an Android status into the closest matching AAudio result.
+ */
+aaudio_result_t AAudioConvert_androidToAAudioResult(android::status_t status);
+
+/**
+ * Convert an array of floats to an array of int16_t.
+ *
+ * @param source
+ * @param destination
+ * @param numSamples number of values in the array
+ * @param amplitude level between 0.0 and 1.0
+ */
+void AAudioConvert_floatToPcm16(const float *source,
+ int16_t *destination,
+ int32_t numSamples,
+ float amplitude);
+
+/**
+ * Convert floats to int16_t and scale by a linear ramp.
+ *
+ * The ramp stops just short of reaching amplitude2 so that the next
+ * ramp can start at amplitude2 without causing a discontinuity.
+ *
+ * @param source
+ * @param destination
+ * @param numFrames
+ * @param samplesPerFrame AKA number of channels
+ * @param amplitude1 level at start of ramp, between 0.0 and 1.0
+ * @param amplitude2 level past end of ramp, between 0.0 and 1.0
+ */
+void AAudioConvert_floatToPcm16(const float *source,
+ int16_t *destination,
+ int32_t numFrames,
+ int32_t samplesPerFrame,
+ float amplitude1,
+ float amplitude2);
+
+/**
+ * Convert int16_t array to float array ranging from -1.0 to +1.0.
+ * @param source
+ * @param destination
+ * @param numSamples
+ */
+//void AAudioConvert_pcm16ToFloat(const int16_t *source, int32_t numSamples,
+// float *destination);
+
+/**
+ *
+ * Convert int16_t array to float array ranging from +/- amplitude.
+ * @param source
+ * @param destination
+ * @param numSamples
+ * @param amplitude
+ */
+void AAudioConvert_pcm16ToFloat(const int16_t *source,
+ float *destination,
+ int32_t numSamples,
+ float amplitude);
+
+/**
+ * Convert int16_t to float and scale by a linear ramp.
+ *
+ * The ramp stops just short of reaching amplitude2 so that the next
+ * ramp can start at amplitude2 without causing a discontinuity.
+ *
+ * @param source
+ * @param destination
+ * @param numFrames
+ * @param samplesPerFrame AKA number of channels
+ * @param amplitude1 level at start of ramp, between 0.0 and 1.0
+ * @param amplitude2 level past end of ramp, between 0.0 and 1.0
+ */
+void AAudioConvert_pcm16ToFloat(const int16_t *source,
+ float *destination,
+ int32_t numFrames,
+ int32_t samplesPerFrame,
+ float amplitude1,
+ float amplitude2);
+
+/**
+ * Scale floats by a linear ramp.
+ *
+ * The ramp stops just short of reaching amplitude2 so that the next
+ * ramp can start at amplitude2 without causing a discontinuity.
+ *
+ * @param source
+ * @param destination
+ * @param numFrames
+ * @param samplesPerFrame
+ * @param amplitude1
+ * @param amplitude2
+ */
+void AAudio_linearRamp(const float *source,
+ float *destination,
+ int32_t numFrames,
+ int32_t samplesPerFrame,
+ float amplitude1,
+ float amplitude2);
+
+/**
+ * Scale int16_t's by a linear ramp.
+ *
+ * The ramp stops just short of reaching amplitude2 so that the next
+ * ramp can start at amplitude2 without causing a discontinuity.
+ *
+ * @param source
+ * @param destination
+ * @param numFrames
+ * @param samplesPerFrame
+ * @param amplitude1
+ * @param amplitude2
+ */
+void AAudio_linearRamp(const int16_t *source,
+ int16_t *destination,
+ int32_t numFrames,
+ int32_t samplesPerFrame,
+ float amplitude1,
+ float amplitude2);
+
+/**
+ * Calculate the number of bytes and prevent numeric overflow.
+ * @param numFrames frame count
+ * @param bytesPerFrame size of a frame in bytes
+ * @param sizeInBytes total size in bytes
+ * @return AAUDIO_OK or negative error, eg. AAUDIO_ERROR_OUT_OF_RANGE
+ */
+int32_t AAudioConvert_framesToBytes(int32_t numFrames,
+ int32_t bytesPerFrame,
+ int32_t *sizeInBytes);
+
+audio_format_t AAudioConvert_aaudioToAndroidDataFormat(aaudio_format_t aaudio_format);
+
+aaudio_format_t AAudioConvert_androidToAAudioDataFormat(audio_format_t format);
+
+/**
+ * @return the size of a sample of the given format in bytes or AAUDIO_ERROR_ILLEGAL_ARGUMENT
+ */
+int32_t AAudioConvert_formatToSizeInBytes(aaudio_format_t format);
+
+
+// Note that this code may be replaced by Settings or by some other system configuration tool.
+
+#define AAUDIO_PROP_MMAP_POLICY "aaudio.mmap_policy"
+
+/**
+ * Read system property.
+ * @return AAUDIO_UNSPECIFIED, AAUDIO_POLICY_NEVER or AAUDIO_POLICY_AUTO or AAUDIO_POLICY_ALWAYS
+ */
+int32_t AAudioProperty_getMMapPolicy();
+
+#define AAUDIO_PROP_MMAP_EXCLUSIVE_POLICY "aaudio.mmap_exclusive_policy"
+
+/**
+ * Read system property.
+ * @return AAUDIO_UNSPECIFIED, AAUDIO_POLICY_NEVER or AAUDIO_POLICY_AUTO or AAUDIO_POLICY_ALWAYS
+ */
+int32_t AAudioProperty_getMMapExclusivePolicy();
+
+#define AAUDIO_PROP_MIXER_BURSTS "aaudio.mixer_bursts"
+
+/**
+ * Read system property.
+ * @return number of bursts per mixer cycle
+ */
+int32_t AAudioProperty_getMixerBursts();
+
+#define AAUDIO_PROP_HW_BURST_MIN_USEC "aaudio.hw_burst_min_usec"
+
+/**
+ * Read system property.
+ * This is handy in case the DMA is bursting too quickly for the CPU to keep up.
+ * For example, there may be a DMA burst every 100 usec but you only
+ * want to feed the MMAP buffer every 2000 usec.
+ *
+ * This will affect the framesPerBurst for an MMAP stream.
+ *
+ * @return minimum number of microseconds for a MMAP HW burst
+ */
+int32_t AAudioProperty_getHardwareBurstMinMicros();
+
+#endif //UTILITY_AAUDIO_UTILITIES_H
diff --git a/media/libaaudio/src/utility/AudioClock.h b/media/libaaudio/src/utility/AudioClock.h
new file mode 100644
index 0000000..43b71b0
--- /dev/null
+++ b/media/libaaudio/src/utility/AudioClock.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef UTILITY_AUDIO_CLOCK_H
+#define UTILITY_AUDIO_CLOCK_H
+
+#include <stdint.h>
+#include <time.h>
+
+#include <aaudio/AAudio.h>
+
+// Time conversion constants.
+#define AAUDIO_NANOS_PER_MICROSECOND ((int64_t)1000)
+#define AAUDIO_NANOS_PER_MILLISECOND (AAUDIO_NANOS_PER_MICROSECOND * 1000)
+#define AAUDIO_MILLIS_PER_SECOND 1000
+#define AAUDIO_NANOS_PER_SECOND (AAUDIO_NANOS_PER_MILLISECOND * AAUDIO_MILLIS_PER_SECOND)
+
+class AudioClock {
+public:
+ static int64_t getNanoseconds(clockid_t clockId = CLOCK_MONOTONIC) {
+ struct timespec time;
+ int result = clock_gettime(clockId, &time);
+ if (result < 0) {
+ return -errno;
+ }
+ return (time.tv_sec * AAUDIO_NANOS_PER_SECOND) + time.tv_nsec;
+ }
+
+ /**
+ * Sleep until the specified absolute time.
+     * Return immediately with AAUDIO_ERROR_ILLEGAL_ARGUMENT if a non-positive
+     * nanoTime is specified.
+ *
+ * @param nanoTime time to wake up
+ * @param clockId CLOCK_MONOTONIC is default
+ * @return 0, a negative error, or 1 if the call is interrupted by a signal handler (EINTR)
+ */
+ static int sleepUntilNanoTime(int64_t nanoTime,
+ clockid_t clockId = CLOCK_MONOTONIC) {
+ if (nanoTime > 0) {
+ struct timespec time;
+ time.tv_sec = nanoTime / AAUDIO_NANOS_PER_SECOND;
+ // Calculate the fractional nanoseconds. Avoids expensive % operation.
+ time.tv_nsec = nanoTime - (time.tv_sec * AAUDIO_NANOS_PER_SECOND);
+ int err = clock_nanosleep(clockId, TIMER_ABSTIME, &time, nullptr);
+ switch (err) {
+ case EINTR:
+ return 1;
+ case 0:
+ return 0;
+ default:
+ // Subtract because clock_nanosleep() returns a positive error number!
+ return 0 - err;
+ }
+ } else {
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ }
+ }
+
+ /**
+ * Sleep for the specified number of relative nanoseconds in real-time.
+     * Return immediately with 0 if a non-positive number of nanoseconds is specified.
+ *
+ * @param nanoseconds time to sleep
+ * @param clockId CLOCK_MONOTONIC is default
+ * @return 0, a negative error, or 1 if the call is interrupted by a signal handler (EINTR)
+ */
+ static int sleepForNanos(int64_t nanoseconds, clockid_t clockId = CLOCK_MONOTONIC) {
+ if (nanoseconds > 0) {
+ struct timespec time;
+ time.tv_sec = nanoseconds / AAUDIO_NANOS_PER_SECOND;
+ // Calculate the fractional nanoseconds. Avoids expensive % operation.
+ time.tv_nsec = nanoseconds - (time.tv_sec * AAUDIO_NANOS_PER_SECOND);
+ const int flags = 0; // documented as relative sleep
+ int err = clock_nanosleep(clockId, flags, &time, nullptr);
+ switch (err) {
+ case EINTR:
+ return 1;
+ case 0:
+ return 0;
+ default:
+ // Subtract because clock_nanosleep() returns a positive error number!
+ return 0 - err;
+ }
+ }
+ return 0;
+ }
+};
+
+
+#endif // UTILITY_AUDIO_CLOCK_H
diff --git a/media/libaaudio/src/utility/FixedBlockAdapter.cpp b/media/libaaudio/src/utility/FixedBlockAdapter.cpp
new file mode 100644
index 0000000..63495f0
--- /dev/null
+++ b/media/libaaudio/src/utility/FixedBlockAdapter.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include "FixedBlockAdapter.h"
+
+FixedBlockAdapter::~FixedBlockAdapter() {
+ close();
+}
+
+int32_t FixedBlockAdapter::open(int32_t bytesPerFixedBlock)
+{
+ mSize = bytesPerFixedBlock;
+ mStorage = new uint8_t[bytesPerFixedBlock];
+ mPosition = 0;
+ return 0;
+}
+
+int32_t FixedBlockAdapter::close()
+{
+ delete[] mStorage;
+ mStorage = nullptr;
+ mSize = 0;
+ mPosition = 0;
+ return 0;
+}
diff --git a/media/libaaudio/src/utility/FixedBlockAdapter.h b/media/libaaudio/src/utility/FixedBlockAdapter.h
new file mode 100644
index 0000000..7008b25
--- /dev/null
+++ b/media/libaaudio/src/utility/FixedBlockAdapter.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_FIXED_BLOCK_ADAPTER_H
+#define AAUDIO_FIXED_BLOCK_ADAPTER_H
+
+#include <stdio.h>
+
+/**
+ * Interface for a class that needs fixed-size blocks.
+ */
+class FixedBlockProcessor {
+public:
+ virtual ~FixedBlockProcessor() = default;
+ virtual int32_t onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) = 0;
+};
+
+/**
+ * Base class for a variable-to-fixed-size block adapter.
+ */
+class FixedBlockAdapter
+{
+public:
+ FixedBlockAdapter(FixedBlockProcessor &fixedBlockProcessor)
+ : mFixedBlockProcessor(fixedBlockProcessor) {}
+
+ virtual ~FixedBlockAdapter();
+
+ /**
+ * Allocate internal resources needed for buffering data.
+ */
+ virtual int32_t open(int32_t bytesPerFixedBlock);
+
+ /**
+ * Note that if the fixed-sized blocks must be aligned, then the variable-sized blocks
+ * must have the same alignment.
+ * For example, if the fixed-size blocks must be a multiple of 8, then the variable-sized
+ * blocks must also be a multiple of 8.
+ *
+ * @param buffer
+ * @param numBytes
+ * @return zero if OK or a non-zero code
+ */
+ virtual int32_t processVariableBlock(uint8_t *buffer, int32_t numBytes) = 0;
+
+ /**
+ * Free internal resources.
+ */
+ int32_t close();
+
+protected:
+ FixedBlockProcessor &mFixedBlockProcessor;
+ uint8_t *mStorage = nullptr; // Store data here while assembling buffers.
+ int32_t mSize = 0; // Size in bytes of the fixed size buffer.
+ int32_t mPosition = 0; // Offset of the last byte read or written.
+};
+
+#endif /* AAUDIO_FIXED_BLOCK_ADAPTER_H */
diff --git a/media/libaaudio/src/utility/FixedBlockReader.cpp b/media/libaaudio/src/utility/FixedBlockReader.cpp
new file mode 100644
index 0000000..21ea70e
--- /dev/null
+++ b/media/libaaudio/src/utility/FixedBlockReader.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+#include <memory.h>
+
+#include "FixedBlockAdapter.h"
+
+#include "FixedBlockReader.h"
+
+
+FixedBlockReader::FixedBlockReader(FixedBlockProcessor &fixedBlockProcessor)
+ : FixedBlockAdapter(fixedBlockProcessor) {
+ mPosition = mSize;
+}
+
+int32_t FixedBlockReader::open(int32_t bytesPerFixedBlock) {
+ int32_t result = FixedBlockAdapter::open(bytesPerFixedBlock);
+ mPosition = mSize; // Indicate no data in storage.
+ return result;
+}
+
+int32_t FixedBlockReader::readFromStorage(uint8_t *buffer, int32_t numBytes) {
+ int32_t bytesToRead = numBytes;
+ int32_t dataAvailable = mSize - mPosition;
+ if (bytesToRead > dataAvailable) {
+ bytesToRead = dataAvailable;
+ }
+ memcpy(buffer, mStorage + mPosition, bytesToRead);
+ mPosition += bytesToRead;
+ return bytesToRead;
+}
+
+int32_t FixedBlockReader::processVariableBlock(uint8_t *buffer, int32_t numBytes) {
+ int32_t result = 0;
+ int32_t bytesLeft = numBytes;
+ while(bytesLeft > 0 && result == 0) {
+ if (mPosition < mSize) {
+ // Use up bytes currently in storage.
+ int32_t bytesRead = readFromStorage(buffer, bytesLeft);
+ buffer += bytesRead;
+ bytesLeft -= bytesRead;
+ } else if (bytesLeft >= mSize) {
+ // Read through if enough for a complete block.
+ result = mFixedBlockProcessor.onProcessFixedBlock(buffer, mSize);
+ buffer += mSize;
+ bytesLeft -= mSize;
+ } else {
+ // Just need a partial block so we have to use storage.
+ result = mFixedBlockProcessor.onProcessFixedBlock(mStorage, mSize);
+ mPosition = 0;
+ }
+ }
+ return result;
+}
+
diff --git a/media/libaaudio/src/utility/FixedBlockReader.h b/media/libaaudio/src/utility/FixedBlockReader.h
new file mode 100644
index 0000000..128dd52
--- /dev/null
+++ b/media/libaaudio/src/utility/FixedBlockReader.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_FIXED_BLOCK_READER_H
+#define AAUDIO_FIXED_BLOCK_READER_H
+
+#include <stdint.h>
+
+#include "FixedBlockAdapter.h"
+
+/**
+ * Read from a fixed-size block to a variable sized block.
+ *
+ * This can be used to convert a pull data flow from fixed sized buffers to variable sized buffers.
+ * An example would be an audio output callback that reads from the app.
+ */
+class FixedBlockReader : public FixedBlockAdapter
+{
+public:
+ FixedBlockReader(FixedBlockProcessor &fixedBlockProcessor);
+
+ virtual ~FixedBlockReader() = default;
+
+ int32_t open(int32_t bytesPerFixedBlock) override;
+
+ int32_t readFromStorage(uint8_t *buffer, int32_t numBytes);
+
+ /**
+ * Read into a variable sized block.
+ */
+ int32_t processVariableBlock(uint8_t *buffer, int32_t numBytes) override;
+};
+
+
+#endif /* AAUDIO_FIXED_BLOCK_READER_H */
diff --git a/media/libaaudio/src/utility/FixedBlockWriter.cpp b/media/libaaudio/src/utility/FixedBlockWriter.cpp
new file mode 100644
index 0000000..2ce8046
--- /dev/null
+++ b/media/libaaudio/src/utility/FixedBlockWriter.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+#include <memory.h>
+
+#include "FixedBlockAdapter.h"
+#include "FixedBlockWriter.h"
+
+FixedBlockWriter::FixedBlockWriter(FixedBlockProcessor &fixedBlockProcessor)
+ : FixedBlockAdapter(fixedBlockProcessor) {}
+
+
+int32_t FixedBlockWriter::writeToStorage(uint8_t *buffer, int32_t numBytes) {
+ int32_t bytesToStore = numBytes;
+ int32_t roomAvailable = mSize - mPosition;
+ if (bytesToStore > roomAvailable) {
+ bytesToStore = roomAvailable;
+ }
+ memcpy(mStorage + mPosition, buffer, bytesToStore);
+ mPosition += bytesToStore;
+ return bytesToStore;
+}
+
+int32_t FixedBlockWriter::processVariableBlock(uint8_t *buffer, int32_t numBytes) {
+ int32_t result = 0;
+ int32_t bytesLeft = numBytes;
+
+ // If we already have data in storage then add to it.
+ if (mPosition > 0) {
+ int32_t bytesWritten = writeToStorage(buffer, bytesLeft);
+ buffer += bytesWritten;
+ bytesLeft -= bytesWritten;
+ // If storage full then flush it out
+ if (mPosition == mSize) {
+ result = mFixedBlockProcessor.onProcessFixedBlock(mStorage, mSize);
+ mPosition = 0;
+ }
+ }
+
+ // Write through if enough for a complete block.
+ while(bytesLeft > mSize && result == 0) {
+ result = mFixedBlockProcessor.onProcessFixedBlock(buffer, mSize);
+ buffer += mSize;
+ bytesLeft -= mSize;
+ }
+
+ // Save any remaining partial block for next time.
+ if (bytesLeft > 0) {
+ writeToStorage(buffer, bytesLeft);
+ }
+
+ return result;
+}
diff --git a/media/libaaudio/src/utility/FixedBlockWriter.h b/media/libaaudio/src/utility/FixedBlockWriter.h
new file mode 100644
index 0000000..f1d917c
--- /dev/null
+++ b/media/libaaudio/src/utility/FixedBlockWriter.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_FIXED_BLOCK_WRITER_H
+#define AAUDIO_FIXED_BLOCK_WRITER_H
+
+#include <stdint.h>
+
+#include "FixedBlockAdapter.h"
+
+/**
+ * This can be used to convert a push data flow from variable sized buffers to fixed sized buffers.
+ * An example would be an audio input callback.
+ */
+class FixedBlockWriter : public FixedBlockAdapter
+{
+public:
+ FixedBlockWriter(FixedBlockProcessor &fixedBlockProcessor);
+
+ virtual ~FixedBlockWriter() = default;
+
+ int32_t writeToStorage(uint8_t *buffer, int32_t numBytes);
+
+ /**
+ * Write from a variable sized block.
+ */
+ int32_t processVariableBlock(uint8_t *buffer, int32_t numBytes) override;
+};
+
+#endif /* AAUDIO_FIXED_BLOCK_WRITER_H */
diff --git a/media/libaaudio/src/utility/HandleTracker.cpp b/media/libaaudio/src/utility/HandleTracker.cpp
new file mode 100644
index 0000000..f957234
--- /dev/null
+++ b/media/libaaudio/src/utility/HandleTracker.cpp
@@ -0,0 +1,231 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <assert.h>
+#include <new>
+#include <stdint.h>
+#include <utils/Mutex.h>
+
+#include <aaudio/AAudio.h>
+#include "HandleTracker.h"
+
+using android::Mutex;
+
+// Handle format is: tgggiiii
+// where each letter is 4 bits, t=type, g=generation, i=index
+
+#define TYPE_SIZE 4
+#define GENERATION_SIZE 12
+#define INDEX_SIZE 16
+
+#define GENERATION_INVALID 0
+#define GENERATION_SHIFT INDEX_SIZE
+
+#define TYPE_MASK ((1 << TYPE_SIZE) - 1)
+#define GENERATION_MASK ((1 << GENERATION_SIZE) - 1)
+#define INDEX_MASK ((1 << INDEX_SIZE) - 1)
+
+#define SLOT_UNAVAILABLE (-1)
+
+// Error if handle is negative so type is limited to bottom half.
+#define HANDLE_INVALID_TYPE TYPE_MASK
+
+static_assert(HANDLE_TRACKER_MAX_TYPES == (1 << (TYPE_SIZE - 1)),
+ "Mismatch between header and cpp.");
+static_assert(HANDLE_TRACKER_MAX_HANDLES == (1 << (INDEX_SIZE)),
+ "Mismatch between header and cpp.");
+
+HandleTracker::HandleTracker(uint32_t maxHandles)
+ : mMaxHandleCount(maxHandles)
+ , mHandleHeaders(nullptr)
+{
+ assert(maxHandles <= HANDLE_TRACKER_MAX_HANDLES);
+ // Allocate arrays to hold addresses and validation info.
+ mHandleAddresses = (handle_tracker_address_t *)
+ new(std::nothrow) handle_tracker_address_t[maxHandles];
+ if (mHandleAddresses != nullptr) {
+ mHandleHeaders = new(std::nothrow) handle_tracker_header_t[maxHandles];
+
+ if (mHandleHeaders != nullptr) {
+ handle_tracker_header_t initialHeader = buildHeader(0, 1);
+ // Initialize linked list of free nodes. nullptr terminated.
+ for (uint32_t i = 0; i < (maxHandles - 1); i++) {
+ mHandleAddresses[i] = &mHandleAddresses[i + 1]; // point to next node
+ mHandleHeaders[i] = initialHeader;
+ }
+ mNextFreeAddress = &mHandleAddresses[0];
+ mHandleAddresses[maxHandles - 1] = nullptr;
+ mHandleHeaders[maxHandles - 1] = 0;
+ } else {
+ delete[] mHandleAddresses; // so the class appears uninitialized
+ mHandleAddresses = nullptr;
+ }
+ }
+}
+
+HandleTracker::~HandleTracker()
+{
+ Mutex::Autolock _l(mLock);
+ delete[] mHandleAddresses;
+ delete[] mHandleHeaders;
+ mHandleAddresses = nullptr;
+}
+
+bool HandleTracker::isInitialized() const {
+ return mHandleAddresses != nullptr;
+}
+
+handle_tracker_slot_t HandleTracker::allocateSlot_l() {
+ void **allocated = mNextFreeAddress;
+ if (allocated == nullptr) {
+ return SLOT_UNAVAILABLE;
+ }
+ // Remove this slot from the head of the linked list.
+ mNextFreeAddress = (void **) *allocated;
+ return (allocated - mHandleAddresses);
+}
+
+handle_tracker_generation_t HandleTracker::nextGeneration_l(handle_tracker_slot_t index) {
+ handle_tracker_generation_t generation = (mHandleHeaders[index] + 1) & GENERATION_MASK;
+ // Avoid generation zero so that 0x0 is not a valid handle.
+ if (generation == GENERATION_INVALID) {
+ generation++;
+ }
+ return generation;
+}
+
+aaudio_handle_t HandleTracker::put(handle_tracker_type_t type, void *address)
+{
+ if (type < 0 || type >= HANDLE_TRACKER_MAX_TYPES) {
+ return static_cast<aaudio_handle_t>(AAUDIO_ERROR_OUT_OF_RANGE);
+ }
+ if (!isInitialized()) {
+ return static_cast<aaudio_handle_t>(AAUDIO_ERROR_NO_MEMORY);
+ }
+
+ Mutex::Autolock _l(mLock);
+
+ // Find an empty slot.
+ handle_tracker_slot_t index = allocateSlot_l();
+ if (index == SLOT_UNAVAILABLE) {
+ ALOGE("HandleTracker::put() no room for more handles");
+ return static_cast<aaudio_handle_t>(AAUDIO_ERROR_NO_FREE_HANDLES);
+ }
+
+ // Cycle the generation counter so stale handles can be detected.
+ handle_tracker_generation_t generation = nextGeneration_l(index); // reads header table
+ handle_tracker_header_t inputHeader = buildHeader(type, generation);
+
+ // These two writes may need to be observed by other threads or cores during get().
+ mHandleHeaders[index] = inputHeader;
+ mHandleAddresses[index] = address;
+ // TODO use store release to enforce memory order with get()
+
+ // Generate a handle.
+ aaudio_handle_t handle = buildHandle(inputHeader, index);
+
+ ALOGV("HandleTracker::put(%p) returns 0x%08x", address, handle);
+ return handle;
+}
+
+handle_tracker_slot_t HandleTracker::handleToIndex(handle_tracker_type_t type,
+ aaudio_handle_t handle) const
+{
+ // Validate the handle.
+ handle_tracker_slot_t index = extractIndex(handle);
+ if (index >= mMaxHandleCount) {
+ ALOGE("HandleTracker::handleToIndex() invalid handle = 0x%08X", handle);
+ return static_cast<aaudio_handle_t>(AAUDIO_ERROR_INVALID_HANDLE);
+ }
+ handle_tracker_generation_t handleGeneration = extractGeneration(handle);
+ handle_tracker_header_t inputHeader = buildHeader(type, handleGeneration);
+ // We do not need to synchronize this access to mHandleHeaders because it is constant for
+ // the lifetime of the handle.
+ if (inputHeader != mHandleHeaders[index]) {
+ ALOGE("HandleTracker::handleToIndex() inputHeader = 0x%08x != mHandleHeaders[%d] = 0x%08x",
+ inputHeader, index, mHandleHeaders[index]);
+ return static_cast<aaudio_handle_t>(AAUDIO_ERROR_INVALID_HANDLE);
+ }
+ return index;
+}
+
+handle_tracker_address_t HandleTracker::get(handle_tracker_type_t type, aaudio_handle_t handle) const
+{
+ if (!isInitialized()) {
+ return nullptr;
+ }
+ handle_tracker_slot_t index = handleToIndex(type, handle);
+ if (index >= 0) {
+ // We do not need to synchronize this access to mHandleHeaders because this slot
+ // is allocated and, therefore, not part of the linked list of free slots.
+ return mHandleAddresses[index];
+ } else {
+ return nullptr;
+ }
+}
+
+handle_tracker_address_t HandleTracker::remove(handle_tracker_type_t type, aaudio_handle_t handle) {
+ if (!isInitialized()) {
+ return nullptr;
+ }
+
+ Mutex::Autolock _l(mLock);
+
+ handle_tracker_slot_t index = handleToIndex(type,handle);
+ if (index >= 0) {
+ handle_tracker_address_t address = mHandleAddresses[index];
+
+ // Invalidate the header type but preserve the generation count.
+ handle_tracker_generation_t generation = mHandleHeaders[index] & GENERATION_MASK;
+ handle_tracker_header_t inputHeader = buildHeader(
+ (handle_tracker_type_t) HANDLE_INVALID_TYPE, generation);
+ mHandleHeaders[index] = inputHeader;
+
+ // Add this slot to the head of the linked list.
+ mHandleAddresses[index] = mNextFreeAddress;
+ mNextFreeAddress = (handle_tracker_address_t *) &mHandleAddresses[index];
+ return address;
+ } else {
+ return nullptr;
+ }
+}
+
+aaudio_handle_t HandleTracker::buildHandle(handle_tracker_header_t typeGeneration,
+ handle_tracker_slot_t index) {
+ return (aaudio_handle_t)((typeGeneration << GENERATION_SHIFT) | (index & INDEX_MASK));
+}
+
+handle_tracker_header_t HandleTracker::buildHeader(handle_tracker_type_t type,
+ handle_tracker_generation_t generation)
+{
+ return (handle_tracker_header_t) (((type & TYPE_MASK) << GENERATION_SIZE)
+ | (generation & GENERATION_MASK));
+}
+
+handle_tracker_slot_t HandleTracker::extractIndex(aaudio_handle_t handle)
+{
+ return handle & INDEX_MASK;
+}
+
+handle_tracker_generation_t HandleTracker::extractGeneration(aaudio_handle_t handle)
+{
+ return (handle >> GENERATION_SHIFT) & GENERATION_MASK;
+}
diff --git a/media/libaaudio/src/utility/HandleTracker.h b/media/libaaudio/src/utility/HandleTracker.h
new file mode 100644
index 0000000..23a73ed
--- /dev/null
+++ b/media/libaaudio/src/utility/HandleTracker.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef UTILITY_HANDLE_TRACKER_H
+#define UTILITY_HANDLE_TRACKER_H
+
+#include <stdint.h>
+#include <utils/Mutex.h>
+
+typedef int32_t aaudio_handle_t;
+typedef int32_t handle_tracker_type_t; // what kind of handle
+typedef int32_t handle_tracker_slot_t; // index in allocation table
+typedef int32_t handle_tracker_generation_t; // incremented when slot used
+typedef uint16_t handle_tracker_header_t; // combines type and generation
+typedef void *handle_tracker_address_t; // address of something that is stored here
+
+#define HANDLE_TRACKER_MAX_TYPES (1 << 3)
+#define HANDLE_TRACKER_MAX_HANDLES (1 << 16)
+
+/**
+ * Represent Objects using an integer handle that can be used with Java.
+ * This also makes the 'C' ABI more robust.
+ *
+ * Note that this should only be called from a single thread.
+ * If you call it from more than one thread then you need to use your own mutex.
+ */
+class HandleTracker {
+
+public:
+ /**
+ * @param maxHandles cannot exceed HANDLE_TRACKER_MAX_HANDLES
+ */
+ HandleTracker(uint32_t maxHandles = 256);
+ virtual ~HandleTracker();
+
+ /**
+     * Don't use if this returns false.
+ * @return true if the internal allocation succeeded
+ */
+ bool isInitialized() const;
+
+ /**
+ * Store a pointer and return a handle that can be used to retrieve the pointer.
+ *
+ * It is safe to call put() or remove() from multiple threads.
+ *
+ * @param expectedType the type of the object to be tracked
+ * @param address pointer to be converted to a handle
+ * @return a valid handle or a negative error
+ */
+ aaudio_handle_t put(handle_tracker_type_t expectedType, handle_tracker_address_t address);
+
+ /**
+ * Get the original pointer associated with the handle.
+ * The handle will be validated to prevent stale handles from being reused.
+ * Note that the validation is designed to prevent common coding errors and not
+ * to prevent deliberate hacking.
+ *
+     * @param expectedType should match the type we passed to put()
+ * @param handle to be converted to a pointer
+ * @return address associated with handle or nullptr
+ */
+ handle_tracker_address_t get(handle_tracker_type_t expectedType, aaudio_handle_t handle) const;
+
+ /**
+ * Free up the storage associated with the handle.
+ * Subsequent attempts to use the handle will fail.
+ *
+ * Do NOT remove() a handle while get() is being called for the same handle from another thread.
+ *
+     * @param expectedType should match the type we passed to put()
+ * @param handle to be removed from tracking
+ * @return address associated with handle or nullptr if not found
+ */
+ handle_tracker_address_t remove(handle_tracker_type_t expectedType, aaudio_handle_t handle);
+
+private:
+ const int32_t mMaxHandleCount; // size of array
+ // This address is const after initialization.
+ handle_tracker_address_t * mHandleAddresses; // address of objects or a free linked list node
+ // This address is const after initialization.
+ handle_tracker_header_t * mHandleHeaders; // combination of type and generation
+ // head of the linked list of free nodes in mHandleAddresses
+ handle_tracker_address_t * mNextFreeAddress;
+
+ // This Mutex protects the linked list of free nodes.
+ // The list is managed using mHandleAddresses and mNextFreeAddress.
+ // The data in mHandleHeaders is only changed by put() and remove().
+ android::Mutex mLock;
+
+ /**
+ * Pull slot off of a list of empty slots.
+ * @return index or a negative error
+ */
+ handle_tracker_slot_t allocateSlot_l();
+
+ /**
+ * Increment the generation for the slot, avoiding zero.
+ */
+ handle_tracker_generation_t nextGeneration_l(handle_tracker_slot_t index);
+
+ /**
+ * Validate the handle and return the corresponding index.
+ * @return slot index or a negative error
+ */
+    handle_tracker_slot_t handleToIndex(handle_tracker_type_t type, aaudio_handle_t handle) const;
+
+ /**
+ * Construct a handle from a header and an index.
+ * @param header combination of a type and a generation
+ * @param index slot index returned from allocateSlot
+ * @return handle or a negative error
+ */
+ static aaudio_handle_t buildHandle(handle_tracker_header_t header, handle_tracker_slot_t index);
+
+ /**
+ * Combine a type and a generation field into a header.
+ */
+ static handle_tracker_header_t buildHeader(handle_tracker_type_t type,
+ handle_tracker_generation_t generation);
+
+ /**
+ * Extract the index from a handle.
+ * Does not validate the handle.
+ * @return index associated with a handle
+ */
+ static handle_tracker_slot_t extractIndex(aaudio_handle_t handle);
+
+ /**
+ * Extract the generation from a handle.
+ * Does not validate the handle.
+ * @return generation associated with a handle
+ */
+ static handle_tracker_generation_t extractGeneration(aaudio_handle_t handle);
+
+};
+
+#endif //UTILITY_HANDLE_TRACKER_H
diff --git a/media/libaaudio/src/utility/LinearRamp.cpp b/media/libaaudio/src/utility/LinearRamp.cpp
new file mode 100644
index 0000000..1714bbf
--- /dev/null
+++ b/media/libaaudio/src/utility/LinearRamp.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LinearRamp.h"
+
+bool LinearRamp::isRamping() {
+ float target = mTarget.load();
+ if (target != mLevelTo) {
+ // Update target. Continue from previous level.
+ mLevelTo = target;
+ mRemaining = mLengthInFrames;
+ return true;
+ } else {
+ return mRemaining > 0;
+ }
+}
+
+bool LinearRamp::nextSegment(int32_t frames, float *levelFrom, float *levelTo) {
+ bool ramping = isRamping();
+ *levelFrom = mLevelFrom;
+ if (ramping) {
+ float level;
+ if (frames >= mRemaining) {
+ level = mLevelTo;
+ mRemaining = 0;
+ } else {
+ // Interpolate to a point along the full ramp.
+ level = mLevelFrom + (frames * (mLevelTo - mLevelFrom) / mRemaining);
+ mRemaining -= frames;
+ }
+ mLevelFrom = level; // for next ramp
+ *levelTo = level;
+ } else {
+ *levelTo = mLevelTo;
+ }
+ return ramping;
+}
\ No newline at end of file
diff --git a/media/libaaudio/src/utility/LinearRamp.h b/media/libaaudio/src/utility/LinearRamp.h
new file mode 100644
index 0000000..ff09dce
--- /dev/null
+++ b/media/libaaudio/src/utility/LinearRamp.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_LINEAR_RAMP_H
+#define AAUDIO_LINEAR_RAMP_H
+
+#include <atomic>
+#include <stdint.h>
+
+/**
+ * Generate segments along a linear ramp.
+ * The ramp target can be updated from another thread.
+ * When the target is updated, a new ramp is started from the current position.
+ *
+ * The first ramp starts at 0.0.
+ *
+ */
+class LinearRamp {
+public:
+ LinearRamp() {
+ mTarget.store(1.0f);
+ }
+
+ void setLengthInFrames(int32_t frames) {
+ mLengthInFrames = frames;
+ }
+
+ int32_t getLengthInFrames() {
+ return mLengthInFrames;
+ }
+
+ /**
+ * This may be called by another thread.
+ * @param target
+ */
+ void setTarget(float target) {
+ mTarget.store(target);
+ }
+
+ float getTarget() {
+ return mTarget.load();
+ }
+
+ /**
+ * Force the nextSegment to start from this level.
+ *
+ * WARNING: this can cause a discontinuity if called while the ramp is being used.
+ * Only call this when setting the initial ramp.
+ *
+ * @param level
+ */
+ void forceCurrent(float level) {
+ mLevelFrom = level;
+ mLevelTo = level; // forces a ramp if it does not match target
+ }
+
+ float getCurrent() {
+ return mLevelFrom;
+ }
+
+ /**
+ * Get levels for next ramp segment.
+ *
+ * @param frames number of frames in the segment
+ * @param levelFrom pointer to starting amplitude
+ * @param levelTo pointer to ending amplitude
+ * @return true if ramp is still moving towards the target
+ */
+ bool nextSegment(int32_t frames, float *levelFrom, float *levelTo);
+
+private:
+
+ bool isRamping();
+
+ std::atomic<float> mTarget;
+
+ int32_t mLengthInFrames = 48000 / 50; // 20 msec at 48000 Hz
+ int32_t mRemaining = 0;
+ float mLevelFrom = 0.0f;
+ float mLevelTo = 0.0f;
+};
+
+
+#endif //AAUDIO_LINEAR_RAMP_H
diff --git a/media/libaaudio/src/utility/MonotonicCounter.h b/media/libaaudio/src/utility/MonotonicCounter.h
new file mode 100644
index 0000000..81d7f89
--- /dev/null
+++ b/media/libaaudio/src/utility/MonotonicCounter.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef UTILITY_MONOTONIC_COUNTER_H
+#define UTILITY_MONOTONIC_COUNTER_H
+
+#include <stdint.h>
+
/**
 * Maintain a 64-bit monotonic counter.
 * Can be used to track a 32-bit counter that wraps or gets reset.
 *
 * Note that this is not atomic and has no interior locks.
 * A caller will need to provide their own exterior locking
 * if they need to use it from multiple threads.
 */
class MonotonicCounter {

public:
    MonotonicCounter() {}
    virtual ~MonotonicCounter() {}

    /**
     * @return current value of the counter
     */
    int64_t get() const {
        return mCounter64;
    }

    /**
     * Advance the counter if delta is positive.
     * @param delta amount to advance by; values <= 0 are ignored
     * @return current value of the counter
     */
    int64_t increment(int64_t delta) {
        if (delta > 0) {
            mCounter64 += delta;
        }
        return mCounter64;
    }

    /**
     * Advance the 64-bit counter if (current32 - previousCurrent32) > 0.
     * This can be used to convert a 32-bit counter that may be wrapping into
     * a monotonic 64-bit counter.
     *
     * This counter32 should NOT be allowed to advance by more than 0x7FFFFFFF between calls.
     * Think of the wrapping counter like a sine wave. If the frequency of the signal
     * is more than half the sampling rate (Nyquist rate) then you cannot measure it properly.
     * If the counter wraps around every 24 hours then we should measure it with a period
     * of less than 12 hours.
     *
     * @return current value of the 64-bit counter
     */
    int64_t update32(int32_t counter32) {
        // Subtract in unsigned arithmetic so wrap-around is well defined.
        // A direct signed subtraction can overflow, which is undefined
        // behavior and aborts under UBSan / -ftrapv.
        int32_t delta = static_cast<int32_t>(static_cast<uint32_t>(counter32)
                                             - static_cast<uint32_t>(mCounter32));
        // protect against the mCounter64 going backwards
        if (delta > 0) {
            mCounter64 += delta;
            mCounter32 = counter32;
        }
        return mCounter64;
    }

    /**
     * Reset the stored value of the 32-bit counter.
     * This is used if your counter32 has been reset to zero.
     */
    void reset32() {
        mCounter32 = 0;
    }

private:
    int64_t mCounter64 = 0;  // monotonic 64-bit total
    int32_t mCounter32 = 0;  // last 32-bit value seen by update32()
};
+
+
+#endif //UTILITY_MONOTONIC_COUNTER_H
diff --git a/media/libaaudio/src/utility/README.md b/media/libaaudio/src/utility/README.md
new file mode 100644
index 0000000..0ac74ea
--- /dev/null
+++ b/media/libaaudio/src/utility/README.md
@@ -0,0 +1,3 @@
+The utility folder contains things that may be shared between the AAudio client and server.
+They might also be handy outside AAudio.
+They generally do not depend on AAudio functionality.
diff --git a/media/libaaudio/tests/Android.mk b/media/libaaudio/tests/Android.mk
new file mode 100644
index 0000000..afcdebf
--- /dev/null
+++ b/media/libaaudio/tests/Android.mk
@@ -0,0 +1,61 @@
LOCAL_PATH := $(call my-dir)

# Each stanza below builds one gtest-based native test binary.
# They all share the same include paths, shared libraries, and the
# static libaaudio; only LOCAL_SRC_FILES and LOCAL_MODULE differ.

# Unit test for the HandleTracker utility.
include $(CLEAR_VARS)
LOCAL_C_INCLUDES := \
    $(call include-path-for, audio-utils) \
    frameworks/av/media/libaaudio/include \
    frameworks/av/media/libaaudio/src
LOCAL_SRC_FILES:= test_handle_tracker.cpp
LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
                          libcutils liblog libmedia libutils libaudiomanager
LOCAL_STATIC_LIBRARIES := libaaudio
LOCAL_MODULE := test_handle_tracker
include $(BUILD_NATIVE_TEST)

# Unit test for Parcel marshalling of AAudio ring buffer descriptors.
include $(CLEAR_VARS)
LOCAL_C_INCLUDES := \
    $(call include-path-for, audio-utils) \
    frameworks/av/media/libaaudio/include \
    frameworks/av/media/libaaudio/src
LOCAL_SRC_FILES:= test_marshalling.cpp
LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
                          libcutils liblog libmedia libutils libaudiomanager
LOCAL_STATIC_LIBRARIES := libaaudio
LOCAL_MODULE := test_aaudio_marshalling
include $(BUILD_NATIVE_TEST)

# Unit test for the FixedBlockReader/Writer adapters.
include $(CLEAR_VARS)
LOCAL_C_INCLUDES := \
    $(call include-path-for, audio-utils) \
    frameworks/av/media/libaaudio/include \
    frameworks/av/media/libaaudio/src
LOCAL_SRC_FILES:= test_block_adapter.cpp
LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
                          libcutils liblog libmedia libutils libaudiomanager
LOCAL_STATIC_LIBRARIES := libaaudio
LOCAL_MODULE := test_block_adapter
include $(BUILD_NATIVE_TEST)

# Unit test for the LinearRamp utility.
include $(CLEAR_VARS)
LOCAL_C_INCLUDES := \
    $(call include-path-for, audio-utils) \
    frameworks/av/media/libaaudio/include \
    frameworks/av/media/libaaudio/src
LOCAL_SRC_FILES:= test_linear_ramp.cpp
LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
                          libcutils liblog libmedia libutils libaudiomanager
LOCAL_STATIC_LIBRARIES := libaaudio
LOCAL_MODULE := test_linear_ramp
include $(BUILD_NATIVE_TEST)

# Test that opens streams with many parameter combinations.
include $(CLEAR_VARS)
LOCAL_C_INCLUDES := \
    $(call include-path-for, audio-utils) \
    frameworks/av/media/libaaudio/include \
    frameworks/av/media/libaaudio/src
LOCAL_SRC_FILES:= test_open_params.cpp
LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
                          libcutils liblog libmedia libutils libaudiomanager
LOCAL_STATIC_LIBRARIES := libaaudio
LOCAL_MODULE := test_open_params
include $(BUILD_NATIVE_TEST)
diff --git a/media/libaaudio/tests/test_block_adapter.cpp b/media/libaaudio/tests/test_block_adapter.cpp
new file mode 100644
index 0000000..a22abb9
--- /dev/null
+++ b/media/libaaudio/tests/test_block_adapter.cpp
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+
+#include <gtest/gtest.h>
+
+#include "utility/FixedBlockAdapter.h"
+#include "utility/FixedBlockWriter.h"
+#include "utility/FixedBlockReader.h"
+
+#define FIXED_BLOCK_SIZE 47
+#define TEST_BUFFER_SIZE 103
+
+// Pass varying sized blocks.
+// Frames contain a sequential index, which are easily checked.
+class TestBlockAdapter {
+public:
+ TestBlockAdapter()
+ : mTestIndex(0), mLastIndex(0) {
+ }
+
+ ~TestBlockAdapter() = default;
+
+ void fillSequence(int32_t *indexBuffer, int32_t frameCount) {
+ ASSERT_LE(frameCount, TEST_BUFFER_SIZE);
+ for (int i = 0; i < frameCount; i++) {
+ indexBuffer[i] = mLastIndex++;
+ }
+ }
+
+ int checkSequence(const int32_t *indexBuffer, int32_t frameCount) {
+ // This is equivalent to calling an output callback.
+ for (int i = 0; i < frameCount; i++) {
+ int32_t expected = mTestIndex++;
+ int32_t actual = indexBuffer[i];
+ EXPECT_EQ(expected, actual);
+ if (actual != expected) {
+ return -1;
+ }
+ }
+ return 0;
+ }
+
+ int32_t mTestBuffer[TEST_BUFFER_SIZE];
+ int32_t mTestIndex;
+ int32_t mLastIndex;
+};
+
// Wraps a FixedBlockWriter: variable-sized writes go in, and the adapter
// delivers fixed-size blocks to onProcessFixedBlock(), where the sequence
// is verified.
class TestBlockWriter : public TestBlockAdapter, FixedBlockProcessor {
public:
    TestBlockWriter()
            : mFixedBlockWriter(*this) {
        mFixedBlockWriter.open(sizeof(int32_t) * FIXED_BLOCK_SIZE);
    }

    ~TestBlockWriter() {
        mFixedBlockWriter.close();
    }

    // Callback from the adapter with one full fixed-size block.
    // Returns 0 on success or -1 on a sequence mismatch.
    int32_t onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) override {
        int32_t frameCount = numBytes / sizeof(int32_t);
        return checkSequence((int32_t *) buffer, frameCount);
    }

    // Simulate audio input from a variable sized callback.
    int32_t testInputWrite(int32_t variableCount) {
        fillSequence(mTestBuffer, variableCount);
        int32_t sizeBytes = variableCount * sizeof(int32_t);
        return mFixedBlockWriter.processVariableBlock((uint8_t *) mTestBuffer, sizeBytes);
    }

private:
    FixedBlockWriter mFixedBlockWriter;
};
+
// Wraps a FixedBlockReader: the adapter requests fixed-size blocks through
// onProcessFixedBlock() (filled with the sequence), and variable-sized reads
// are verified against that sequence.
class TestBlockReader : public TestBlockAdapter, FixedBlockProcessor {
public:
    TestBlockReader()
            : mFixedBlockReader(*this) {
        mFixedBlockReader.open(sizeof(int32_t) * FIXED_BLOCK_SIZE);
    }

    ~TestBlockReader() {
        mFixedBlockReader.close();
    }

    // Callback from the adapter asking for one full fixed-size block;
    // fills it with the next run of sequential values.
    int32_t onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) override {
        int32_t frameCount = numBytes / sizeof(int32_t);
        fillSequence((int32_t *) buffer, frameCount);
        return 0;
    }

    // Simulate audio output from a variable sized callback.
    // Returns 0 on success or a negative value on failure.
    int32_t testOutputRead(int32_t variableCount) {
        int32_t sizeBytes = variableCount * sizeof(int32_t);
        int32_t result = mFixedBlockReader.processVariableBlock((uint8_t *) mTestBuffer, sizeBytes);
        if (result >= 0) {
            result = checkSequence((int32_t *)mTestBuffer, variableCount);
        }
        return result;
    }

private:
    FixedBlockReader mFixedBlockReader;
};
+
+
+TEST(test_block_adapter, block_adapter_write) {
+ TestBlockWriter tester;
+ int result = 0;
+ const int numLoops = 1000;
+
+ for (int i = 0; i<numLoops && result == 0; i++) {
+ long r = random();
+ int32_t size = (r % TEST_BUFFER_SIZE);
+ ASSERT_LE(size, TEST_BUFFER_SIZE);
+ ASSERT_GE(size, 0);
+ result = tester.testInputWrite(size);
+ }
+ ASSERT_EQ(0, result);
+}
+
+TEST(test_block_adapter, block_adapter_read) {
+ TestBlockReader tester;
+ int result = 0;
+ const int numLoops = 1000;
+
+ for (int i = 0; i < numLoops && result == 0; i++) {
+ long r = random();
+ int32_t size = (r % TEST_BUFFER_SIZE);
+ ASSERT_LE(size, TEST_BUFFER_SIZE);
+ ASSERT_GE(size, 0);
+ result = tester.testOutputRead(size);
+ }
+ ASSERT_EQ(0, result);
+};
+
diff --git a/media/libaaudio/tests/test_handle_tracker.cpp b/media/libaaudio/tests/test_handle_tracker.cpp
new file mode 100644
index 0000000..c4db47a
--- /dev/null
+++ b/media/libaaudio/tests/test_handle_tracker.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Unit tests for AAudio Handle Tracker
+
+#include <stdlib.h>
+#include <math.h>
+
+#include <gtest/gtest.h>
+
+#include <aaudio/AAudio.h>
+#include "utility/HandleTracker.h"
+
// Test adding one address.
// Exercises the basic put/get/remove lifecycle of HandleTracker:
// a stored address must be retrievable only with the matching handle
// and type, and only until it is removed.
TEST(test_handle_tracker, aaudio_handle_tracker) {
    const int MAX_HANDLES = 4;
    HandleTracker tracker(MAX_HANDLES);
    handle_tracker_type_t type = 3; // arbitrary generic type
    int data; // something that has an address we can use
    handle_tracker_address_t found;

    // repeat the test several times to see if it breaks
    const int SEVERAL = 5; // arbitrary
    for (int i = 0; i < SEVERAL; i++) {
        // should fail to find a bogus handle
        found = tracker.get(type, 0); // bad handle
        EXPECT_EQ(nullptr, found);

        // create a valid handle and use it to lookup the object again
        aaudio_handle_t dataHandle = tracker.put(type, &data);
        ASSERT_TRUE(dataHandle > 0);
        found = tracker.get(type, dataHandle);
        EXPECT_EQ(&data, found);
        found = tracker.get(type, 0); // bad handle
        EXPECT_EQ(nullptr, found);

        // wrong type must not resolve even with a valid handle
        found = tracker.get(type+1, dataHandle);
        EXPECT_EQ(nullptr, found);

        // remove from storage
        found = tracker.remove(type, dataHandle);
        EXPECT_EQ(&data, found);
        // should fail the second time
        found = tracker.remove(type, dataHandle);
        EXPECT_EQ(nullptr, found);
    }
}
+
+// Test filling the tracker.
+TEST(test_handle_tracker, aaudio_full_up) {
+ const int MAX_HANDLES = 5;
+ HandleTracker tracker(MAX_HANDLES);
+ handle_tracker_type_t type = 4; // arbitrary generic type
+ int data[MAX_HANDLES];
+ aaudio_handle_t handles[MAX_HANDLES];
+ handle_tracker_address_t found;
+
+ // repeat the test several times to see if it breaks
+ const int SEVERAL = 5; // arbitrary
+ for (int i = 0; i < SEVERAL; i++) {
+ for (int i = 0; i < MAX_HANDLES; i++) {
+ // add a handle
+ handles[i] = tracker.put(type, &data[i]);
+ ASSERT_TRUE(handles[i] > 0);
+ found = tracker.get(type, handles[i]);
+ EXPECT_EQ(&data[i], found);
+ }
+
+ // Now that it is full, try to add one more.
+ aaudio_handle_t handle = tracker.put(type, &data[0]);
+ EXPECT_TRUE(handle < 0);
+
+ for (int i = 0; i < MAX_HANDLES; i++) {
+ // look up each handle
+ found = tracker.get(type, handles[i]);
+ EXPECT_EQ(&data[i], found);
+ }
+
+ // remove one from storage
+ found = tracker.remove(type, handles[2]);
+ EXPECT_EQ(&data[2], found);
+ // now try to look up the same handle and fail
+ found = tracker.get(type, handles[2]);
+ EXPECT_EQ(nullptr, found);
+
+ // add that same one back
+ handle = tracker.put(type, &data[2]);
+ ASSERT_TRUE(handle > 0);
+ found = tracker.get(type, handle);
+ EXPECT_EQ(&data[2], found);
+ // now use a stale handle again with a valid index and fail
+ found = tracker.get(type, handles[2]);
+ EXPECT_EQ(nullptr, found);
+
+ // remove them all
+ handles[2] = handle;
+ for (int i = 0; i < MAX_HANDLES; i++) {
+ // look up each handle
+ found = tracker.remove(type, handles[i]);
+ EXPECT_EQ(&data[i], found);
+ }
+ }
+}
diff --git a/media/libaaudio/tests/test_linear_ramp.cpp b/media/libaaudio/tests/test_linear_ramp.cpp
new file mode 100644
index 0000000..5c53982
--- /dev/null
+++ b/media/libaaudio/tests/test_linear_ramp.cpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+
+#include <gtest/gtest.h>
+
+#include "utility/AAudioUtilities.h"
+#include "utility/LinearRamp.h"
+
+
+TEST(test_linear_ramp, linear_ramp_segments) {
+ LinearRamp ramp;
+ const float source[4] = {1.0f, 1.0f, 1.0f, 1.0f };
+ float destination[4] = {1.0f, 1.0f, 1.0f, 1.0f };
+
+ float levelFrom = -1.0f;
+ float levelTo = -1.0f;
+ ramp.setLengthInFrames(8);
+ ramp.setTarget(8.0f);
+
+ ASSERT_EQ(8, ramp.getLengthInFrames());
+
+ bool ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
+ ASSERT_EQ(1, ramping);
+ ASSERT_EQ(0.0f, levelFrom);
+ ASSERT_EQ(4.0f, levelTo);
+
+ AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
+ ASSERT_EQ(0.0f, destination[0]);
+ ASSERT_EQ(1.0f, destination[1]);
+ ASSERT_EQ(2.0f, destination[2]);
+ ASSERT_EQ(3.0f, destination[3]);
+
+ ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
+ ASSERT_EQ(1, ramping);
+ ASSERT_EQ(4.0f, levelFrom);
+ ASSERT_EQ(8.0f, levelTo);
+
+ AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
+ ASSERT_EQ(4.0f, destination[0]);
+ ASSERT_EQ(5.0f, destination[1]);
+ ASSERT_EQ(6.0f, destination[2]);
+ ASSERT_EQ(7.0f, destination[3]);
+
+ ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
+ ASSERT_EQ(0, ramping);
+ ASSERT_EQ(8.0f, levelFrom);
+ ASSERT_EQ(8.0f, levelTo);
+
+ AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
+ ASSERT_EQ(8.0f, destination[0]);
+ ASSERT_EQ(8.0f, destination[1]);
+ ASSERT_EQ(8.0f, destination[2]);
+ ASSERT_EQ(8.0f, destination[3]);
+
+};
+
+
+TEST(test_linear_ramp, linear_ramp_forced) {
+ LinearRamp ramp;
+ const float source[4] = {1.0f, 1.0f, 1.0f, 1.0f };
+ float destination[4] = {1.0f, 1.0f, 1.0f, 1.0f };
+
+ float levelFrom = -1.0f;
+ float levelTo = -1.0f;
+ ramp.setLengthInFrames(4);
+ ramp.setTarget(8.0f);
+ ramp.forceCurrent(4.0f);
+ ASSERT_EQ(4.0f, ramp.getCurrent());
+
+ bool ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
+ ASSERT_EQ(1, ramping);
+ ASSERT_EQ(4.0f, levelFrom);
+ ASSERT_EQ(8.0f, levelTo);
+
+ AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
+ ASSERT_EQ(4.0f, destination[0]);
+ ASSERT_EQ(5.0f, destination[1]);
+ ASSERT_EQ(6.0f, destination[2]);
+ ASSERT_EQ(7.0f, destination[3]);
+
+ ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
+ ASSERT_EQ(0, ramping);
+ ASSERT_EQ(8.0f, levelFrom);
+ ASSERT_EQ(8.0f, levelTo);
+
+ AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
+ ASSERT_EQ(8.0f, destination[0]);
+ ASSERT_EQ(8.0f, destination[1]);
+ ASSERT_EQ(8.0f, destination[2]);
+ ASSERT_EQ(8.0f, destination[3]);
+
+};
+
diff --git a/media/libaaudio/tests/test_marshalling.cpp b/media/libaaudio/tests/test_marshalling.cpp
new file mode 100644
index 0000000..79beed6
--- /dev/null
+++ b/media/libaaudio/tests/test_marshalling.cpp
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Unit tests for AAudio Marshalling of RingBuffer information.
+
+#include <stdlib.h>
+#include <math.h>
+
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+#include <cutils/ashmem.h>
+#include <gtest/gtest.h>
+#include <sys/mman.h>
+
+#include <aaudio/AAudio.h>
+#include <binding/AudioEndpointParcelable.h>
+
+using namespace android;
+using namespace aaudio;
+
+// Test adding one value.
+TEST(test_marshalling, aaudio_one_read_write) {
+ Parcel parcel;
+ size_t pos = parcel.dataPosition();
+ const int arbitraryValue = 235;
+ parcel.writeInt32(arbitraryValue);
+ parcel.setDataPosition(pos);
+ int32_t y;
+ parcel.readInt32(&y);
+ EXPECT_EQ(arbitraryValue, y);
+}
+
+// Test SharedMemoryParcel.
+TEST(test_marshalling, aaudio_shared_memory) {
+ SharedMemoryParcelable sharedMemoryA;
+ SharedMemoryParcelable sharedMemoryB;
+ const size_t memSizeBytes = 840;
+ int fd = ashmem_create_region("TestMarshalling", memSizeBytes);
+ ASSERT_LE(0, fd);
+ sharedMemoryA.setup(fd, memSizeBytes);
+ void *region1;
+ EXPECT_EQ(AAUDIO_OK, sharedMemoryA.resolve(0, 16, ®ion1)); // fits in region
+ EXPECT_NE(AAUDIO_OK, sharedMemoryA.resolve(-2, 16, ®ion1)); // offset is negative
+ EXPECT_NE(AAUDIO_OK, sharedMemoryA.resolve(0, memSizeBytes + 8, ®ion1)); // size too big
+ EXPECT_NE(AAUDIO_OK, sharedMemoryA.resolve(memSizeBytes - 8, 16, ®ion1)); // goes past the end
+ int32_t *buffer1 = (int32_t *)region1;
+ buffer1[0] = 98735; // arbitrary value
+
+ Parcel parcel;
+ size_t pos = parcel.dataPosition();
+ sharedMemoryA.writeToParcel(&parcel);
+
+ parcel.setDataPosition(pos);
+ sharedMemoryB.readFromParcel(&parcel);
+ EXPECT_EQ(sharedMemoryA.getSizeInBytes(), sharedMemoryB.getSizeInBytes());
+
+ // should see same value at two different addresses
+ void *region2;
+ EXPECT_EQ(AAUDIO_OK, sharedMemoryB.resolve(0, 16, ®ion2));
+ int32_t *buffer2 = (int32_t *)region2;
+ EXPECT_NE(buffer1, buffer2);
+ EXPECT_EQ(buffer1[0], buffer2[0]);
+}
+
+// Test SharedRegionParcel.
+TEST(test_marshalling, aaudio_shared_region) {
+ SharedMemoryParcelable sharedMemories[2];
+ SharedRegionParcelable sharedRegionA;
+ SharedRegionParcelable sharedRegionB;
+ const size_t memSizeBytes = 840;
+ int fd = ashmem_create_region("TestMarshalling", memSizeBytes);
+ ASSERT_LE(0, fd);
+ sharedMemories[0].setup(fd, memSizeBytes);
+ int32_t regionOffset1 = 32;
+ int32_t regionSize1 = 16;
+ sharedRegionA.setup(0, regionOffset1, regionSize1);
+
+ void *region1;
+ EXPECT_EQ(AAUDIO_OK, sharedRegionA.resolve(sharedMemories, ®ion1));
+ int32_t *buffer1 = (int32_t *)region1;
+ buffer1[0] = 336677; // arbitrary value
+
+ Parcel parcel;
+ size_t pos = parcel.dataPosition();
+ sharedRegionA.writeToParcel(&parcel);
+
+ parcel.setDataPosition(pos);
+ sharedRegionB.readFromParcel(&parcel);
+
+ // should see same value
+ void *region2;
+ EXPECT_EQ(AAUDIO_OK, sharedRegionB.resolve(sharedMemories, ®ion2));
+ int32_t *buffer2 = (int32_t *)region2;
+ EXPECT_EQ(buffer1[0], buffer2[0]);
+}
+
// Test RingBufferParcelable.
// Lays out read/write counters plus the data area inside one ashmem
// region, round-trips the ring buffer description through a Parcel,
// and verifies that both sides resolve to the same memory and report
// the same geometry.
TEST(test_marshalling, aaudio_ring_buffer_parcelable) {
    SharedMemoryParcelable sharedMemories[2];
    RingBufferParcelable ringBufferA;
    RingBufferParcelable ringBufferB;

    const size_t bytesPerFrame = 8;
    const size_t framesPerBurst = 32;
    const size_t dataSizeBytes = 2048;
    const int32_t counterSizeBytes = sizeof(int64_t);
    const size_t memSizeBytes = dataSizeBytes + (2 * counterSizeBytes);

    int fd = ashmem_create_region("TestMarshalling", memSizeBytes);
    ASSERT_LE(0, fd);
    sharedMemories[0].setup(fd, memSizeBytes);

    int32_t sharedMemoryIndex = 0;
    // arrange indices and data in the shared memory:
    // [read counter][write counter][data...]
    int32_t readOffset = 0;
    int32_t writeOffset = readOffset + counterSizeBytes;
    int32_t dataOffset = writeOffset + counterSizeBytes;
    ringBufferA.setupMemory(sharedMemoryIndex, dataOffset, dataSizeBytes,
        readOffset, writeOffset, counterSizeBytes);
    ringBufferA.setFramesPerBurst(framesPerBurst);
    ringBufferA.setBytesPerFrame(bytesPerFrame);
    ringBufferA.setCapacityInFrames(dataSizeBytes / bytesPerFrame);

    // setup A: write arbitrary values through the resolved pointers
    RingBufferDescriptor descriptorA;
    EXPECT_EQ(AAUDIO_OK, ringBufferA.resolve(sharedMemories, &descriptorA));
    descriptorA.dataAddress[0] = 95;
    descriptorA.dataAddress[1] = 57;
    descriptorA.readCounterAddress[0] = 17;
    descriptorA.writeCounterAddress[0] = 39;

    // write A to parcel
    Parcel parcel;
    size_t pos = parcel.dataPosition();
    ringBufferA.writeToParcel(&parcel);

    // read B from parcel
    parcel.setDataPosition(pos);
    ringBufferB.readFromParcel(&parcel);

    RingBufferDescriptor descriptorB;
    EXPECT_EQ(AAUDIO_OK, ringBufferB.resolve(sharedMemories, &descriptorB));

    // A and B should match because they describe the same shared memory
    EXPECT_EQ(descriptorA.dataAddress[0], descriptorB.dataAddress[0]);
    EXPECT_EQ(descriptorA.dataAddress[1], descriptorB.dataAddress[1]);
    EXPECT_EQ(descriptorA.readCounterAddress[0], descriptorB.readCounterAddress[0]);
    EXPECT_EQ(descriptorA.writeCounterAddress[0], descriptorB.writeCounterAddress[0]);

    EXPECT_EQ(ringBufferA.getFramesPerBurst(), ringBufferB.getFramesPerBurst());
    EXPECT_EQ(ringBufferA.getBytesPerFrame(), ringBufferB.getBytesPerFrame());
    EXPECT_EQ(ringBufferA.getCapacityInFrames(), ringBufferB.getCapacityInFrames());
}
diff --git a/media/libaaudio/tests/test_open_params.cpp b/media/libaaudio/tests/test_open_params.cpp
new file mode 100644
index 0000000..01b8799
--- /dev/null
+++ b/media/libaaudio/tests/test_open_params.cpp
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Play sine waves using AAudio.
+
+#include <stdio.h>
+//#include <stdlib.h>
+//#include <math.h>
+
+#include <android-base/macros.h>
+#include <aaudio/AAudio.h>
+
+#include <gtest/gtest.h>
+
+static const char *getSharingModeText(aaudio_sharing_mode_t mode) {
+ const char *modeText = "unknown";
+ switch (mode) {
+ case AAUDIO_SHARING_MODE_EXCLUSIVE:
+ modeText = "EXCLUSIVE";
+ break;
+ case AAUDIO_SHARING_MODE_SHARED:
+ modeText = "SHARED";
+ break;
+ default:
+ break;
+ }
+ return modeText;
+}
+
// Callback function that fills the audio output buffer.
// Intentionally a no-op: these tests only exercise open/close parameter
// negotiation, so the callback just tells AAudio to keep streaming.
aaudio_data_callback_result_t MyDataCallbackProc(
        AAudioStream *stream,
        void *userData,
        void *audioData,
        int32_t numFrames
) {
    (void) stream;    // unused
    (void) userData;  // unused
    (void) audioData; // unused
    (void) numFrames; // unused
    return AAUDIO_CALLBACK_RESULT_CONTINUE;
}
+
// Try to open a stream with the requested direction/channels/rate/format,
// and when the open succeeds, verify that every parameter explicitly
// requested was honored. Open failures are tolerated (printed, not
// asserted) because not every combination is supported on every device.
static void testOpenOptions(aaudio_direction_t direction,
                            int32_t channelCount,
                            int32_t sampleRate,
                            aaudio_format_t format) {

    aaudio_result_t result = AAUDIO_OK;

    int32_t bufferCapacity;
    int32_t framesPerBurst = 0;

    int32_t actualChannelCount = 0;
    int32_t actualSampleRate = 0;
    aaudio_format_t actualDataFormat = AAUDIO_FORMAT_UNSPECIFIED;
    aaudio_sharing_mode_t actualSharingMode = AAUDIO_SHARING_MODE_SHARED;
    aaudio_direction_t actualDirection;

    AAudioStreamBuilder *aaudioBuilder = nullptr;
    AAudioStream *aaudioStream = nullptr;

    printf("TestOpen: dir = %d, chans = %3d, rate = %6d format = %d\n",
           direction, channelCount, sampleRate, format);

    // Use an AAudioStreamBuilder to contain requested parameters.
    ASSERT_EQ(AAUDIO_OK, AAudio_createStreamBuilder(&aaudioBuilder));

    // Request stream properties.
    AAudioStreamBuilder_setDirection(aaudioBuilder, direction);
    AAudioStreamBuilder_setSampleRate(aaudioBuilder, sampleRate);
    AAudioStreamBuilder_setChannelCount(aaudioBuilder, channelCount);
    AAudioStreamBuilder_setFormat(aaudioBuilder, format);
    AAudioStreamBuilder_setDataCallback(aaudioBuilder, MyDataCallbackProc, nullptr);

    //AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, AAUDIO_PERFORMANCE_MODE_NONE);
    AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
    //AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, AAUDIO_PERFORMANCE_MODE_POWER_SAVING);

    // Create an AAudioStream using the Builder.
    result = AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream);
    if (result != AAUDIO_OK) {
        printf("Stream not opened! That may be OK.\n");
        // NOTE(review): jumps to finish with aaudioStream still nullptr;
        // assumes AAudioStream_close(nullptr) is harmless — TODO confirm.
        goto finish;
    }

    // Check to see what kind of stream we actually got.
    actualSampleRate = AAudioStream_getSampleRate(aaudioStream);
    actualChannelCount = AAudioStream_getChannelCount(aaudioStream);
    actualDataFormat = AAudioStream_getFormat(aaudioStream);
    actualDirection = AAudioStream_getDirection(aaudioStream);

    printf("          dir = %d, chans = %3d, rate = %6d format = %d\n",
           direction, actualChannelCount, actualSampleRate, actualDataFormat);

    // If we ask for something specific then we should get that.
    if (channelCount != AAUDIO_UNSPECIFIED) {
        EXPECT_EQ(channelCount, actualChannelCount);
    }
    if (sampleRate != AAUDIO_UNSPECIFIED) {
        EXPECT_EQ(sampleRate, actualSampleRate);
    }
    if (format != AAUDIO_FORMAT_UNSPECIFIED) {
        EXPECT_EQ(format, actualDataFormat);
    }
    EXPECT_EQ(direction, actualDirection);

    // This is the number of frames that are read in one chunk by a DMA controller
    // or a DSP or a mixer.
    framesPerBurst = AAudioStream_getFramesPerBurst(aaudioStream);
    bufferCapacity = AAudioStream_getBufferCapacityInFrames(aaudioStream);
    printf("          bufferCapacity = %d, remainder = %d\n",
           bufferCapacity, bufferCapacity % framesPerBurst);

finish:
    // Cleanup runs on both the success and the failed-open path.
    AAudioStream_close(aaudioStream);
    AAudioStreamBuilder_delete(aaudioBuilder);
    printf("          result = %d = %s\n", result, AAudio_convertResultToText(result));
}
+
//void foo() { // for tricking the Android Studio formatter
// Sweep each direction against a range of channel counts, sample rates
// (including deliberately odd ones), and formats. testOpenOptions()
// tolerates open failures, so this mainly checks that nothing crashes
// and that honored parameters are consistent.
TEST(test_open_params, aaudio_open_all) {
    aaudio_direction_t directions[] = {AAUDIO_DIRECTION_OUTPUT, AAUDIO_DIRECTION_INPUT};
    aaudio_format_t formats[] = {AAUDIO_FORMAT_UNSPECIFIED,
                                 AAUDIO_FORMAT_PCM_I16, AAUDIO_FORMAT_PCM_FLOAT};
    int32_t rates[] = {AAUDIO_UNSPECIFIED, 22050, 32000, 44100, 48000, 88200, 96000, 37913, 59132};

    // Make printf print immediately so that debug info is not stuck
    // in a buffer if we hang or crash.
    setvbuf(stdout, nullptr, _IONBF, (size_t) 0);

    for (uint dirIndex = 0;dirIndex < arraysize(directions); dirIndex++) {
        aaudio_direction_t direction = directions[dirIndex];
        // Channel counts 0 (unspecified) through 8.
        for (int32_t channelCount = 0; channelCount <= 8; channelCount++) {
            testOpenOptions(direction, channelCount,
                            AAUDIO_UNSPECIFIED, AAUDIO_FORMAT_UNSPECIFIED);
        }
        for (uint i = 0; i < arraysize(rates); i++) {
            testOpenOptions(direction, AAUDIO_UNSPECIFIED, rates[i], AAUDIO_FORMAT_UNSPECIFIED);
        }
        for (uint i = 0; i < arraysize(formats); i++) {
            testOpenOptions(direction, AAUDIO_UNSPECIFIED, AAUDIO_UNSPECIFIED, formats[i]);
        }
    }
}
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index d5c9217..61c946c 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -1,3 +1,9 @@
+cc_library_headers {
+ name: "libaudioclient_headers",
+ vendor_available: true,
+ export_include_dirs: ["include"],
+}
+
cc_library_shared {
name: "libaudioclient",
srcs: [
@@ -16,6 +22,8 @@
"IEffect.cpp",
"IEffectClient.cpp",
"ToneGenerator.cpp",
+ "PlayerBase.cpp",
+ "TrackPlayerBase.cpp",
],
shared_libs: [
"liblog",
@@ -24,16 +32,22 @@
"libbinder",
"libdl",
"libaudioutils",
+ "libaudiomanager",
],
export_shared_lib_headers: ["libbinder"],
+
+ local_include_dirs: ["include/media"],
+ header_libs: ["libaudioclient_headers"],
+ export_header_lib_headers: ["libaudioclient_headers"],
+
// for memory heap analysis
static_libs: [
"libc_malloc_debug_backtrace",
],
cflags: [
+ "-Wall",
"-Werror",
"-Wno-error=deprecated-declarations",
- "-Wall",
],
sanitize: {
misc_undefined : [
diff --git a/media/libaudioclient/AudioEffect.cpp b/media/libaudioclient/AudioEffect.cpp
index 590952f..a5f9ab6 100644
--- a/media/libaudioclient/AudioEffect.cpp
+++ b/media/libaudioclient/AudioEffect.cpp
@@ -128,9 +128,11 @@
mDescriptor.uuid = *(uuid != NULL ? uuid : EFFECT_UUID_NULL);
mIEffectClient = new EffectClient(this);
+ mClientPid = IPCThreadState::self()->getCallingPid();
iEffect = audioFlinger->createEffect((effect_descriptor_t *)&mDescriptor,
- mIEffectClient, priority, io, mSessionId, mOpPackageName, &mStatus, &mId, &enabled);
+ mIEffectClient, priority, io, mSessionId, mOpPackageName, mClientPid,
+ &mStatus, &mId, &enabled);
if (iEffect == 0 || (mStatus != NO_ERROR && mStatus != ALREADY_EXISTS)) {
ALOGE("set(): AudioFlinger could not create effect, status: %d", mStatus);
@@ -156,7 +158,6 @@
mCblk->buffer = (uint8_t *)mCblk + bufOffset;
IInterface::asBinder(iEffect)->linkToDeath(mIEffectClient);
- mClientPid = IPCThreadState::self()->getCallingPid();
ALOGV("set() %p OK effect: %s id: %d status %d enabled %d pid %d", this, mDescriptor.name, mId,
mStatus, mEnabled, mClientPid);
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index a80c891..e749ac4 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -66,9 +66,10 @@
// ---------------------------------------------------------------------------
AudioRecord::AudioRecord(const String16 &opPackageName)
- : mActive(false), mStatus(NO_INIT), mOpPackageName(opPackageName), mSessionId(AUDIO_SESSION_ALLOCATE),
+ : mActive(false), mStatus(NO_INIT), mOpPackageName(opPackageName),
+ mSessionId(AUDIO_SESSION_ALLOCATE),
mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT),
- mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
+ mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE), mPortId(AUDIO_PORT_HANDLE_NONE)
{
}
@@ -85,7 +86,7 @@
audio_session_t sessionId,
transfer_type transferType,
audio_input_flags_t flags,
- int uid,
+ uid_t uid,
pid_t pid,
const audio_attributes_t* pAttributes)
: mActive(false),
@@ -95,7 +96,8 @@
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT),
mProxy(NULL),
- mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
+ mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
+ mPortId(AUDIO_PORT_HANDLE_NONE)
{
mStatus = set(inputSource, sampleRate, format, channelMask, frameCount, cbf, user,
notificationFrames, false /*threadCanCallJava*/, sessionId, transferType, flags,
@@ -143,7 +145,7 @@
audio_session_t sessionId,
transfer_type transferType,
audio_input_flags_t flags,
- int uid,
+ uid_t uid,
pid_t pid,
const audio_attributes_t* pAttributes)
{
@@ -236,7 +238,7 @@
int callingpid = IPCThreadState::self()->getCallingPid();
int mypid = getpid();
- if (uid == -1 || (callingpid != mypid)) {
+ if (uid == AUDIO_UID_INVALID || (callingpid != mypid)) {
mClientUid = IPCThreadState::self()->getCallingUid();
} else {
mClientUid = uid;
@@ -479,12 +481,14 @@
AutoMutex lock(mLock);
if (mSelectedDeviceId != deviceId) {
mSelectedDeviceId = deviceId;
- // stop capture so that audio policy manager does not reject the new instance start request
- // as only one capture can be active at a time.
- if (mAudioRecord != 0 && mActive) {
- mAudioRecord->stop();
+ if (mStatus == NO_ERROR) {
+ // stop capture so that audio policy manager does not reject the new instance start request
+ // as only one capture can be active at a time.
+ if (mAudioRecord != 0 && mActive) {
+ mAudioRecord->stop();
+ }
+ android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
}
- android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
}
return NO_ERROR;
}
@@ -529,14 +533,18 @@
// The sp<> references will be dropped when re-entering scope.
// The lack of indentation is deliberate, to reduce code churn and ease merges.
for (;;) {
-
+ audio_config_base_t config = {
+ .sample_rate = mSampleRate,
+ .channel_mask = mChannelMask,
+ .format = mFormat
+ };
status = AudioSystem::getInputForAttr(&mAttributes, &input,
mSessionId,
// FIXME compare to AudioTrack
mClientPid,
mClientUid,
- mSampleRate, mFormat, mChannelMask,
- mFlags, mSelectedDeviceId);
+ &config,
+ mFlags, mSelectedDeviceId, &mPortId);
if (status != NO_ERROR || input == AUDIO_IO_HANDLE_NONE) {
ALOGE("Could not get audio input for session %d, record source %d, sample rate %u, "
@@ -570,10 +578,17 @@
// Client can only express a preference for FAST. Server will perform additional tests.
if (mFlags & AUDIO_INPUT_FLAG_FAST) {
bool useCaseAllowed =
- // either of these use cases:
+ // any of these use cases:
// use case 1: callback transfer mode
(mTransfer == TRANSFER_CALLBACK) ||
- // use case 2: obtain/release mode
+ // use case 2: blocking read mode
+ // The default buffer capacity at 48 kHz is 2048 frames, or ~42.6 ms.
+ // That's enough for double-buffering with our standard 20 ms rule of thumb for
+ // the minimum period of a non-SCHED_FIFO thread.
+ // This is needed so that AAudio apps can do a low latency non-blocking read from a
+ // callback running with SCHED_FIFO.
+ (mTransfer == TRANSFER_SYNC) ||
+ // use case 3: obtain/release mode
(mTransfer == TRANSFER_OBTAIN);
// sample rates must also match
bool fastAllowed = useCaseAllowed && (mSampleRate == afSampleRate);
@@ -622,7 +637,8 @@
¬ificationFrames,
iMem,
bufferMem,
- &status);
+ &status,
+ mPortId);
ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
"session ID changed from %d to %d", originalSessionId, mSessionId);
@@ -638,10 +654,10 @@
mAwaitBoost = false;
if (mFlags & AUDIO_INPUT_FLAG_FAST) {
if (flags & AUDIO_INPUT_FLAG_FAST) {
- ALOGI("AUDIO_INPUT_FLAG_FAST successful; frameCount %zu", frameCount);
+ ALOGI("AUDIO_INPUT_FLAG_FAST successful; frameCount %zu -> %zu", frameCount, temp);
mAwaitBoost = true;
} else {
- ALOGW("AUDIO_INPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
+ ALOGW("AUDIO_INPUT_FLAG_FAST denied by server; frameCount %zu -> %zu", frameCount, temp);
mFlags = (audio_input_flags_t) (mFlags & ~(AUDIO_INPUT_FLAG_FAST |
AUDIO_INPUT_FLAG_RAW));
continue; // retry
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index bbe6a8f..9ef1db7 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -19,6 +19,7 @@
#include <utils/Log.h>
#include <binder/IServiceManager.h>
+#include <binder/ProcessState.h>
#include <media/AudioSystem.h>
#include <media/IAudioFlinger.h>
#include <media/IAudioPolicyService.h>
@@ -68,6 +69,8 @@
gAudioFlinger = interface_cast<IAudioFlinger>(binder);
LOG_ALWAYS_FATAL_IF(gAudioFlinger == 0);
afc = gAudioFlingerClient;
+ // Make sure callbacks can be received by gAudioFlingerClient
+ ProcessState::self()->startThreadPool();
}
af = gAudioFlinger;
}
@@ -554,7 +557,8 @@
"channel mask %#x frameCount %zu frameCountHAL %zu deviceId %d",
event == AUDIO_OUTPUT_CONFIG_CHANGED ? "output" : "input",
ioDesc->mIoHandle, ioDesc->mSamplingRate, ioDesc->mFormat,
- ioDesc->mChannelMask, ioDesc->mFrameCount, ioDesc->mFrameCountHAL, ioDesc->getDeviceId());
+ ioDesc->mChannelMask, ioDesc->mFrameCount, ioDesc->mFrameCountHAL,
+ ioDesc->getDeviceId());
} break;
}
@@ -710,6 +714,8 @@
gAudioPolicyService = interface_cast<IAudioPolicyService>(binder);
LOG_ALWAYS_FATAL_IF(gAudioPolicyService == 0);
apc = gAudioPolicyServiceClient;
+ // Make sure callbacks can be received by gAudioPolicyServiceClient
+ ProcessState::self()->startThreadPool();
}
ap = gAudioPolicyService;
}
@@ -811,18 +817,16 @@
audio_session_t session,
audio_stream_type_t *stream,
uid_t uid,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
+ const audio_config_t *config,
audio_output_flags_t flags,
audio_port_handle_t selectedDeviceId,
- const audio_offload_info_t *offloadInfo)
+ audio_port_handle_t *portId)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return NO_INIT;
return aps->getOutputForAttr(attr, output, session, stream, uid,
- samplingRate, format, channelMask,
- flags, selectedDeviceId, offloadInfo);
+ config,
+ flags, selectedDeviceId, portId);
}
status_t AudioSystem::startOutput(audio_io_handle_t output,
@@ -857,17 +861,16 @@
audio_session_t session,
pid_t pid,
uid_t uid,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
+ const audio_config_base_t *config,
audio_input_flags_t flags,
- audio_port_handle_t selectedDeviceId)
+ audio_port_handle_t selectedDeviceId,
+ audio_port_handle_t *portId)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return NO_INIT;
return aps->getInputForAttr(
attr, input, session, pid, uid,
- samplingRate, format, channelMask, flags, selectedDeviceId);
+ config, flags, selectedDeviceId, portId);
}
status_t AudioSystem::startInput(audio_io_handle_t input,
@@ -1194,14 +1197,14 @@
status_t AudioSystem::startAudioSource(const struct audio_port_config *source,
const audio_attributes_t *attributes,
- audio_io_handle_t *handle)
+ audio_patch_handle_t *handle)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
return aps->startAudioSource(source, attributes, handle);
}
-status_t AudioSystem::stopAudioSource(audio_io_handle_t handle)
+status_t AudioSystem::stopAudioSource(audio_patch_handle_t handle)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index c96f16a..ffb7703 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -50,6 +50,8 @@
return x > y ? x : y;
}
+static const int32_t NANOS_PER_SECOND = 1000000000;
+
static inline nsecs_t framesToNanoseconds(ssize_t frames, uint32_t sampleRate, float speed)
{
return ((double)frames * 1000000000) / ((double)sampleRate * speed);
@@ -60,6 +62,11 @@
return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
}
+static inline nsecs_t convertTimespecToNs(const struct timespec &tv)
+{
+ return tv.tv_sec * (long long)NANOS_PER_SECOND + tv.tv_nsec;
+}
+
// current monotonic time in microseconds.
static int64_t getNowUs()
{
@@ -176,7 +183,8 @@
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT),
mPausedPosition(0),
- mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
+ mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
+ mPortId(AUDIO_PORT_HANDLE_NONE)
{
mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
mAttributes.usage = AUDIO_USAGE_UNKNOWN;
@@ -197,7 +205,7 @@
audio_session_t sessionId,
transfer_type transferType,
const audio_offload_info_t *offloadInfo,
- int uid,
+ uid_t uid,
pid_t pid,
const audio_attributes_t* pAttributes,
bool doNotReconnect,
@@ -207,7 +215,8 @@
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT),
mPausedPosition(0),
- mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
+ mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
+ mPortId(AUDIO_PORT_HANDLE_NONE)
{
mStatus = set(streamType, sampleRate, format, channelMask,
frameCount, flags, cbf, user, notificationFrames,
@@ -228,7 +237,7 @@
audio_session_t sessionId,
transfer_type transferType,
const audio_offload_info_t *offloadInfo,
- int uid,
+ uid_t uid,
pid_t pid,
const audio_attributes_t* pAttributes,
bool doNotReconnect,
@@ -238,7 +247,8 @@
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT),
mPausedPosition(0),
- mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
+ mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
+ mPortId(AUDIO_PORT_HANDLE_NONE)
{
mStatus = set(streamType, sampleRate, format, channelMask,
0 /*frameCount*/, flags, cbf, user, notificationFrames,
@@ -289,7 +299,7 @@
audio_session_t sessionId,
transfer_type transferType,
const audio_offload_info_t *offloadInfo,
- int uid,
+ uid_t uid,
pid_t pid,
const audio_attributes_t* pAttributes,
bool doNotReconnect,
@@ -373,6 +383,10 @@
if ((mAttributes.flags & AUDIO_FLAG_LOW_LATENCY) != 0) {
flags = (audio_output_flags_t) (flags | AUDIO_OUTPUT_FLAG_FAST);
}
+ // check deep buffer after flags have been modified above
+ if (flags == AUDIO_OUTPUT_FLAG_NONE && (mAttributes.flags & AUDIO_FLAG_DEEP_BUFFER) != 0) {
+ flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
+ }
}
// these below should probably come from the audioFlinger too...
@@ -445,6 +459,7 @@
mOffloadInfo = &mOffloadInfoCopy;
} else {
mOffloadInfo = NULL;
+ memset(&mOffloadInfoCopy, 0, sizeof(audio_offload_info_t));
}
mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
@@ -483,7 +498,7 @@
}
int callingpid = IPCThreadState::self()->getCallingPid();
int mypid = getpid();
- if (uid == -1 || (callingpid != mypid)) {
+ if (uid == AUDIO_UID_INVALID || (callingpid != mypid)) {
mClientUid = IPCThreadState::self()->getCallingUid();
} else {
mClientUid = uid;
@@ -536,10 +551,12 @@
mTimestampStartupGlitchReported = false;
mRetrogradeMotionReported = false;
mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
+ mStartTs.mPosition = 0;
mUnderrunCountOffset = 0;
mFramesWritten = 0;
mFramesWrittenServerOffset = 0;
-
+ mFramesWrittenAtRestore = -1; // -1 is a unique initializer.
+ mVolumeHandler = new VolumeHandler();
return NO_ERROR;
}
@@ -562,6 +579,17 @@
mState = STATE_ACTIVE;
}
(void) updateAndGetPosition_l();
+
+ // save start timestamp
+ if (isOffloadedOrDirect_l()) {
+ if (getTimestamp_l(mStartTs) != OK) {
+ mStartTs.mPosition = 0;
+ }
+ } else {
+ if (getTimestamp_l(&mStartEts) != OK) {
+ mStartEts.clear();
+ }
+ }
if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
// reset current position as seen by client to 0
mPosition = 0;
@@ -570,19 +598,17 @@
mRetrogradeMotionReported = false;
mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
- // read last server side position change via timestamp.
- ExtendedTimestamp ets;
- if (mProxy->getTimestamp(&ets) == OK &&
- ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] > 0) {
+ if (!isOffloadedOrDirect_l()
+ && mStartEts.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] > 0) {
// Server side has consumed something, but is it finished consuming?
// It is possible since flush and stop are asynchronous that the server
// is still active at this point.
ALOGV("start: server read:%lld cumulative flushed:%lld client written:%lld",
(long long)(mFramesWrittenServerOffset
- + ets.mPosition[ExtendedTimestamp::LOCATION_SERVER]),
- (long long)ets.mFlushed,
+ + mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER]),
+ (long long)mStartEts.mFlushed,
(long long)mFramesWritten);
- mFramesWrittenServerOffset = -ets.mPosition[ExtendedTimestamp::LOCATION_SERVER];
+ mFramesWrittenServerOffset = -mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER];
}
mFramesWritten = 0;
mProxy->clearTimestamp(); // need new server push for valid timestamp
@@ -599,7 +625,7 @@
mRefreshRemaining = true;
}
mNewPosition = mPosition + mUpdatePeriod;
- int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
+ int32_t flags = android_atomic_and(~(CBLK_STREAM_END_DONE | CBLK_DISABLED), &mCblk->mFlags);
status_t status = NO_ERROR;
if (!(flags & CBLK_INVALID)) {
@@ -626,6 +652,9 @@
get_sched_policy(0, &mPreviousSchedulingGroup);
androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
}
+
+ // Start our local VolumeHandler for restoration purposes.
+ mVolumeHandler->setStarted();
} else {
ALOGE("start() status %d", status);
mState = previousState;
@@ -653,6 +682,8 @@
mState = STATE_STOPPING;
} else {
mState = STATE_STOPPED;
+ ALOGD_IF(mSharedBuffer == nullptr,
+ "stop() called with %u frames delivered", mReleased.value());
mReleased = 0;
}
@@ -889,7 +920,8 @@
}
// Check resampler ratios are within bounds
- if ((uint64_t)effectiveRate > (uint64_t)mSampleRate * (uint64_t)AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
+ if ((uint64_t)effectiveRate > (uint64_t)mSampleRate *
+ (uint64_t)AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
ALOGV("setPlaybackRate(%f, %f) failed. Resample rate exceeds max accepted value",
playbackRate.mSpeed, playbackRate.mPitch);
return BAD_VALUE;
@@ -1177,7 +1209,9 @@
AutoMutex lock(mLock);
if (mSelectedDeviceId != deviceId) {
mSelectedDeviceId = deviceId;
- android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
+ if (mStatus == NO_ERROR) {
+ android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
+ }
}
return NO_ERROR;
}
@@ -1235,15 +1269,21 @@
// After fast request is denied, we will request again if IAudioTrack is re-created.
status_t status;
+ audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+ config.sample_rate = mSampleRate;
+ config.channel_mask = mChannelMask;
+ config.format = mFormat;
+ config.offload_info = mOffloadInfoCopy;
status = AudioSystem::getOutputForAttr(attr, &output,
mSessionId, &streamType, mClientUid,
- mSampleRate, mFormat, mChannelMask,
- mFlags, mSelectedDeviceId, mOffloadInfo);
+ &config,
+ mFlags, mSelectedDeviceId, &mPortId);
if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
- ALOGE("Could not get audio output for session %d, stream type %d, usage %d, sample rate %u, format %#x,"
- " channel mask %#x, flags %#x",
- mSessionId, streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
+ ALOGE("Could not get audio output for session %d, stream type %d, usage %d, sample rate %u,"
+ " format %#x, channel mask %#x, flags %#x",
+ mSessionId, streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask,
+ mFlags);
return BAD_VALUE;
}
{
@@ -1396,7 +1436,8 @@
tid,
&mSessionId,
mClientUid,
- &status);
+ &status,
+ mPortId);
ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
"session ID changed from %d to %d", originalSessionId, mSessionId);
@@ -1443,12 +1484,13 @@
mAwaitBoost = false;
if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
if (flags & AUDIO_OUTPUT_FLAG_FAST) {
- ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
+ ALOGI("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu -> %zu", frameCount, temp);
if (!mThreadCanCallJava) {
mAwaitBoost = true;
}
} else {
- ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
+ ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu", frameCount,
+ temp);
}
}
mFlags = flags;
@@ -2181,10 +2223,12 @@
mUnderrunCountOffset = getUnderrunCount_l();
// save the old static buffer position
+ uint32_t staticPosition = 0;
size_t bufferPosition = 0;
int loopCount = 0;
if (mStaticProxy != 0) {
mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
+ staticPosition = mStaticProxy->getPosition().unsignedValue();
}
mFlags = mOrigFlags;
@@ -2214,10 +2258,30 @@
}
}
}
+ // restore volume handler
+ mVolumeHandler->forall([this](const VolumeShaper &shaper) -> VolumeShaper::Status {
+ sp<VolumeShaper::Operation> operationToEnd =
+ new VolumeShaper::Operation(shaper.mOperation);
+ // TODO: Ideally we would restore to the exact xOffset position
+ // as returned by getVolumeShaperState(), but we don't have that
+ // information when restoring at the client unless we periodically poll
+ // the server or create shared memory state.
+ //
+ // For now, we simply advance to the end of the VolumeShaper effect
+ // if it has been started.
+ if (shaper.isStarted()) {
+ operationToEnd->setNormalizedTime(1.f);
+ }
+ return mAudioTrack->applyVolumeShaper(shaper.mConfiguration, operationToEnd);
+ });
+
if (mState == STATE_ACTIVE) {
result = mAudioTrack->start();
- mFramesWrittenServerOffset = mFramesWritten; // server resets to zero so we offset
}
+ // server resets to zero so we offset
+ mFramesWrittenServerOffset =
+ mStaticProxy.get() != nullptr ? staticPosition : mFramesWritten;
+ mFramesWrittenAtRestore = mFramesWrittenServerOffset;
}
if (result != NO_ERROR) {
ALOGW("restoreTrack_l() failed status %d", result);
@@ -2271,6 +2335,45 @@
return mAudioTrack->setParameters(keyValuePairs);
}
+VolumeShaper::Status AudioTrack::applyVolumeShaper(
+ const sp<VolumeShaper::Configuration>& configuration,
+ const sp<VolumeShaper::Operation>& operation)
+{
+ AutoMutex lock(mLock);
+ mVolumeHandler->setIdIfNecessary(configuration);
+ VolumeShaper::Status status = mAudioTrack->applyVolumeShaper(configuration, operation);
+
+ if (status == DEAD_OBJECT) {
+ if (restoreTrack_l("applyVolumeShaper") == OK) {
+ status = mAudioTrack->applyVolumeShaper(configuration, operation);
+ }
+ }
+ if (status >= 0) {
+ // save VolumeShaper for restore
+ mVolumeHandler->applyVolumeShaper(configuration, operation);
+ if (mState == STATE_ACTIVE || mState == STATE_STOPPING) {
+ mVolumeHandler->setStarted();
+ }
+ } else {
+ // warn only if not an expected restore failure.
+ ALOGW_IF(!((isOffloadedOrDirect_l() || mDoNotReconnect) && status == DEAD_OBJECT),
+ "applyVolumeShaper failed: %d", status);
+ }
+ return status;
+}
+
+sp<VolumeShaper::State> AudioTrack::getVolumeShaperState(int id)
+{
+ AutoMutex lock(mLock);
+ sp<VolumeShaper::State> state = mAudioTrack->getVolumeShaperState(id);
+ if (state.get() == nullptr && (mCblk->mFlags & CBLK_INVALID) != 0) {
+ if (restoreTrack_l("getVolumeShaperState") == OK) {
+ state = mAudioTrack->getVolumeShaperState(id);
+ }
+ }
+ return state;
+}
+
status_t AudioTrack::getTimestamp(ExtendedTimestamp *timestamp)
{
if (timestamp == nullptr) {
@@ -2315,7 +2418,11 @@
status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
{
AutoMutex lock(mLock);
+ return getTimestamp_l(timestamp);
+}
+status_t AudioTrack::getTimestamp_l(AudioTimestamp& timestamp)
+{
bool previousTimestampValid = mPreviousTimestampValid;
// Set false here to cover all the error return cases.
mPreviousTimestampValid = false;
@@ -2393,6 +2500,26 @@
ALOGV_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_SERVER,
"getTimestamp() location moved from server to kernel");
}
+
+ // We update the timestamp time even when paused.
+ if (mState == STATE_PAUSED /* not needed: STATE_PAUSED_STOPPING */) {
+ const int64_t now = systemTime();
+ const int64_t at = convertTimespecToNs(timestamp.mTime);
+ const int64_t lag =
+ (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
+ ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0)
+ ? int64_t(mAfLatency * 1000000LL)
+ : (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
+ - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK])
+ * NANOS_PER_SECOND / mSampleRate;
+ const int64_t limit = now - lag; // no earlier than this limit
+ if (at < limit) {
+ ALOGV("timestamp pause lag:%lld adjusting from %lld to %lld",
+ (long long)lag, (long long)at, (long long)limit);
+ timestamp.mTime.tv_sec = limit / NANOS_PER_SECOND;
+ timestamp.mTime.tv_nsec = limit % NANOS_PER_SECOND; // compiler opt.
+ }
+ }
mPreviousLocation = location;
} else {
// right after AudioTrack is started, one may not find a timestamp
@@ -2400,7 +2527,17 @@
}
}
if (status == INVALID_OPERATION) {
- status = WOULD_BLOCK;
+ // INVALID_OPERATION occurs when no timestamp has been issued by the server;
+ // other failures are signaled by a negative time.
+ // If we come out of FLUSHED or STOPPED where the position is known
+ // to be zero we convert this to WOULD_BLOCK (with the implicit meaning of
+ // "zero" for NuPlayer). We don't convert for track restoration as position
+ // does not reset.
+ ALOGV("timestamp server offset:%lld restore frames:%lld",
+ (long long)mFramesWrittenServerOffset, (long long)mFramesWrittenAtRestore);
+ if (mFramesWrittenServerOffset != mFramesWrittenAtRestore) {
+ status = WOULD_BLOCK;
+ }
}
}
if (status != NO_ERROR) {
@@ -2412,6 +2549,7 @@
// use cached paused position in case another offloaded track is running.
timestamp.mPosition = mPausedPosition;
clock_gettime(CLOCK_MONOTONIC, ×tamp.mTime);
+ // TODO: adjust for delay
return NO_ERROR;
}
@@ -2498,21 +2636,18 @@
// This is sometimes caused by erratic reports of the available space in the ALSA drivers.
if (status == NO_ERROR) {
if (previousTimestampValid) {
-#define TIME_TO_NANOS(time) ((int64_t)(time).tv_sec * 1000000000 + (time).tv_nsec)
- const int64_t previousTimeNanos = TIME_TO_NANOS(mPreviousTimestamp.mTime);
- const int64_t currentTimeNanos = TIME_TO_NANOS(timestamp.mTime);
-#undef TIME_TO_NANOS
+ const int64_t previousTimeNanos = convertTimespecToNs(mPreviousTimestamp.mTime);
+ const int64_t currentTimeNanos = convertTimespecToNs(timestamp.mTime);
if (currentTimeNanos < previousTimeNanos) {
- ALOGW("retrograde timestamp time");
- // FIXME Consider blocking this from propagating upwards.
+ ALOGW("retrograde timestamp time corrected, %lld < %lld",
+ (long long)currentTimeNanos, (long long)previousTimeNanos);
+ timestamp.mTime = mPreviousTimestamp.mTime;
}
// Looking at signed delta will work even when the timestamps
// are wrapping around.
int32_t deltaPosition = (Modulo<uint32_t>(timestamp.mPosition)
- mPreviousTimestamp.mPosition).signedValue();
- // position can bobble slightly as an artifact; this hides the bobble
- static const int32_t MINIMUM_POSITION_DELTA = 8;
if (deltaPosition < 0) {
// Only report once per position instead of spamming the log.
if (!mRetrogradeMotionReported) {
@@ -2525,9 +2660,21 @@
} else {
mRetrogradeMotionReported = false;
}
- if (deltaPosition < MINIMUM_POSITION_DELTA) {
- timestamp = mPreviousTimestamp; // Use last valid timestamp.
+ if (deltaPosition < 0) {
+ timestamp.mPosition = mPreviousTimestamp.mPosition;
+ deltaPosition = 0;
}
+#if 0
+ // Uncomment this to verify audio timestamp rate.
+ const int64_t deltaTime =
+ convertTimespecToNs(timestamp.mTime) - previousTimeNanos;
+ if (deltaTime != 0) {
+ const int64_t computedSampleRate =
+ deltaPosition * (long long)NANOS_PER_SECOND / deltaTime;
+ ALOGD("computedSampleRate:%u sampleRate:%u",
+ (unsigned)computedSampleRate, mSampleRate);
+ }
+#endif
}
mPreviousTimestamp = timestamp;
mPreviousTimestampValid = true;
@@ -2697,6 +2844,75 @@
return NO_ERROR;
}
+bool AudioTrack::hasStarted()
+{
+ AutoMutex lock(mLock);
+ switch (mState) {
+ case STATE_STOPPED:
+ if (isOffloadedOrDirect_l()) {
+ // check if we have started in the past to return true.
+ return mStartUs > 0;
+ }
+ // A normal audio track may still be draining, so
+ // check if stream has ended. This covers fasttrack position
+ // instability and start/stop without any data written.
+ if (mProxy->getStreamEndDone()) {
+ return true;
+ }
+ // fall through
+ case STATE_ACTIVE:
+ case STATE_STOPPING:
+ break;
+ case STATE_PAUSED:
+ case STATE_PAUSED_STOPPING:
+ case STATE_FLUSHED:
+ return false; // we're not active
+ default:
+ LOG_ALWAYS_FATAL("Invalid mState in hasStarted(): %d", mState);
+ break;
+ }
+
+ // wait indicates whether we need to wait for a timestamp.
+ // This is conservatively figured - if we encounter an unexpected error
+ // then we will not wait.
+ bool wait = false;
+ if (isOffloadedOrDirect_l()) {
+ AudioTimestamp ts;
+ status_t status = getTimestamp_l(ts);
+ if (status == WOULD_BLOCK) {
+ wait = true;
+ } else if (status == OK) {
+ wait = (ts.mPosition == 0 || ts.mPosition == mStartTs.mPosition);
+ }
+ ALOGV("hasStarted wait:%d ts:%u start position:%lld",
+ (int)wait,
+ ts.mPosition,
+ (long long)mStartTs.mPosition);
+ } else {
+ int location = ExtendedTimestamp::LOCATION_SERVER; // for ALOG
+ ExtendedTimestamp ets;
+ status_t status = getTimestamp_l(&ets);
+ if (status == WOULD_BLOCK) { // no SERVER or KERNEL frame info in ets
+ wait = true;
+ } else if (status == OK) {
+ for (location = ExtendedTimestamp::LOCATION_KERNEL;
+ location >= ExtendedTimestamp::LOCATION_SERVER; --location) {
+ if (ets.mTimeNs[location] < 0 || mStartEts.mTimeNs[location] < 0) {
+ continue;
+ }
+ wait = ets.mPosition[location] == 0
+ || ets.mPosition[location] == mStartEts.mPosition[location];
+ break;
+ }
+ }
+ ALOGV("hasStarted wait:%d ets:%lld start position:%lld",
+ (int)wait,
+ (long long)ets.mPosition[location],
+ (long long)mStartEts.mPosition[location]);
+ }
+ return !wait;
+}
+
// =========================================================================
void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
diff --git a/media/libaudioclient/AudioTrackShared.cpp b/media/libaudioclient/AudioTrackShared.cpp
index 846f8b8..2ce6c63 100644
--- a/media/libaudioclient/AudioTrackShared.cpp
+++ b/media/libaudioclient/AudioTrackShared.cpp
@@ -696,7 +696,8 @@
ssize_t filled = rear - front;
// pipe should not already be overfull
if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
- ALOGE("Shared memory control block is corrupt (filled=%zd); shutting down", filled);
+ ALOGE("Shared memory control block is corrupt (filled=%zd, mFrameCount=%zu); shutting down",
+ filled, mFrameCount);
mIsShutdown = true;
}
if (mIsShutdown) {
@@ -820,7 +821,8 @@
ssize_t filled = rear - cblk->u.mStreaming.mFront;
// pipe should not already be overfull
if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
- ALOGE("Shared memory control block is corrupt (filled=%zd); shutting down", filled);
+ ALOGE("Shared memory control block is corrupt (filled=%zd, mFrameCount=%zu); shutting down",
+ filled, mFrameCount);
mIsShutdown = true;
return 0;
}
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 65fdedb..858b5cc 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -24,7 +24,7 @@
#include <binder/Parcel.h>
-#include <media/IAudioFlinger.h>
+#include "IAudioFlinger.h"
namespace android {
@@ -108,7 +108,8 @@
pid_t tid,
audio_session_t *sessionId,
int clientUid,
- status_t *status)
+ status_t *status,
+ audio_port_handle_t portId)
{
Parcel data, reply;
sp<IAudioTrack> track;
@@ -137,6 +138,7 @@
}
data.writeInt32(lSessionId);
data.writeInt32(clientUid);
+ data.writeInt32(portId);
status_t lStatus = remote()->transact(CREATE_TRACK, data, &reply);
if (lStatus != NO_ERROR) {
ALOGE("createTrack error: %s", strerror(-lStatus));
@@ -188,7 +190,8 @@
size_t *notificationFrames,
sp<IMemory>& cblk,
sp<IMemory>& buffers,
- status_t *status)
+ status_t *status,
+ audio_port_handle_t portId)
{
Parcel data, reply;
sp<IAudioRecord> record;
@@ -211,6 +214,7 @@
}
data.writeInt32(lSessionId);
data.writeInt64(notificationFrames != NULL ? *notificationFrames : 0);
+ data.writeInt32(portId);
cblk.clear();
buffers.clear();
status_t lStatus = remote()->transact(OPEN_RECORD, data, &reply);
@@ -716,6 +720,7 @@
audio_io_handle_t output,
audio_session_t sessionId,
const String16& opPackageName,
+ pid_t pid,
status_t *status,
int *id,
int *enabled)
@@ -737,6 +742,7 @@
data.writeInt32((int32_t) output);
data.writeInt32(sessionId);
data.writeString16(opPackageName);
+ data.writeInt32((int32_t) pid);
status_t lStatus = remote()->transact(CREATE_EFFECT, data, &reply);
if (lStatus != NO_ERROR) {
@@ -929,7 +935,6 @@
}
return reply.readInt64();
}
-
};
IMPLEMENT_META_INTERFACE(AudioFlinger, "android.media.IAudioFlinger");
@@ -939,6 +944,29 @@
status_t BnAudioFlinger::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
+ // Whitelist of relevant events to trigger log merging.
+    // Log merging should activate during audio activity of any kind. These are considered the
+ // most relevant events.
+ // TODO should select more wisely the items from the list
+ switch (code) {
+ case CREATE_TRACK:
+ case OPEN_RECORD:
+ case SET_MASTER_VOLUME:
+ case SET_MASTER_MUTE:
+ case SET_STREAM_VOLUME:
+ case SET_STREAM_MUTE:
+ case SET_MIC_MUTE:
+ case SET_PARAMETERS:
+ case OPEN_INPUT:
+ case SET_VOICE_VOLUME:
+ case CREATE_EFFECT:
+ case SYSTEM_READY: {
+ requestLogMerge();
+ break;
+ }
+ default:
+ break;
+ }
switch (code) {
case CREATE_TRACK: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
@@ -958,6 +986,7 @@
pid_t tid = (pid_t) data.readInt32();
audio_session_t sessionId = (audio_session_t) data.readInt32();
int clientUid = data.readInt32();
+ audio_port_handle_t portId = (audio_port_handle_t) data.readInt32();
status_t status = NO_ERROR;
sp<IAudioTrack> track;
if ((haveSharedBuffer && (buffer == 0)) ||
@@ -968,7 +997,7 @@
track = createTrack(
(audio_stream_type_t) streamType, sampleRate, format,
channelMask, &frameCount, &flags, buffer, output, pid, tid,
- &sessionId, clientUid, &status);
+ &sessionId, clientUid, &status, portId);
LOG_ALWAYS_FATAL_IF((track != 0) != (status == NO_ERROR));
}
reply->writeInt64(frameCount);
@@ -992,13 +1021,14 @@
int clientUid = data.readInt32();
audio_session_t sessionId = (audio_session_t) data.readInt32();
size_t notificationFrames = data.readInt64();
+ audio_port_handle_t portId = (audio_port_handle_t) data.readInt32();
sp<IMemory> cblk;
sp<IMemory> buffers;
status_t status = NO_ERROR;
sp<IAudioRecord> record = openRecord(input,
sampleRate, format, channelMask, opPackageName, &frameCount, &flags,
pid, tid, clientUid, &sessionId, ¬ificationFrames, cblk, buffers,
- &status);
+ &status, portId);
LOG_ALWAYS_FATAL_IF((record != 0) != (status == NO_ERROR));
reply->writeInt64(frameCount);
reply->writeInt32(flags);
@@ -1294,12 +1324,14 @@
audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
audio_session_t sessionId = (audio_session_t) data.readInt32();
const String16 opPackageName = data.readString16();
+ pid_t pid = (pid_t)data.readInt32();
+
status_t status = NO_ERROR;
int id = 0;
int enabled = 0;
sp<IEffect> effect = createEffect(&desc, client, priority, output, sessionId,
- opPackageName, &status, &id, &enabled);
+ opPackageName, pid, &status, &id, &enabled);
reply->writeInt32(status);
reply->writeInt32(id);
reply->writeInt32(enabled);
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index 0ac5726..f0f413d 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -189,12 +189,10 @@
audio_session_t session,
audio_stream_type_t *stream,
uid_t uid,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
+ const audio_config_t *config,
audio_output_flags_t flags,
audio_port_handle_t selectedDeviceId,
- const audio_offload_info_t *offloadInfo)
+ audio_port_handle_t *portId)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -212,6 +210,10 @@
ALOGE("getOutputForAttr NULL output - shouldn't happen");
return BAD_VALUE;
}
+ if (portId == NULL) {
+ ALOGE("getOutputForAttr NULL portId - shouldn't happen");
+ return BAD_VALUE;
+ }
if (attr == NULL) {
data.writeInt32(0);
} else {
@@ -226,18 +228,10 @@
data.writeInt32(*stream);
}
data.writeInt32(uid);
- data.writeInt32(samplingRate);
- data.writeInt32(static_cast <uint32_t>(format));
- data.writeInt32(channelMask);
+ data.write(config, sizeof(audio_config_t));
data.writeInt32(static_cast <uint32_t>(flags));
data.writeInt32(selectedDeviceId);
- // hasOffloadInfo
- if (offloadInfo == NULL) {
- data.writeInt32(0);
- } else {
- data.writeInt32(1);
- data.write(offloadInfo, sizeof(audio_offload_info_t));
- }
+ data.writeInt32(*portId);
status_t status = remote()->transact(GET_OUTPUT_FOR_ATTR, data, &reply);
if (status != NO_ERROR) {
return status;
@@ -247,9 +241,11 @@
return status;
}
*output = (audio_io_handle_t)reply.readInt32();
+ audio_stream_type_t lStream = (audio_stream_type_t)reply.readInt32();
if (stream != NULL) {
- *stream = (audio_stream_type_t)reply.readInt32();
+ *stream = lStream;
}
+ *portId = (audio_port_handle_t)reply.readInt32();
return status;
}
@@ -296,11 +292,10 @@
audio_session_t session,
pid_t pid,
uid_t uid,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
+ const audio_config_base_t *config,
audio_input_flags_t flags,
- audio_port_handle_t selectedDeviceId)
+ audio_port_handle_t selectedDeviceId,
+ audio_port_handle_t *portId)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -312,15 +307,18 @@
ALOGE("getInputForAttr NULL input - shouldn't happen");
return BAD_VALUE;
}
+ if (portId == NULL) {
+ ALOGE("getInputForAttr NULL portId - shouldn't happen");
+ return BAD_VALUE;
+ }
data.write(attr, sizeof(audio_attributes_t));
data.writeInt32(session);
data.writeInt32(pid);
data.writeInt32(uid);
- data.writeInt32(samplingRate);
- data.writeInt32(static_cast <uint32_t>(format));
- data.writeInt32(channelMask);
+ data.write(config, sizeof(audio_config_base_t));
data.writeInt32(flags);
data.writeInt32(selectedDeviceId);
+ data.writeInt32(*portId);
status_t status = remote()->transact(GET_INPUT_FOR_ATTR, data, &reply);
if (status != NO_ERROR) {
return status;
@@ -330,6 +328,7 @@
return status;
}
*input = (audio_io_handle_t)reply.readInt32();
+ *portId = (audio_port_handle_t)reply.readInt32();
return NO_ERROR;
}
@@ -752,7 +751,7 @@
virtual status_t startAudioSource(const struct audio_port_config *source,
const audio_attributes_t *attributes,
- audio_io_handle_t *handle)
+ audio_patch_handle_t *handle)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -769,11 +768,11 @@
if (status != NO_ERROR) {
return status;
}
- *handle = (audio_io_handle_t)reply.readInt32();
+ *handle = (audio_patch_handle_t)reply.readInt32();
return status;
}
- virtual status_t stopAudioSource(audio_io_handle_t handle)
+ virtual status_t stopAudioSource(audio_patch_handle_t handle)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -942,25 +941,22 @@
stream = (audio_stream_type_t)data.readInt32();
}
uid_t uid = (uid_t)data.readInt32();
- uint32_t samplingRate = data.readInt32();
- audio_format_t format = (audio_format_t) data.readInt32();
- audio_channel_mask_t channelMask = data.readInt32();
+ audio_config_t config;
+ memset(&config, 0, sizeof(audio_config_t));
+ data.read(&config, sizeof(audio_config_t));
audio_output_flags_t flags =
static_cast <audio_output_flags_t>(data.readInt32());
audio_port_handle_t selectedDeviceId = data.readInt32();
- bool hasOffloadInfo = data.readInt32() != 0;
- audio_offload_info_t offloadInfo;
- if (hasOffloadInfo) {
- data.read(&offloadInfo, sizeof(audio_offload_info_t));
- }
+ audio_port_handle_t portId = (audio_port_handle_t)data.readInt32();
audio_io_handle_t output = 0;
status_t status = getOutputForAttr(hasAttributes ? &attr : NULL,
&output, session, &stream, uid,
- samplingRate, format, channelMask,
- flags, selectedDeviceId, hasOffloadInfo ? &offloadInfo : NULL);
+ &config,
+ flags, selectedDeviceId, &portId);
reply->writeInt32(status);
reply->writeInt32(output);
reply->writeInt32(stream);
+ reply->writeInt32(portId);
return NO_ERROR;
} break;
@@ -1004,18 +1000,20 @@
audio_session_t session = (audio_session_t)data.readInt32();
pid_t pid = (pid_t)data.readInt32();
uid_t uid = (uid_t)data.readInt32();
- uint32_t samplingRate = data.readInt32();
- audio_format_t format = (audio_format_t) data.readInt32();
- audio_channel_mask_t channelMask = data.readInt32();
+ audio_config_base_t config;
+ memset(&config, 0, sizeof(audio_config_base_t));
+ data.read(&config, sizeof(audio_config_base_t));
audio_input_flags_t flags = (audio_input_flags_t) data.readInt32();
audio_port_handle_t selectedDeviceId = (audio_port_handle_t) data.readInt32();
+ audio_port_handle_t portId = (audio_port_handle_t)data.readInt32();
audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
status_t status = getInputForAttr(&attr, &input, session, pid, uid,
- samplingRate, format, channelMask,
- flags, selectedDeviceId);
+ &config,
+ flags, selectedDeviceId, &portId);
reply->writeInt32(status);
if (status == NO_ERROR) {
reply->writeInt32(input);
+ reply->writeInt32(portId);
}
return NO_ERROR;
} break;
@@ -1373,7 +1371,7 @@
data.read(&source, sizeof(struct audio_port_config));
audio_attributes_t attributes;
data.read(&attributes, sizeof(audio_attributes_t));
- audio_io_handle_t handle = {};
+ audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
status_t status = startAudioSource(&source, &attributes, &handle);
reply->writeInt32(status);
reply->writeInt32(handle);
@@ -1382,7 +1380,7 @@
case STOP_AUDIO_SOURCE: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- audio_io_handle_t handle = (audio_io_handle_t)data.readInt32();
+ audio_patch_handle_t handle = (audio_patch_handle_t) data.readInt32();
status_t status = stopAudioSource(handle);
reply->writeInt32(status);
return NO_ERROR;
diff --git a/media/libaudioclient/IAudioTrack.cpp b/media/libaudioclient/IAudioTrack.cpp
index 89e0fcc..79e864d 100644
--- a/media/libaudioclient/IAudioTrack.cpp
+++ b/media/libaudioclient/IAudioTrack.cpp
@@ -39,6 +39,8 @@
SET_PARAMETERS,
GET_TIMESTAMP,
SIGNAL,
+ APPLY_VOLUME_SHAPER,
+ GET_VOLUME_SHAPER_STATE,
};
class BpAudioTrack : public BpInterface<IAudioTrack>
@@ -143,6 +145,52 @@
data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
remote()->transact(SIGNAL, data, &reply);
}
+
+ virtual VolumeShaper::Status applyVolumeShaper(
+ const sp<VolumeShaper::Configuration>& configuration,
+ const sp<VolumeShaper::Operation>& operation) {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
+
+ status_t status = configuration.get() == nullptr
+ ? data.writeInt32(0)
+ : data.writeInt32(1)
+ ?: configuration->writeToParcel(&data);
+ if (status != NO_ERROR) {
+ return VolumeShaper::Status(status);
+ }
+
+ status = operation.get() == nullptr
+ ? status = data.writeInt32(0)
+ : data.writeInt32(1)
+ ?: operation->writeToParcel(&data);
+ if (status != NO_ERROR) {
+ return VolumeShaper::Status(status);
+ }
+
+ int32_t remoteVolumeShaperStatus;
+ status = remote()->transact(APPLY_VOLUME_SHAPER, data, &reply)
+ ?: reply.readInt32(&remoteVolumeShaperStatus);
+
+ return VolumeShaper::Status(status ?: remoteVolumeShaperStatus);
+ }
+
+ virtual sp<VolumeShaper::State> getVolumeShaperState(int id) {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
+
+ data.writeInt32(id);
+ status_t status = remote()->transact(GET_VOLUME_SHAPER_STATE, data, &reply);
+ if (status != NO_ERROR) {
+ return nullptr;
+ }
+ sp<VolumeShaper::State> state = new VolumeShaper::State;
+ status = state->readFromParcel(reply);
+ if (status != NO_ERROR) {
+ return nullptr;
+ }
+ return state;
+ }
};
IMPLEMENT_META_INTERFACE(AudioTrack, "android.media.IAudioTrack");
@@ -206,6 +254,40 @@
signal();
return NO_ERROR;
} break;
+ case APPLY_VOLUME_SHAPER: {
+ CHECK_INTERFACE(IAudioTrack, data, reply);
+ sp<VolumeShaper::Configuration> configuration;
+ sp<VolumeShaper::Operation> operation;
+
+ int32_t present;
+ status_t status = data.readInt32(&present);
+ if (status == NO_ERROR && present != 0) {
+ configuration = new VolumeShaper::Configuration();
+ status = configuration->readFromParcel(data);
+ }
+ status = status ?: data.readInt32(&present);
+ if (status == NO_ERROR && present != 0) {
+ operation = new VolumeShaper::Operation();
+ status = operation->readFromParcel(data);
+ }
+ if (status == NO_ERROR) {
+ status = (status_t)applyVolumeShaper(configuration, operation);
+ }
+ reply->writeInt32(status);
+ return NO_ERROR;
+ } break;
+ case GET_VOLUME_SHAPER_STATE: {
+ CHECK_INTERFACE(IAudioTrack, data, reply);
+ int id;
+ status_t status = data.readInt32(&id);
+ if (status == NO_ERROR) {
+ sp<VolumeShaper::State> state = getVolumeShaperState(id);
+ if (state.get() != nullptr) {
+ status = state->writeToParcel(reply);
+ }
+ }
+ return NO_ERROR;
+ } break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/media/libaudioclient/PlayerBase.cpp b/media/libaudioclient/PlayerBase.cpp
new file mode 100644
index 0000000..cbef1b3
--- /dev/null
+++ b/media/libaudioclient/PlayerBase.cpp
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <binder/IServiceManager.h>
+#include <media/PlayerBase.h>
+
+#define max(a, b) ((a) > (b) ? (a) : (b))
+#define min(a, b) ((a) < (b) ? (a) : (b))
+
+namespace android {
+
+//--------------------------------------------------------------------------------------------------
+PlayerBase::PlayerBase() : BnPlayer(),
+ mPanMultiplierL(1.0f), mPanMultiplierR(1.0f),
+ mVolumeMultiplierL(1.0f), mVolumeMultiplierR(1.0f),
+ mPIId(PLAYER_PIID_INVALID), mLastReportedEvent(PLAYER_STATE_UNKNOWN)
+{
+ ALOGD("PlayerBase::PlayerBase()");
+ // use checkService() to avoid blocking if audio service is not up yet
+ sp<IBinder> binder = defaultServiceManager()->checkService(String16("audio"));
+ if (binder == 0) {
+ ALOGE("PlayerBase(): binding to audio service failed, service up?");
+ } else {
+ mAudioManager = interface_cast<IAudioManager>(binder);
+ }
+}
+
+
+PlayerBase::~PlayerBase() {
+ ALOGD("PlayerBase::~PlayerBase()");
+ baseDestroy();
+}
+
+void PlayerBase::init(player_type_t playerType, audio_usage_t usage) {
+ if (mAudioManager == 0) {
+ ALOGE("AudioPlayer realize: no audio service, player will not be registered");
+ } else {
+ mPIId = mAudioManager->trackPlayer(playerType, usage, AUDIO_CONTENT_TYPE_UNKNOWN, this);
+ }
+}
+
+void PlayerBase::baseDestroy() {
+ serviceReleasePlayer();
+ if (mAudioManager != 0) {
+ mAudioManager.clear();
+ }
+}
+
+//------------------------------------------------------------------------------
+void PlayerBase::servicePlayerEvent(player_state_t event) {
+ if (mAudioManager != 0) {
+ // only report state change
+ Mutex::Autolock _l(mPlayerStateLock);
+ if (event != mLastReportedEvent
+ && mPIId != PLAYER_PIID_INVALID) {
+ mLastReportedEvent = event;
+ mAudioManager->playerEvent(mPIId, event);
+ }
+ }
+}
+
+void PlayerBase::serviceReleasePlayer() {
+ if (mAudioManager != 0
+ && mPIId != PLAYER_PIID_INVALID) {
+ mAudioManager->releasePlayer(mPIId);
+ }
+}
+
+//FIXME temporary method while some AudioTrack state is outside of this class
+void PlayerBase::reportEvent(player_state_t event) {
+ servicePlayerEvent(event);
+}
+
+status_t PlayerBase::startWithStatus() {
+ status_t status = playerStart();
+ if (status == NO_ERROR) {
+ ALOGD("PlayerBase::start() from IPlayer");
+ servicePlayerEvent(PLAYER_STATE_STARTED);
+ } else {
+ ALOGD("PlayerBase::start() no AudioTrack to start from IPlayer");
+ }
+ return status;
+}
+
+//------------------------------------------------------------------------------
+// Implementation of IPlayer
+void PlayerBase::start() {
+ (void)startWithStatus();
+}
+
+void PlayerBase::pause() {
+ if (playerPause() == NO_ERROR) {
+ ALOGD("PlayerBase::pause() from IPlayer");
+ servicePlayerEvent(PLAYER_STATE_PAUSED);
+ } else {
+ ALOGD("PlayerBase::pause() no AudioTrack to pause from IPlayer");
+ }
+}
+
+
+void PlayerBase::stop() {
+ if (playerStop() == NO_ERROR) {
+ ALOGD("PlayerBase::stop() from IPlayer");
+ servicePlayerEvent(PLAYER_STATE_STOPPED);
+ } else {
+ ALOGD("PlayerBase::stop() no AudioTrack to stop from IPlayer");
+ }
+}
+
+void PlayerBase::setVolume(float vol) {
+ {
+ Mutex::Autolock _l(mSettingsLock);
+ mVolumeMultiplierL = vol;
+ mVolumeMultiplierR = vol;
+ }
+ if (playerSetVolume() == NO_ERROR) {
+ ALOGD("PlayerBase::setVolume() from IPlayer");
+ } else {
+ ALOGD("PlayerBase::setVolume() no AudioTrack for volume control from IPlayer");
+ }
+}
+
+void PlayerBase::setPan(float pan) {
+ {
+ Mutex::Autolock _l(mSettingsLock);
+ pan = min(max(-1.0f, pan), 1.0f);
+ if (pan >= 0.0f) {
+ mPanMultiplierL = 1.0f - pan;
+ mPanMultiplierR = 1.0f;
+ } else {
+ mPanMultiplierL = 1.0f;
+ mPanMultiplierR = 1.0f + pan;
+ }
+ }
+ if (playerSetVolume() == NO_ERROR) {
+ ALOGD("PlayerBase::setPan() from IPlayer");
+ } else {
+ ALOGD("PlayerBase::setPan() no AudioTrack for volume control from IPlayer");
+ }
+}
+
+void PlayerBase::setStartDelayMs(int32_t delayMs __unused) {
+ ALOGW("setStartDelay() is not supported");
+}
+
+void PlayerBase::applyVolumeShaper(
+ const sp<VolumeShaper::Configuration>& configuration __unused,
+ const sp<VolumeShaper::Operation>& operation __unused) {
+ ALOGW("applyVolumeShaper() is not supported");
+}
+
+status_t PlayerBase::onTransact(
+ uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+ return BnPlayer::onTransact(code, data, reply, flags);
+}
+
+} // namespace android
diff --git a/media/libaudioclient/ToneGenerator.cpp b/media/libaudioclient/ToneGenerator.cpp
index 7a72237..9bc2594 100644
--- a/media/libaudioclient/ToneGenerator.cpp
+++ b/media/libaudioclient/ToneGenerator.cpp
@@ -1420,7 +1420,7 @@
// Instantiate a wave generator if ot already done for this frequency
if (mWaveGens.indexOfKey(frequency) == NAME_NOT_FOUND) {
ToneGenerator::WaveGenerator *lpWaveGen =
- new ToneGenerator::WaveGenerator((unsigned short)mSamplingRate,
+ new ToneGenerator::WaveGenerator(mSamplingRate,
frequency,
TONEGEN_GAIN/lNumWaves);
mWaveGens.add(frequency, lpWaveGen);
@@ -1544,7 +1544,7 @@
// none
//
////////////////////////////////////////////////////////////////////////////////
-ToneGenerator::WaveGenerator::WaveGenerator(unsigned short samplingRate,
+ToneGenerator::WaveGenerator::WaveGenerator(uint32_t samplingRate,
unsigned short frequency, float volume) {
double d0;
double F_div_Fs; // frequency / samplingRate
diff --git a/media/libaudioclient/TrackPlayerBase.cpp b/media/libaudioclient/TrackPlayerBase.cpp
new file mode 100644
index 0000000..48cd803
--- /dev/null
+++ b/media/libaudioclient/TrackPlayerBase.cpp
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/TrackPlayerBase.h>
+
+namespace android {
+
+//--------------------------------------------------------------------------------------------------
+TrackPlayerBase::TrackPlayerBase() : PlayerBase(),
+ mPlayerVolumeL(1.0f), mPlayerVolumeR(1.0f)
+{
+ ALOGD("TrackPlayerBase::TrackPlayerBase()");
+}
+
+
+TrackPlayerBase::~TrackPlayerBase() {
+ ALOGD("TrackPlayerBase::~TrackPlayerBase()");
+ doDestroy();
+}
+
+void TrackPlayerBase::init(AudioTrack* pat, player_type_t playerType, audio_usage_t usage) {
+ PlayerBase::init(playerType, usage);
+ mAudioTrack = pat;
+}
+
+void TrackPlayerBase::destroy() {
+ doDestroy();
+ baseDestroy();
+}
+
+void TrackPlayerBase::doDestroy() {
+ if (mAudioTrack != 0) {
+ mAudioTrack->stop();
+ // Note that there may still be another reference in post-unlock phase of SetPlayState
+ mAudioTrack.clear();
+ }
+}
+
+void TrackPlayerBase::setPlayerVolume(float vl, float vr) {
+ {
+ Mutex::Autolock _l(mSettingsLock);
+ mPlayerVolumeL = vl;
+ mPlayerVolumeR = vr;
+ }
+ doSetVolume();
+}
+
+//------------------------------------------------------------------------------
+// Implementation of IPlayer
+status_t TrackPlayerBase::playerStart() {
+ status_t status = NO_INIT;
+ if (mAudioTrack != 0) {
+ status = mAudioTrack->start();
+ }
+ return status;
+}
+
+status_t TrackPlayerBase::playerPause() {
+ status_t status = NO_INIT;
+ if (mAudioTrack != 0) {
+ mAudioTrack->pause();
+ status = NO_ERROR;
+ }
+ return status;
+}
+
+
+status_t TrackPlayerBase::playerStop() {
+ status_t status = NO_INIT;
+ if (mAudioTrack != 0) {
+ mAudioTrack->stop();
+ status = NO_ERROR;
+ }
+ return status;
+}
+
+status_t TrackPlayerBase::playerSetVolume() {
+ return doSetVolume();
+}
+
+status_t TrackPlayerBase::doSetVolume() {
+ status_t status = NO_INIT;
+ if (mAudioTrack != 0) {
+ float tl = mPlayerVolumeL * mPanMultiplierL * mVolumeMultiplierL;
+ float tr = mPlayerVolumeR * mPanMultiplierR * mVolumeMultiplierR;
+ mAudioTrack->setVolume(tl, tr);
+ status = NO_ERROR;
+ }
+ return status;
+}
+
+
+void TrackPlayerBase::applyVolumeShaper(
+ const sp<VolumeShaper::Configuration>& configuration,
+ const sp<VolumeShaper::Operation>& operation) {
+ if (mAudioTrack != 0) {
+ ALOGD("TrackPlayerBase::applyVolumeShaper() from IPlayer");
+ VolumeShaper::Status status = mAudioTrack->applyVolumeShaper(configuration, operation);
+ if (status < 0) { // a non-negative value is the volume shaper id.
+ ALOGE("TrackPlayerBase::applyVolumeShaper() failed with status %d", status);
+ }
+ } else {
+ ALOGD("TrackPlayerBase::applyVolumeShaper()"
+ " no AudioTrack for volume control from IPlayer");
+ }
+}
+
+} // namespace android
diff --git a/include/media/AudioBufferProvider.h b/media/libaudioclient/include/media/AudioBufferProvider.h
similarity index 100%
rename from include/media/AudioBufferProvider.h
rename to media/libaudioclient/include/media/AudioBufferProvider.h
diff --git a/media/libaudioclient/include/media/AudioEffect.h b/media/libaudioclient/include/media/AudioEffect.h
new file mode 100644
index 0000000..bfc068b
--- /dev/null
+++ b/media/libaudioclient/include/media/AudioEffect.h
@@ -0,0 +1,488 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIOEFFECT_H
+#define ANDROID_AUDIOEFFECT_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <media/IAudioFlinger.h>
+#include <media/IAudioPolicyService.h>
+#include <media/IEffect.h>
+#include <media/IEffectClient.h>
+#include <media/AudioSystem.h>
+#include <system/audio_effect.h>
+
+#include <utils/RefBase.h>
+#include <utils/Errors.h>
+#include <binder/IInterface.h>
+
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+struct effect_param_cblk_t;
+
+// ----------------------------------------------------------------------------
+
+class AudioEffect : public RefBase
+{
+public:
+
+ /*
+ * Static methods for effects enumeration.
+ */
+
+ /*
+ * Returns the number of effects available. This method together
+ * with queryEffect() is used to enumerate all effects:
+ * The enumeration sequence is:
+ * queryNumberEffects(&num_effects);
+ * for (i = 0; i < num_effects; i++)
+ * queryEffect(i,...);
+ *
+ * Parameters:
+ * numEffects: address where the number of effects should be returned.
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * NO_ERROR successful operation.
+ * PERMISSION_DENIED could not get AudioFlinger interface
+ * NO_INIT effect library failed to initialize
+ * BAD_VALUE invalid numEffects pointer
+ *
+ * Returned value
+ * *numEffects: updated with number of effects available
+ */
+ static status_t queryNumberEffects(uint32_t *numEffects);
+
+ /*
+ * Returns an effect descriptor during effect
+ * enumeration.
+ *
+ * Parameters:
+ * index: index of the queried effect.
+ * descriptor: address where the effect descriptor should be returned.
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * NO_ERROR successful operation.
+ * PERMISSION_DENIED could not get AudioFlinger interface
+ * NO_INIT effect library failed to initialize
+ * BAD_VALUE invalid descriptor pointer or index
+ * INVALID_OPERATION effect list has changed since last execution of queryNumberEffects()
+ *
+ * Returned value
+ * *descriptor: updated with effect descriptor
+ */
+ static status_t queryEffect(uint32_t index, effect_descriptor_t *descriptor);
+
+
+ /*
+ * Returns the descriptor for the specified effect uuid.
+ *
+ * Parameters:
+ * uuid: pointer to effect uuid.
+ * descriptor: address where the effect descriptor should be returned.
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * NO_ERROR successful operation.
+ * PERMISSION_DENIED could not get AudioFlinger interface
+ * NO_INIT effect library failed to initialize
+ * BAD_VALUE invalid uuid or descriptor pointers
+ * NAME_NOT_FOUND no effect with this uuid found
+ *
+ * Returned value
+ * *descriptor updated with effect descriptor
+ */
+ static status_t getEffectDescriptor(const effect_uuid_t *uuid,
+ effect_descriptor_t *descriptor) /*const*/;
+
+
+ /*
+ * Returns a list of descriptors corresponding to the pre processings enabled by default
+ * on an AudioRecord with the supplied audio session ID.
+ *
+ * Parameters:
+ * audioSession: audio session ID.
+ * descriptors: address where the effect descriptors should be returned.
+ * count: as input, the maximum number of descriptor than should be returned
+ * as output, the number of descriptor returned if status is NO_ERROR or the actual
+ * number of enabled pre processings if status is NO_MEMORY
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * NO_ERROR successful operation.
+ * NO_MEMORY the number of descriptor to return is more than the maximum number
+ * indicated by count.
+ * PERMISSION_DENIED could not get AudioFlinger interface
+ * NO_INIT effect library failed to initialize
+ * BAD_VALUE invalid audio session or descriptor pointers
+ *
+ * Returned value
+ * *descriptor updated with descriptors of pre processings enabled by default
+ * *count number of descriptors returned if returned status is NO_ERROR.
+ * total number of pre processing enabled by default if returned status is
+ * NO_MEMORY. This happens if the count passed as input is less than the number
+ * of descriptors to return.
+ * *count is limited to kMaxPreProcessing on return.
+ */
+ static status_t queryDefaultPreProcessing(audio_session_t audioSession,
+ effect_descriptor_t *descriptors,
+ uint32_t *count);
+
+ /*
+ * Events used by callback function (effect_callback_t).
+ */
+ enum event_type {
+ EVENT_CONTROL_STATUS_CHANGED = 0,
+ EVENT_ENABLE_STATUS_CHANGED = 1,
+ EVENT_PARAMETER_CHANGED = 2,
+ EVENT_ERROR = 3
+ };
+
+ /* Callback function notifying client application of a change in effect engine state or
+ * configuration.
+ * An effect engine can be shared by several applications but only one has the control
+ * of the engine activity and configuration at a time.
+ * The EVENT_CONTROL_STATUS_CHANGED event is received when an application loses or
+ * retrieves the control of the effect engine. Loss of control happens
+ * if another application requests the use of the engine by creating an AudioEffect for
+ * the same effect type but with a higher priority. Control is returned when the
+ * application having the control deletes its AudioEffect object.
+ * The EVENT_ENABLE_STATUS_CHANGED event is received by all applications not having the
+ * control of the effect engine when the effect is enabled or disabled.
+ * The EVENT_PARAMETER_CHANGED event is received by all applications not having the
+ * control of the effect engine when an effect parameter is changed.
+ * The EVENT_ERROR event is received when the media server process dies.
+ *
+ * Parameters:
+ *
+ * event: type of event notified (see enum AudioEffect::event_type).
+ * user: Pointer to context for use by the callback receiver.
+ * info: Pointer to optional parameter according to event type:
+ * - EVENT_CONTROL_STATUS_CHANGED: boolean indicating if control is granted (true)
+ * or stolen (false).
+ * - EVENT_ENABLE_STATUS_CHANGED: boolean indicating if effect is now enabled (true)
+ * or disabled (false).
+ * - EVENT_PARAMETER_CHANGED: pointer to a effect_param_t structure.
+ * - EVENT_ERROR: status_t indicating the error (DEAD_OBJECT when media server dies).
+ */
+
+ typedef void (*effect_callback_t)(int32_t event, void* user, void *info);
+
+
+ /* Constructor.
+ * AudioEffect is the base class for creating and controlling an effect engine from
+ * the application process. Creating an AudioEffect object will create the effect engine
+ * in the AudioFlinger if no engine of the specified type exists. If one exists, this engine
+ * will be used. The application creating the AudioEffect object (or a derived class like
+ * Reverb for instance) will either receive control of the effect engine or not, depending
+ * on the priority parameter. If priority is higher than the priority used by the current
+ * effect engine owner, the control will be transfered to the new application. Otherwise
+ * control will remain to the previous application. In this case, the new application will be
+ * notified of changes in effect engine state or control ownership by the effect callback.
+ * After creating the AudioEffect, the application must call the initCheck() method and
+ * check the creation status before trying to control the effect engine (see initCheck()).
+ * If the effect is to be applied to an AudioTrack or MediaPlayer only the application
+ * must specify the audio session ID corresponding to this player.
+ */
+
+ /* Simple Constructor.
+ *
+ * Parameters:
+ *
+ * opPackageName: The package name used for app op checks.
+ */
+ AudioEffect(const String16& opPackageName);
+
+
+ /* Constructor.
+ *
+ * Parameters:
+ *
+ * type: type of effect created: can be null if uuid is specified. This corresponds to
+ * the OpenSL ES interface implemented by this effect.
+ * opPackageName: The package name used for app op checks.
+ * uuid: Uuid of effect created: can be null if type is specified. This uuid corresponds to
+ * a particular implementation of an effect type.
+ * priority: requested priority for effect control: the priority level corresponds to the
+ * value of priority parameter: negative values indicate lower priorities, positive values
+ * higher priorities, 0 being the normal priority.
+ * cbf: optional callback function (see effect_callback_t)
+ * user: pointer to context for use by the callback receiver.
+ * sessionID: audio session this effect is associated to.
+ * If equal to AUDIO_SESSION_OUTPUT_MIX, the effect will be global to
+ * the output mix. Otherwise, the effect will be applied to all players
+ * (AudioTrack or MediaPLayer) within the same audio session.
+ * io: HAL audio output or input stream to which this effect must be attached. Leave at 0 for
+ * automatic output selection by AudioFlinger.
+ */
+
+ AudioEffect(const effect_uuid_t *type,
+ const String16& opPackageName,
+ const effect_uuid_t *uuid = NULL,
+ int32_t priority = 0,
+ effect_callback_t cbf = NULL,
+ void* user = NULL,
+ audio_session_t sessionId = AUDIO_SESSION_OUTPUT_MIX,
+ audio_io_handle_t io = AUDIO_IO_HANDLE_NONE
+ );
+
+ /* Constructor.
+ * Same as above but with type and uuid specified by character strings
+ */
+ AudioEffect(const char *typeStr,
+ const String16& opPackageName,
+ const char *uuidStr = NULL,
+ int32_t priority = 0,
+ effect_callback_t cbf = NULL,
+ void* user = NULL,
+ audio_session_t sessionId = AUDIO_SESSION_OUTPUT_MIX,
+ audio_io_handle_t io = AUDIO_IO_HANDLE_NONE
+ );
+
+ /* Terminates the AudioEffect and unregisters it from AudioFlinger.
+ * The effect engine is also destroyed if this AudioEffect was the last controlling
+ * the engine.
+ */
+ ~AudioEffect();
+
+ /* Initialize an uninitialized AudioEffect.
+ * Returned status (from utils/Errors.h) can be:
+ * - NO_ERROR or ALREADY_EXISTS: successful initialization
+ * - INVALID_OPERATION: AudioEffect is already initialized
+ * - BAD_VALUE: invalid parameter
+ * - NO_INIT: audio flinger or audio hardware not initialized
+ * */
+ status_t set(const effect_uuid_t *type,
+ const effect_uuid_t *uuid = NULL,
+ int32_t priority = 0,
+ effect_callback_t cbf = NULL,
+ void* user = NULL,
+ audio_session_t sessionId = AUDIO_SESSION_OUTPUT_MIX,
+ audio_io_handle_t io = AUDIO_IO_HANDLE_NONE
+ );
+
+ /* Result of constructing the AudioEffect. This must be checked
+ * before using any AudioEffect API.
+ * initCheck() can return:
+ * - NO_ERROR: the effect engine is successfully created and the application has control.
+ * - ALREADY_EXISTS: the effect engine is successfully created but the application does not
+ * have control.
+ * - NO_INIT: the effect creation failed.
+ *
+ */
+ status_t initCheck() const;
+
+
+ /* Returns the unique effect Id for the controlled effect engine. This ID is unique
+ * system wide and is used for instance in the case of auxiliary effects to attach
+ * the effect to an AudioTrack or MediaPlayer.
+ *
+ */
+ int32_t id() const { return mId; }
+
+ /* Returns a descriptor for the effect (see effect_descriptor_t in audio_effect.h).
+ */
+ effect_descriptor_t descriptor() const;
+
+ /* Returns effect control priority of this AudioEffect object.
+ */
+ int32_t priority() const { return mPriority; }
+
+
+ /* Enables or disables the effect engine.
+ *
+ * Parameters:
+ * enabled: requested enable state.
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * - NO_ERROR: successful operation
+ * - INVALID_OPERATION: the application does not have control of the effect engine or the
+ * effect is already in the requested state.
+ */
+ virtual status_t setEnabled(bool enabled);
+ bool getEnabled() const;
+
+ /* Sets a parameter value.
+ *
+ * Parameters:
+ * param: pointer to effect_param_t structure containing the parameter
+ * and its value (See audio_effect.h).
+ * Returned status (from utils/Errors.h) can be:
+ * - NO_ERROR: successful operation.
+ * - INVALID_OPERATION: the application does not have control of the effect engine.
+ * - BAD_VALUE: invalid parameter identifier or value.
+ * - DEAD_OBJECT: the effect engine has been deleted.
+ */
+ virtual status_t setParameter(effect_param_t *param);
+
+ /* Prepare a new parameter value that will be set by next call to
+ * setParameterCommit(). This method can be used to set multiple parameters
+ * in a synchronous manner or to avoid multiple binder calls for each
+ * parameter.
+ *
+ * Parameters:
+ * param: pointer to effect_param_t structure containing the parameter
+ * and its value (See audio_effect.h).
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * - NO_ERROR: successful operation.
+ * - INVALID_OPERATION: the application does not have control of the effect engine.
+ * - NO_MEMORY: no more space available in shared memory used for deferred parameter
+ * setting.
+ */
+ virtual status_t setParameterDeferred(effect_param_t *param);
+
+ /* Commit all parameter values previously prepared by setParameterDeferred().
+ *
+ * Parameters:
+ * none
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * - NO_ERROR: successful operation.
+ * - INVALID_OPERATION: No new parameter values ready for commit.
+ * - BAD_VALUE: invalid parameter identifier or value: there is no indication
+ * as to which of the parameters caused this error.
+ * - DEAD_OBJECT: the effect engine has been deleted.
+ */
+ virtual status_t setParameterCommit();
+
+ /* Gets a parameter value.
+ *
+ * Parameters:
+ * param: pointer to effect_param_t structure containing the parameter
+ * and the returned value (See audio_effect.h).
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * - NO_ERROR: successful operation.
+ * - INVALID_OPERATION: the AudioEffect was not successfully initialized.
+ * - BAD_VALUE: invalid parameter identifier.
+ * - DEAD_OBJECT: the effect engine has been deleted.
+ */
+ virtual status_t getParameter(effect_param_t *param);
+
+ /* Sends a command and receives a response to/from effect engine.
+ * See audio_effect.h for details on effect command() function, valid command codes
+ * and formats.
+ */
+ virtual status_t command(uint32_t cmdCode,
+ uint32_t cmdSize,
+ void *cmdData,
+ uint32_t *replySize,
+ void *replyData);
+
+
+ /*
+ * Utility functions.
+ */
+
+ /* Converts the string passed as first argument to the effect_uuid_t
+ * pointed to by second argument
+ */
+ static status_t stringToGuid(const char *str, effect_uuid_t *guid);
+ /* Converts the effect_uuid_t pointed to by first argument to the
+ * string passed as second argument
+ */
+ static status_t guidToString(const effect_uuid_t *guid, char *str, size_t maxLen);
+
+ // kMaxPreProcessing is a reasonable value for the maximum number of preprocessing effects
+ // that can be applied simultaneously.
+ static const uint32_t kMaxPreProcessing = 10;
+
+protected:
+ bool mEnabled; // enable state
+ audio_session_t mSessionId; // audio session ID
+ int32_t mPriority; // priority for effect control
+ status_t mStatus; // effect status
+ effect_callback_t mCbf; // callback function for status, control and
+ // parameter changes notifications
+ void* mUserData; // client context for callback function
+ effect_descriptor_t mDescriptor; // effect descriptor
+ int32_t mId; // system wide unique effect engine instance ID
+ Mutex mLock; // Mutex for mEnabled access
+
+ String16 mOpPackageName; // The package name used for app op checks.
+
+ // IEffectClient
+ virtual void controlStatusChanged(bool controlGranted);
+ virtual void enableStatusChanged(bool enabled);
+ virtual void commandExecuted(uint32_t cmdCode,
+ uint32_t cmdSize,
+ void *pCmdData,
+ uint32_t replySize,
+ void *pReplyData);
+
+private:
+
+ // Implements the IEffectClient interface
+ class EffectClient :
+ public android::BnEffectClient, public android::IBinder::DeathRecipient
+ {
+ public:
+
+ EffectClient(AudioEffect *effect) : mEffect(effect){}
+
+ // IEffectClient
+ virtual void controlStatusChanged(bool controlGranted) {
+ sp<AudioEffect> effect = mEffect.promote();
+ if (effect != 0) {
+ effect->controlStatusChanged(controlGranted);
+ }
+ }
+ virtual void enableStatusChanged(bool enabled) {
+ sp<AudioEffect> effect = mEffect.promote();
+ if (effect != 0) {
+ effect->enableStatusChanged(enabled);
+ }
+ }
+ virtual void commandExecuted(uint32_t cmdCode,
+ uint32_t cmdSize,
+ void *pCmdData,
+ uint32_t replySize,
+ void *pReplyData) {
+ sp<AudioEffect> effect = mEffect.promote();
+ if (effect != 0) {
+ effect->commandExecuted(
+ cmdCode, cmdSize, pCmdData, replySize, pReplyData);
+ }
+ }
+
+ // IBinder::DeathRecipient
+ virtual void binderDied(const wp<IBinder>& /*who*/) {
+ sp<AudioEffect> effect = mEffect.promote();
+ if (effect != 0) {
+ effect->binderDied();
+ }
+ }
+
+ private:
+ wp<AudioEffect> mEffect;
+ };
+
+ void binderDied();
+
+ sp<IEffect> mIEffect; // IEffect binder interface
+ sp<EffectClient> mIEffectClient; // IEffectClient implementation
+ sp<IMemory> mCblkMemory; // shared memory for deferred parameter setting
+ effect_param_cblk_t* mCblk; // control block for deferred parameter setting
+ pid_t mClientPid;
+};
+
+
+}; // namespace android
+
+#endif // ANDROID_AUDIOEFFECT_H
diff --git a/include/media/AudioIoDescriptor.h b/media/libaudioclient/include/media/AudioIoDescriptor.h
similarity index 100%
rename from include/media/AudioIoDescriptor.h
rename to media/libaudioclient/include/media/AudioIoDescriptor.h
diff --git a/media/libaudioclient/include/media/AudioMixer.h b/media/libaudioclient/include/media/AudioMixer.h
new file mode 100644
index 0000000..87ada76
--- /dev/null
+++ b/media/libaudioclient/include/media/AudioMixer.h
@@ -0,0 +1,389 @@
+/*
+**
+** Copyright 2007, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef ANDROID_AUDIO_MIXER_H
+#define ANDROID_AUDIO_MIXER_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <media/AudioBufferProvider.h>
+#include <media/AudioResampler.h>
+#include <media/AudioResamplerPublic.h>
+#include <media/BufferProviders.h>
+#include <media/nbaio/NBLog.h>
+#include <system/audio.h>
+#include <utils/Compat.h>
+#include <utils/threads.h>
+
+// FIXME This is actually unity gain, which might not be max in future, expressed in U.12
+#define MAX_GAIN_INT AudioMixer::UNITY_GAIN_INT
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+class AudioMixer
+{
+public:
+ AudioMixer(size_t frameCount, uint32_t sampleRate,
+ uint32_t maxNumTracks = MAX_NUM_TRACKS);
+
+ /*virtual*/ ~AudioMixer(); // non-virtual saves a v-table, restore if sub-classed
+
+
+ // This mixer has a hard-coded upper limit of 32 active track inputs.
+ // Adding support for > 32 tracks would require more than simply changing this value.
+ static const uint32_t MAX_NUM_TRACKS = 32;
+ // maximum number of channels supported by the mixer
+
+ // This mixer has a hard-coded upper limit of 8 channels for output.
+ static const uint32_t MAX_NUM_CHANNELS = 8;
+ static const uint32_t MAX_NUM_VOLUMES = 2; // stereo volume only
+ // maximum number of channels supported for the content
+ static const uint32_t MAX_NUM_CHANNELS_TO_DOWNMIX = AUDIO_CHANNEL_COUNT_MAX;
+
+ static const uint16_t UNITY_GAIN_INT = 0x1000;
+ static const CONSTEXPR float UNITY_GAIN_FLOAT = 1.0f;
+
+ enum { // names
+
+ // track names (MAX_NUM_TRACKS units)
+ TRACK0 = 0x1000,
+
+ // 0x2000 is unused
+
+ // setParameter targets
+ TRACK = 0x3000,
+ RESAMPLE = 0x3001,
+ RAMP_VOLUME = 0x3002, // ramp to new volume
+ VOLUME = 0x3003, // don't ramp
+ TIMESTRETCH = 0x3004,
+
+ // set Parameter names
+ // for target TRACK
+ CHANNEL_MASK = 0x4000,
+ FORMAT = 0x4001,
+ MAIN_BUFFER = 0x4002,
+ AUX_BUFFER = 0x4003,
+ DOWNMIX_TYPE = 0X4004,
+ MIXER_FORMAT = 0x4005, // AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
+ MIXER_CHANNEL_MASK = 0x4006, // Channel mask for mixer output
+ // for target RESAMPLE
+ SAMPLE_RATE = 0x4100, // Configure sample rate conversion on this track name;
+ // parameter 'value' is the new sample rate in Hz.
+ // Only creates a sample rate converter the first time that
+ // the track sample rate is different from the mix sample rate.
+ // If the new sample rate is the same as the mix sample rate,
+ // and a sample rate converter already exists,
+ // then the sample rate converter remains present but is a no-op.
+ RESET = 0x4101, // Reset sample rate converter without changing sample rate.
+ // This clears out the resampler's input buffer.
+ REMOVE = 0x4102, // Remove the sample rate converter on this track name;
+ // the track is restored to the mix sample rate.
+ // for target RAMP_VOLUME and VOLUME (8 channels max)
+ // FIXME use float for these 3 to improve the dynamic range
+ VOLUME0 = 0x4200,
+ VOLUME1 = 0x4201,
+ AUXLEVEL = 0x4210,
+ // for target TIMESTRETCH
+ PLAYBACK_RATE = 0x4300, // Configure timestretch on this track name;
+ // parameter 'value' is a pointer to the new playback rate.
+ };
+
+
+ // For all APIs with "name": TRACK0 <= name < TRACK0 + MAX_NUM_TRACKS
+
+ // Allocate a track name. Returns new track name if successful, -1 on failure.
+ // The failure could be because of an invalid channelMask or format, or that
+ // the track capacity of the mixer is exceeded.
+ int getTrackName(audio_channel_mask_t channelMask,
+ audio_format_t format, int sessionId);
+
+ // Free an allocated track by name
+ void deleteTrackName(int name);
+
+ // Enable or disable an allocated track by name
+ void enable(int name);
+ void disable(int name);
+
+ void setParameter(int name, int target, int param, void *value);
+
+ void setBufferProvider(int name, AudioBufferProvider* bufferProvider);
+ void process();
+
+ uint32_t trackNames() const { return mTrackNames; }
+
+ size_t getUnreleasedFrames(int name) const;
+
+ static inline bool isValidPcmTrackFormat(audio_format_t format) {
+ switch (format) {
+ case AUDIO_FORMAT_PCM_8_BIT:
+ case AUDIO_FORMAT_PCM_16_BIT:
+ case AUDIO_FORMAT_PCM_24_BIT_PACKED:
+ case AUDIO_FORMAT_PCM_32_BIT:
+ case AUDIO_FORMAT_PCM_FLOAT:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+private:
+
+ enum {
+ // FIXME this representation permits up to 8 channels
+ NEEDS_CHANNEL_COUNT__MASK = 0x00000007,
+ };
+
+ enum {
+ NEEDS_CHANNEL_1 = 0x00000000, // mono
+ NEEDS_CHANNEL_2 = 0x00000001, // stereo
+
+ // sample format is not explicitly specified, and is assumed to be AUDIO_FORMAT_PCM_16_BIT
+
+ NEEDS_MUTE = 0x00000100,
+ NEEDS_RESAMPLE = 0x00001000,
+ NEEDS_AUX = 0x00010000,
+ };
+
+ struct state_t;
+ struct track_t;
+
+ typedef void (*hook_t)(track_t* t, int32_t* output, size_t numOutFrames, int32_t* temp,
+ int32_t* aux);
+ static const int BLOCKSIZE = 16; // 4 cache lines
+
+ struct track_t {
+ uint32_t needs;
+
+ // TODO: Eventually remove legacy integer volume settings
+ union {
+ int16_t volume[MAX_NUM_VOLUMES]; // U4.12 fixed point (top bit should be zero)
+ int32_t volumeRL;
+ };
+
+ int32_t prevVolume[MAX_NUM_VOLUMES];
+
+ // 16-byte boundary
+
+ int32_t volumeInc[MAX_NUM_VOLUMES];
+ int32_t auxInc;
+ int32_t prevAuxLevel;
+
+ // 16-byte boundary
+
+ int16_t auxLevel; // 0 <= auxLevel <= MAX_GAIN_INT, but signed for mul performance
+ uint16_t frameCount;
+
+ uint8_t channelCount; // 1 or 2, redundant with (needs & NEEDS_CHANNEL_COUNT__MASK)
+ uint8_t unused_padding; // formerly format, was always 16
+ uint16_t enabled; // actually bool
+ audio_channel_mask_t channelMask;
+
+ // actual buffer provider used by the track hooks, see DownmixerBufferProvider below
+ // for how the Track buffer provider is wrapped by another one when dowmixing is required
+ AudioBufferProvider* bufferProvider;
+
+ // 16-byte boundary
+
+ mutable AudioBufferProvider::Buffer buffer; // 8 bytes
+
+ hook_t hook;
+ const void* in; // current location in buffer
+
+ // 16-byte boundary
+
+ AudioResampler* resampler;
+ uint32_t sampleRate;
+ int32_t* mainBuffer;
+ int32_t* auxBuffer;
+
+ // 16-byte boundary
+
+ /* Buffer providers are constructed to translate the track input data as needed.
+ *
+ * TODO: perhaps make a single PlaybackConverterProvider class to move
+ * all pre-mixer track buffer conversions outside the AudioMixer class.
+ *
+ * 1) mInputBufferProvider: The AudioTrack buffer provider.
+ * 2) mReformatBufferProvider: If not NULL, performs the audio reformat to
+ * match either mMixerInFormat or mDownmixRequiresFormat, if the downmixer
+ * requires reformat. For example, it may convert floating point input to
+ * PCM_16_bit if that's required by the downmixer.
+ * 3) downmixerBufferProvider: If not NULL, performs the channel remixing to match
+ * the number of channels required by the mixer sink.
+ * 4) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from
+ * the downmixer requirements to the mixer engine input requirements.
+ * 5) mTimestretchBufferProvider: Adds timestretching for playback rate
+ */
+ AudioBufferProvider* mInputBufferProvider; // externally provided buffer provider.
+ PassthruBufferProvider* mReformatBufferProvider; // provider wrapper for reformatting.
+ PassthruBufferProvider* downmixerBufferProvider; // wrapper for channel conversion.
+ PassthruBufferProvider* mPostDownmixReformatBufferProvider;
+ PassthruBufferProvider* mTimestretchBufferProvider;
+
+ int32_t sessionId;
+
+ audio_format_t mMixerFormat; // output mix format: AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
+ audio_format_t mFormat; // input track format
+ audio_format_t mMixerInFormat; // mix internal format AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
+ // each track must be converted to this format.
+ audio_format_t mDownmixRequiresFormat; // required downmixer format
+ // AUDIO_FORMAT_PCM_16_BIT if 16 bit necessary
+ // AUDIO_FORMAT_INVALID if no required format
+
+ float mVolume[MAX_NUM_VOLUMES]; // floating point set volume
+ float mPrevVolume[MAX_NUM_VOLUMES]; // floating point previous volume
+ float mVolumeInc[MAX_NUM_VOLUMES]; // floating point volume increment
+
+ float mAuxLevel; // floating point set aux level
+ float mPrevAuxLevel; // floating point prev aux level
+ float mAuxInc; // floating point aux increment
+
+ audio_channel_mask_t mMixerChannelMask;
+ uint32_t mMixerChannelCount;
+
+ AudioPlaybackRate mPlaybackRate;
+
+ bool needsRamp() { return (volumeInc[0] | volumeInc[1] | auxInc) != 0; }
+ bool setResampler(uint32_t trackSampleRate, uint32_t devSampleRate);
+ bool doesResample() const { return resampler != NULL; }
+ void resetResampler() { if (resampler != NULL) resampler->reset(); }
+ void adjustVolumeRamp(bool aux, bool useFloat = false);
+ size_t getUnreleasedFrames() const { return resampler != NULL ?
+ resampler->getUnreleasedFrames() : 0; };
+
+ status_t prepareForDownmix();
+ void unprepareForDownmix();
+ status_t prepareForReformat();
+ void unprepareForReformat();
+ bool setPlaybackRate(const AudioPlaybackRate &playbackRate);
+ void reconfigureBufferProviders();
+ };
+
+ typedef void (*process_hook_t)(state_t* state);
+
+ // pad to 32-bytes to fill cache line
+ struct state_t {
+ uint32_t enabledTracks;
+ uint32_t needsChanged;
+ size_t frameCount;
+ process_hook_t hook; // one of process__*, never NULL
+ int32_t *outputTemp;
+ int32_t *resampleTemp;
+ NBLog::Writer* mLog;
+ int32_t reserved[1];
+ // FIXME allocate dynamically to save some memory when maxNumTracks < MAX_NUM_TRACKS
+ track_t tracks[MAX_NUM_TRACKS] __attribute__((aligned(32)));
+ };
+
+ // bitmask of allocated track names, where bit 0 corresponds to TRACK0 etc.
+ uint32_t mTrackNames;
+
+ // bitmask of configured track names; ~0 if maxNumTracks == MAX_NUM_TRACKS,
+ // but will have fewer bits set if maxNumTracks < MAX_NUM_TRACKS
+ const uint32_t mConfiguredNames;
+
+ const uint32_t mSampleRate;
+
+ NBLog::Writer mDummyLog;
+public:
+ void setLog(NBLog::Writer* log);
+private:
+ state_t mState __attribute__((aligned(32)));
+
+ // Call after changing either the enabled status of a track, or parameters of an enabled track.
+ // OK to call more often than that, but unnecessary.
+ void invalidateState(uint32_t mask);
+
+ bool setChannelMasks(int name,
+ audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask);
+
+ static void track__genericResample(track_t* t, int32_t* out, size_t numFrames, int32_t* temp,
+ int32_t* aux);
+ static void track__nop(track_t* t, int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
+ static void track__16BitsStereo(track_t* t, int32_t* out, size_t numFrames, int32_t* temp,
+ int32_t* aux);
+ static void track__16BitsMono(track_t* t, int32_t* out, size_t numFrames, int32_t* temp,
+ int32_t* aux);
+ static void volumeRampStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
+ int32_t* aux);
+ static void volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
+ int32_t* aux);
+
+ static void process__validate(state_t* state);
+ static void process__nop(state_t* state);
+ static void process__genericNoResampling(state_t* state);
+ static void process__genericResampling(state_t* state);
+ static void process__OneTrack16BitsStereoNoResampling(state_t* state);
+
+ static pthread_once_t sOnceControl;
+ static void sInitRoutine();
+
+ /* multi-format volume mixing function (calls template functions
+ * in AudioMixerOps.h). The template parameters are as follows:
+ *
+ * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * USEFLOATVOL (set to true if float volume is used)
+ * ADJUSTVOL (set to true if volume ramp parameters needs adjustment afterwards)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27)
+ */
+ template <int MIXTYPE, bool USEFLOATVOL, bool ADJUSTVOL,
+ typename TO, typename TI, typename TA>
+ static void volumeMix(TO *out, size_t outFrames,
+ const TI *in, TA *aux, bool ramp, AudioMixer::track_t *t);
+
+ // multi-format process hooks
+ template <int MIXTYPE, typename TO, typename TI, typename TA>
+ static void process_NoResampleOneTrack(state_t* state);
+
+ // multi-format track hooks
+ template <int MIXTYPE, typename TO, typename TI, typename TA>
+ static void track__Resample(track_t* t, TO* out, size_t frameCount,
+ TO* temp __unused, TA* aux);
+ template <int MIXTYPE, typename TO, typename TI, typename TA>
+ static void track__NoResample(track_t* t, TO* out, size_t frameCount,
+ TO* temp __unused, TA* aux);
+
+ static void convertMixerFormat(void *out, audio_format_t mixerOutFormat,
+ void *in, audio_format_t mixerInFormat, size_t sampleCount);
+
+ // hook types
+ enum {
+ PROCESSTYPE_NORESAMPLEONETRACK,
+ };
+ enum {
+ TRACKTYPE_NOP,
+ TRACKTYPE_RESAMPLE,
+ TRACKTYPE_NORESAMPLE,
+ TRACKTYPE_NORESAMPLEMONO,
+ };
+
+ // functions for determining the proper process and track hooks.
+ static process_hook_t getProcessHook(int processType, uint32_t channelCount,
+ audio_format_t mixerInFormat, audio_format_t mixerOutFormat);
+ static hook_t getTrackHook(int trackType, uint32_t channelCount,
+ audio_format_t mixerInFormat, audio_format_t mixerOutFormat);
+};
+
+// ----------------------------------------------------------------------------
+} // namespace android
+
+#endif // ANDROID_AUDIO_MIXER_H
diff --git a/media/libaudioclient/include/media/AudioParameter.h b/media/libaudioclient/include/media/AudioParameter.h
new file mode 100644
index 0000000..1ace607
--- /dev/null
+++ b/media/libaudioclient/include/media/AudioParameter.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2008-2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIOPARAMETER_H_
+#define ANDROID_AUDIOPARAMETER_H_
+
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+
+namespace android {
+
+class AudioParameter {
+
+public:
+ AudioParameter() {}
+ AudioParameter(const String8& keyValuePairs);
+ virtual ~AudioParameter();
+
+ // reserved parameter keys for changing standard parameters with setParameters() function.
+ // Using these keys is mandatory for AudioFlinger to properly monitor audio output/input
+ // configuration changes and act accordingly.
+ // keyRouting: to change audio routing, value is an int in audio_devices_t
+ // keySamplingRate: to change sampling rate routing, value is an int
+ // keyFormat: to change audio format, value is an int in audio_format_t
+ // keyChannels: to change audio channel configuration, value is an int in audio_channels_t
+ // keyFrameCount: to change audio output frame count, value is an int
+ // keyInputSource: to change audio input source, value is an int in audio_source_t
+ // (defined in media/mediarecorder.h)
+ // keyScreenState: either "on" or "off"
+ static const char * const keyRouting;
+ static const char * const keySamplingRate;
+ static const char * const keyFormat;
+ static const char * const keyChannels;
+ static const char * const keyFrameCount;
+ static const char * const keyInputSource;
+ static const char * const keyScreenState;
+
+ // keyBtNrec: BT SCO Noise Reduction + Echo Cancellation parameters
+ // keyHwAvSync: get HW synchronization source identifier from a device
+ // keyMonoOutput: Enable mono audio playback
+ // keyStreamHwAvSync: set HW synchronization source identifier on a stream
+ static const char * const keyBtNrec;
+ static const char * const keyHwAvSync;
+ static const char * const keyMonoOutput;
+ static const char * const keyStreamHwAvSync;
+
+ // keyStreamConnect / Disconnect: value is an int in audio_devices_t
+ static const char * const keyStreamConnect;
+ static const char * const keyStreamDisconnect;
+
+ // For querying stream capabilities. All the returned values are lists.
+ // keyStreamSupportedFormats: audio_format_t
+ // keyStreamSupportedChannels: audio_channel_mask_t
+ // keyStreamSupportedSamplingRates: sampling rate values
+ static const char * const keyStreamSupportedFormats;
+ static const char * const keyStreamSupportedChannels;
+ static const char * const keyStreamSupportedSamplingRates;
+
+ static const char * const valueOn;
+ static const char * const valueOff;
+
+ static const char * const valueListSeparator;
+
+ String8 toString() const { return toStringImpl(true); }
+ String8 keysToString() const { return toStringImpl(false); }
+
+ status_t add(const String8& key, const String8& value);
+ status_t addInt(const String8& key, const int value);
+ status_t addKey(const String8& key);
+ status_t addFloat(const String8& key, const float value);
+
+ status_t remove(const String8& key);
+
+ status_t get(const String8& key, String8& value) const;
+ status_t getInt(const String8& key, int& value) const;
+ status_t getFloat(const String8& key, float& value) const;
+ status_t getAt(size_t index, String8& key) const;
+ status_t getAt(size_t index, String8& key, String8& value) const;
+
+ size_t size() const { return mParameters.size(); }
+
+private:
+ String8 mKeyValuePairs;
+ KeyedVector <String8, String8> mParameters;
+
+ String8 toStringImpl(bool useValues) const;
+};
+
+}; // namespace android
+
+#endif /*ANDROID_AUDIOPARAMETER_H_*/
diff --git a/include/media/AudioPolicy.h b/media/libaudioclient/include/media/AudioPolicy.h
similarity index 100%
rename from include/media/AudioPolicy.h
rename to media/libaudioclient/include/media/AudioPolicy.h
diff --git a/media/libaudioclient/include/media/AudioPolicyHelper.h b/media/libaudioclient/include/media/AudioPolicyHelper.h
new file mode 100644
index 0000000..73ee0a7
--- /dev/null
+++ b/media/libaudioclient/include/media/AudioPolicyHelper.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef AUDIO_POLICY_HELPER_H_
+#define AUDIO_POLICY_HELPER_H_
+
+#include <system/audio.h>
+
+static inline
+audio_stream_type_t audio_attributes_to_stream_type(const audio_attributes_t *attr)
+{
+ // flags to stream type mapping
+ if ((attr->flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
+ return AUDIO_STREAM_ENFORCED_AUDIBLE;
+ }
+ if ((attr->flags & AUDIO_FLAG_SCO) == AUDIO_FLAG_SCO) {
+ return AUDIO_STREAM_BLUETOOTH_SCO;
+ }
+
+ // usage to stream type mapping
+ switch (attr->usage) {
+ case AUDIO_USAGE_MEDIA:
+ case AUDIO_USAGE_GAME:
+ case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
+ case AUDIO_USAGE_ASSISTANT:
+ return AUDIO_STREAM_MUSIC;
+ case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
+ return AUDIO_STREAM_ACCESSIBILITY;
+ case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
+ return AUDIO_STREAM_SYSTEM;
+ case AUDIO_USAGE_VOICE_COMMUNICATION:
+ return AUDIO_STREAM_VOICE_CALL;
+
+ case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
+ return AUDIO_STREAM_DTMF;
+
+ case AUDIO_USAGE_ALARM:
+ return AUDIO_STREAM_ALARM;
+ case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
+ return AUDIO_STREAM_RING;
+
+ case AUDIO_USAGE_NOTIFICATION:
+ case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
+ case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
+ case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
+ case AUDIO_USAGE_NOTIFICATION_EVENT:
+ return AUDIO_STREAM_NOTIFICATION;
+
+ case AUDIO_USAGE_UNKNOWN:
+ default:
+ return AUDIO_STREAM_MUSIC;
+ }
+}
+
+static inline
+void stream_type_to_audio_attributes(audio_stream_type_t streamType,
+ audio_attributes_t *attr) {
+ memset(attr, 0, sizeof(audio_attributes_t));
+
+ switch (streamType) {
+ case AUDIO_STREAM_DEFAULT:
+ case AUDIO_STREAM_MUSIC:
+ attr->content_type = AUDIO_CONTENT_TYPE_MUSIC;
+ attr->usage = AUDIO_USAGE_MEDIA;
+ break;
+ case AUDIO_STREAM_VOICE_CALL:
+ attr->content_type = AUDIO_CONTENT_TYPE_SPEECH;
+ attr->usage = AUDIO_USAGE_VOICE_COMMUNICATION;
+ break;
+ case AUDIO_STREAM_ENFORCED_AUDIBLE:
+ attr->flags |= AUDIO_FLAG_AUDIBILITY_ENFORCED;
+ // intended fall through, attributes in common with STREAM_SYSTEM
+ case AUDIO_STREAM_SYSTEM:
+ attr->content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
+ attr->usage = AUDIO_USAGE_ASSISTANCE_SONIFICATION;
+ break;
+ case AUDIO_STREAM_RING:
+ attr->content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
+ attr->usage = AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
+ break;
+ case AUDIO_STREAM_ALARM:
+ attr->content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
+ attr->usage = AUDIO_USAGE_ALARM;
+ break;
+ case AUDIO_STREAM_NOTIFICATION:
+ attr->content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
+ attr->usage = AUDIO_USAGE_NOTIFICATION;
+ break;
+ case AUDIO_STREAM_BLUETOOTH_SCO:
+ attr->content_type = AUDIO_CONTENT_TYPE_SPEECH;
+ attr->usage = AUDIO_USAGE_VOICE_COMMUNICATION;
+ attr->flags |= AUDIO_FLAG_SCO;
+ break;
+ case AUDIO_STREAM_DTMF:
+ attr->content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
+ attr->usage = AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
+ break;
+ case AUDIO_STREAM_TTS:
+ attr->content_type = AUDIO_CONTENT_TYPE_SPEECH;
+ attr->usage = AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
+ break;
+ default:
+ ALOGE("invalid stream type %d when converting to attributes", streamType);
+ }
+}
+
+#endif //AUDIO_POLICY_HELPER_H_
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
new file mode 100644
index 0000000..1b034b5
--- /dev/null
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -0,0 +1,666 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIORECORD_H
+#define ANDROID_AUDIORECORD_H
+
+#include <cutils/sched_policy.h>
+#include <media/AudioSystem.h>
+#include <media/AudioTimestamp.h>
+#include <media/IAudioRecord.h>
+#include <media/Modulo.h>
+#include <utils/threads.h>
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+struct audio_track_cblk_t;
+class AudioRecordClientProxy;
+
+// ----------------------------------------------------------------------------
+
+class AudioRecord : public RefBase
+{
+public:
+
+ /* Events used by AudioRecord callback function (callback_t).
+ * Keep in sync with frameworks/base/media/java/android/media/AudioRecord.java NATIVE_EVENT_*.
+ */
+ enum event_type {
+ EVENT_MORE_DATA = 0, // Request to read available data from buffer.
+ // If this event is delivered but the callback handler
+ // does not want to read the available data, the handler must
+ // explicitly ignore the event by setting frameCount to zero.
+ EVENT_OVERRUN = 1, // Buffer overrun occurred.
+ EVENT_MARKER = 2, // Record head is at the specified marker position
+ // (See setMarkerPosition()).
+ EVENT_NEW_POS = 3, // Record head is at a new position
+ // (See setPositionUpdatePeriod()).
+ EVENT_NEW_IAUDIORECORD = 4, // IAudioRecord was re-created, either due to re-routing and
+ // voluntary invalidation by mediaserver, or mediaserver crash.
+ };
+
+ /* Client should declare a Buffer and pass address to obtainBuffer()
+ * and releaseBuffer(). See also callback_t for EVENT_MORE_DATA.
+ */
+
+ class Buffer
+ {
+ public:
+ // FIXME use m prefix
+ size_t frameCount; // number of sample frames corresponding to size;
+ // on input to obtainBuffer() it is the number of frames desired
+ // on output from obtainBuffer() it is the number of available
+ // frames to be read
+ // on input to releaseBuffer() it is currently ignored
+
+ size_t size; // input/output in bytes == frameCount * frameSize
+ // on input to obtainBuffer() it is ignored
+ // on output from obtainBuffer() it is the number of available
+ // bytes to be read, which is frameCount * frameSize
+ // on input to releaseBuffer() it is the number of bytes to
+ // release
+ // FIXME This is redundant with respect to frameCount. Consider
+ // removing size and making frameCount the primary field.
+
+ union {
+ void* raw;
+ short* i16; // signed 16-bit
+ int8_t* i8; // unsigned 8-bit, offset by 0x80
+ // input to obtainBuffer(): unused, output: pointer to buffer
+ };
+ };
+
+ /* As a convenience, if a callback is supplied, a handler thread
+ * is automatically created with the appropriate priority. This thread
+ * invokes the callback when a new buffer becomes available or various conditions occur.
+ * Parameters:
+ *
+ * event: type of event notified (see enum AudioRecord::event_type).
+ * user: Pointer to context for use by the callback receiver.
+ * info: Pointer to optional parameter according to event type:
+ * - EVENT_MORE_DATA: pointer to AudioRecord::Buffer struct. The callback must not read
+ * more bytes than indicated by 'size' field and update 'size' if
+ * fewer bytes are consumed.
+ * - EVENT_OVERRUN: unused.
+ * - EVENT_MARKER: pointer to const uint32_t containing the marker position in frames.
+ * - EVENT_NEW_POS: pointer to const uint32_t containing the new position in frames.
+ * - EVENT_NEW_IAUDIORECORD: unused.
+ */
+
+ typedef void (*callback_t)(int event, void* user, void *info);
+
+ /* Returns the minimum frame count required for the successful creation of
+ * an AudioRecord object.
+ * Returned status (from utils/Errors.h) can be:
+ * - NO_ERROR: successful operation
+ * - NO_INIT: audio server or audio hardware not initialized
+ * - BAD_VALUE: unsupported configuration
+ * frameCount is guaranteed to be non-zero if status is NO_ERROR,
+ * and is undefined otherwise.
+ * FIXME This API assumes a route, and so should be deprecated.
+ */
+
+ static status_t getMinFrameCount(size_t* frameCount,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask);
+
+ /* How data is transferred from AudioRecord
+ */
+ enum transfer_type {
+ TRANSFER_DEFAULT, // not specified explicitly; determine from the other parameters
+ TRANSFER_CALLBACK, // callback EVENT_MORE_DATA
+ TRANSFER_OBTAIN, // call obtainBuffer() and releaseBuffer()
+ TRANSFER_SYNC, // synchronous read()
+ };
+
+ /* Constructs an uninitialized AudioRecord. No connection with
+ * AudioFlinger takes place. Use set() after this.
+ *
+ * Parameters:
+ *
+ * opPackageName: The package name used for app ops.
+ */
+ AudioRecord(const String16& opPackageName);
+
+ /* Creates an AudioRecord object and registers it with AudioFlinger.
+ * Once created, the track needs to be started before it can be used.
+ * Unspecified values are set to appropriate default values.
+ *
+ * Parameters:
+ *
+ * inputSource: Select the audio input to record from (e.g. AUDIO_SOURCE_DEFAULT).
+ * sampleRate: Data sink sampling rate in Hz. Zero means to use the source sample rate.
+ * format: Audio format (e.g AUDIO_FORMAT_PCM_16_BIT for signed
+ * 16 bits per sample).
+ * channelMask: Channel mask, such that audio_is_input_channel(channelMask) is true.
+ * opPackageName: The package name used for app ops.
+ * frameCount: Minimum size of track PCM buffer in frames. This defines the
+ * application's contribution to the
+ * latency of the track. The actual size selected by the AudioRecord could
+ * be larger if the requested size is not compatible with current audio HAL
+ * latency. Zero means to use a default value.
+ * cbf: Callback function. If not null, this function is called periodically
+ * to consume new data in TRANSFER_CALLBACK mode
+ * and inform of marker, position updates, etc.
+ * user: Context for use by the callback receiver.
+ * notificationFrames: The callback function is called each time notificationFrames PCM
+ * frames are ready in record track output buffer.
+ * sessionId: Not yet supported.
+ * transferType: How data is transferred from AudioRecord.
+ * flags: See comments on audio_input_flags_t in <system/audio.h>
+ * pAttributes: If not NULL, supersedes inputSource for use case selection.
+ * threadCanCallJava: Not present in parameter list, and so is fixed at false.
+ */
+
+ AudioRecord(audio_source_t inputSource,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ const String16& opPackageName,
+ size_t frameCount = 0,
+ callback_t cbf = NULL,
+ void* user = NULL,
+ uint32_t notificationFrames = 0,
+ audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
+ transfer_type transferType = TRANSFER_DEFAULT,
+ audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
+ uid_t uid = AUDIO_UID_INVALID,
+ pid_t pid = -1,
+ const audio_attributes_t* pAttributes = NULL);
+
+ /* Terminates the AudioRecord and unregisters it from AudioFlinger.
+ * Also destroys all resources associated with the AudioRecord.
+ */
+protected:
+ virtual ~AudioRecord();
+public:
+
+ /* Initialize an AudioRecord that was created using the AudioRecord() constructor.
+ * Don't call set() more than once, or after an AudioRecord() constructor that takes parameters.
+ * set() is not multi-thread safe.
+ * Returned status (from utils/Errors.h) can be:
+ * - NO_ERROR: successful initialization
+ * - INVALID_OPERATION: AudioRecord is already initialized or record device is already in use
+ * - BAD_VALUE: invalid parameter (channelMask, format, sampleRate...)
+ * - NO_INIT: audio server or audio hardware not initialized
+ * - PERMISSION_DENIED: recording is not allowed for the requesting process
+ * If status is not equal to NO_ERROR, don't call any other APIs on this AudioRecord.
+ *
+ * Parameters not listed in the AudioRecord constructors above:
+ *
+ * threadCanCallJava: Whether callbacks are made from an attached thread and thus can call JNI.
+ */
+ status_t set(audio_source_t inputSource,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ size_t frameCount = 0,
+ callback_t cbf = NULL,
+ void* user = NULL,
+ uint32_t notificationFrames = 0,
+ bool threadCanCallJava = false,
+ audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
+ transfer_type transferType = TRANSFER_DEFAULT,
+ audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
+ uid_t uid = AUDIO_UID_INVALID,
+ pid_t pid = -1,
+ const audio_attributes_t* pAttributes = NULL);
+
+ /* Result of constructing the AudioRecord. This must be checked for successful initialization
+ * before using any AudioRecord API (except for set()), because using
+ * an uninitialized AudioRecord produces undefined results.
+ * See set() method above for possible return codes.
+ */
+ status_t initCheck() const { return mStatus; }
+
+ /* Returns this track's estimated latency in milliseconds.
+ * This includes the latency due to AudioRecord buffer size, resampling if applicable,
+ * and audio hardware driver.
+ */
+ uint32_t latency() const { return mLatency; }
+
+ /* getters, see constructor and set() */
+
+ audio_format_t format() const { return mFormat; }
+ uint32_t channelCount() const { return mChannelCount; }
+ size_t frameCount() const { return mFrameCount; }
+ size_t frameSize() const { return mFrameSize; }
+ audio_source_t inputSource() const { return mAttributes.source; }
+
+ /*
+ * Return the period of the notification callback in frames.
+ * This value is set when the AudioRecord is constructed.
+ * It can be modified if the AudioRecord is rerouted.
+ */
+ uint32_t getNotificationPeriodInFrames() const { return mNotificationFramesAct; }
+
+ /* After it's created the track is not active. Call start() to
+ * make it active. If set, the callback will start being called.
+ * If event is not AudioSystem::SYNC_EVENT_NONE, the capture start will be delayed until
+ * the specified event occurs on the specified trigger session.
+ */
+ status_t start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE,
+ audio_session_t triggerSession = AUDIO_SESSION_NONE);
+
+ /* Stop a track. The callback will cease being called. Note that obtainBuffer() still
+ * works and will drain buffers until the pool is exhausted, and then will return WOULD_BLOCK.
+ */
+ void stop();
+ bool stopped() const;
+
+ /* Return the sink sample rate for this record track in Hz.
+ * If specified as zero in constructor or set(), this will be the source sample rate.
+ * Unlike AudioTrack, the sample rate is const after initialization, so doesn't need a lock.
+ */
+ uint32_t getSampleRate() const { return mSampleRate; }
+
+ /* Sets marker position. When record reaches the number of frames specified,
+ * a callback with event type EVENT_MARKER is called. Calling setMarkerPosition
+ * with marker == 0 cancels marker notification callback.
+ * To set a marker at a position which would compute as 0,
+ * a workaround is to set the marker at a nearby position such as ~0 or 1.
+ * If the AudioRecord has been opened with no callback function associated,
+ * the operation will fail.
+ *
+ * Parameters:
+ *
+ * marker: marker position expressed in wrapping (overflow) frame units,
+ * like the return value of getPosition().
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * - NO_ERROR: successful operation
+ * - INVALID_OPERATION: the AudioRecord has no callback installed.
+ */
+ status_t setMarkerPosition(uint32_t marker);
+ status_t getMarkerPosition(uint32_t *marker) const;
+
+ /* Sets position update period. Every time the number of frames specified has been recorded,
+ * a callback with event type EVENT_NEW_POS is called.
+ * Calling setPositionUpdatePeriod with updatePeriod == 0 cancels new position notification
+ * callback.
+ * If the AudioRecord has been opened with no callback function associated,
+ * the operation will fail.
+ * Extremely small values may be rounded up to a value the implementation can support.
+ *
+ * Parameters:
+ *
+ * updatePeriod: position update notification period expressed in frames.
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * - NO_ERROR: successful operation
+ * - INVALID_OPERATION: the AudioRecord has no callback installed.
+ */
+ status_t setPositionUpdatePeriod(uint32_t updatePeriod);
+ status_t getPositionUpdatePeriod(uint32_t *updatePeriod) const;
+
+ /* Return the total number of frames recorded since recording started.
+ * The counter will wrap (overflow) periodically, e.g. every ~27 hours at 44.1 kHz.
+ * It is reset to zero by stop().
+ *
+ * Parameters:
+ *
+ * position: Address where to return record head position.
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * - NO_ERROR: successful operation
+ * - BAD_VALUE: position is NULL
+ */
+ status_t getPosition(uint32_t *position) const;
+
+ /* Return the record timestamp.
+ *
+ * Parameters:
+ * timestamp: A pointer to the timestamp to be filled.
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * - NO_ERROR: successful operation
+ * - BAD_VALUE: timestamp is NULL
+ */
+ status_t getTimestamp(ExtendedTimestamp *timestamp);
+
+ /* Returns a handle on the audio input used by this AudioRecord.
+ *
+ * Parameters:
+ * none.
+ *
+ * Returned value:
+ * handle on audio hardware input
+ */
+// FIXME The only known public caller is frameworks/opt/net/voip/src/jni/rtp/AudioGroup.cpp
+ audio_io_handle_t getInput() const __attribute__((__deprecated__))
+ { return getInputPrivate(); }
+private:
+ audio_io_handle_t getInputPrivate() const;
+public:
+
+ /* Returns the audio session ID associated with this AudioRecord.
+ *
+ * Parameters:
+ * none.
+ *
+ * Returned value:
+ * AudioRecord session ID.
+ *
+ * No lock needed because session ID doesn't change after first set().
+ */
+ audio_session_t getSessionId() const { return mSessionId; }
+
+ /* Public API for TRANSFER_OBTAIN mode.
+ * Obtains a buffer of up to "audioBuffer->frameCount" full frames.
+ * After draining these frames of data, the caller should release them with releaseBuffer().
+ * If the track buffer is not empty, obtainBuffer() returns as many contiguous
+ * full frames as are available immediately.
+ *
+ * If nonContig is non-NULL, it is an output parameter that will be set to the number of
+ * additional non-contiguous frames that are predicted to be available immediately,
+ * if the client were to release the first frames and then call obtainBuffer() again.
+ * This value is only a prediction, and needs to be confirmed.
+ * It will be set to zero for an error return.
+ *
+ * If the track buffer is empty and track is stopped, obtainBuffer() returns WOULD_BLOCK
+ * regardless of the value of waitCount.
+ * If the track buffer is empty and track is not stopped, obtainBuffer() blocks with a
+ * maximum timeout based on waitCount; see chart below.
+ * Buffers will be returned until the pool
+ * is exhausted, at which point obtainBuffer() will either block
+ * or return WOULD_BLOCK depending on the value of the "waitCount"
+ * parameter.
+ *
+ * Interpretation of waitCount:
+ * +n limits wait time to n * WAIT_PERIOD_MS,
+ * -1 causes an (almost) infinite wait time,
+ * 0 non-blocking.
+ *
+ * Buffer fields
+ * On entry:
+ * frameCount number of frames requested
+ * size ignored
+ * raw ignored
+ * After error return:
+ * frameCount 0
+ * size 0
+ * raw undefined
+ * After successful return:
+ * frameCount actual number of frames available, <= number requested
+ * size actual number of bytes available
+ * raw pointer to the buffer
+ */
+
+ status_t obtainBuffer(Buffer* audioBuffer, int32_t waitCount,
+ size_t *nonContig = NULL);
+
+ // Explicit Routing
+ /**
+ * TODO Document this method.
+ */
+ status_t setInputDevice(audio_port_handle_t deviceId);
+
+ /**
+ * TODO Document this method.
+ */
+ audio_port_handle_t getInputDevice();
+
+ /* Returns the ID of the audio device actually used by the input to which this AudioRecord
+ * is attached.
+ * A value of AUDIO_PORT_HANDLE_NONE indicates the AudioRecord is not attached to any input.
+ *
+ * Parameters:
+ * none.
+ */
+ audio_port_handle_t getRoutedDeviceId();
+
+ /* Add an AudioDeviceCallback. The caller will be notified when the audio device
+ * to which this AudioRecord is routed is updated.
+ * Replaces any previously installed callback.
+ * Parameters:
+ * callback: The callback interface
+ * Returns NO_ERROR if successful.
+ * INVALID_OPERATION if the same callback is already installed.
+ * NO_INIT or PERMISSION_DENIED if AudioFlinger service is not reachable
+ * BAD_VALUE if the callback is NULL
+ */
+ status_t addAudioDeviceCallback(
+ const sp<AudioSystem::AudioDeviceCallback>& callback);
+
+ /* remove an AudioDeviceCallback.
+ * Parameters:
+ * callback: The callback interface
+ * Returns NO_ERROR if successful.
+ * INVALID_OPERATION if the callback is not installed
+ * BAD_VALUE if the callback is NULL
+ */
+ status_t removeAudioDeviceCallback(
+ const sp<AudioSystem::AudioDeviceCallback>& callback);
+
+private:
+ /* If nonContig is non-NULL, it is an output parameter that will be set to the number of
+ * additional non-contiguous frames that are predicted to be available immediately,
+ * if the client were to release the first frames and then call obtainBuffer() again.
+ * This value is only a prediction, and needs to be confirmed.
+ * It will be set to zero for an error return.
+ * FIXME We could pass an array of Buffers instead of only one Buffer to obtainBuffer(),
+ * in case the requested amount of frames is in two or more non-contiguous regions.
+ * FIXME requested and elapsed are both relative times. Consider changing to absolute time.
+ */
+ status_t obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
+ struct timespec *elapsed = NULL, size_t *nonContig = NULL);
+public:
+
+ /* Public API for TRANSFER_OBTAIN mode.
+ * Release an emptied buffer of "audioBuffer->frameCount" frames for AudioFlinger to re-fill.
+ *
+ * Buffer fields:
+ * frameCount currently ignored but recommend to set to actual number of frames consumed
+ * size actual number of bytes consumed, must be multiple of frameSize
+ * raw ignored
+ */
+ void releaseBuffer(const Buffer* audioBuffer);
+
+ /* As a convenience we provide a read() interface to the audio buffer.
+ * Input parameter 'size' is in byte units.
+ * This is implemented on top of obtainBuffer/releaseBuffer. For best
+ * performance use callbacks. Returns actual number of bytes read >= 0,
+ * or one of the following negative status codes:
+ * INVALID_OPERATION AudioRecord is configured for streaming mode
+ * BAD_VALUE size is invalid
+ * WOULD_BLOCK when obtainBuffer() returns same, or
+ * AudioRecord was stopped during the read
+ * or any other error code returned by IAudioRecord::start() or restoreRecord_l().
+ * Default behavior is to only return when all data has been transferred. Set 'blocking' to
+ * false for the method to return immediately without waiting to try multiple times to read
+ * the full content of the buffer.
+ */
+ ssize_t read(void* buffer, size_t size, bool blocking = true);
+
+ /* Return the number of input frames lost in the audio driver since the last call of this
+ * function. Audio driver is expected to reset the value to 0 and restart counting upon
+ * returning the current value by this function call. Such loss typically occurs when the
+ * user space process is blocked longer than the capacity of audio driver buffers.
+ * Units: the number of input audio frames.
+ * FIXME The side-effect of resetting the counter may be incompatible with multi-client.
+ * Consider making it more like AudioTrack::getUnderrunFrames which doesn't have side effects.
+ */
+ uint32_t getInputFramesLost() const;
+
+ /* Get the flags */
+ audio_input_flags_t getFlags() const { AutoMutex _l(mLock); return mFlags; }
+
+private:
+ /* copying audio record objects is not allowed */
+ AudioRecord(const AudioRecord& other);
+ AudioRecord& operator = (const AudioRecord& other);
+
+ /* a small internal class to handle the callback */
+ class AudioRecordThread : public Thread
+ {
+ public:
+ AudioRecordThread(AudioRecord& receiver, bool bCanCallJava = false);
+
+ // Do not call Thread::requestExitAndWait() without first calling requestExit().
+ // Thread::requestExitAndWait() is not virtual, and the implementation doesn't do enough.
+ virtual void requestExit();
+
+ void pause(); // suspend thread from execution at next loop boundary
+ void resume(); // allow thread to execute, if not requested to exit
+ void wake(); // wake to handle changed notification conditions.
+
+ private:
+ void pauseInternal(nsecs_t ns = 0LL);
+ // like pause(), but only used internally within thread
+
+ friend class AudioRecord;
+ virtual bool threadLoop();
+ AudioRecord& mReceiver;
+ virtual ~AudioRecordThread();
+ Mutex mMyLock; // Thread::mLock is private
+ Condition mMyCond; // Thread::mThreadExitedCondition is private
+ bool mPaused; // whether thread is requested to pause at next loop entry
+ bool mPausedInt; // whether thread internally requests pause
+ nsecs_t mPausedNs; // if mPausedInt then associated timeout, otherwise ignored
+ bool mIgnoreNextPausedInt; // skip any internal pause and go immediately
+ // to processAudioBuffer() as state may have changed
+ // since pause time calculated.
+ };
+
+ // body of AudioRecordThread::threadLoop()
+ // returns the maximum amount of time before we would like to run again, where:
+ // 0 immediately
+ // > 0 no later than this many nanoseconds from now
+ // NS_WHENEVER still active but no particular deadline
+ // NS_INACTIVE inactive so don't run again until re-started
+ // NS_NEVER never again
+ static const nsecs_t NS_WHENEVER = -1, NS_INACTIVE = -2, NS_NEVER = -3;
+ nsecs_t processAudioBuffer();
+
+ // caller must hold lock on mLock for all _l methods
+
+ status_t openRecord_l(const Modulo<uint32_t> &epoch, const String16& opPackageName);
+
+ // FIXME enum is faster than strcmp() for parameter 'from'
+ status_t restoreRecord_l(const char *from);
+
+ sp<AudioRecordThread> mAudioRecordThread;
+ mutable Mutex mLock;
+
+ // Current client state: false = stopped, true = active. Protected by mLock. If more states
+ // are added, consider changing this to enum State { ... } mState as in AudioTrack.
+ bool mActive;
+
+ // for client callback handler
+ callback_t mCbf; // callback handler for events, or NULL
+ void* mUserData;
+
+ // for notification APIs
+ uint32_t mNotificationFramesReq; // requested number of frames between each
+ // notification callback
+ // as specified in constructor or set()
+ uint32_t mNotificationFramesAct; // actual number of frames between each
+ // notification callback
+ bool mRefreshRemaining; // processAudioBuffer() should refresh
+ // mRemainingFrames and mRetryOnPartialBuffer
+
+ // These are private to processAudioBuffer(), and are not protected by a lock
+ uint32_t mRemainingFrames; // number of frames to request in obtainBuffer()
+ bool mRetryOnPartialBuffer; // sleep and retry after partial obtainBuffer()
+ uint32_t mObservedSequence; // last observed value of mSequence
+
+ Modulo<uint32_t> mMarkerPosition; // in wrapping (overflow) frame units
+ bool mMarkerReached;
+ Modulo<uint32_t> mNewPosition; // in frames
+ uint32_t mUpdatePeriod; // in frames, zero means no EVENT_NEW_POS
+
+ status_t mStatus;
+
+ String16 mOpPackageName; // The package name used for app ops.
+
+ size_t mFrameCount; // corresponds to current IAudioRecord, value is
+ // reported back by AudioFlinger to the client
+ size_t mReqFrameCount; // frame count to request the first or next time
+ // a new IAudioRecord is needed, non-decreasing
+
+ int64_t mFramesRead; // total frames read. reset to zero after
+ // the start() following stop(). It is not
+ // changed after restoring the track.
+ int64_t mFramesReadServerOffset; // An offset to server frames read due to
+ // restoring AudioRecord, or stop/start.
+ // constant after constructor or set()
+ uint32_t mSampleRate;
+ audio_format_t mFormat;
+ uint32_t mChannelCount;
+ size_t mFrameSize; // app-level frame size == AudioFlinger frame size
+ uint32_t mLatency; // in ms
+ audio_channel_mask_t mChannelMask;
+
+ audio_input_flags_t mFlags; // same as mOrigFlags, except for bits that may
+ // be denied by client or server, such as
+ // AUDIO_INPUT_FLAG_FAST. mLock must be
+ // held to read or write those bits reliably.
+ audio_input_flags_t mOrigFlags; // as specified in constructor or set(), const
+
+ audio_session_t mSessionId;
+ transfer_type mTransfer;
+
+ // Next 5 fields may be changed if IAudioRecord is re-created, but always != 0
+ // provided the initial set() was successful
+ sp<IAudioRecord> mAudioRecord;
+ sp<IMemory> mCblkMemory;
+ audio_track_cblk_t* mCblk; // re-load after mLock.unlock()
+ sp<IMemory> mBufferMemory;
+ audio_io_handle_t mInput; // returned by AudioSystem::getInput()
+
+ int mPreviousPriority; // before start()
+ SchedPolicy mPreviousSchedulingGroup;
+ bool mAwaitBoost; // thread should wait for priority boost before running
+
+ // The proxy should only be referenced while a lock is held because the proxy isn't
+ // multi-thread safe.
+ // An exception is that a blocking ClientProxy::obtainBuffer() may be called without a lock,
+ // provided that the caller also holds an extra reference to the proxy and shared memory to keep
+ // them around in case they are replaced during the obtainBuffer().
+ sp<AudioRecordClientProxy> mProxy;
+
+ bool mInOverrun; // whether recorder is currently in overrun state
+
+private:
+ class DeathNotifier : public IBinder::DeathRecipient {
+ public:
+ DeathNotifier(AudioRecord* audioRecord) : mAudioRecord(audioRecord) { }
+ protected:
+ virtual void binderDied(const wp<IBinder>& who);
+ private:
+ const wp<AudioRecord> mAudioRecord;
+ };
+
+ sp<DeathNotifier> mDeathNotifier;
+ uint32_t mSequence; // incremented for each new IAudioRecord attempt
+ uid_t mClientUid;
+ pid_t mClientPid;
+ audio_attributes_t mAttributes;
+
+ // For Device Selection API
+ // a value of AUDIO_PORT_HANDLE_NONE indicated default (AudioPolicyManager) routing.
+ audio_port_handle_t mSelectedDeviceId;
+ sp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
+ audio_port_handle_t mPortId; // unique ID allocated by audio policy
+
+};
+
+}; // namespace android
+
+#endif // ANDROID_AUDIORECORD_H
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
new file mode 100644
index 0000000..853d318
--- /dev/null
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -0,0 +1,475 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIOSYSTEM_H_
+#define ANDROID_AUDIOSYSTEM_H_
+
+#include <sys/types.h>
+
+#include <media/AudioPolicy.h>
+#include <media/AudioIoDescriptor.h>
+#include <media/IAudioFlingerClient.h>
+#include <media/IAudioPolicyServiceClient.h>
+#include <system/audio.h>
+#include <system/audio_effect.h>
+#include <system/audio_policy.h>
+#include <utils/Errors.h>
+#include <utils/Mutex.h>
+
+namespace android {
+
+typedef void (*audio_error_callback)(status_t err);
+typedef void (*dynamic_policy_callback)(int event, String8 regId, int val);
+typedef void (*record_config_callback)(int event, audio_session_t session, int source,
+ const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig,
+ audio_patch_handle_t patchHandle);
+
+class IAudioFlinger;
+class IAudioPolicyService;
+class String8;
+
+class AudioSystem
+{
+public:
+
+ // FIXME Declare in binder opcode order, similarly to IAudioFlinger.h and IAudioFlinger.cpp
+
+ /* These are static methods to control the system-wide AudioFlinger
+ * only privileged processes can have access to them
+ */
+
+ // mute/unmute microphone
+ static status_t muteMicrophone(bool state);
+ static status_t isMicrophoneMuted(bool *state);
+
+ // set/get master volume
+ static status_t setMasterVolume(float value);
+ static status_t getMasterVolume(float* volume);
+
+ // mute/unmute audio outputs
+ static status_t setMasterMute(bool mute);
+ static status_t getMasterMute(bool* mute);
+
+ // set/get stream volume on specified output
+ static status_t setStreamVolume(audio_stream_type_t stream, float value,
+ audio_io_handle_t output);
+ static status_t getStreamVolume(audio_stream_type_t stream, float* volume,
+ audio_io_handle_t output);
+
+ // mute/unmute stream
+ static status_t setStreamMute(audio_stream_type_t stream, bool mute);
+ static status_t getStreamMute(audio_stream_type_t stream, bool* mute);
+
+ // set audio mode in audio hardware
+ static status_t setMode(audio_mode_t mode);
+
+ // returns true in *state if tracks are active on the specified stream or have been active
+ // in the past inPastMs milliseconds
+ static status_t isStreamActive(audio_stream_type_t stream, bool *state, uint32_t inPastMs);
+ // returns true in *state if tracks are active for what qualifies as remote playback
+ // on the specified stream or have been active in the past inPastMs milliseconds. Remote
+ // playback isn't mutually exclusive with local playback.
+ static status_t isStreamActiveRemotely(audio_stream_type_t stream, bool *state,
+ uint32_t inPastMs);
+ // returns true in *state if a recorder is currently recording with the specified source
+ static status_t isSourceActive(audio_source_t source, bool *state);
+
+ // set/get audio hardware parameters. The function accepts a list of parameters
+ // key value pairs in the form: key1=value1;key2=value2;...
+ // Some keys are reserved for standard parameters (See AudioParameter class).
+ // The versions with audio_io_handle_t are intended for internal media framework use only.
+ static status_t setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs);
+ static String8 getParameters(audio_io_handle_t ioHandle, const String8& keys);
+ // The versions without audio_io_handle_t are intended for JNI.
+ static status_t setParameters(const String8& keyValuePairs);
+ static String8 getParameters(const String8& keys);
+
+ static void setErrorCallback(audio_error_callback cb);
+ static void setDynPolicyCallback(dynamic_policy_callback cb);
+ static void setRecordConfigCallback(record_config_callback);
+
+ // helper function to obtain AudioFlinger service handle
+ static const sp<IAudioFlinger> get_audio_flinger();
+
+ static float linearToLog(int volume);
+ static int logToLinear(float volume);
+
+ // Returned samplingRate and frameCount output values are guaranteed
+ // to be non-zero if status == NO_ERROR
+ // FIXME This API assumes a route, and so should be deprecated.
+ static status_t getOutputSamplingRate(uint32_t* samplingRate,
+ audio_stream_type_t stream);
+ // FIXME This API assumes a route, and so should be deprecated.
+ static status_t getOutputFrameCount(size_t* frameCount,
+ audio_stream_type_t stream);
+ // FIXME This API assumes a route, and so should be deprecated.
+ static status_t getOutputLatency(uint32_t* latency,
+ audio_stream_type_t stream);
+ // returns the audio HAL sample rate
+ static status_t getSamplingRate(audio_io_handle_t ioHandle,
+ uint32_t* samplingRate);
+ // For output threads with a fast mixer, returns the number of frames per normal mixer buffer.
+ // For output threads without a fast mixer, or for input, this is same as getFrameCountHAL().
+ static status_t getFrameCount(audio_io_handle_t ioHandle,
+ size_t* frameCount);
+ // returns the audio output latency in ms. Corresponds to
+ // audio_stream_out->get_latency()
+ static status_t getLatency(audio_io_handle_t output,
+ uint32_t* latency);
+
+ // return status NO_ERROR implies *buffSize > 0
+    // FIXME This API assumes a route, and so should be deprecated.
+ static status_t getInputBufferSize(uint32_t sampleRate, audio_format_t format,
+ audio_channel_mask_t channelMask, size_t* buffSize);
+
+ static status_t setVoiceVolume(float volume);
+
+ // return the number of audio frames written by AudioFlinger to audio HAL and
+ // audio dsp to DAC since the specified output has exited standby.
+ // returned status (from utils/Errors.h) can be:
+ // - NO_ERROR: successful operation, halFrames and dspFrames point to valid data
+ // - INVALID_OPERATION: Not supported on current hardware platform
+ // - BAD_VALUE: invalid parameter
+ // NOTE: this feature is not supported on all hardware platforms and it is
+ // necessary to check returned status before using the returned values.
+ static status_t getRenderPosition(audio_io_handle_t output,
+ uint32_t *halFrames,
+ uint32_t *dspFrames);
+
+ // return the number of input frames lost by HAL implementation, or 0 if the handle is invalid
+ static uint32_t getInputFramesLost(audio_io_handle_t ioHandle);
+
+ // Allocate a new unique ID for use as an audio session ID or I/O handle.
+ // If unable to contact AudioFlinger, returns AUDIO_UNIQUE_ID_ALLOCATE instead.
+ // FIXME If AudioFlinger were to ever exhaust the unique ID namespace,
+ // this method could fail by returning either a reserved ID like AUDIO_UNIQUE_ID_ALLOCATE
+ // or an unspecified existing unique ID.
+ static audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t use);
+
+ static void acquireAudioSessionId(audio_session_t audioSession, pid_t pid);
+ static void releaseAudioSessionId(audio_session_t audioSession, pid_t pid);
+
+ // Get the HW synchronization source used for an audio session.
+ // Return a valid source or AUDIO_HW_SYNC_INVALID if an error occurs
+ // or no HW sync source is used.
+ static audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId);
+
+ // Indicate JAVA services are ready (scheduling, power management ...)
+ static status_t systemReady();
+
+ // Returns the number of frames per audio HAL buffer.
+ // Corresponds to audio_stream->get_buffer_size()/audio_stream_in_frame_size() for input.
+ // See also getFrameCount().
+ static status_t getFrameCountHAL(audio_io_handle_t ioHandle,
+ size_t* frameCount);
+
+ // Events used to synchronize actions between audio sessions.
+ // For instance SYNC_EVENT_PRESENTATION_COMPLETE can be used to delay recording start until
+ // playback is complete on another audio session.
+ // See definitions in MediaSyncEvent.java
+ enum sync_event_t {
+ SYNC_EVENT_SAME = -1, // used internally to indicate restart with same event
+ SYNC_EVENT_NONE = 0,
+ SYNC_EVENT_PRESENTATION_COMPLETE,
+
+ //
+ // Define new events here: SYNC_EVENT_START, SYNC_EVENT_STOP, SYNC_EVENT_TIME ...
+ //
+ SYNC_EVENT_CNT,
+ };
+
+ // Timeout for synchronous record start. Prevents from blocking the record thread forever
+ // if the trigger event is not fired.
+ static const uint32_t kSyncRecordStartTimeOutMs = 30000;
+
+ //
+ // IAudioPolicyService interface (see AudioPolicyInterface for method descriptions)
+ //
+ static status_t setDeviceConnectionState(audio_devices_t device, audio_policy_dev_state_t state,
+ const char *device_address, const char *device_name);
+ static audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
+ const char *device_address);
+ static status_t handleDeviceConfigChange(audio_devices_t device,
+ const char *device_address,
+ const char *device_name);
+ static status_t setPhoneState(audio_mode_t state);
+ static status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
+ static audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage);
+
+ // Client must successfully hand off the handle reference to AudioFlinger via createTrack(),
+ // or release it with releaseOutput().
+ static audio_io_handle_t getOutput(audio_stream_type_t stream,
+ uint32_t samplingRate = 0,
+ audio_format_t format = AUDIO_FORMAT_DEFAULT,
+ audio_channel_mask_t channelMask = AUDIO_CHANNEL_OUT_STEREO,
+ audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+ const audio_offload_info_t *offloadInfo = NULL);
+ static status_t getOutputForAttr(const audio_attributes_t *attr,
+ audio_io_handle_t *output,
+ audio_session_t session,
+ audio_stream_type_t *stream,
+ uid_t uid,
+ const audio_config_t *config,
+ audio_output_flags_t flags,
+ audio_port_handle_t selectedDeviceId,
+ audio_port_handle_t *portId);
+ static status_t startOutput(audio_io_handle_t output,
+ audio_stream_type_t stream,
+ audio_session_t session);
+ static status_t stopOutput(audio_io_handle_t output,
+ audio_stream_type_t stream,
+ audio_session_t session);
+ static void releaseOutput(audio_io_handle_t output,
+ audio_stream_type_t stream,
+ audio_session_t session);
+
+ // Client must successfully hand off the handle reference to AudioFlinger via openRecord(),
+ // or release it with releaseInput().
+ static status_t getInputForAttr(const audio_attributes_t *attr,
+ audio_io_handle_t *input,
+ audio_session_t session,
+ pid_t pid,
+ uid_t uid,
+ const audio_config_base_t *config,
+ audio_input_flags_t flags,
+ audio_port_handle_t selectedDeviceId,
+ audio_port_handle_t *portId);
+
+ static status_t startInput(audio_io_handle_t input,
+ audio_session_t session);
+ static status_t stopInput(audio_io_handle_t input,
+ audio_session_t session);
+ static void releaseInput(audio_io_handle_t input,
+ audio_session_t session);
+ static status_t initStreamVolume(audio_stream_type_t stream,
+ int indexMin,
+ int indexMax);
+ static status_t setStreamVolumeIndex(audio_stream_type_t stream,
+ int index,
+ audio_devices_t device);
+ static status_t getStreamVolumeIndex(audio_stream_type_t stream,
+ int *index,
+ audio_devices_t device);
+
+ static uint32_t getStrategyForStream(audio_stream_type_t stream);
+ static audio_devices_t getDevicesForStream(audio_stream_type_t stream);
+
+ static audio_io_handle_t getOutputForEffect(const effect_descriptor_t *desc);
+ static status_t registerEffect(const effect_descriptor_t *desc,
+ audio_io_handle_t io,
+ uint32_t strategy,
+ audio_session_t session,
+ int id);
+ static status_t unregisterEffect(int id);
+ static status_t setEffectEnabled(int id, bool enabled);
+
+ // clear stream to output mapping cache (gStreamOutputMap)
+ // and output configuration cache (gOutputs)
+ static void clearAudioConfigCache();
+
+ static const sp<IAudioPolicyService> get_audio_policy_service();
+
+ // helpers for android.media.AudioManager.getProperty(), see description there for meaning
+ static uint32_t getPrimaryOutputSamplingRate();
+ static size_t getPrimaryOutputFrameCount();
+
+ static status_t setLowRamDevice(bool isLowRamDevice);
+
+ // Check if hw offload is possible for given format, stream type, sample rate,
+ // bit rate, duration, video and streaming or offload property is enabled
+ static bool isOffloadSupported(const audio_offload_info_t& info);
+
+ // check presence of audio flinger service.
+ // returns NO_ERROR if binding to service succeeds, DEAD_OBJECT otherwise
+ static status_t checkAudioFlinger();
+
+ /* List available audio ports and their attributes */
+ static status_t listAudioPorts(audio_port_role_t role,
+ audio_port_type_t type,
+ unsigned int *num_ports,
+ struct audio_port *ports,
+ unsigned int *generation);
+
+ /* Get attributes for a given audio port */
+ static status_t getAudioPort(struct audio_port *port);
+
+ /* Create an audio patch between several source and sink ports */
+ static status_t createAudioPatch(const struct audio_patch *patch,
+ audio_patch_handle_t *handle);
+
+ /* Release an audio patch */
+ static status_t releaseAudioPatch(audio_patch_handle_t handle);
+
+ /* List existing audio patches */
+ static status_t listAudioPatches(unsigned int *num_patches,
+ struct audio_patch *patches,
+ unsigned int *generation);
+ /* Set audio port configuration */
+ static status_t setAudioPortConfig(const struct audio_port_config *config);
+
+
+ static status_t acquireSoundTriggerSession(audio_session_t *session,
+ audio_io_handle_t *ioHandle,
+ audio_devices_t *device);
+ static status_t releaseSoundTriggerSession(audio_session_t session);
+
+ static audio_mode_t getPhoneState();
+
+ static status_t registerPolicyMixes(const Vector<AudioMix>& mixes, bool registration);
+
+ static status_t startAudioSource(const struct audio_port_config *source,
+ const audio_attributes_t *attributes,
+ audio_patch_handle_t *handle);
+ static status_t stopAudioSource(audio_patch_handle_t handle);
+
+ static status_t setMasterMono(bool mono);
+ static status_t getMasterMono(bool *mono);
+
+ // ----------------------------------------------------------------------------
+
+ class AudioPortCallback : public RefBase
+ {
+ public:
+
+ AudioPortCallback() {}
+ virtual ~AudioPortCallback() {}
+
+ virtual void onAudioPortListUpdate() = 0;
+ virtual void onAudioPatchListUpdate() = 0;
+ virtual void onServiceDied() = 0;
+
+ };
+
+ static status_t addAudioPortCallback(const sp<AudioPortCallback>& callback);
+ static status_t removeAudioPortCallback(const sp<AudioPortCallback>& callback);
+
+ class AudioDeviceCallback : public RefBase
+ {
+ public:
+
+ AudioDeviceCallback() {}
+ virtual ~AudioDeviceCallback() {}
+
+ virtual void onAudioDeviceUpdate(audio_io_handle_t audioIo,
+ audio_port_handle_t deviceId) = 0;
+ };
+
+ static status_t addAudioDeviceCallback(const sp<AudioDeviceCallback>& callback,
+ audio_io_handle_t audioIo);
+ static status_t removeAudioDeviceCallback(const sp<AudioDeviceCallback>& callback,
+ audio_io_handle_t audioIo);
+
+ static audio_port_handle_t getDeviceIdForIo(audio_io_handle_t audioIo);
+
+private:
+
+ class AudioFlingerClient: public IBinder::DeathRecipient, public BnAudioFlingerClient
+ {
+ public:
+ AudioFlingerClient() :
+ mInBuffSize(0), mInSamplingRate(0),
+ mInFormat(AUDIO_FORMAT_DEFAULT), mInChannelMask(AUDIO_CHANNEL_NONE) {
+ }
+
+ void clearIoCache();
+ status_t getInputBufferSize(uint32_t sampleRate, audio_format_t format,
+ audio_channel_mask_t channelMask, size_t* buffSize);
+ sp<AudioIoDescriptor> getIoDescriptor(audio_io_handle_t ioHandle);
+
+ // DeathRecipient
+ virtual void binderDied(const wp<IBinder>& who);
+
+ // IAudioFlingerClient
+
+ // indicate a change in the configuration of an output or input: keeps the cached
+ // values for output/input parameters up-to-date in client process
+ virtual void ioConfigChanged(audio_io_config_event event,
+ const sp<AudioIoDescriptor>& ioDesc);
+
+
+ status_t addAudioDeviceCallback(const sp<AudioDeviceCallback>& callback,
+ audio_io_handle_t audioIo);
+ status_t removeAudioDeviceCallback(const sp<AudioDeviceCallback>& callback,
+ audio_io_handle_t audioIo);
+
+ audio_port_handle_t getDeviceIdForIo(audio_io_handle_t audioIo);
+
+ private:
+ Mutex mLock;
+ DefaultKeyedVector<audio_io_handle_t, sp<AudioIoDescriptor> > mIoDescriptors;
+ DefaultKeyedVector<audio_io_handle_t, Vector < sp<AudioDeviceCallback> > >
+ mAudioDeviceCallbacks;
+ // cached values for recording getInputBufferSize() queries
+ size_t mInBuffSize; // zero indicates cache is invalid
+ uint32_t mInSamplingRate;
+ audio_format_t mInFormat;
+ audio_channel_mask_t mInChannelMask;
+ sp<AudioIoDescriptor> getIoDescriptor_l(audio_io_handle_t ioHandle);
+ };
+
+ class AudioPolicyServiceClient: public IBinder::DeathRecipient,
+ public BnAudioPolicyServiceClient
+ {
+ public:
+ AudioPolicyServiceClient() {
+ }
+
+ int addAudioPortCallback(const sp<AudioPortCallback>& callback);
+ int removeAudioPortCallback(const sp<AudioPortCallback>& callback);
+
+ // DeathRecipient
+ virtual void binderDied(const wp<IBinder>& who);
+
+ // IAudioPolicyServiceClient
+ virtual void onAudioPortListUpdate();
+ virtual void onAudioPatchListUpdate();
+ virtual void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state);
+ virtual void onRecordingConfigurationUpdate(int event, audio_session_t session,
+ audio_source_t source, const audio_config_base_t *clientConfig,
+ const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle);
+
+ private:
+ Mutex mLock;
+ Vector <sp <AudioPortCallback> > mAudioPortCallbacks;
+ };
+
+ static const sp<AudioFlingerClient> getAudioFlingerClient();
+ static sp<AudioIoDescriptor> getIoDescriptor(audio_io_handle_t ioHandle);
+
+ static sp<AudioFlingerClient> gAudioFlingerClient;
+ static sp<AudioPolicyServiceClient> gAudioPolicyServiceClient;
+ friend class AudioFlingerClient;
+ friend class AudioPolicyServiceClient;
+
+    static Mutex gLock;      // protects gAudioFlinger and gAudioErrorCallback
+ static Mutex gLockAPS; // protects gAudioPolicyService and gAudioPolicyServiceClient
+ static sp<IAudioFlinger> gAudioFlinger;
+ static audio_error_callback gAudioErrorCallback;
+ static dynamic_policy_callback gDynPolicyCallback;
+ static record_config_callback gRecordConfigCallback;
+
+ static size_t gInBuffSize;
+ // previous parameters for recording buffer size queries
+ static uint32_t gPrevInSamplingRate;
+ static audio_format_t gPrevInFormat;
+ static audio_channel_mask_t gPrevInChannelMask;
+
+ static sp<IAudioPolicyService> gAudioPolicyService;
+};
+
+}; // namespace android
+
+#endif /*ANDROID_AUDIOSYSTEM_H_*/
diff --git a/include/media/AudioTimestamp.h b/media/libaudioclient/include/media/AudioTimestamp.h
similarity index 100%
rename from include/media/AudioTimestamp.h
rename to media/libaudioclient/include/media/AudioTimestamp.h
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
new file mode 100644
index 0000000..a4c8d53
--- /dev/null
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -0,0 +1,1161 @@
+/*
+ * Copyright (C) 2007 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIOTRACK_H
+#define ANDROID_AUDIOTRACK_H
+
+#include <cutils/sched_policy.h>
+#include <media/AudioSystem.h>
+#include <media/AudioTimestamp.h>
+#include <media/IAudioTrack.h>
+#include <media/AudioResamplerPublic.h>
+#include <media/Modulo.h>
+#include <utils/threads.h>
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+struct audio_track_cblk_t;
+class AudioTrackClientProxy;
+class StaticAudioTrackClientProxy;
+
+// ----------------------------------------------------------------------------
+
+class AudioTrack : public RefBase
+{
+public:
+
+ /* Events used by AudioTrack callback function (callback_t).
+ * Keep in sync with frameworks/base/media/java/android/media/AudioTrack.java NATIVE_EVENT_*.
+ */
+ enum event_type {
+ EVENT_MORE_DATA = 0, // Request to write more data to buffer.
+ // This event only occurs for TRANSFER_CALLBACK.
+ // If this event is delivered but the callback handler
+ // does not want to write more data, the handler must
+ // ignore the event by setting frameCount to zero.
+ // This might occur, for example, if the application is
+ // waiting for source data or is at the end of stream.
+ //
+ // For data filling, it is preferred that the callback
+ // does not block and instead returns a short count on
+ // the amount of data actually delivered
+ // (or 0, if no data is currently available).
+ EVENT_UNDERRUN = 1, // Buffer underrun occurred. This will not occur for
+ // static tracks.
+ EVENT_LOOP_END = 2, // Sample loop end was reached; playback restarted from
+ // loop start if loop count was not 0 for a static track.
+ EVENT_MARKER = 3, // Playback head is at the specified marker position
+ // (See setMarkerPosition()).
+ EVENT_NEW_POS = 4, // Playback head is at a new position
+ // (See setPositionUpdatePeriod()).
+ EVENT_BUFFER_END = 5, // Playback has completed for a static track.
+ EVENT_NEW_IAUDIOTRACK = 6, // IAudioTrack was re-created, either due to re-routing and
+ // voluntary invalidation by mediaserver, or mediaserver crash.
+ EVENT_STREAM_END = 7, // Sent after all the buffers queued in AF and HW are played
+ // back (after stop is called) for an offloaded track.
+#if 0 // FIXME not yet implemented
+ EVENT_NEW_TIMESTAMP = 8, // Delivered periodically and when there's a significant change
+ // in the mapping from frame position to presentation time.
+ // See AudioTimestamp for the information included with event.
+#endif
+ };
+
+ /* Client should declare a Buffer and pass the address to obtainBuffer()
+ * and releaseBuffer(). See also callback_t for EVENT_MORE_DATA.
+ */
+
+ class Buffer
+ {
+ public:
+ // FIXME use m prefix
+ size_t frameCount; // number of sample frames corresponding to size;
+ // on input to obtainBuffer() it is the number of frames desired,
+ // on output from obtainBuffer() it is the number of available
+ // [empty slots for] frames to be filled
+ // on input to releaseBuffer() it is currently ignored
+
+ size_t size; // input/output in bytes == frameCount * frameSize
+ // on input to obtainBuffer() it is ignored
+ // on output from obtainBuffer() it is the number of available
+ // [empty slots for] bytes to be filled,
+ // which is frameCount * frameSize
+ // on input to releaseBuffer() it is the number of bytes to
+ // release
+ // FIXME This is redundant with respect to frameCount. Consider
+ // removing size and making frameCount the primary field.
+
+ union {
+ void* raw;
+ short* i16; // signed 16-bit
+ int8_t* i8; // unsigned 8-bit, offset by 0x80
+ }; // input to obtainBuffer(): unused, output: pointer to buffer
+ };
+
+ /* As a convenience, if a callback is supplied, a handler thread
+ * is automatically created with the appropriate priority. This thread
+ * invokes the callback when a new buffer becomes available or various conditions occur.
+ * Parameters:
+ *
+ * event: type of event notified (see enum AudioTrack::event_type).
+ * user: Pointer to context for use by the callback receiver.
+ * info: Pointer to optional parameter according to event type:
+ * - EVENT_MORE_DATA: pointer to AudioTrack::Buffer struct. The callback must not write
+ * more bytes than indicated by 'size' field and update 'size' if fewer bytes are
+ * written.
+ * - EVENT_UNDERRUN: unused.
+ * - EVENT_LOOP_END: pointer to an int indicating the number of loops remaining.
+ * - EVENT_MARKER: pointer to const uint32_t containing the marker position in frames.
+ * - EVENT_NEW_POS: pointer to const uint32_t containing the new position in frames.
+ * - EVENT_BUFFER_END: unused.
+ * - EVENT_NEW_IAUDIOTRACK: unused.
+ * - EVENT_STREAM_END: unused.
+ * - EVENT_NEW_TIMESTAMP: pointer to const AudioTimestamp.
+ */
+
+ typedef void (*callback_t)(int event, void* user, void *info);
+
+ /* Returns the minimum frame count required for the successful creation of
+ * an AudioTrack object.
+ * Returned status (from utils/Errors.h) can be:
+ * - NO_ERROR: successful operation
+ * - NO_INIT: audio server or audio hardware not initialized
+ * - BAD_VALUE: unsupported configuration
+ * frameCount is guaranteed to be non-zero if status is NO_ERROR,
+ * and is undefined otherwise.
+ * FIXME This API assumes a route, and so should be deprecated.
+ */
+
+ static status_t getMinFrameCount(size_t* frameCount,
+ audio_stream_type_t streamType,
+ uint32_t sampleRate);
+
+ /* How data is transferred to AudioTrack
+ */
+ enum transfer_type {
+ TRANSFER_DEFAULT, // not specified explicitly; determine from the other parameters
+ TRANSFER_CALLBACK, // callback EVENT_MORE_DATA
+ TRANSFER_OBTAIN, // call obtainBuffer() and releaseBuffer()
+ TRANSFER_SYNC, // synchronous write()
+ TRANSFER_SHARED, // shared memory
+ };
+
+ /* Constructs an uninitialized AudioTrack. No connection with
+ * AudioFlinger takes place. Use set() after this.
+ */
+ AudioTrack();
+
+ /* Creates an AudioTrack object and registers it with AudioFlinger.
+ * Once created, the track needs to be started before it can be used.
+ * Unspecified values are set to appropriate default values.
+ *
+ * Parameters:
+ *
+ * streamType: Select the type of audio stream this track is attached to
+ * (e.g. AUDIO_STREAM_MUSIC).
+ * sampleRate: Data source sampling rate in Hz. Zero means to use the sink sample rate.
+ * A non-zero value must be specified if AUDIO_OUTPUT_FLAG_DIRECT is set.
+ * 0 will not work with current policy implementation for direct output
+ * selection where an exact match is needed for sampling rate.
+ * format: Audio format. For mixed tracks, any PCM format supported by server is OK.
+ * For direct and offloaded tracks, the possible format(s) depends on the
+ * output sink.
+ * channelMask: Channel mask, such that audio_is_output_channel(channelMask) is true.
+ * frameCount: Minimum size of track PCM buffer in frames. This defines the
+ * application's contribution to the
+ * latency of the track. The actual size selected by the AudioTrack could be
+ * larger if the requested size is not compatible with current audio HAL
+ * configuration. Zero means to use a default value.
+ * flags: See comments on audio_output_flags_t in <system/audio.h>.
+ * cbf: Callback function. If not null, this function is called periodically
+ * to provide new data in TRANSFER_CALLBACK mode
+ * and inform of marker, position updates, etc.
+ * user: Context for use by the callback receiver.
+ * notificationFrames: The callback function is called each time notificationFrames PCM
+ * frames have been consumed from track input buffer by server.
+ * Zero means to use a default value, which is typically:
+ * - fast tracks: HAL buffer size, even if track frameCount is larger
+ * - normal tracks: 1/2 of track frameCount
+ * A positive value means that many frames at initial source sample rate.
+ * A negative value for this parameter specifies the negative of the
+ * requested number of notifications (sub-buffers) in the entire buffer.
+ * For fast tracks, the FastMixer will process one sub-buffer at a time.
+ * The size of each sub-buffer is determined by the HAL.
+ * To get "double buffering", for example, one should pass -2.
+ * The minimum number of sub-buffers is 1 (expressed as -1),
+ * and the maximum number of sub-buffers is 8 (expressed as -8).
+ * Negative is only permitted for fast tracks, and if frameCount is zero.
+ * TODO It is ugly to overload a parameter in this way depending on
+ * whether it is positive, negative, or zero. Consider splitting apart.
+ * sessionId: Specific session ID, or zero to use default.
+ * transferType: How data is transferred to AudioTrack.
+ * offloadInfo: If not NULL, provides offload parameters for
+ * AudioSystem::getOutputForAttr().
+ * uid: User ID of the app which initially requested this AudioTrack
+ * for power management tracking, or -1 for current user ID.
+ * pid: Process ID of the app which initially requested this AudioTrack
+ * for power management tracking, or -1 for current process ID.
+ * pAttributes: If not NULL, supersedes streamType for use case selection.
+ * doNotReconnect: If set to true, AudioTrack won't automatically recreate the IAudioTrack
+ binder to AudioFlinger.
+ It will return an error instead. The application will recreate
+ the track based on offloading or different channel configuration, etc.
+ * maxRequiredSpeed: For PCM tracks, this creates an appropriate buffer size that will allow
+ * maxRequiredSpeed playback. Values less than 1.0f and greater than
+ * AUDIO_TIMESTRETCH_SPEED_MAX will be clamped. For non-PCM tracks
+ * and direct or offloaded tracks, this parameter is ignored.
+ * threadCanCallJava: Not present in parameter list, and so is fixed at false.
+ */
+
+ AudioTrack( audio_stream_type_t streamType,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ size_t frameCount = 0,
+ audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+ callback_t cbf = NULL,
+ void* user = NULL,
+ int32_t notificationFrames = 0,
+ audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
+ transfer_type transferType = TRANSFER_DEFAULT,
+ const audio_offload_info_t *offloadInfo = NULL,
+ uid_t uid = AUDIO_UID_INVALID,
+ pid_t pid = -1,
+ const audio_attributes_t* pAttributes = NULL,
+ bool doNotReconnect = false,
+ float maxRequiredSpeed = 1.0f);
+
+ /* Creates an audio track and registers it with AudioFlinger.
+ * With this constructor, the track is configured for static buffer mode.
+ * Data to be rendered is passed in a shared memory buffer
+ * identified by the argument sharedBuffer, which should be non-0.
+ * If sharedBuffer is zero, this constructor is equivalent to the previous constructor
+ * but without the ability to specify a non-zero value for the frameCount parameter.
+ * The memory should be initialized to the desired data before calling start().
+ * The write() method is not supported in this case.
+ * It is recommended to pass a callback function to be notified of playback end by an
+ * EVENT_UNDERRUN event.
+ */
+
+ AudioTrack( audio_stream_type_t streamType,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ const sp<IMemory>& sharedBuffer,
+ audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+ callback_t cbf = NULL,
+ void* user = NULL,
+ int32_t notificationFrames = 0,
+ audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
+ transfer_type transferType = TRANSFER_DEFAULT,
+ const audio_offload_info_t *offloadInfo = NULL,
+ uid_t uid = AUDIO_UID_INVALID,
+ pid_t pid = -1,
+ const audio_attributes_t* pAttributes = NULL,
+ bool doNotReconnect = false,
+ float maxRequiredSpeed = 1.0f);
+
+ /* Terminates the AudioTrack and unregisters it from AudioFlinger.
+ * Also destroys all resources associated with the AudioTrack.
+ */
+protected:
+ virtual ~AudioTrack();
+public:
+
+ /* Initialize an AudioTrack that was created using the AudioTrack() constructor.
+ * Don't call set() more than once, or after the AudioTrack() constructors that take parameters.
+ * set() is not multi-thread safe.
+ * Returned status (from utils/Errors.h) can be:
+ * - NO_ERROR: successful initialization
+ * - INVALID_OPERATION: AudioTrack is already initialized
+ * - BAD_VALUE: invalid parameter (channelMask, format, sampleRate...)
+ * - NO_INIT: audio server or audio hardware not initialized
+ * If status is not equal to NO_ERROR, don't call any other APIs on this AudioTrack.
+ * If sharedBuffer is non-0, the frameCount parameter is ignored and
+ * replaced by the shared buffer's total allocated size in frame units.
+ *
+ * Parameters not listed in the AudioTrack constructors above:
+ *
+ * threadCanCallJava: Whether callbacks are made from an attached thread and thus can call JNI.
+ *
+ * Internal state post condition:
+ * (mStreamType == AUDIO_STREAM_DEFAULT) implies this AudioTrack has valid attributes
+ */
+ status_t set(audio_stream_type_t streamType,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ size_t frameCount = 0,
+ audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+ callback_t cbf = NULL,
+ void* user = NULL,
+ int32_t notificationFrames = 0,
+ const sp<IMemory>& sharedBuffer = 0,
+ bool threadCanCallJava = false,
+ audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
+ transfer_type transferType = TRANSFER_DEFAULT,
+ const audio_offload_info_t *offloadInfo = NULL,
+ uid_t uid = AUDIO_UID_INVALID,
+ pid_t pid = -1,
+ const audio_attributes_t* pAttributes = NULL,
+ bool doNotReconnect = false,
+ float maxRequiredSpeed = 1.0f);
+
+ /* Result of constructing the AudioTrack. This must be checked for successful initialization
+ * before using any AudioTrack API (except for set()), because using
+ * an uninitialized AudioTrack produces undefined results.
+ * See set() method above for possible return codes.
+ */
+ status_t initCheck() const { return mStatus; }
+
+ /* Returns this track's estimated latency in milliseconds.
+ * This includes the latency due to AudioTrack buffer size, AudioMixer (if any)
+ * and audio hardware driver.
+ */
+ uint32_t latency() const { return mLatency; }
+
+ /* Returns the number of application-level buffer underruns
+ * since the AudioTrack was created.
+ */
+ uint32_t getUnderrunCount() const;
+
+ /* getters, see constructors and set() */
+
+ audio_stream_type_t streamType() const;
+ audio_format_t format() const { return mFormat; }
+
+ /* Return frame size in bytes, which for linear PCM is
+ * channelCount * (bit depth per channel / 8).
+ * channelCount is determined from channelMask, and bit depth comes from format.
+ * For non-linear formats, the frame size is typically 1 byte.
+ */
+ size_t frameSize() const { return mFrameSize; }
+
+ uint32_t channelCount() const { return mChannelCount; }
+ size_t frameCount() const { return mFrameCount; }
+
+ /*
+ * Return the period of the notification callback in frames.
+ * This value is set when the AudioTrack is constructed.
+ * It can be modified if the AudioTrack is rerouted.
+ */
+ uint32_t getNotificationPeriodInFrames() const { return mNotificationFramesAct; }
+
+ /* Return effective size of audio buffer that an application writes to
+ * or a negative error if the track is uninitialized.
+ */
+ ssize_t getBufferSizeInFrames();
+
+ /* Returns the buffer duration in microseconds at current playback rate.
+ */
+ status_t getBufferDurationInUs(int64_t *duration);
+
+ /* Set the effective size of audio buffer that an application writes to.
+ * This is used to determine the amount of available room in the buffer,
+ * which determines when a write will block.
+ * This allows an application to raise and lower the audio latency.
+ * The requested size may be adjusted so that it is
+ * greater or equal to the absolute minimum and
+ * less than or equal to the getBufferCapacityInFrames().
+ * It may also be adjusted slightly for internal reasons.
+ *
+ * Return the final size or a negative error if the track is uninitialized
+ * or does not support variable sizes.
+ */
+ ssize_t setBufferSizeInFrames(size_t size);
+
+ /* Return the static buffer specified in constructor or set(), or 0 for streaming mode */
+ sp<IMemory> sharedBuffer() const { return mSharedBuffer; }
+
+ /* After it's created the track is not active. Call start() to
+ * make it active. If set, the callback will start being called.
+ * If the track was previously paused, volume is ramped up over the first mix buffer.
+ */
+ status_t start();
+
+ /* Stop a track.
+ * In static buffer mode, the track is stopped immediately.
+ * In streaming mode, the callback will cease being called. Note that obtainBuffer() still
+ * works and will fill up buffers until the pool is exhausted, and then will return WOULD_BLOCK.
+ * In streaming mode the stop does not occur immediately: any data remaining in the buffer
+ * is first drained, mixed, and output, and only then is the track marked as stopped.
+ */
+ void stop();
+ bool stopped() const;
+
+ /* Flush a stopped or paused track. All previously buffered data is discarded immediately.
+ * This has the effect of draining the buffers without mixing or output.
+ * Flush is intended for streaming mode, for example before switching to non-contiguous content.
+ * This function is a no-op if the track is not stopped or paused, or uses a static buffer.
+ */
+ void flush();
+
+ /* Pause a track. After pause, the callback will cease being called and
+ * obtainBuffer returns WOULD_BLOCK. Note that obtainBuffer() still works
+ * and will fill up buffers until the pool is exhausted.
+ * Volume is ramped down over the next mix buffer following the pause request,
+ * and then the track is marked as paused. It can be resumed with ramp up by start().
+ */
+ void pause();
+
+ /* Set volume for this track, mostly used for games' sound effects
+ * left and right volumes. Levels must be >= 0.0 and <= 1.0.
+ * This is the older API. New applications should use setVolume(float) when possible.
+ */
+ status_t setVolume(float left, float right);
+
+ /* Set volume for all channels. This is the preferred API for new applications,
+ * especially for multi-channel content.
+ */
+ status_t setVolume(float volume);
+
+ /* Set the send level for this track. An auxiliary effect should be attached
+ * to the track with attachEffect(). Level must be >= 0.0 and <= 1.0.
+ */
+ status_t setAuxEffectSendLevel(float level);
+ void getAuxEffectSendLevel(float* level) const;
+
+ /* Set source sample rate for this track in Hz, mostly used for games' sound effects.
+ * Zero is not permitted.
+ */
+ status_t setSampleRate(uint32_t sampleRate);
+
+ /* Return current source sample rate in Hz.
+ * If specified as zero in constructor or set(), this will be the sink sample rate.
+ */
+ uint32_t getSampleRate() const;
+
+ /* Return the original source sample rate in Hz. This corresponds to the sample rate
+ * if playback rate had normal speed and pitch.
+ */
+ uint32_t getOriginalSampleRate() const;
+
+ /* Set source playback rate for timestretch
+ * 1.0 is normal speed: < 1.0 is slower, > 1.0 is faster
+ * 1.0 is normal pitch: < 1.0 is lower pitch, > 1.0 is higher pitch
+ *
+ * AUDIO_TIMESTRETCH_SPEED_MIN <= speed <= AUDIO_TIMESTRETCH_SPEED_MAX
+ * AUDIO_TIMESTRETCH_PITCH_MIN <= pitch <= AUDIO_TIMESTRETCH_PITCH_MAX
+ *
+ * Speed increases the playback rate of media, but does not alter pitch.
+ * Pitch increases the "tonal frequency" of media, but does not affect the playback rate.
+ */
+ status_t setPlaybackRate(const AudioPlaybackRate &playbackRate);
+
+ /* Return current playback rate */
+ const AudioPlaybackRate& getPlaybackRate() const;
+
+ /* Enables looping and sets the start and end points of looping.
+ * Only supported for static buffer mode.
+ *
+ * Parameters:
+ *
+ * loopStart: loop start in frames relative to start of buffer.
+ * loopEnd: loop end in frames relative to start of buffer.
+ * loopCount: number of loops to execute. Calling setLoop() with loopCount == 0 cancels any
+ * pending or active loop. loopCount == -1 means infinite looping.
+ *
+ * For proper operation the following condition must be respected:
+ * loopCount != 0 implies 0 <= loopStart < loopEnd <= frameCount().
+ *
+ * If the loop period (loopEnd - loopStart) is too small for the implementation to support,
+ * setLoop() will return BAD_VALUE. loopCount must be >= -1.
+ *
+ */
+ status_t setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount);
+
+ /* Sets marker position. When playback reaches the number of frames specified, a callback with
+ * event type EVENT_MARKER is called. Calling setMarkerPosition with marker == 0 cancels marker
+ * notification callback. To set a marker at a position which would compute as 0,
+ * a workaround is to set the marker at a nearby position such as ~0 or 1.
+ * If the AudioTrack has been opened with no callback function associated, the operation will
+ * fail.
+ *
+ * Parameters:
+ *
+ * marker: marker position expressed in wrapping (overflow) frame units,
+ * like the return value of getPosition().
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * - NO_ERROR: successful operation
+ * - INVALID_OPERATION: the AudioTrack has no callback installed.
+ */
+ status_t setMarkerPosition(uint32_t marker);
+ status_t getMarkerPosition(uint32_t *marker) const;
+
+ /* Sets position update period. Every time the number of frames specified has been played,
+ * a callback with event type EVENT_NEW_POS is called.
+ * Calling setPositionUpdatePeriod with updatePeriod == 0 cancels new position notification
+ * callback.
+ * If the AudioTrack has been opened with no callback function associated, the operation will
+ * fail.
+ * Extremely small values may be rounded up to a value the implementation can support.
+ *
+ * Parameters:
+ *
+ * updatePeriod: position update notification period expressed in frames.
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * - NO_ERROR: successful operation
+ * - INVALID_OPERATION: the AudioTrack has no callback installed.
+ */
+ status_t setPositionUpdatePeriod(uint32_t updatePeriod);
+ status_t getPositionUpdatePeriod(uint32_t *updatePeriod) const;
+
+ /* Sets playback head position.
+ * Only supported for static buffer mode.
+ *
+ * Parameters:
+ *
+ * position: New playback head position in frames relative to start of buffer.
+ * 0 <= position <= frameCount(). Note that end of buffer is permitted,
+ * but will result in an immediate underrun if started.
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * - NO_ERROR: successful operation
+ * - INVALID_OPERATION: the AudioTrack is not stopped or paused, or is in streaming mode.
+ * - BAD_VALUE: The specified position is beyond the number of frames present in AudioTrack
+ * buffer
+ */
+ status_t setPosition(uint32_t position);
+
+ /* Return the total number of frames played since playback start.
+ * The counter will wrap (overflow) periodically, e.g. every ~27 hours at 44.1 kHz.
+ * It is reset to zero by flush(), reload(), and stop().
+ *
+ * Parameters:
+ *
+ * position: Address where to return play head position.
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * - NO_ERROR: successful operation
+ * - BAD_VALUE: position is NULL
+ */
+ status_t getPosition(uint32_t *position);
+
+ /* For static buffer mode only, this returns the current playback position in frames
+ * relative to start of buffer. It is analogous to the position units used by
+ * setLoop() and setPosition(). After underrun, the position will be at end of buffer.
+ */
+ status_t getBufferPosition(uint32_t *position);
+
+ /* Forces AudioTrack buffer full condition. When playing a static buffer, this method avoids
+ * rewriting the buffer before restarting playback after a stop.
+ * This method must be called with the AudioTrack in paused or stopped state.
+ * Not allowed in streaming mode.
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * - NO_ERROR: successful operation
+ * - INVALID_OPERATION: the AudioTrack is not stopped or paused, or is in streaming mode.
+ */
+ status_t reload();
+
+ /* Returns a handle on the audio output used by this AudioTrack.
+ *
+ * Parameters:
+ * none.
+ *
+ * Returned value:
+ * handle on audio hardware output, or AUDIO_IO_HANDLE_NONE if the
+ * track needed to be re-created but that failed
+ */
+private:
+ audio_io_handle_t getOutput() const;
+public:
+
+ /* Selects the audio device to use for output of this AudioTrack. A value of
+ * AUDIO_PORT_HANDLE_NONE indicates default (AudioPolicyManager) routing.
+ *
+ * Parameters:
+ * The device ID of the selected device (as returned by the AudioDevicesManager API).
+ *
+ * Returned value:
+ * - NO_ERROR: successful operation
+ * TODO: what else can happen here?
+ */
+ status_t setOutputDevice(audio_port_handle_t deviceId);
+
+ /* Returns the ID of the audio device selected for this AudioTrack.
+ * A value of AUDIO_PORT_HANDLE_NONE indicates default (AudioPolicyManager) routing.
+ *
+ * Parameters:
+ * none.
+ */
+ audio_port_handle_t getOutputDevice();
+
+ /* Returns the ID of the audio device actually used by the output to which this AudioTrack is
+ * attached.
+ * A value of AUDIO_PORT_HANDLE_NONE indicates the audio track is not attached to any output.
+ *
+ * Parameters:
+ * none.
+ */
+ audio_port_handle_t getRoutedDeviceId();
+
+ /* Returns the unique session ID associated with this track.
+ *
+ * Parameters:
+ * none.
+ *
+ * Returned value:
+ * AudioTrack session ID.
+ */
+ audio_session_t getSessionId() const { return mSessionId; }
+
+ /* Attach track auxiliary output to specified effect. Use effectId = 0
+ * to detach track from effect.
+ *
+ * Parameters:
+ *
+ * effectId: effectId obtained from AudioEffect::id().
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * - NO_ERROR: successful operation
+ * - INVALID_OPERATION: the effect is not an auxiliary effect.
+ * - BAD_VALUE: The specified effect ID is invalid
+ */
+ status_t attachAuxEffect(int effectId);
+
+ /* Public API for TRANSFER_OBTAIN mode.
+ * Obtains a buffer of up to "audioBuffer->frameCount" empty slots for frames.
+ * After filling these slots with data, the caller should release them with releaseBuffer().
+ * If the track buffer is not full, obtainBuffer() returns as many contiguous
+ * [empty slots for] frames as are available immediately.
+ *
+ * If nonContig is non-NULL, it is an output parameter that will be set to the number of
+ * additional non-contiguous frames that are predicted to be available immediately,
+ * if the client were to release the first frames and then call obtainBuffer() again.
+ * This value is only a prediction, and needs to be confirmed.
+ * It will be set to zero for an error return.
+ *
+ * If the track buffer is full and track is stopped, obtainBuffer() returns WOULD_BLOCK
+ * regardless of the value of waitCount.
+ * If the track buffer is full and track is not stopped, obtainBuffer() blocks with a
+ * maximum timeout based on waitCount; see chart below.
+ * Buffers will be returned until the pool
+ * is exhausted, at which point obtainBuffer() will either block
+ * or return WOULD_BLOCK depending on the value of the "waitCount"
+ * parameter.
+ *
+ * Interpretation of waitCount:
+ * +n limits wait time to n * WAIT_PERIOD_MS,
+ * -1 causes an (almost) infinite wait time,
+ * 0 non-blocking.
+ *
+ * Buffer fields
+ * On entry:
+ * frameCount number of [empty slots for] frames requested
+ * size ignored
+ * raw ignored
+ * After error return:
+ * frameCount 0
+ * size 0
+ * raw undefined
+ * After successful return:
+ * frameCount actual number of [empty slots for] frames available, <= number requested
+ * size actual number of bytes available
+ * raw pointer to the buffer
+ */
+ status_t obtainBuffer(Buffer* audioBuffer, int32_t waitCount,
+ size_t *nonContig = NULL);
+
+private:
+ /* If nonContig is non-NULL, it is an output parameter that will be set to the number of
+ * additional non-contiguous frames that are predicted to be available immediately,
+ * if the client were to release the first frames and then call obtainBuffer() again.
+ * This value is only a prediction, and needs to be confirmed.
+ * It will be set to zero for an error return.
+ * FIXME We could pass an array of Buffers instead of only one Buffer to obtainBuffer(),
+ * in case the requested amount of frames is in two or more non-contiguous regions.
+ * FIXME requested and elapsed are both relative times. Consider changing to absolute time.
+ */
+ status_t obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
+ struct timespec *elapsed = NULL, size_t *nonContig = NULL);
+public:
+
+ /* Public API for TRANSFER_OBTAIN mode.
+ * Release a filled buffer of frames for AudioFlinger to process.
+ *
+ * Buffer fields:
+ * frameCount currently ignored but recommend to set to actual number of frames filled
+ * size actual number of bytes filled, must be multiple of frameSize
+ * raw ignored
+ */
+ void releaseBuffer(const Buffer* audioBuffer);
+
+ /* As a convenience we provide a write() interface to the audio buffer.
+ * Input parameter 'size' is in byte units.
+ * This is implemented on top of obtainBuffer/releaseBuffer. For best
+ * performance use callbacks. Returns actual number of bytes written >= 0,
+ * or one of the following negative status codes:
+ * INVALID_OPERATION AudioTrack is configured for static buffer or streaming mode
+ * BAD_VALUE size is invalid
+ * WOULD_BLOCK when obtainBuffer() returns same, or
+ * AudioTrack was stopped during the write
+ * DEAD_OBJECT when AudioFlinger dies or the output device changes and
+ * the track cannot be automatically restored.
+ * The application needs to recreate the AudioTrack
+ * because the audio device changed or AudioFlinger died.
+ * This typically occurs for direct or offload tracks
+ * or if mDoNotReconnect is true.
+ * or any other error code returned by IAudioTrack::start() or restoreTrack_l().
+ * Default behavior is to only return when all data has been transferred. Set 'blocking' to
+ * false for the method to return immediately without waiting to try multiple times to write
+ * the full content of the buffer.
+ */
+ ssize_t write(const void* buffer, size_t size, bool blocking = true);
+
+ /*
+ * Dumps the state of an audio track.
+ * Not a general-purpose API; intended only for use by media player service to dump its tracks.
+ */
+ status_t dump(int fd, const Vector<String16>& args) const;
+
+ /*
+ * Return the total number of frames which AudioFlinger desired but were unavailable,
+ * and thus which resulted in an underrun. Reset to zero by stop().
+ */
+ uint32_t getUnderrunFrames() const;
+
+ /* Get the flags */
+ audio_output_flags_t getFlags() const { AutoMutex _l(mLock); return mFlags; }
+
+ /* Set parameters - only possible when using direct output */
+ status_t setParameters(const String8& keyValuePairs);
+
+ /* Sets the volume shaper object */
+ VolumeShaper::Status applyVolumeShaper(
+ const sp<VolumeShaper::Configuration>& configuration,
+ const sp<VolumeShaper::Operation>& operation);
+
+ /* Gets the volume shaper state */
+ sp<VolumeShaper::State> getVolumeShaperState(int id);
+
+ /* Get parameters */
+ String8 getParameters(const String8& keys);
+
+ /* Poll for a timestamp on demand.
+ * Use if EVENT_NEW_TIMESTAMP is not delivered often enough for your needs,
+ * or if you need to get the most recent timestamp outside of the event callback handler.
+ * Caution: calling this method too often may be inefficient;
+ * if you need a high resolution mapping between frame position and presentation time,
+ * consider implementing that at application level, based on the low resolution timestamps.
+ * Returns NO_ERROR if timestamp is valid.
+ * WOULD_BLOCK if called in STOPPED or FLUSHED state, or if called immediately after
+ * start/ACTIVE, when the number of frames consumed is less than the
+ * overall hardware latency to physical output. In WOULD_BLOCK cases,
+ * one might poll again, or use getPosition(), or use 0 position and
+ * current time for the timestamp.
+ * DEAD_OBJECT if AudioFlinger dies or the output device changes and
+ * the track cannot be automatically restored.
+ * The application needs to recreate the AudioTrack
+ * because the audio device changed or AudioFlinger died.
+ * This typically occurs for direct or offload tracks
+ * or if mDoNotReconnect is true.
+ * INVALID_OPERATION wrong state, or some other error.
+ *
+ * The timestamp parameter is undefined on return, if status is not NO_ERROR.
+ */
+ status_t getTimestamp(AudioTimestamp& timestamp);
+private:
+ status_t getTimestamp_l(AudioTimestamp& timestamp);
+public:
+
+ /* Return the extended timestamp, with additional timebase info and improved drain behavior.
+ *
+ * This is similar to the AudioTrack.java API:
+ * getTimestamp(@NonNull AudioTimestamp timestamp, @AudioTimestamp.Timebase int timebase)
+ *
+ * Some differences between this method and the getTimestamp(AudioTimestamp& timestamp) method
+ *
+ * 1. stop() by itself does not reset the frame position.
+ * A following start() resets the frame position to 0.
+ * 2. flush() by itself does not reset the frame position.
+ * The frame position advances by the number of frames flushed,
+ * when the first frame after flush reaches the audio sink.
+ * 3. BOOTTIME clock offsets are provided to help synchronize with
+ * non-audio streams, e.g. sensor data.
+ * 4. Position is returned with 64 bits of resolution.
+ *
+ * Parameters:
+ * timestamp: A pointer to the caller allocated ExtendedTimestamp.
+ *
+ * Returns NO_ERROR on success; timestamp is filled with valid data.
+ * BAD_VALUE if timestamp is NULL.
+ * WOULD_BLOCK if called immediately after start() when the number
+ * of frames consumed is less than the
+ * overall hardware latency to physical output. In WOULD_BLOCK cases,
+ * one might poll again, or use getPosition(), or use 0 position and
+ * current time for the timestamp.
+ * If WOULD_BLOCK is returned, the timestamp is still
+ * modified with the LOCATION_CLIENT portion filled.
+ * DEAD_OBJECT if AudioFlinger dies or the output device changes and
+ * the track cannot be automatically restored.
+ * The application needs to recreate the AudioTrack
+ * because the audio device changed or AudioFlinger died.
+ * This typically occurs for direct or offloaded tracks
+ * or if mDoNotReconnect is true.
+ *         INVALID_OPERATION if called on an offloaded or direct track.
+ * Use getTimestamp(AudioTimestamp& timestamp) instead.
+ */
+ status_t getTimestamp(ExtendedTimestamp *timestamp);
+private:
+ status_t getTimestamp_l(ExtendedTimestamp *timestamp);
+public:
+
+ /* Add an AudioDeviceCallback. The caller will be notified when the audio device to which this
+ * AudioTrack is routed is updated.
+ * Replaces any previously installed callback.
+ * Parameters:
+ * callback: The callback interface
+ * Returns NO_ERROR if successful.
+ * INVALID_OPERATION if the same callback is already installed.
+ *         NO_INIT or PERMISSION_DENIED if AudioFlinger service is not reachable
+ * BAD_VALUE if the callback is NULL
+ */
+ status_t addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback);
+
+ /* remove an AudioDeviceCallback.
+ * Parameters:
+ * callback: The callback interface
+ * Returns NO_ERROR if successful.
+ * INVALID_OPERATION if the callback is not installed
+ * BAD_VALUE if the callback is NULL
+ */
+ status_t removeAudioDeviceCallback(
+ const sp<AudioSystem::AudioDeviceCallback>& callback);
+
+ /* Obtain the pending duration in milliseconds for playback of pure PCM
+ * (mixable without embedded timing) data remaining in AudioTrack.
+ *
+ * This is used to estimate the drain time for the client-server buffer
+ * so the choice of ExtendedTimestamp::LOCATION_SERVER is default.
+ * One may optionally request to find the duration to play through the HAL
+ * by specifying a location ExtendedTimestamp::LOCATION_KERNEL; however,
+ * INVALID_OPERATION may be returned if the kernel location is unavailable.
+ *
+ * Returns NO_ERROR if successful.
+ * INVALID_OPERATION if ExtendedTimestamp::LOCATION_KERNEL cannot be obtained
+ * or the AudioTrack does not contain pure PCM data.
+ * BAD_VALUE if msec is nullptr or location is invalid.
+ */
+ status_t pendingDuration(int32_t *msec,
+ ExtendedTimestamp::Location location = ExtendedTimestamp::LOCATION_SERVER);
+
+ /* hasStarted() is used to determine if audio is now audible at the device after
+ * a start() command. The underlying implementation checks a nonzero timestamp position
+ * or increment for the audible assumption.
+ *
+ * hasStarted() returns true if the track has been started() and audio is audible
+ * and no subsequent pause() or flush() has been called. Immediately after pause() or
+ * flush() hasStarted() will return false.
+ *
+ * If stop() has been called, hasStarted() will return true if audio is still being
+ * delivered or has finished delivery (even if no audio was written) for both offloaded
+ * and normal tracks. This property removes a race condition in checking hasStarted()
+ * for very short clips, where stop() must be called to finish drain.
+ *
+ * In all cases, hasStarted() may turn false briefly after a subsequent start() is called
+ * until audio becomes audible again.
+ */
+ bool hasStarted(); // not const
+
+ bool isPlaying() {
+ AutoMutex lock(mLock);
+ return mState == STATE_ACTIVE || mState == STATE_STOPPING;
+ }
+protected:
+ /* copying audio tracks is not allowed */
+ AudioTrack(const AudioTrack& other);
+ AudioTrack& operator = (const AudioTrack& other);
+
+ /* a small internal class to handle the callback */
+ class AudioTrackThread : public Thread
+ {
+ public:
+ AudioTrackThread(AudioTrack& receiver, bool bCanCallJava = false);
+
+ // Do not call Thread::requestExitAndWait() without first calling requestExit().
+ // Thread::requestExitAndWait() is not virtual, and the implementation doesn't do enough.
+ virtual void requestExit();
+
+ void pause(); // suspend thread from execution at next loop boundary
+ void resume(); // allow thread to execute, if not requested to exit
+ void wake(); // wake to handle changed notification conditions.
+
+ private:
+ void pauseInternal(nsecs_t ns = 0LL);
+ // like pause(), but only used internally within thread
+
+ friend class AudioTrack;
+ virtual bool threadLoop();
+ AudioTrack& mReceiver;
+ virtual ~AudioTrackThread();
+ Mutex mMyLock; // Thread::mLock is private
+ Condition mMyCond; // Thread::mThreadExitedCondition is private
+ bool mPaused; // whether thread is requested to pause at next loop entry
+ bool mPausedInt; // whether thread internally requests pause
+ nsecs_t mPausedNs; // if mPausedInt then associated timeout, otherwise ignored
+ bool mIgnoreNextPausedInt; // skip any internal pause and go immediately
+ // to processAudioBuffer() as state may have changed
+ // since pause time calculated.
+ };
+
+ // body of AudioTrackThread::threadLoop()
+ // returns the maximum amount of time before we would like to run again, where:
+ // 0 immediately
+ // > 0 no later than this many nanoseconds from now
+ // NS_WHENEVER still active but no particular deadline
+ // NS_INACTIVE inactive so don't run again until re-started
+ // NS_NEVER never again
+ static const nsecs_t NS_WHENEVER = -1, NS_INACTIVE = -2, NS_NEVER = -3;
+ nsecs_t processAudioBuffer();
+
+ // caller must hold lock on mLock for all _l methods
+
+ status_t createTrack_l();
+
+ // can only be called when mState != STATE_ACTIVE
+ void flush_l();
+
+ void setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount);
+
+ // FIXME enum is faster than strcmp() for parameter 'from'
+ status_t restoreTrack_l(const char *from);
+
+ uint32_t getUnderrunCount_l() const;
+
+ bool isOffloaded() const;
+ bool isDirect() const;
+ bool isOffloadedOrDirect() const;
+
+ bool isOffloaded_l() const
+ { return (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0; }
+
+ bool isOffloadedOrDirect_l() const
+ { return (mFlags & (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD|
+ AUDIO_OUTPUT_FLAG_DIRECT)) != 0; }
+
+ bool isDirect_l() const
+ { return (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0; }
+
+ // pure pcm data is mixable (which excludes HW_AV_SYNC, with embedded timing)
+ bool isPurePcmData_l() const
+ { return audio_is_linear_pcm(mFormat)
+ && (mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) == 0; }
+
+ // increment mPosition by the delta of mServer, and return new value of mPosition
+ Modulo<uint32_t> updateAndGetPosition_l();
+
+ // check sample rate and speed is compatible with AudioTrack
+ bool isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed) const;
+
+ void restartIfDisabled();
+
+ // Next 4 fields may be changed if IAudioTrack is re-created, but always != 0
+ sp<IAudioTrack> mAudioTrack;
+ sp<IMemory> mCblkMemory;
+ audio_track_cblk_t* mCblk; // re-load after mLock.unlock()
+ audio_io_handle_t mOutput; // returned by AudioSystem::getOutput()
+
+ sp<AudioTrackThread> mAudioTrackThread;
+ bool mThreadCanCallJava;
+
+ float mVolume[2];
+ float mSendLevel;
+ mutable uint32_t mSampleRate; // mutable because getSampleRate() can update it
+ uint32_t mOriginalSampleRate;
+ AudioPlaybackRate mPlaybackRate;
+ float mMaxRequiredSpeed; // use PCM buffer size to allow this speed
+
+ // Corresponds to current IAudioTrack, value is reported back by AudioFlinger to the client.
+ // This allocated buffer size is maintained by the proxy.
+ size_t mFrameCount; // maximum size of buffer
+
+ size_t mReqFrameCount; // frame count to request the first or next time
+ // a new IAudioTrack is needed, non-decreasing
+
+ // The following AudioFlinger server-side values are cached in createAudioTrack_l().
+ // These values can be used for informational purposes until the track is invalidated,
+ // whereupon restoreTrack_l() calls createTrack_l() to update the values.
+ uint32_t mAfLatency; // AudioFlinger latency in ms
+ size_t mAfFrameCount; // AudioFlinger frame count
+ uint32_t mAfSampleRate; // AudioFlinger sample rate
+
+ // constant after constructor or set()
+ audio_format_t mFormat; // as requested by client, not forced to 16-bit
+ audio_stream_type_t mStreamType; // mStreamType == AUDIO_STREAM_DEFAULT implies
+ // this AudioTrack has valid attributes
+ uint32_t mChannelCount;
+ audio_channel_mask_t mChannelMask;
+ sp<IMemory> mSharedBuffer;
+ transfer_type mTransfer;
+ audio_offload_info_t mOffloadInfoCopy;
+ const audio_offload_info_t* mOffloadInfo;
+ audio_attributes_t mAttributes;
+
+ size_t mFrameSize; // frame size in bytes
+
+ status_t mStatus;
+
+ // can change dynamically when IAudioTrack invalidated
+ uint32_t mLatency; // in ms
+
+ // Indicates the current track state. Protected by mLock.
+ enum State {
+ STATE_ACTIVE,
+ STATE_STOPPED,
+ STATE_PAUSED,
+ STATE_PAUSED_STOPPING,
+ STATE_FLUSHED,
+ STATE_STOPPING,
+ } mState;
+
+ // for client callback handler
+ callback_t mCbf; // callback handler for events, or NULL
+ void* mUserData;
+
+ // for notification APIs
+
+ // next 2 fields are const after constructor or set()
+ uint32_t mNotificationFramesReq; // requested number of frames between each
+ // notification callback,
+ // at initial source sample rate
+ uint32_t mNotificationsPerBufferReq;
+ // requested number of notifications per buffer,
+ // currently only used for fast tracks with
+ // default track buffer size
+
+ uint32_t mNotificationFramesAct; // actual number of frames between each
+ // notification callback,
+ // at initial source sample rate
+ bool mRefreshRemaining; // processAudioBuffer() should refresh
+ // mRemainingFrames and mRetryOnPartialBuffer
+
+ // used for static track cbf and restoration
+ int32_t mLoopCount; // last setLoop loopCount; zero means disabled
+ uint32_t mLoopStart; // last setLoop loopStart
+ uint32_t mLoopEnd; // last setLoop loopEnd
+ int32_t mLoopCountNotified; // the last loopCount notified by callback.
+ // mLoopCountNotified counts down, matching
+ // the remaining loop count for static track
+ // playback.
+
+ // These are private to processAudioBuffer(), and are not protected by a lock
+ uint32_t mRemainingFrames; // number of frames to request in obtainBuffer()
+ bool mRetryOnPartialBuffer; // sleep and retry after partial obtainBuffer()
+ uint32_t mObservedSequence; // last observed value of mSequence
+
+ Modulo<uint32_t> mMarkerPosition; // in wrapping (overflow) frame units
+ bool mMarkerReached;
+ Modulo<uint32_t> mNewPosition; // in frames
+ uint32_t mUpdatePeriod; // in frames, zero means no EVENT_NEW_POS
+
+ Modulo<uint32_t> mServer; // in frames, last known mProxy->getPosition()
+ // which is count of frames consumed by server,
+ // reset by new IAudioTrack,
+ // whether it is reset by stop() is TBD
+ Modulo<uint32_t> mPosition; // in frames, like mServer except continues
+ // monotonically after new IAudioTrack,
+ // and could be easily widened to uint64_t
+ Modulo<uint32_t> mReleased; // count of frames released to server
+ // but not necessarily consumed by server,
+ // reset by stop() but continues monotonically
+ // after new IAudioTrack to restore mPosition,
+ // and could be easily widened to uint64_t
+ int64_t mStartUs; // the start time after flush or stop.
+ // only used for offloaded and direct tracks.
+ ExtendedTimestamp mStartEts; // Extended timestamp at start for normal
+ // AudioTracks.
+ AudioTimestamp mStartTs; // Timestamp at start for offloaded or direct
+ // AudioTracks.
+
+ bool mPreviousTimestampValid;// true if mPreviousTimestamp is valid
+ bool mTimestampStartupGlitchReported; // reduce log spam
+ bool mRetrogradeMotionReported; // reduce log spam
+ AudioTimestamp mPreviousTimestamp; // used to detect retrograde motion
+ ExtendedTimestamp::Location mPreviousLocation; // location used for previous timestamp
+
+ uint32_t mUnderrunCountOffset; // updated when restoring tracks
+
+ int64_t mFramesWritten; // total frames written. reset to zero after
+ // the start() following stop(). It is not
+ // changed after restoring the track or
+ // after flush.
+ int64_t mFramesWrittenServerOffset; // An offset to server frames due to
+ // restoring AudioTrack, or stop/start.
+ // This offset is also used for static tracks.
+ int64_t mFramesWrittenAtRestore; // Frames written at restore point (or frames
+ // delivered for static tracks).
+ // -1 indicates no previous restore point.
+
+ audio_output_flags_t mFlags; // same as mOrigFlags, except for bits that may
+ // be denied by client or server, such as
+ // AUDIO_OUTPUT_FLAG_FAST. mLock must be
+ // held to read or write those bits reliably.
+ audio_output_flags_t mOrigFlags; // as specified in constructor or set(), const
+
+ bool mDoNotReconnect;
+
+ audio_session_t mSessionId;
+ int mAuxEffectId;
+
+ mutable Mutex mLock;
+
+ int mPreviousPriority; // before start()
+ SchedPolicy mPreviousSchedulingGroup;
+ bool mAwaitBoost; // thread should wait for priority boost before running
+
+ // The proxy should only be referenced while a lock is held because the proxy isn't
+ // multi-thread safe, especially the SingleStateQueue part of the proxy.
+ // An exception is that a blocking ClientProxy::obtainBuffer() may be called without a lock,
+ // provided that the caller also holds an extra reference to the proxy and shared memory to keep
+ // them around in case they are replaced during the obtainBuffer().
+ sp<StaticAudioTrackClientProxy> mStaticProxy; // for type safety only
+ sp<AudioTrackClientProxy> mProxy; // primary owner of the memory
+
+ bool mInUnderrun; // whether track is currently in underrun state
+ uint32_t mPausedPosition;
+
+ // For Device Selection API
+    // a value of AUDIO_PORT_HANDLE_NONE indicates default (AudioPolicyManager) routing.
+ audio_port_handle_t mSelectedDeviceId;
+
+ sp<VolumeHandler> mVolumeHandler;
+
+private:
+ class DeathNotifier : public IBinder::DeathRecipient {
+ public:
+ DeathNotifier(AudioTrack* audioTrack) : mAudioTrack(audioTrack) { }
+ protected:
+ virtual void binderDied(const wp<IBinder>& who);
+ private:
+ const wp<AudioTrack> mAudioTrack;
+ };
+
+ sp<DeathNotifier> mDeathNotifier;
+ uint32_t mSequence; // incremented for each new IAudioTrack attempt
+ uid_t mClientUid;
+ pid_t mClientPid;
+
+ sp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
+ audio_port_handle_t mPortId; // unique ID allocated by audio policy
+};
+
+}; // namespace android
+
+#endif // ANDROID_AUDIOTRACK_H
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
new file mode 100644
index 0000000..0ad4231
--- /dev/null
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -0,0 +1,271 @@
+/*
+ * Copyright (C) 2007 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_IAUDIOFLINGER_H
+#define ANDROID_IAUDIOFLINGER_H
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <utils/RefBase.h>
+#include <utils/Errors.h>
+#include <binder/IInterface.h>
+#include <media/IAudioTrack.h>
+#include <media/IAudioRecord.h>
+#include <media/IAudioFlingerClient.h>
+#include <system/audio.h>
+#include <system/audio_effect.h>
+#include <system/audio_policy.h>
+#include <media/IEffect.h>
+#include <media/IEffectClient.h>
+#include <utils/String8.h>
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+class IAudioFlinger : public IInterface
+{
+public:
+ DECLARE_META_INTERFACE(AudioFlinger);
+
+
+ // invariant on exit for all APIs that return an sp<>:
+ // (return value != 0) == (*status == NO_ERROR)
+
+ /* create an audio track and registers it with AudioFlinger.
+ * return null if the track cannot be created.
+ */
+ virtual sp<IAudioTrack> createTrack(
+ audio_stream_type_t streamType,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ size_t *pFrameCount,
+ audio_output_flags_t *flags,
+ const sp<IMemory>& sharedBuffer,
+ // On successful return, AudioFlinger takes over the handle
+ // reference and will release it when the track is destroyed.
+ // However on failure, the client is responsible for release.
+ audio_io_handle_t output,
+ pid_t pid,
+ pid_t tid, // -1 means unused, otherwise must be valid non-0
+ audio_session_t *sessionId,
+ int clientUid,
+ status_t *status,
+ audio_port_handle_t portId) = 0;
+
+ virtual sp<IAudioRecord> openRecord(
+ // On successful return, AudioFlinger takes over the handle
+ // reference and will release it when the track is destroyed.
+ // However on failure, the client is responsible for release.
+ audio_io_handle_t input,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ const String16& callingPackage,
+ size_t *pFrameCount,
+ audio_input_flags_t *flags,
+ pid_t pid,
+ pid_t tid, // -1 means unused, otherwise must be valid non-0
+ int clientUid,
+ audio_session_t *sessionId,
+ size_t *notificationFrames,
+ sp<IMemory>& cblk,
+ sp<IMemory>& buffers, // return value 0 means it follows cblk
+ status_t *status,
+ audio_port_handle_t portId) = 0;
+
+ // FIXME Surprisingly, format/latency don't work for input handles
+
+ /* query the audio hardware state. This state never changes,
+ * and therefore can be cached.
+ */
+ virtual uint32_t sampleRate(audio_io_handle_t ioHandle) const = 0;
+
+ // reserved; formerly channelCount()
+
+ virtual audio_format_t format(audio_io_handle_t output) const = 0;
+ virtual size_t frameCount(audio_io_handle_t ioHandle) const = 0;
+
+ // return estimated latency in milliseconds
+ virtual uint32_t latency(audio_io_handle_t output) const = 0;
+
+ /* set/get the audio hardware state. This will probably be used by
+ * the preference panel, mostly.
+ */
+ virtual status_t setMasterVolume(float value) = 0;
+ virtual status_t setMasterMute(bool muted) = 0;
+
+ virtual float masterVolume() const = 0;
+ virtual bool masterMute() const = 0;
+
+ /* set/get stream type state. This will probably be used by
+ * the preference panel, mostly.
+ */
+ virtual status_t setStreamVolume(audio_stream_type_t stream, float value,
+ audio_io_handle_t output) = 0;
+ virtual status_t setStreamMute(audio_stream_type_t stream, bool muted) = 0;
+
+ virtual float streamVolume(audio_stream_type_t stream,
+ audio_io_handle_t output) const = 0;
+ virtual bool streamMute(audio_stream_type_t stream) const = 0;
+
+ // set audio mode
+ virtual status_t setMode(audio_mode_t mode) = 0;
+
+ // mic mute/state
+ virtual status_t setMicMute(bool state) = 0;
+ virtual bool getMicMute() const = 0;
+
+ virtual status_t setParameters(audio_io_handle_t ioHandle,
+ const String8& keyValuePairs) = 0;
+ virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys)
+ const = 0;
+
+ // Register an object to receive audio input/output change and track notifications.
+ // For a given calling pid, AudioFlinger disregards any registrations after the first.
+ // Thus the IAudioFlingerClient must be a singleton per process.
+ virtual void registerClient(const sp<IAudioFlingerClient>& client) = 0;
+
+ // retrieve the audio recording buffer size
+ // FIXME This API assumes a route, and so should be deprecated.
+ virtual size_t getInputBufferSize(uint32_t sampleRate, audio_format_t format,
+ audio_channel_mask_t channelMask) const = 0;
+
+ virtual status_t openOutput(audio_module_handle_t module,
+ audio_io_handle_t *output,
+ audio_config_t *config,
+ audio_devices_t *devices,
+ const String8& address,
+ uint32_t *latencyMs,
+ audio_output_flags_t flags) = 0;
+ virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
+ audio_io_handle_t output2) = 0;
+ virtual status_t closeOutput(audio_io_handle_t output) = 0;
+ virtual status_t suspendOutput(audio_io_handle_t output) = 0;
+ virtual status_t restoreOutput(audio_io_handle_t output) = 0;
+
+ virtual status_t openInput(audio_module_handle_t module,
+ audio_io_handle_t *input,
+ audio_config_t *config,
+ audio_devices_t *device,
+ const String8& address,
+ audio_source_t source,
+ audio_input_flags_t flags) = 0;
+ virtual status_t closeInput(audio_io_handle_t input) = 0;
+
+ virtual status_t invalidateStream(audio_stream_type_t stream) = 0;
+
+ virtual status_t setVoiceVolume(float volume) = 0;
+
+ virtual status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames,
+ audio_io_handle_t output) const = 0;
+
+ virtual uint32_t getInputFramesLost(audio_io_handle_t ioHandle) const = 0;
+
+ virtual audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t use) = 0;
+
+ virtual void acquireAudioSessionId(audio_session_t audioSession, pid_t pid) = 0;
+ virtual void releaseAudioSessionId(audio_session_t audioSession, pid_t pid) = 0;
+
+ virtual status_t queryNumberEffects(uint32_t *numEffects) const = 0;
+
+ virtual status_t queryEffect(uint32_t index, effect_descriptor_t *pDescriptor) const = 0;
+
+ virtual status_t getEffectDescriptor(const effect_uuid_t *pEffectUUID,
+ effect_descriptor_t *pDescriptor) const = 0;
+
+ virtual sp<IEffect> createEffect(
+ effect_descriptor_t *pDesc,
+ const sp<IEffectClient>& client,
+ int32_t priority,
+ // AudioFlinger doesn't take over handle reference from client
+ audio_io_handle_t output,
+ audio_session_t sessionId,
+ const String16& callingPackage,
+ pid_t pid,
+ status_t *status,
+ int *id,
+ int *enabled) = 0;
+
+ virtual status_t moveEffects(audio_session_t session, audio_io_handle_t srcOutput,
+ audio_io_handle_t dstOutput) = 0;
+
+ virtual audio_module_handle_t loadHwModule(const char *name) = 0;
+
+ // helpers for android.media.AudioManager.getProperty(), see description there for meaning
+ // FIXME move these APIs to AudioPolicy to permit a more accurate implementation
+ // that looks on primary device for a stream with fast flag, primary flag, or first one.
+ virtual uint32_t getPrimaryOutputSamplingRate() = 0;
+ virtual size_t getPrimaryOutputFrameCount() = 0;
+
+ // Intended for AudioService to inform AudioFlinger of device's low RAM attribute,
+ // and should be called at most once. For a definition of what "low RAM" means, see
+ // android.app.ActivityManager.isLowRamDevice().
+ virtual status_t setLowRamDevice(bool isLowRamDevice) = 0;
+
+ /* List available audio ports and their attributes */
+ virtual status_t listAudioPorts(unsigned int *num_ports,
+ struct audio_port *ports) = 0;
+
+ /* Get attributes for a given audio port */
+ virtual status_t getAudioPort(struct audio_port *port) = 0;
+
+ /* Create an audio patch between several source and sink ports */
+ virtual status_t createAudioPatch(const struct audio_patch *patch,
+ audio_patch_handle_t *handle) = 0;
+
+ /* Release an audio patch */
+ virtual status_t releaseAudioPatch(audio_patch_handle_t handle) = 0;
+
+ /* List existing audio patches */
+ virtual status_t listAudioPatches(unsigned int *num_patches,
+ struct audio_patch *patches) = 0;
+ /* Set audio port configuration */
+ virtual status_t setAudioPortConfig(const struct audio_port_config *config) = 0;
+
+ /* Get the HW synchronization source used for an audio session */
+ virtual audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId) = 0;
+
+ /* Indicate JAVA services are ready (scheduling, power management ...) */
+ virtual status_t systemReady() = 0;
+
+ // Returns the number of frames per audio HAL buffer.
+ virtual size_t frameCountHAL(audio_io_handle_t ioHandle) const = 0;
+};
+
+
+// ----------------------------------------------------------------------------
+
+class BnAudioFlinger : public BnInterface<IAudioFlinger>
+{
+public:
+ virtual status_t onTransact( uint32_t code,
+ const Parcel& data,
+ Parcel* reply,
+ uint32_t flags = 0);
+
+ // Requests media.log to start merging log buffers
+ virtual void requestLogMerge() = 0;
+};
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
+
+#endif // ANDROID_IAUDIOFLINGER_H
diff --git a/include/media/IAudioFlingerClient.h b/media/libaudioclient/include/media/IAudioFlingerClient.h
similarity index 100%
rename from include/media/IAudioFlingerClient.h
rename to media/libaudioclient/include/media/IAudioFlingerClient.h
diff --git a/media/libaudioclient/include/media/IAudioPolicyService.h b/media/libaudioclient/include/media/IAudioPolicyService.h
new file mode 100644
index 0000000..d111fd2
--- /dev/null
+++ b/media/libaudioclient/include/media/IAudioPolicyService.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_IAUDIOPOLICYSERVICE_H
+#define ANDROID_IAUDIOPOLICYSERVICE_H
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <utils/RefBase.h>
+#include <utils/Errors.h>
+#include <binder/IInterface.h>
+#include <media/AudioSystem.h>
+#include <media/AudioPolicy.h>
+#include <media/IAudioPolicyServiceClient.h>
+
+#include <system/audio_policy.h>
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+class IAudioPolicyService : public IInterface
+{
+public:
+ DECLARE_META_INTERFACE(AudioPolicyService);
+
+ //
+ // IAudioPolicyService interface (see AudioPolicyInterface for method descriptions)
+ //
+ virtual status_t setDeviceConnectionState(audio_devices_t device,
+ audio_policy_dev_state_t state,
+ const char *device_address,
+ const char *device_name) = 0;
+ virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
+ const char *device_address) = 0;
+ virtual status_t handleDeviceConfigChange(audio_devices_t device,
+ const char *device_address,
+ const char *device_name) = 0;
+ virtual status_t setPhoneState(audio_mode_t state) = 0;
+ virtual status_t setForceUse(audio_policy_force_use_t usage,
+ audio_policy_forced_cfg_t config) = 0;
+ virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) = 0;
+ virtual audio_io_handle_t getOutput(audio_stream_type_t stream,
+ uint32_t samplingRate = 0,
+ audio_format_t format = AUDIO_FORMAT_DEFAULT,
+ audio_channel_mask_t channelMask = 0,
+ audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+ const audio_offload_info_t *offloadInfo = NULL) = 0;
+ virtual status_t getOutputForAttr(const audio_attributes_t *attr,
+ audio_io_handle_t *output,
+ audio_session_t session,
+ audio_stream_type_t *stream,
+ uid_t uid,
+ const audio_config_t *config,
+ audio_output_flags_t flags,
+ audio_port_handle_t selectedDeviceId,
+ audio_port_handle_t *portId) = 0;
+ virtual status_t startOutput(audio_io_handle_t output,
+ audio_stream_type_t stream,
+ audio_session_t session) = 0;
+ virtual status_t stopOutput(audio_io_handle_t output,
+ audio_stream_type_t stream,
+ audio_session_t session) = 0;
+ virtual void releaseOutput(audio_io_handle_t output,
+ audio_stream_type_t stream,
+ audio_session_t session) = 0;
+ virtual status_t getInputForAttr(const audio_attributes_t *attr,
+ audio_io_handle_t *input,
+ audio_session_t session,
+ pid_t pid,
+ uid_t uid,
+ const audio_config_base_t *config,
+ audio_input_flags_t flags,
+ audio_port_handle_t selectedDeviceId,
+ audio_port_handle_t *portId) = 0;
+ virtual status_t startInput(audio_io_handle_t input,
+ audio_session_t session) = 0;
+ virtual status_t stopInput(audio_io_handle_t input,
+ audio_session_t session) = 0;
+ virtual void releaseInput(audio_io_handle_t input,
+ audio_session_t session) = 0;
+ virtual status_t initStreamVolume(audio_stream_type_t stream,
+ int indexMin,
+ int indexMax) = 0;
+ virtual status_t setStreamVolumeIndex(audio_stream_type_t stream,
+ int index,
+ audio_devices_t device) = 0;
+ virtual status_t getStreamVolumeIndex(audio_stream_type_t stream,
+ int *index,
+ audio_devices_t device) = 0;
+ virtual uint32_t getStrategyForStream(audio_stream_type_t stream) = 0;
+ virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream) = 0;
+ virtual audio_io_handle_t getOutputForEffect(const effect_descriptor_t *desc) = 0;
+ virtual status_t registerEffect(const effect_descriptor_t *desc,
+ audio_io_handle_t io,
+ uint32_t strategy,
+ audio_session_t session,
+ int id) = 0;
+ virtual status_t unregisterEffect(int id) = 0;
+ virtual status_t setEffectEnabled(int id, bool enabled) = 0;
+ virtual bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const = 0;
+ virtual bool isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs = 0)
+ const = 0;
+ virtual bool isSourceActive(audio_source_t source) const = 0;
+ virtual status_t queryDefaultPreProcessing(audio_session_t audioSession,
+ effect_descriptor_t *descriptors,
+ uint32_t *count) = 0;
+ // Check if offload is possible for given format, stream type, sample rate,
+ // bit rate, duration, video and streaming or offload property is enabled
+ virtual bool isOffloadSupported(const audio_offload_info_t& info) = 0;
+
+ /* List available audio ports and their attributes */
+ virtual status_t listAudioPorts(audio_port_role_t role,
+ audio_port_type_t type,
+ unsigned int *num_ports,
+ struct audio_port *ports,
+ unsigned int *generation) = 0;
+
+ /* Get attributes for a given audio port */
+ virtual status_t getAudioPort(struct audio_port *port) = 0;
+
+ /* Create an audio patch between several source and sink ports */
+ virtual status_t createAudioPatch(const struct audio_patch *patch,
+ audio_patch_handle_t *handle) = 0;
+
+ /* Release an audio patch */
+ virtual status_t releaseAudioPatch(audio_patch_handle_t handle) = 0;
+
+ /* List existing audio patches */
+ virtual status_t listAudioPatches(unsigned int *num_patches,
+ struct audio_patch *patches,
+ unsigned int *generation) = 0;
+ /* Set audio port configuration */
+ virtual status_t setAudioPortConfig(const struct audio_port_config *config) = 0;
+
+ virtual void registerClient(const sp<IAudioPolicyServiceClient>& client) = 0;
+
+ virtual void setAudioPortCallbacksEnabled(bool enabled) = 0;
+
+ virtual status_t acquireSoundTriggerSession(audio_session_t *session,
+ audio_io_handle_t *ioHandle,
+ audio_devices_t *device) = 0;
+
+ virtual status_t releaseSoundTriggerSession(audio_session_t session) = 0;
+
+ virtual audio_mode_t getPhoneState() = 0;
+
+ virtual status_t registerPolicyMixes(const Vector<AudioMix>& mixes, bool registration) = 0;
+
+ virtual status_t startAudioSource(const struct audio_port_config *source,
+ const audio_attributes_t *attributes,
+ audio_patch_handle_t *handle) = 0;
+ virtual status_t stopAudioSource(audio_patch_handle_t handle) = 0;
+
+ virtual status_t setMasterMono(bool mono) = 0;
+ virtual status_t getMasterMono(bool *mono) = 0;
+};
+
+
+// ----------------------------------------------------------------------------
+
+class BnAudioPolicyService : public BnInterface<IAudioPolicyService>
+{
+public:
+ virtual status_t onTransact( uint32_t code,
+ const Parcel& data,
+ Parcel* reply,
+ uint32_t flags = 0);
+};
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
+
+#endif // ANDROID_IAUDIOPOLICYSERVICE_H
diff --git a/include/media/IAudioPolicyServiceClient.h b/media/libaudioclient/include/media/IAudioPolicyServiceClient.h
similarity index 100%
rename from include/media/IAudioPolicyServiceClient.h
rename to media/libaudioclient/include/media/IAudioPolicyServiceClient.h
diff --git a/include/media/IAudioRecord.h b/media/libaudioclient/include/media/IAudioRecord.h
similarity index 100%
rename from include/media/IAudioRecord.h
rename to media/libaudioclient/include/media/IAudioRecord.h
diff --git a/media/libaudioclient/include/media/IAudioTrack.h b/media/libaudioclient/include/media/IAudioTrack.h
new file mode 100644
index 0000000..27a62d6
--- /dev/null
+++ b/media/libaudioclient/include/media/IAudioTrack.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2007 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_IAUDIOTRACK_H
+#define ANDROID_IAUDIOTRACK_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <utils/RefBase.h>
+#include <utils/Errors.h>
+#include <binder/IInterface.h>
+#include <binder/IMemory.h>
+#include <utils/String8.h>
+#include <media/AudioTimestamp.h>
+#include <media/VolumeShaper.h>
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+class IAudioTrack : public IInterface
+{
+public:
+ DECLARE_META_INTERFACE(AudioTrack);
+
+ /* Get this track's control block */
+ virtual sp<IMemory> getCblk() const = 0;
+
+ /* After it's created the track is not active. Call start() to
+ * make it active.
+ */
+ virtual status_t start() = 0;
+
+ /* Stop a track. If set, the callback will cease being called and
+ * obtainBuffer will return an error. Buffers that are already released
+ * will continue to be processed, unless/until flush() is called.
+ */
+ virtual void stop() = 0;
+
+ /* Flush a stopped or paused track. All pending/released buffers are discarded.
+ * This function has no effect if the track is not stopped or paused.
+ */
+ virtual void flush() = 0;
+
+ /* Pause a track. If set, the callback will cease being called and
+ * obtainBuffer will return an error. Buffers that are already released
+ * will continue to be processed, unless/until flush() is called.
+ */
+ virtual void pause() = 0;
+
+ /* Attach track auxiliary output to specified effect. Use effectId = 0
+ * to detach track from effect.
+ */
+ virtual status_t attachAuxEffect(int effectId) = 0;
+
+ /* Send parameters to the audio hardware */
+ virtual status_t setParameters(const String8& keyValuePairs) = 0;
+
+ /* Return NO_ERROR if timestamp is valid. timestamp is undefined otherwise. */
+ virtual status_t getTimestamp(AudioTimestamp& timestamp) = 0;
+
+ /* Signal the playback thread for a change in control block */
+ virtual void signal() = 0;
+
+ /* Sets the volume shaper */
+ virtual VolumeShaper::Status applyVolumeShaper(
+ const sp<VolumeShaper::Configuration>& configuration,
+ const sp<VolumeShaper::Operation>& operation) = 0;
+
+ /* gets the volume shaper state */
+ virtual sp<VolumeShaper::State> getVolumeShaperState(int id) = 0;
+};
+
+// ----------------------------------------------------------------------------
+
+class BnAudioTrack : public BnInterface<IAudioTrack>
+{
+public:
+ virtual status_t onTransact( uint32_t code,
+ const Parcel& data,
+ Parcel* reply,
+ uint32_t flags = 0);
+};
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
+
+#endif // ANDROID_IAUDIOTRACK_H
diff --git a/include/media/IEffect.h b/media/libaudioclient/include/media/IEffect.h
similarity index 100%
rename from include/media/IEffect.h
rename to media/libaudioclient/include/media/IEffect.h
diff --git a/include/media/IEffectClient.h b/media/libaudioclient/include/media/IEffectClient.h
similarity index 100%
rename from include/media/IEffectClient.h
rename to media/libaudioclient/include/media/IEffectClient.h
diff --git a/media/libaudioclient/include/media/PlayerBase.h b/media/libaudioclient/include/media/PlayerBase.h
new file mode 100644
index 0000000..fe1db7b
--- /dev/null
+++ b/media/libaudioclient/include/media/PlayerBase.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ANDROID_PLAYER_BASE_H__
+#define __ANDROID_PLAYER_BASE_H__
+
+#include <audiomanager/IPlayer.h>
+#include <audiomanager/AudioManager.h>
+#include <audiomanager/IAudioManager.h>
+
+
+namespace android {
+
+class PlayerBase : public BnPlayer
+{
+public:
+ explicit PlayerBase();
+ virtual ~PlayerBase();
+
+ virtual void destroy() = 0;
+
+ //IPlayer implementation
+ virtual void start();
+ virtual void pause();
+ virtual void stop();
+ virtual void setVolume(float vol);
+ virtual void setPan(float pan);
+ virtual void setStartDelayMs(int32_t delayMs);
+ virtual void applyVolumeShaper(
+ const sp<VolumeShaper::Configuration>& configuration,
+ const sp<VolumeShaper::Operation>& operation) override;
+
+ virtual status_t onTransact(
+ uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags);
+
+
+ status_t startWithStatus();
+
+ //FIXME temporary method while some player state is outside of this class
+ void reportEvent(player_state_t event);
+
+protected:
+
+ void init(player_type_t playerType, audio_usage_t usage);
+ void baseDestroy();
+
+ //IPlayer methods handlers for derived classes
+ virtual status_t playerStart() { return NO_ERROR; }
+ virtual status_t playerPause() { return NO_ERROR; }
+ virtual status_t playerStop() { return NO_ERROR; }
+ virtual status_t playerSetVolume() { return NO_ERROR; }
+
+ // mutex for IPlayer volume and pan, and player-specific volume
+ Mutex mSettingsLock;
+
+ // volume multipliers coming from the IPlayer volume and pan controls
+ float mPanMultiplierL, mPanMultiplierR;
+ float mVolumeMultiplierL, mVolumeMultiplierR;
+
+private:
+ // report events to AudioService
+ void servicePlayerEvent(player_state_t event);
+ void serviceReleasePlayer();
+
+ // native interface to AudioService
+ android::sp<android::IAudioManager> mAudioManager;
+
+ // player interface ID, uniquely identifies the player in the system
+ audio_unique_id_t mPIId;
+
+ // Mutex for state reporting
+ Mutex mPlayerStateLock;
+ player_state_t mLastReportedEvent;
+};
+
+} // namespace android
+
+#endif /* __ANDROID_PLAYER_BASE_H__ */
diff --git a/media/libaudioclient/include/media/ToneGenerator.h b/media/libaudioclient/include/media/ToneGenerator.h
new file mode 100644
index 0000000..fc3d3ee
--- /dev/null
+++ b/media/libaudioclient/include/media/ToneGenerator.h
@@ -0,0 +1,327 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_TONEGENERATOR_H_
+#define ANDROID_TONEGENERATOR_H_
+
+#include <media/AudioSystem.h>
+#include <media/AudioTrack.h>
+#include <utils/Compat.h>
+#include <utils/KeyedVector.h>
+#include <utils/RefBase.h>
+#include <utils/threads.h>
+
+namespace android {
+
+class ToneGenerator {
+public:
+
+ // List of all available tones
+    // This enum must be kept consistent with the constants in the ToneGenerator Java class
+ enum tone_type {
+ // DTMF tones ITU-T Recommendation Q.23
+ TONE_DTMF_0 = 0, // 0 key: 1336Hz, 941Hz
+ TONE_DTMF_1, // 1 key: 1209Hz, 697Hz
+ TONE_DTMF_2, // 2 key: 1336Hz, 697Hz
+ TONE_DTMF_3, // 3 key: 1477Hz, 697Hz
+ TONE_DTMF_4, // 4 key: 1209Hz, 770Hz
+ TONE_DTMF_5, // 5 key: 1336Hz, 770Hz
+ TONE_DTMF_6, // 6 key: 1477Hz, 770Hz
+ TONE_DTMF_7, // 7 key: 1209Hz, 852Hz
+ TONE_DTMF_8, // 8 key: 1336Hz, 852Hz
+ TONE_DTMF_9, // 9 key: 1477Hz, 852Hz
+ TONE_DTMF_S, // * key: 1209Hz, 941Hz
+ TONE_DTMF_P, // # key: 1477Hz, 941Hz
+ TONE_DTMF_A, // A key: 1633Hz, 697Hz
+ TONE_DTMF_B, // B key: 1633Hz, 770Hz
+ TONE_DTMF_C, // C key: 1633Hz, 852Hz
+ TONE_DTMF_D, // D key: 1633Hz, 941Hz
+ // Call supervisory tones: 3GPP TS 22.001 (CEPT)
+ TONE_SUP_DIAL, // Dial tone: CEPT: 425Hz, continuous
+ FIRST_SUP_TONE = TONE_SUP_DIAL,
+ TONE_SUP_BUSY, // Busy tone, CEPT: 425Hz, 500ms ON, 500ms OFF...
+ TONE_SUP_CONGESTION, // Congestion tone CEPT, JAPAN: 425Hz, 200ms ON, 200ms OFF...
+ TONE_SUP_RADIO_ACK, // Radio path acknowlegment, CEPT, ANSI: 425Hz, 200ms ON
+ TONE_SUP_RADIO_NOTAVAIL, // Radio path not available: 425Hz, 200ms ON, 200 OFF 3 bursts
+ TONE_SUP_ERROR, // Error/Special info: 950Hz+1400Hz+1800Hz, 330ms ON, 1s OFF...
+ TONE_SUP_CALL_WAITING, // Call Waiting CEPT,JAPAN: 425Hz, 200ms ON, 600ms OFF, 200ms ON, 3s OFF...
+ TONE_SUP_RINGTONE, // Ring Tone CEPT, JAPAN: 425Hz, 1s ON, 4s OFF...
+ LAST_SUP_TONE = TONE_SUP_RINGTONE,
+ // Proprietary tones: 3GPP TS 31.111
+ TONE_PROP_BEEP, // General beep: 400Hz+1200Hz, 35ms ON
+ TONE_PROP_ACK, // Positive Acknowlgement: 1200Hz, 100ms ON, 100ms OFF 2 bursts
+ TONE_PROP_NACK, // Negative Acknowlgement: 300Hz+400Hz+500Hz, 400ms ON
+ TONE_PROP_PROMPT, // Prompt tone: 400Hz+1200Hz, 200ms ON
+ TONE_PROP_BEEP2, // General double beep: 400Hz+1200Hz, 35ms ON, 200ms OFF, 35ms on
+ // Additional call supervisory tones: specified by IS-95 only
+ TONE_SUP_INTERCEPT, // Intercept tone: alternating 440 Hz and 620 Hz tones, each on for 250 ms.
+ TONE_SUP_INTERCEPT_ABBREV, // Abbreviated intercept: intercept tone limited to 4 seconds
+ TONE_SUP_CONGESTION_ABBREV, // Abbreviated congestion: congestion tone limited to 4 seconds
+ TONE_SUP_CONFIRM, // Confirm tone: a 350 Hz tone added to a 440 Hz tone repeated 3 times in a 100 ms on, 100 ms off cycle.
+ TONE_SUP_PIP, // Pip tone: four bursts of 480 Hz tone (0.1 s on, 0.1 s off).
+
+ // CDMA Tones
+ TONE_CDMA_DIAL_TONE_LITE,
+ TONE_CDMA_NETWORK_USA_RINGBACK,
+ TONE_CDMA_INTERCEPT,
+ TONE_CDMA_ABBR_INTERCEPT,
+ TONE_CDMA_REORDER,
+ TONE_CDMA_ABBR_REORDER,
+ TONE_CDMA_NETWORK_BUSY,
+ TONE_CDMA_CONFIRM,
+ TONE_CDMA_ANSWER,
+ TONE_CDMA_NETWORK_CALLWAITING,
+ TONE_CDMA_PIP,
+
+ // ISDN
+ TONE_CDMA_CALL_SIGNAL_ISDN_NORMAL, // ISDN Alert Normal
+ TONE_CDMA_CALL_SIGNAL_ISDN_INTERGROUP, // ISDN Intergroup
+ TONE_CDMA_CALL_SIGNAL_ISDN_SP_PRI, // ISDN SP PRI
+ TONE_CDMA_CALL_SIGNAL_ISDN_PAT3, // ISDN Alert PAT3
+ TONE_CDMA_CALL_SIGNAL_ISDN_PING_RING, // ISDN Alert PING RING
+ TONE_CDMA_CALL_SIGNAL_ISDN_PAT5, // ISDN Alert PAT5
+ TONE_CDMA_CALL_SIGNAL_ISDN_PAT6, // ISDN Alert PAT6
+ TONE_CDMA_CALL_SIGNAL_ISDN_PAT7, // ISDN Alert PAT7
+ // ISDN end
+
+ // IS54
+ TONE_CDMA_HIGH_L, // IS54 High Pitch Long
+ TONE_CDMA_MED_L, // IS54 Med Pitch Long
+ TONE_CDMA_LOW_L, // IS54 Low Pitch Long
+ TONE_CDMA_HIGH_SS, // IS54 High Pitch Short Short
+ TONE_CDMA_MED_SS, // IS54 Medium Pitch Short Short
+ TONE_CDMA_LOW_SS, // IS54 Low Pitch Short Short
+ TONE_CDMA_HIGH_SSL, // IS54 High Pitch Short Short Long
+ TONE_CDMA_MED_SSL, // IS54 Medium Pitch Short Short Long
+ TONE_CDMA_LOW_SSL, // IS54 Low Pitch Short Short Long
+ TONE_CDMA_HIGH_SS_2, // IS54 High Pitch Short Short 2
+ TONE_CDMA_MED_SS_2, // IS54 Med Pitch Short Short 2
+ TONE_CDMA_LOW_SS_2, // IS54 Low Pitch Short Short 2
+ TONE_CDMA_HIGH_SLS, // IS54 High Pitch Short Long Short
+ TONE_CDMA_MED_SLS, // IS54 Med Pitch Short Long Short
+ TONE_CDMA_LOW_SLS, // IS54 Low Pitch Short Long Short
+ TONE_CDMA_HIGH_S_X4, // IS54 High Pitch Short Short Short Short
+ TONE_CDMA_MED_S_X4, // IS54 Med Pitch Short Short Short Short
+ TONE_CDMA_LOW_S_X4, // IS54 Low Pitch Short Short Short Short
+ TONE_CDMA_HIGH_PBX_L, // PBX High Pitch Long
+ TONE_CDMA_MED_PBX_L, // PBX Med Pitch Long
+ TONE_CDMA_LOW_PBX_L, // PBX Low Pitch Long
+ TONE_CDMA_HIGH_PBX_SS, // PBX High Short Short
+ TONE_CDMA_MED_PBX_SS, // PBX Med Short Short
+ TONE_CDMA_LOW_PBX_SS, // PBX Low Short Short
+ TONE_CDMA_HIGH_PBX_SSL, // PBX High Short Short Long
+ TONE_CDMA_MED_PBX_SSL, // PBX Med Short Short Long
+ TONE_CDMA_LOW_PBX_SSL, // PBX Low Short Short Long
+ TONE_CDMA_HIGH_PBX_SLS, // PBX High SLS
+ TONE_CDMA_MED_PBX_SLS, // PBX Med SLS
+ TONE_CDMA_LOW_PBX_SLS, // PBX Low SLS
+ TONE_CDMA_HIGH_PBX_S_X4, // PBX High SSSS
+ TONE_CDMA_MED_PBX_S_X4, // PBX Med SSSS
+ TONE_CDMA_LOW_PBX_S_X4, // PBX LOW SSSS
+ //IS54 end
+ // proprietary
+ TONE_CDMA_ALERT_NETWORK_LITE,
+ TONE_CDMA_ALERT_AUTOREDIAL_LITE,
+ TONE_CDMA_ONE_MIN_BEEP,
+ TONE_CDMA_KEYPAD_VOLUME_KEY_LITE,
+ TONE_CDMA_PRESSHOLDKEY_LITE,
+ TONE_CDMA_ALERT_INCALL_LITE,
+ TONE_CDMA_EMERGENCY_RINGBACK,
+ TONE_CDMA_ALERT_CALL_GUARD,
+ TONE_CDMA_SOFT_ERROR_LITE,
+ TONE_CDMA_CALLDROP_LITE,
+ // proprietary end
+ TONE_CDMA_NETWORK_BUSY_ONE_SHOT,
+ TONE_CDMA_ABBR_ALERT,
+ TONE_CDMA_SIGNAL_OFF,
+ //CDMA end
+ NUM_TONES,
+ NUM_SUP_TONES = LAST_SUP_TONE-FIRST_SUP_TONE+1
+ };
+
+ ToneGenerator(audio_stream_type_t streamType, float volume, bool threadCanCallJava = false);
+ ~ToneGenerator();
+
+ bool startTone(tone_type toneType, int durationMs = -1);
+ void stopTone();
+
+ bool isInited() { return (mState == TONE_IDLE)?false:true;}
+
+ // returns the audio session this ToneGenerator belongs to or 0 if an error occured.
+ int getSessionId() { return (mpAudioTrack == 0) ? 0 : mpAudioTrack->getSessionId(); }
+
+private:
+
+ enum tone_state {
+ TONE_IDLE, // ToneGenerator is being initialized or initialization failed
+ TONE_INIT, // ToneGenerator has been successfully initialized and is not playing
+ TONE_STARTING, // ToneGenerator is starting playing
+ TONE_PLAYING, // ToneGenerator is playing
+        TONE_STOPPING,  // ToneGenerator is stopping
+ TONE_STOPPED, // ToneGenerator is stopped: the AudioTrack will be stopped
+ TONE_RESTARTING // A start request was received in active state (playing or stopping)
+ };
+
+
+ // Region specific tones.
+ // These supervisory tones are different depending on the region (USA/CANADA, JAPAN, rest of the world).
+ // When a tone in the range [FIRST_SUP_TONE, LAST_SUP_TONE] is requested, the region is determined
+ // from system property gsm.operator.iso-country and the proper tone descriptor is selected with the
+ // help of sToneMappingTable[]
+ enum regional_tone_type {
+ // ANSI supervisory tones
+ TONE_ANSI_DIAL = NUM_TONES, // Dial tone: a continuous 350 Hz + 440 Hz tone.
+ TONE_ANSI_BUSY, // Busy tone on: a 480 Hz + 620 Hz tone repeated in a 500 ms on, 500 ms off cycle.
+ TONE_ANSI_CONGESTION, // Network congestion (reorder) tone on: a 480 Hz + 620 Hz tone repeated in a 250 ms on, 250 ms off cycle.
+ TONE_ANSI_CALL_WAITING, // Call waiting tone on: 440 Hz, on for 300 ms, 9,7 s off followed by
+ // (440 Hz, on for 100 ms off for 100 ms, on for 100 ms, 9,7s off and repeated as necessary).
+ TONE_ANSI_RINGTONE, // Ring Tone: a 440 Hz + 480 Hz tone repeated in a 2 s on, 4 s off pattern.
+ // JAPAN Supervisory tones
+ TONE_JAPAN_DIAL, // Dial tone: 400Hz, continuous
+ TONE_JAPAN_BUSY, // Busy tone: 400Hz, 500ms ON, 500ms OFF...
+ TONE_JAPAN_RADIO_ACK, // Radio path acknowlegment: 400Hz, 1s ON, 2s OFF...
+ // GB Supervisory tones
+ TONE_GB_RINGTONE, // Ring Tone: A 400Hz + 450Hz tone repeated in a 0.4s on, 0.2s off, 0.4s on, 2.0s off pattern.
+ // AUSTRALIA Supervisory tones
+ TONE_AUSTRALIA_RINGTONE, // Ring tone: A 400Hz + 450Hz tone repeated in a 0.4s on, 0.2s off, 0.4s on, 2.0s off pattern.
+ TONE_AUSTRALIA_BUSY, // Busy tone: 425 Hz repeated in a 0.375s on, 0.375s off pattern.
+ TONE_AUSTRALIA_CALL_WAITING,// Call waiting tone: 425Hz tone repeated in a 0.2s on, 0.2s off, 0.2s on, 4.4s off pattern.
+ TONE_AUSTRALIA_CONGESTION, // Congestion tone: 425Hz tone repeated in a 0.375s on, 0.375s off pattern
+ NUM_ALTERNATE_TONES
+ };
+
+ enum region {
+ ANSI,
+ JAPAN,
+ GB,
+ AUSTRALIA,
+ CEPT,
+ NUM_REGIONS
+ };
+
+ static const unsigned char sToneMappingTable[NUM_REGIONS-1][NUM_SUP_TONES];
+
+    static const unsigned int TONEGEN_MAX_WAVES = 3;     // Maximum number of sine waves in a tone segment
+    static const unsigned int TONEGEN_MAX_SEGMENTS = 12; // Maximum number of segments in a tone descriptor
+ static const unsigned int TONEGEN_INF = 0xFFFFFFFF; // Represents infinite time duration
+ static const CONSTEXPR float TONEGEN_GAIN = 0.9; // Default gain passed to WaveGenerator().
+
+ // ToneDescriptor class contains all parameters needed to generate a tone:
+ // - The array waveFreq[]:
+ // 1 for static tone descriptors: contains the frequencies of all individual waves making the multi-tone.
+ // 2 for active tone descritors: contains the indexes of the WaveGenerator objects in mWaveGens
+ // The number of sine waves varies from 1 to TONEGEN_MAX_WAVES.
+ // The first null value indicates that no more waves are needed.
+ // - The array segments[] is used to generate the tone pulses. A segment is a period of time
+ // during which the tone is ON or OFF. Segments with even index (starting from 0)
+ // correspond to tone ON state and segments with odd index to OFF state.
+ // The data stored in segments[] is the duration of the corresponding period in ms.
+ // The first segment encountered with a 0 duration indicates that no more segment follows.
+    //        - loopCnt - Number of times to repeat a sequence of segments after playing this one
+    //        - loopIndx - The segment index to go back to and play if loopCnt > 0
+ // - repeatCnt indicates the number of times the sequence described by segments[] array must be repeated.
+ // When the tone generator encounters the first 0 duration segment, it will compare repeatCnt to mCurCount.
+ // If mCurCount > repeatCnt, the tone is stopped automatically. Otherwise, tone sequence will be
+ // restarted from segment repeatSegment.
+ // - repeatSegment number of the first repeated segment when repeatCnt is not null
+
+ class ToneSegment {
+ public:
+ unsigned int duration;
+ unsigned short waveFreq[TONEGEN_MAX_WAVES+1];
+ unsigned short loopCnt;
+ unsigned short loopIndx;
+ };
+
+ class ToneDescriptor {
+ public:
+ ToneSegment segments[TONEGEN_MAX_SEGMENTS+1];
+ unsigned long repeatCnt;
+ unsigned long repeatSegment;
+ };
+
+ static const ToneDescriptor sToneDescriptors[];
+
+ bool mThreadCanCallJava;
+ unsigned int mTotalSmp; // Total number of audio samples played (gives current time)
+ unsigned int mNextSegSmp; // Position of next segment transition expressed in samples
+ // NOTE: because mTotalSmp, mNextSegSmp are stored on 32 bit, current design will operate properly
+ // only if tone duration is less than about 27 Hours(@44100Hz sampling rate). If this time is exceeded,
+ // no crash will occur but tone sequence will show a glitch.
+ unsigned int mMaxSmp; // Maximum number of audio samples played (maximun tone duration)
+ int mDurationMs; // Maximum tone duration in ms
+
+ unsigned short mCurSegment; // Current segment index in ToneDescriptor segments[]
+ unsigned short mCurCount; // Current sequence repeat count
+ volatile unsigned short mState; // ToneGenerator state (tone_state)
+ unsigned short mRegion;
+ const ToneDescriptor *mpToneDesc; // pointer to active tone descriptor
+ const ToneDescriptor *mpNewToneDesc; // pointer to next active tone descriptor
+
+ unsigned short mLoopCounter; // Current tone loopback count
+
+ uint32_t mSamplingRate; // AudioFlinger Sampling rate
+ sp<AudioTrack> mpAudioTrack; // Pointer to audio track used for playback
+    Mutex mLock;                        // Mutex to control concurrent access to the ToneGenerator object from the audio callback and the application API
+ Mutex mCbkCondLock; // Mutex associated to mWaitCbkCond
+ Condition mWaitCbkCond; // condition enabling interface to wait for audio callback completion after a change is requested
+ float mVolume; // Volume applied to audio track
+ audio_stream_type_t mStreamType; // Audio stream used for output
+ unsigned int mProcessSize; // Size of audio blocks generated at a time by audioCallback() (in PCM frames).
+    struct timespec mStartTime;         // tone start time: needed to guarantee the actual tone duration
+
+ bool initAudioTrack();
+ static void audioCallback(int event, void* user, void *info);
+ bool prepareWave();
+ unsigned int numWaves(unsigned int segmentIdx);
+ void clearWaveGens();
+ tone_type getToneForRegion(tone_type toneType);
+
+ // WaveGenerator generates a single sine wave
+ class WaveGenerator {
+ public:
+ enum gen_command {
+ WAVEGEN_START, // Start/restart wave from phase 0
+ WAVEGEN_CONT, // Continue wave from current phase
+ WAVEGEN_STOP // Stop wave on zero crossing
+ };
+
+ WaveGenerator(uint32_t samplingRate, unsigned short frequency,
+ float volume);
+ ~WaveGenerator();
+
+ void getSamples(short *outBuffer, unsigned int count,
+ unsigned int command);
+
+ private:
+ static const short GEN_AMP = 32000; // amplitude of generator
+ static const short S_Q14 = 14; // shift for Q14
+ static const short S_Q15 = 15; // shift for Q15
+
+ short mA1_Q14; // Q14 coefficient
+ // delay line of full amplitude generator
+ long mS1, mS2; // delay line S2 oldest
+ short mS2_0; // saved value for reinitialisation
+ short mAmplitude_Q15; // Q15 amplitude
+ };
+
+ KeyedVector<unsigned short, WaveGenerator *> mWaveGens; // list of active wave generators.
+};
+
+} // namespace android
+
+
+#endif /*ANDROID_TONEGENERATOR_H_*/
diff --git a/media/libaudioclient/include/media/TrackPlayerBase.h b/media/libaudioclient/include/media/TrackPlayerBase.h
new file mode 100644
index 0000000..2d113c0
--- /dev/null
+++ b/media/libaudioclient/include/media/TrackPlayerBase.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ANDROID_TRACK_PLAYER_BASE_H__
+#define __ANDROID_TRACK_PLAYER_BASE_H__
+
+#include <media/AudioTrack.h>
+#include <media/PlayerBase.h>
+
+namespace android {
+
+class TrackPlayerBase : public PlayerBase
+{
+public:
+ explicit TrackPlayerBase();
+ virtual ~TrackPlayerBase();
+
+ void init(AudioTrack* pat, player_type_t playerType, audio_usage_t usage);
+ virtual void destroy();
+
+ //IPlayer implementation
+ virtual void applyVolumeShaper(
+ const sp<VolumeShaper::Configuration>& configuration,
+ const sp<VolumeShaper::Operation>& operation);
+
+ //FIXME move to protected field, so far made public to minimize changes to AudioTrack logic
+ sp<AudioTrack> mAudioTrack;
+
+ void setPlayerVolume(float vl, float vr);
+
+protected:
+
+ //PlayerBase virtuals
+ virtual status_t playerStart();
+ virtual status_t playerPause();
+ virtual status_t playerStop();
+ virtual status_t playerSetVolume();
+
+private:
+ void doDestroy();
+ status_t doSetVolume();
+
+ // volume coming from the player volume API
+ float mPlayerVolumeL, mPlayerVolumeR;
+};
+
+} // namespace android
+
+#endif /* __ANDROID_TRACK_PLAYER_BASE_H__ */
diff --git a/media/libaudiohal/Android.mk b/media/libaudiohal/Android.mk
new file mode 100644
index 0000000..e592169
--- /dev/null
+++ b/media/libaudiohal/Android.mk
@@ -0,0 +1,70 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_SHARED_LIBRARIES := \
+ libcutils \
+ liblog \
+ libutils \
+ libhardware
+
+LOCAL_SRC_FILES := \
+ DeviceHalLocal.cpp \
+ DevicesFactoryHalHybrid.cpp \
+ DevicesFactoryHalLocal.cpp \
+ StreamHalLocal.cpp
+
+LOCAL_CFLAGS := -Wall -Werror
+
+ifeq ($(USE_LEGACY_LOCAL_AUDIO_HAL), true)
+
+# Use audiohal directly w/o hwbinder middleware.
+# This is for performance comparison and debugging only.
+
+LOCAL_SRC_FILES += \
+ EffectBufferHalLocal.cpp \
+ EffectsFactoryHalLocal.cpp \
+ EffectHalLocal.cpp
+
+LOCAL_SHARED_LIBRARIES += \
+ libeffects
+
+LOCAL_CFLAGS += -DUSE_LEGACY_LOCAL_AUDIO_HAL
+
+else # if !USE_LEGACY_LOCAL_AUDIO_HAL
+
+LOCAL_SRC_FILES += \
+ ConversionHelperHidl.cpp \
+ HalDeathHandlerHidl.cpp \
+ DeviceHalHidl.cpp \
+ DevicesFactoryHalHidl.cpp \
+ EffectBufferHalHidl.cpp \
+ EffectHalHidl.cpp \
+ EffectsFactoryHalHidl.cpp \
+ StreamHalHidl.cpp
+
+LOCAL_SHARED_LIBRARIES += \
+ libbase \
+ libfmq \
+ libhwbinder \
+ libhidlbase \
+ libhidlmemory \
+ libhidltransport \
+ android.hardware.audio@2.0 \
+ android.hardware.audio.common@2.0 \
+ android.hardware.audio.common@2.0-util \
+ android.hardware.audio.effect@2.0 \
+ android.hidl.allocator@1.0 \
+ android.hidl.memory@1.0 \
+ libmedia_helper \
+ libmediautils
+
+endif # USE_LEGACY_LOCAL_AUDIO_HAL
+
+LOCAL_C_INCLUDES := $(LOCAL_PATH)/include
+
+LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/include
+
+LOCAL_MODULE := libaudiohal
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libaudiohal/ConversionHelperHidl.cpp b/media/libaudiohal/ConversionHelperHidl.cpp
new file mode 100644
index 0000000..f60bf8b
--- /dev/null
+++ b/media/libaudiohal/ConversionHelperHidl.cpp
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string.h>
+
+#define LOG_TAG "HalHidl"
+#include <media/AudioParameter.h>
+#include <utils/Log.h>
+
+#include "ConversionHelperHidl.h"
+
+using ::android::hardware::audio::V2_0::Result;
+
+namespace android {
+
+// static
+status_t ConversionHelperHidl::keysFromHal(const String8& keys, hidl_vec<hidl_string> *hidlKeys) {
+ AudioParameter halKeys(keys);
+ if (halKeys.size() == 0) return BAD_VALUE;
+ hidlKeys->resize(halKeys.size());
+ //FIXME: keyStreamSupportedChannels and keyStreamSupportedSamplingRates come with a
+ // "keyFormat=<value>" pair. We need to transform it into a single key string so that it is
+ // carried over to the legacy HAL via HIDL.
+ String8 value;
+ bool keepFormatValue = halKeys.size() == 2 &&
+ (halKeys.get(String8(AudioParameter::keyStreamSupportedChannels), value) == NO_ERROR ||
+ halKeys.get(String8(AudioParameter::keyStreamSupportedSamplingRates), value) == NO_ERROR);
+
+ for (size_t i = 0; i < halKeys.size(); ++i) {
+ String8 key;
+ status_t status = halKeys.getAt(i, key);
+ if (status != OK) return status;
+ if (keepFormatValue && key == AudioParameter::keyFormat) {
+ AudioParameter formatParam;
+ halKeys.getAt(i, key, value);
+ formatParam.add(key, value);
+ key = formatParam.toString();
+ }
+ (*hidlKeys)[i] = key.string();
+ }
+ return OK;
+}
+
+// static
+status_t ConversionHelperHidl::parametersFromHal(
+ const String8& kvPairs, hidl_vec<ParameterValue> *hidlParams) {
+ AudioParameter params(kvPairs);
+ if (params.size() == 0) return BAD_VALUE;
+ hidlParams->resize(params.size());
+ for (size_t i = 0; i < params.size(); ++i) {
+ String8 key, value;
+ status_t status = params.getAt(i, key, value);
+ if (status != OK) return status;
+ (*hidlParams)[i].key = key.string();
+ (*hidlParams)[i].value = value.string();
+ }
+ return OK;
+}
+
+// static
+void ConversionHelperHidl::parametersToHal(
+ const hidl_vec<ParameterValue>& parameters, String8 *values) {
+ AudioParameter params;
+ for (size_t i = 0; i < parameters.size(); ++i) {
+ params.add(String8(parameters[i].key.c_str()), String8(parameters[i].value.c_str()));
+ }
+ values->setTo(params.toString());
+}
+
+ConversionHelperHidl::ConversionHelperHidl(const char* className)
+ : mClassName(className) {
+}
+
+// static
+status_t ConversionHelperHidl::analyzeResult(const Result& result) {
+ switch (result) {
+ case Result::OK: return OK;
+ case Result::INVALID_ARGUMENTS: return BAD_VALUE;
+ case Result::INVALID_STATE: return NOT_ENOUGH_DATA;
+ case Result::NOT_INITIALIZED: return NO_INIT;
+ case Result::NOT_SUPPORTED: return INVALID_OPERATION;
+ default: return NO_INIT;
+ }
+}
+
+void ConversionHelperHidl::emitError(const char* funcName, const char* description) {
+ ALOGE("%s %p %s: %s (from rpc)", mClassName, this, funcName, description);
+}
+
+} // namespace android
diff --git a/media/libaudiohal/ConversionHelperHidl.h b/media/libaudiohal/ConversionHelperHidl.h
new file mode 100644
index 0000000..c356f37
--- /dev/null
+++ b/media/libaudiohal/ConversionHelperHidl.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_H
+#define ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_H
+
+#include <android/hardware/audio/2.0/types.h>
+#include <hidl/HidlSupport.h>
+#include <utils/String8.h>
+
+using ::android::hardware::audio::V2_0::ParameterValue;
+using ::android::hardware::Return;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+
+namespace android {
+
+class ConversionHelperHidl {
+ protected:
+ static status_t keysFromHal(const String8& keys, hidl_vec<hidl_string> *hidlKeys);
+ static status_t parametersFromHal(const String8& kvPairs, hidl_vec<ParameterValue> *hidlParams);
+ static void parametersToHal(const hidl_vec<ParameterValue>& parameters, String8 *values);
+
+ ConversionHelperHidl(const char* className);
+
+ template<typename R, typename T>
+ status_t processReturn(const char* funcName, const Return<R>& ret, T *retval) {
+ if (ret.isOk()) {
+ // This way it also works for enum class to unscoped enum conversion.
+ *retval = static_cast<T>(static_cast<R>(ret));
+ return OK;
+ }
+ return processReturn(funcName, ret);
+ }
+
+ template<typename T>
+ status_t processReturn(const char* funcName, const Return<T>& ret) {
+ if (!ret.isOk()) {
+ emitError(funcName, ret.description().c_str());
+ }
+ return ret.isOk() ? OK : FAILED_TRANSACTION;
+ }
+
+ status_t processReturn(const char* funcName, const Return<hardware::audio::V2_0::Result>& ret) {
+ if (!ret.isOk()) {
+ emitError(funcName, ret.description().c_str());
+ }
+ return ret.isOk() ? analyzeResult(ret) : FAILED_TRANSACTION;
+ }
+
+ template<typename T>
+ status_t processReturn(
+ const char* funcName, const Return<T>& ret, hardware::audio::V2_0::Result retval) {
+ if (!ret.isOk()) {
+ emitError(funcName, ret.description().c_str());
+ }
+ return ret.isOk() ? analyzeResult(retval) : FAILED_TRANSACTION;
+ }
+
+ private:
+ const char* mClassName;
+
+ static status_t analyzeResult(const hardware::audio::V2_0::Result& result);
+
+ void emitError(const char* funcName, const char* description);
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_H
diff --git a/media/libaudiohal/DeviceHalHidl.cpp b/media/libaudiohal/DeviceHalHidl.cpp
new file mode 100644
index 0000000..71fbd98
--- /dev/null
+++ b/media/libaudiohal/DeviceHalHidl.cpp
@@ -0,0 +1,360 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+
+#define LOG_TAG "DeviceHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <android/hardware/audio/2.0/IPrimaryDevice.h>
+#include <cutils/native_handle.h>
+#include <hwbinder/IPCThreadState.h>
+#include <utils/Log.h>
+
+#include "DeviceHalHidl.h"
+#include "HidlUtils.h"
+#include "StreamHalHidl.h"
+
+using ::android::hardware::audio::common::V2_0::AudioConfig;
+using ::android::hardware::audio::common::V2_0::AudioDevice;
+using ::android::hardware::audio::common::V2_0::AudioInputFlag;
+using ::android::hardware::audio::common::V2_0::AudioOutputFlag;
+using ::android::hardware::audio::common::V2_0::AudioPatchHandle;
+using ::android::hardware::audio::common::V2_0::AudioPort;
+using ::android::hardware::audio::common::V2_0::AudioPortConfig;
+using ::android::hardware::audio::common::V2_0::AudioMode;
+using ::android::hardware::audio::common::V2_0::AudioSource;
+using ::android::hardware::audio::V2_0::DeviceAddress;
+using ::android::hardware::audio::V2_0::IPrimaryDevice;
+using ::android::hardware::audio::V2_0::ParameterValue;
+using ::android::hardware::audio::V2_0::Result;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+
+namespace android {
+
+namespace {
+
+status_t deviceAddressFromHal(
+        audio_devices_t device, const char* halAddress, DeviceAddress* address) {
+    address->device = AudioDevice(device);
+    // A null or empty address is valid for devices that do not carry an address.
+    if (halAddress == nullptr || strnlen(halAddress, AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0) {
+        return OK;
+    }
+    const bool isInput = (device & AUDIO_DEVICE_BIT_IN) != 0;
+    if (isInput) device &= ~AUDIO_DEVICE_BIT_IN;
+    if ((!isInput && (device & AUDIO_DEVICE_OUT_ALL_A2DP) != 0)
+            || (isInput && (device & AUDIO_DEVICE_IN_BLUETOOTH_A2DP) != 0)) {
+        int status = sscanf(halAddress,
+                "%hhX:%hhX:%hhX:%hhX:%hhX:%hhX",
+                &address->address.mac[0], &address->address.mac[1], &address->address.mac[2],
+                &address->address.mac[3], &address->address.mac[4], &address->address.mac[5]);
+        return status == 6 ? OK : BAD_VALUE;
+    } else if ((!isInput && (device & AUDIO_DEVICE_OUT_IP) != 0)
+            || (isInput && (device & AUDIO_DEVICE_IN_IP) != 0)) {
+        int status = sscanf(halAddress,
+                "%hhu.%hhu.%hhu.%hhu",
+                &address->address.ipv4[0], &address->address.ipv4[1],
+                &address->address.ipv4[2], &address->address.ipv4[3]);
+        return status == 4 ? OK : BAD_VALUE;
+    } else if ((!isInput && (device & AUDIO_DEVICE_OUT_ALL_USB)) != 0
+            || (isInput && (device & AUDIO_DEVICE_IN_ALL_USB)) != 0) {
+        int status = sscanf(halAddress,
+                "card=%d;device=%d",
+                &address->address.alsa.card, &address->address.alsa.device);
+        return status == 2 ? OK : BAD_VALUE;
+    } else if ((!isInput && (device & AUDIO_DEVICE_OUT_BUS) != 0)
+            || (isInput && (device & AUDIO_DEVICE_IN_BUS) != 0)) {
+        if (halAddress != NULL) {
+            address->busAddress = halAddress;
+            return OK;
+        }
+        return BAD_VALUE;
+    } else if ((!isInput && (device & AUDIO_DEVICE_OUT_REMOTE_SUBMIX)) != 0
+            || (isInput && (device & AUDIO_DEVICE_IN_REMOTE_SUBMIX) != 0)) {
+        if (halAddress != NULL) {
+            address->rSubmixAddress = halAddress;
+            return OK;
+        }
+        return BAD_VALUE;
+    }
+    return OK;
+}
+
+} // namespace
+
+DeviceHalHidl::DeviceHalHidl(const sp<IDevice>& device)
+ : ConversionHelperHidl("Device"), mDevice(device) {
+}
+
+DeviceHalHidl::~DeviceHalHidl() {
+ if (mDevice != 0) {
+ mDevice.clear();
+ hardware::IPCThreadState::self()->flushCommands();
+ }
+}
+
+status_t DeviceHalHidl::getSupportedDevices(uint32_t*) {
+ // Obsolete.
+ return INVALID_OPERATION;
+}
+
+status_t DeviceHalHidl::initCheck() {
+ if (mDevice == 0) return NO_INIT;
+ return processReturn("initCheck", mDevice->initCheck());
+}
+
+status_t DeviceHalHidl::setVoiceVolume(float volume) {
+ if (mDevice == 0) return NO_INIT;
+ sp<IPrimaryDevice> primaryDev = IPrimaryDevice::castFrom(mDevice);
+ if (primaryDev == 0) return INVALID_OPERATION;
+ return processReturn("setVoiceVolume", primaryDev->setVoiceVolume(volume));
+}
+
+status_t DeviceHalHidl::setMasterVolume(float volume) {
+ if (mDevice == 0) return NO_INIT;
+ sp<IPrimaryDevice> primaryDev = IPrimaryDevice::castFrom(mDevice);
+ if (primaryDev == 0) return INVALID_OPERATION;
+ return processReturn("setMasterVolume", primaryDev->setMasterVolume(volume));
+}
+
+status_t DeviceHalHidl::getMasterVolume(float *volume) {
+ if (mDevice == 0) return NO_INIT;
+ sp<IPrimaryDevice> primaryDev = IPrimaryDevice::castFrom(mDevice);
+ if (primaryDev == 0) return INVALID_OPERATION;
+ Result retval;
+ Return<void> ret = primaryDev->getMasterVolume(
+ [&](Result r, float v) {
+ retval = r;
+ if (retval == Result::OK) {
+ *volume = v;
+ }
+ });
+ return processReturn("getMasterVolume", ret, retval);
+}
+
+status_t DeviceHalHidl::setMode(audio_mode_t mode) {
+ if (mDevice == 0) return NO_INIT;
+ sp<IPrimaryDevice> primaryDev = IPrimaryDevice::castFrom(mDevice);
+ if (primaryDev == 0) return INVALID_OPERATION;
+ return processReturn("setMode", primaryDev->setMode(AudioMode(mode)));
+}
+
+status_t DeviceHalHidl::setMicMute(bool state) {
+ if (mDevice == 0) return NO_INIT;
+ return processReturn("setMicMute", mDevice->setMicMute(state));
+}
+
+status_t DeviceHalHidl::getMicMute(bool *state) {
+ if (mDevice == 0) return NO_INIT;
+ Result retval;
+ Return<void> ret = mDevice->getMicMute(
+ [&](Result r, bool mute) {
+ retval = r;
+ if (retval == Result::OK) {
+ *state = mute;
+ }
+ });
+ return processReturn("getMicMute", ret, retval);
+}
+
+status_t DeviceHalHidl::setMasterMute(bool state) {
+ if (mDevice == 0) return NO_INIT;
+ return processReturn("setMasterMute", mDevice->setMasterMute(state));
+}
+
+status_t DeviceHalHidl::getMasterMute(bool *state) {
+ if (mDevice == 0) return NO_INIT;
+ Result retval;
+ Return<void> ret = mDevice->getMasterMute(
+ [&](Result r, bool mute) {
+ retval = r;
+ if (retval == Result::OK) {
+ *state = mute;
+ }
+ });
+ return processReturn("getMasterMute", ret, retval);
+}
+
+status_t DeviceHalHidl::setParameters(const String8& kvPairs) {
+ if (mDevice == 0) return NO_INIT;
+ hidl_vec<ParameterValue> hidlParams;
+ status_t status = parametersFromHal(kvPairs, &hidlParams);
+ if (status != OK) return status;
+ return processReturn("setParameters", mDevice->setParameters(hidlParams));
+}
+
+status_t DeviceHalHidl::getParameters(const String8& keys, String8 *values) {
+ values->clear();
+ if (mDevice == 0) return NO_INIT;
+ hidl_vec<hidl_string> hidlKeys;
+ status_t status = keysFromHal(keys, &hidlKeys);
+ if (status != OK) return status;
+ Result retval;
+ Return<void> ret = mDevice->getParameters(
+ hidlKeys,
+ [&](Result r, const hidl_vec<ParameterValue>& parameters) {
+ retval = r;
+ if (retval == Result::OK) {
+ parametersToHal(parameters, values);
+ }
+ });
+ return processReturn("getParameters", ret, retval);
+}
+
+status_t DeviceHalHidl::getInputBufferSize(
+ const struct audio_config *config, size_t *size) {
+ if (mDevice == 0) return NO_INIT;
+ AudioConfig hidlConfig;
+ HidlUtils::audioConfigFromHal(*config, &hidlConfig);
+ Result retval;
+ Return<void> ret = mDevice->getInputBufferSize(
+ hidlConfig,
+ [&](Result r, uint64_t bufferSize) {
+ retval = r;
+ if (retval == Result::OK) {
+ *size = static_cast<size_t>(bufferSize);
+ }
+ });
+ return processReturn("getInputBufferSize", ret, retval);
+}
+
+status_t DeviceHalHidl::openOutputStream(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ audio_output_flags_t flags,
+ struct audio_config *config,
+ const char *address,
+ sp<StreamOutHalInterface> *outStream) {
+ if (mDevice == 0) return NO_INIT;
+ DeviceAddress hidlDevice;
+ status_t status = deviceAddressFromHal(devices, address, &hidlDevice);
+ if (status != OK) return status;
+ AudioConfig hidlConfig;
+ HidlUtils::audioConfigFromHal(*config, &hidlConfig);
+ Result retval = Result::NOT_INITIALIZED;
+ Return<void> ret = mDevice->openOutputStream(
+ handle,
+ hidlDevice,
+ hidlConfig,
+ AudioOutputFlag(flags),
+ [&](Result r, const sp<IStreamOut>& result, const AudioConfig& suggestedConfig) {
+ retval = r;
+ if (retval == Result::OK) {
+ *outStream = new StreamOutHalHidl(result);
+ }
+ HidlUtils::audioConfigToHal(suggestedConfig, config);
+ });
+ return processReturn("openOutputStream", ret, retval);
+}
+
+status_t DeviceHalHidl::openInputStream(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ struct audio_config *config,
+ audio_input_flags_t flags,
+ const char *address,
+ audio_source_t source,
+ sp<StreamInHalInterface> *inStream) {
+ if (mDevice == 0) return NO_INIT;
+ DeviceAddress hidlDevice;
+ status_t status = deviceAddressFromHal(devices, address, &hidlDevice);
+ if (status != OK) return status;
+ AudioConfig hidlConfig;
+ HidlUtils::audioConfigFromHal(*config, &hidlConfig);
+ Result retval = Result::NOT_INITIALIZED;
+ Return<void> ret = mDevice->openInputStream(
+ handle,
+ hidlDevice,
+ hidlConfig,
+ AudioInputFlag(flags),
+ AudioSource(source),
+ [&](Result r, const sp<IStreamIn>& result, const AudioConfig& suggestedConfig) {
+ retval = r;
+ if (retval == Result::OK) {
+ *inStream = new StreamInHalHidl(result);
+ }
+ HidlUtils::audioConfigToHal(suggestedConfig, config);
+ });
+ return processReturn("openInputStream", ret, retval);
+}
+
+status_t DeviceHalHidl::supportsAudioPatches(bool *supportsPatches) {
+ if (mDevice == 0) return NO_INIT;
+ return processReturn("supportsAudioPatches", mDevice->supportsAudioPatches(), supportsPatches);
+}
+
+status_t DeviceHalHidl::createAudioPatch(
+ unsigned int num_sources,
+ const struct audio_port_config *sources,
+ unsigned int num_sinks,
+ const struct audio_port_config *sinks,
+ audio_patch_handle_t *patch) {
+ if (mDevice == 0) return NO_INIT;
+ hidl_vec<AudioPortConfig> hidlSources, hidlSinks;
+ HidlUtils::audioPortConfigsFromHal(num_sources, sources, &hidlSources);
+ HidlUtils::audioPortConfigsFromHal(num_sinks, sinks, &hidlSinks);
+ Result retval;
+ Return<void> ret = mDevice->createAudioPatch(
+ hidlSources, hidlSinks,
+ [&](Result r, AudioPatchHandle hidlPatch) {
+ retval = r;
+ if (retval == Result::OK) {
+ *patch = static_cast<audio_patch_handle_t>(hidlPatch);
+ }
+ });
+ return processReturn("createAudioPatch", ret, retval);
+}
+
+status_t DeviceHalHidl::releaseAudioPatch(audio_patch_handle_t patch) {
+ if (mDevice == 0) return NO_INIT;
+ return processReturn("releaseAudioPatch", mDevice->releaseAudioPatch(patch));
+}
+
+status_t DeviceHalHidl::getAudioPort(struct audio_port *port) {
+ if (mDevice == 0) return NO_INIT;
+ AudioPort hidlPort;
+ HidlUtils::audioPortFromHal(*port, &hidlPort);
+ Result retval;
+ Return<void> ret = mDevice->getAudioPort(
+ hidlPort,
+ [&](Result r, const AudioPort& p) {
+ retval = r;
+ if (retval == Result::OK) {
+ HidlUtils::audioPortToHal(p, port);
+ }
+ });
+ return processReturn("getAudioPort", ret, retval);
+}
+
+status_t DeviceHalHidl::setAudioPortConfig(const struct audio_port_config *config) {
+ if (mDevice == 0) return NO_INIT;
+ AudioPortConfig hidlConfig;
+ HidlUtils::audioPortConfigFromHal(*config, &hidlConfig);
+ return processReturn("setAudioPortConfig", mDevice->setAudioPortConfig(hidlConfig));
+}
+
+status_t DeviceHalHidl::dump(int fd) {
+ if (mDevice == 0) return NO_INIT;
+ native_handle_t* hidlHandle = native_handle_create(1, 0);
+ hidlHandle->data[0] = fd;
+ Return<void> ret = mDevice->debugDump(hidlHandle);
+ native_handle_delete(hidlHandle);
+ return processReturn("dump", ret);
+}
+
+} // namespace android
diff --git a/media/libaudiohal/DeviceHalHidl.h b/media/libaudiohal/DeviceHalHidl.h
new file mode 100644
index 0000000..9da02a4
--- /dev/null
+++ b/media/libaudiohal/DeviceHalHidl.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_DEVICE_HAL_HIDL_H
+#define ANDROID_HARDWARE_DEVICE_HAL_HIDL_H
+
+#include <android/hardware/audio/2.0/IDevice.h>
+#include <media/audiohal/DeviceHalInterface.h>
+
+#include "ConversionHelperHidl.h"
+
+using ::android::hardware::audio::V2_0::IDevice;
+using ::android::hardware::Return;
+
+namespace android {
+
+class DeviceHalHidl : public DeviceHalInterface, public ConversionHelperHidl
+{
+ public:
+ // Sets the value of 'devices' to a bitmask of 1 or more values of audio_devices_t.
+ virtual status_t getSupportedDevices(uint32_t *devices);
+
+ // Check to see if the audio hardware interface has been initialized.
+ virtual status_t initCheck();
+
+ // Set the audio volume of a voice call. Range is between 0.0 and 1.0.
+ virtual status_t setVoiceVolume(float volume);
+
+ // Set the audio volume for all audio activities other than voice call.
+ virtual status_t setMasterVolume(float volume);
+
+ // Get the current master volume value for the HAL.
+ virtual status_t getMasterVolume(float *volume);
+
+ // Called when the audio mode changes.
+ virtual status_t setMode(audio_mode_t mode);
+
+ // Muting control.
+ virtual status_t setMicMute(bool state);
+ virtual status_t getMicMute(bool *state);
+ virtual status_t setMasterMute(bool state);
+ virtual status_t getMasterMute(bool *state);
+
+ // Set global audio parameters.
+ virtual status_t setParameters(const String8& kvPairs);
+
+ // Get global audio parameters.
+ virtual status_t getParameters(const String8& keys, String8 *values);
+
+ // Returns audio input buffer size according to parameters passed.
+ virtual status_t getInputBufferSize(const struct audio_config *config,
+ size_t *size);
+
+ // Creates and opens the audio hardware output stream. The stream is closed
+ // by releasing all references to the returned object.
+ virtual status_t openOutputStream(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ audio_output_flags_t flags,
+ struct audio_config *config,
+ const char *address,
+ sp<StreamOutHalInterface> *outStream);
+
+ // Creates and opens the audio hardware input stream. The stream is closed
+ // by releasing all references to the returned object.
+ virtual status_t openInputStream(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ struct audio_config *config,
+ audio_input_flags_t flags,
+ const char *address,
+ audio_source_t source,
+ sp<StreamInHalInterface> *inStream);
+
+ // Returns whether createAudioPatch and releaseAudioPatch operations are supported.
+ virtual status_t supportsAudioPatches(bool *supportsPatches);
+
+ // Creates an audio patch between several source and sink ports.
+ virtual status_t createAudioPatch(
+ unsigned int num_sources,
+ const struct audio_port_config *sources,
+ unsigned int num_sinks,
+ const struct audio_port_config *sinks,
+ audio_patch_handle_t *patch);
+
+ // Releases an audio patch.
+ virtual status_t releaseAudioPatch(audio_patch_handle_t patch);
+
+ // Fills the list of supported attributes for a given audio port.
+ virtual status_t getAudioPort(struct audio_port *port);
+
+ // Set audio port configuration.
+ virtual status_t setAudioPortConfig(const struct audio_port_config *config);
+
+ virtual status_t dump(int fd);
+
+ private:
+ friend class DevicesFactoryHalHidl;
+ sp<IDevice> mDevice;
+
+ // Can not be constructed directly by clients.
+ explicit DeviceHalHidl(const sp<IDevice>& device);
+
+ // The destructor automatically closes the device.
+ virtual ~DeviceHalHidl();
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_DEVICE_HAL_HIDL_H
diff --git a/media/libaudiohal/DeviceHalLocal.cpp b/media/libaudiohal/DeviceHalLocal.cpp
new file mode 100644
index 0000000..fc098f5
--- /dev/null
+++ b/media/libaudiohal/DeviceHalLocal.cpp
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "DeviceHalLocal"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+
+#include "DeviceHalLocal.h"
+#include "StreamHalLocal.h"
+
+namespace android {
+
+DeviceHalLocal::DeviceHalLocal(audio_hw_device_t *dev)
+ : mDev(dev) {
+}
+
+DeviceHalLocal::~DeviceHalLocal() {
+ int status = audio_hw_device_close(mDev);
+ ALOGW_IF(status, "Error closing audio hw device %p: %s", mDev, strerror(-status));
+ mDev = 0;
+}
+
+status_t DeviceHalLocal::getSupportedDevices(uint32_t *devices) {
+ if (mDev->get_supported_devices == NULL) return INVALID_OPERATION;
+ *devices = mDev->get_supported_devices(mDev);
+ return OK;
+}
+
+status_t DeviceHalLocal::initCheck() {
+ return mDev->init_check(mDev);
+}
+
+status_t DeviceHalLocal::setVoiceVolume(float volume) {
+ return mDev->set_voice_volume(mDev, volume);
+}
+
+status_t DeviceHalLocal::setMasterVolume(float volume) {
+ if (mDev->set_master_volume == NULL) return INVALID_OPERATION;
+ return mDev->set_master_volume(mDev, volume);
+}
+
+status_t DeviceHalLocal::getMasterVolume(float *volume) {
+ if (mDev->get_master_volume == NULL) return INVALID_OPERATION;
+ return mDev->get_master_volume(mDev, volume);
+}
+
+status_t DeviceHalLocal::setMode(audio_mode_t mode) {
+ return mDev->set_mode(mDev, mode);
+}
+
+status_t DeviceHalLocal::setMicMute(bool state) {
+ return mDev->set_mic_mute(mDev, state);
+}
+
+status_t DeviceHalLocal::getMicMute(bool *state) {
+ return mDev->get_mic_mute(mDev, state);
+}
+
+status_t DeviceHalLocal::setMasterMute(bool state) {
+ if (mDev->set_master_mute == NULL) return INVALID_OPERATION;
+ return mDev->set_master_mute(mDev, state);
+}
+
+status_t DeviceHalLocal::getMasterMute(bool *state) {
+ if (mDev->get_master_mute == NULL) return INVALID_OPERATION;
+ return mDev->get_master_mute(mDev, state);
+}
+
+status_t DeviceHalLocal::setParameters(const String8& kvPairs) {
+ return mDev->set_parameters(mDev, kvPairs.string());
+}
+
+status_t DeviceHalLocal::getParameters(const String8& keys, String8 *values) {
+ char *halValues = mDev->get_parameters(mDev, keys.string());
+ if (halValues != NULL) {
+ values->setTo(halValues);
+ free(halValues);
+ } else {
+ values->clear();
+ }
+ return OK;
+}
+
+status_t DeviceHalLocal::getInputBufferSize(
+ const struct audio_config *config, size_t *size) {
+ *size = mDev->get_input_buffer_size(mDev, config);
+ return OK;
+}
+
+status_t DeviceHalLocal::openOutputStream(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ audio_output_flags_t flags,
+ struct audio_config *config,
+ const char *address,
+ sp<StreamOutHalInterface> *outStream) {
+ audio_stream_out_t *halStream;
+ ALOGV("open_output_stream handle: %d devices: %x flags: %#x "
+ "srate: %d format %#x channels %x address %s",
+ handle, devices, flags,
+ config->sample_rate, config->format, config->channel_mask,
+ address);
+ int openResult = mDev->open_output_stream(
+ mDev, handle, devices, flags, config, &halStream, address);
+ if (openResult == OK) {
+ *outStream = new StreamOutHalLocal(halStream, this);
+ }
+ ALOGV("open_output_stream status %d stream %p", openResult, halStream);
+ return openResult;
+}
+
+status_t DeviceHalLocal::openInputStream(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ struct audio_config *config,
+ audio_input_flags_t flags,
+ const char *address,
+ audio_source_t source,
+ sp<StreamInHalInterface> *inStream) {
+ audio_stream_in_t *halStream;
+ ALOGV("open_input_stream handle: %d devices: %x flags: %#x "
+ "srate: %d format %#x channels %x address %s source %d",
+ handle, devices, flags,
+ config->sample_rate, config->format, config->channel_mask,
+ address, source);
+ int openResult = mDev->open_input_stream(
+ mDev, handle, devices, config, &halStream, flags, address, source);
+ if (openResult == OK) {
+ *inStream = new StreamInHalLocal(halStream, this);
+ }
+ ALOGV("open_input_stream status %d stream %p", openResult, halStream);
+ return openResult;
+}
+
+status_t DeviceHalLocal::supportsAudioPatches(bool *supportsPatches) {
+ *supportsPatches = version() >= AUDIO_DEVICE_API_VERSION_3_0;
+ return OK;
+}
+
+status_t DeviceHalLocal::createAudioPatch(
+ unsigned int num_sources,
+ const struct audio_port_config *sources,
+ unsigned int num_sinks,
+ const struct audio_port_config *sinks,
+ audio_patch_handle_t *patch) {
+ if (version() >= AUDIO_DEVICE_API_VERSION_3_0) {
+ return mDev->create_audio_patch(
+ mDev, num_sources, sources, num_sinks, sinks, patch);
+ } else {
+ return INVALID_OPERATION;
+ }
+}
+
+status_t DeviceHalLocal::releaseAudioPatch(audio_patch_handle_t patch) {
+ if (version() >= AUDIO_DEVICE_API_VERSION_3_0) {
+ return mDev->release_audio_patch(mDev, patch);
+ } else {
+ return INVALID_OPERATION;
+ }
+}
+
+status_t DeviceHalLocal::getAudioPort(struct audio_port *port) {
+ return mDev->get_audio_port(mDev, port);
+}
+
+status_t DeviceHalLocal::setAudioPortConfig(const struct audio_port_config *config) {
+ if (version() >= AUDIO_DEVICE_API_VERSION_3_0)
+ return mDev->set_audio_port_config(mDev, config);
+ else
+ return INVALID_OPERATION;
+}
+
+status_t DeviceHalLocal::dump(int fd) {
+ return mDev->dump(mDev, fd);
+}
+
+void DeviceHalLocal::closeOutputStream(struct audio_stream_out *stream_out) {
+ mDev->close_output_stream(mDev, stream_out);
+}
+
+void DeviceHalLocal::closeInputStream(struct audio_stream_in *stream_in) {
+ mDev->close_input_stream(mDev, stream_in);
+}
+
+} // namespace android
diff --git a/media/libaudiohal/DeviceHalLocal.h b/media/libaudiohal/DeviceHalLocal.h
new file mode 100644
index 0000000..865f296
--- /dev/null
+++ b/media/libaudiohal/DeviceHalLocal.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_DEVICE_HAL_LOCAL_H
+#define ANDROID_HARDWARE_DEVICE_HAL_LOCAL_H
+
+#include <hardware/audio.h>
+#include <media/audiohal/DeviceHalInterface.h>
+
+namespace android {
+
+class DeviceHalLocal : public DeviceHalInterface
+{
+ public:
+ // Sets the value of 'devices' to a bitmask of 1 or more values of audio_devices_t.
+ virtual status_t getSupportedDevices(uint32_t *devices);
+
+ // Check to see if the audio hardware interface has been initialized.
+ virtual status_t initCheck();
+
+ // Set the audio volume of a voice call. Range is between 0.0 and 1.0.
+ virtual status_t setVoiceVolume(float volume);
+
+ // Set the audio volume for all audio activities other than voice call.
+ virtual status_t setMasterVolume(float volume);
+
+ // Get the current master volume value for the HAL.
+ virtual status_t getMasterVolume(float *volume);
+
+ // Called when the audio mode changes.
+ virtual status_t setMode(audio_mode_t mode);
+
+ // Muting control.
+ virtual status_t setMicMute(bool state);
+ virtual status_t getMicMute(bool *state);
+ virtual status_t setMasterMute(bool state);
+ virtual status_t getMasterMute(bool *state);
+
+ // Set global audio parameters.
+ virtual status_t setParameters(const String8& kvPairs);
+
+ // Get global audio parameters.
+ virtual status_t getParameters(const String8& keys, String8 *values);
+
+ // Returns audio input buffer size according to parameters passed.
+ virtual status_t getInputBufferSize(const struct audio_config *config,
+ size_t *size);
+
+ // Creates and opens the audio hardware output stream. The stream is closed
+ // by releasing all references to the returned object.
+ virtual status_t openOutputStream(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ audio_output_flags_t flags,
+ struct audio_config *config,
+ const char *address,
+ sp<StreamOutHalInterface> *outStream);
+
+ // Creates and opens the audio hardware input stream. The stream is closed
+ // by releasing all references to the returned object.
+ virtual status_t openInputStream(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ struct audio_config *config,
+ audio_input_flags_t flags,
+ const char *address,
+ audio_source_t source,
+ sp<StreamInHalInterface> *inStream);
+
+ // Returns whether createAudioPatch and releaseAudioPatch operations are supported.
+ virtual status_t supportsAudioPatches(bool *supportsPatches);
+
+ // Creates an audio patch between several source and sink ports.
+ virtual status_t createAudioPatch(
+ unsigned int num_sources,
+ const struct audio_port_config *sources,
+ unsigned int num_sinks,
+ const struct audio_port_config *sinks,
+ audio_patch_handle_t *patch);
+
+ // Releases an audio patch.
+ virtual status_t releaseAudioPatch(audio_patch_handle_t patch);
+
+ // Fills the list of supported attributes for a given audio port.
+ virtual status_t getAudioPort(struct audio_port *port);
+
+ // Set audio port configuration.
+ virtual status_t setAudioPortConfig(const struct audio_port_config *config);
+
+ virtual status_t dump(int fd);
+
+ void closeOutputStream(struct audio_stream_out *stream_out);
+ void closeInputStream(struct audio_stream_in *stream_in);
+
+ private:
+ audio_hw_device_t *mDev;
+
+ friend class DevicesFactoryHalLocal;
+
+ // Can not be constructed directly by clients.
+ explicit DeviceHalLocal(audio_hw_device_t *dev);
+
+ // The destructor automatically closes the device.
+ virtual ~DeviceHalLocal();
+
+ uint32_t version() const { return mDev->common.version; }
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_DEVICE_HAL_LOCAL_H
diff --git a/media/libaudiohal/DevicesFactoryHalHidl.cpp b/media/libaudiohal/DevicesFactoryHalHidl.cpp
new file mode 100644
index 0000000..31da263
--- /dev/null
+++ b/media/libaudiohal/DevicesFactoryHalHidl.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string.h>
+
+#define LOG_TAG "DevicesFactoryHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <android/hardware/audio/2.0/IDevice.h>
+#include <media/audiohal/hidl/HalDeathHandler.h>
+#include <utils/Log.h>
+
+#include "ConversionHelperHidl.h"
+#include "DeviceHalHidl.h"
+#include "DevicesFactoryHalHidl.h"
+
+using ::android::hardware::audio::V2_0::IDevice;
+using ::android::hardware::audio::V2_0::Result;
+using ::android::hardware::Return;
+
+namespace android {
+
+DevicesFactoryHalHidl::DevicesFactoryHalHidl() {
+ mDevicesFactory = IDevicesFactory::getService();
+ if (mDevicesFactory != 0) {
+ // It is assumed that DevicesFactory is owned by AudioFlinger
+ // and thus have the same lifespan.
+ mDevicesFactory->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
+ } else {
+ ALOGE("Failed to obtain IDevicesFactory service, terminating process.");
+ exit(1);
+ }
+}
+
+DevicesFactoryHalHidl::~DevicesFactoryHalHidl() {
+}
+
+// static
+status_t DevicesFactoryHalHidl::nameFromHal(const char *name, IDevicesFactory::Device *device) {
+ if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_PRIMARY) == 0) {
+ *device = IDevicesFactory::Device::PRIMARY;
+ return OK;
+ } else if(strcmp(name, AUDIO_HARDWARE_MODULE_ID_A2DP) == 0) {
+ *device = IDevicesFactory::Device::A2DP;
+ return OK;
+ } else if(strcmp(name, AUDIO_HARDWARE_MODULE_ID_USB) == 0) {
+ *device = IDevicesFactory::Device::USB;
+ return OK;
+ } else if(strcmp(name, AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX) == 0) {
+ *device = IDevicesFactory::Device::R_SUBMIX;
+ return OK;
+ } else if(strcmp(name, AUDIO_HARDWARE_MODULE_ID_STUB) == 0) {
+ *device = IDevicesFactory::Device::STUB;
+ return OK;
+ }
+ ALOGE("Invalid device name %s", name);
+ return BAD_VALUE;
+}
+
+status_t DevicesFactoryHalHidl::openDevice(const char *name, sp<DeviceHalInterface> *device) {
+ if (mDevicesFactory == 0) return NO_INIT;
+ IDevicesFactory::Device hidlDevice;
+ status_t status = nameFromHal(name, &hidlDevice);
+ if (status != OK) return status;
+ Result retval = Result::NOT_INITIALIZED;
+ Return<void> ret = mDevicesFactory->openDevice(
+ hidlDevice,
+ [&](Result r, const sp<IDevice>& result) {
+ retval = r;
+ if (retval == Result::OK) {
+ *device = new DeviceHalHidl(result);
+ }
+ });
+ if (ret.isOk()) {
+ if (retval == Result::OK) return OK;
+ else if (retval == Result::INVALID_ARGUMENTS) return BAD_VALUE;
+ else return NO_INIT;
+ }
+ return FAILED_TRANSACTION;
+}
+
+} // namespace android
diff --git a/media/libaudiohal/DevicesFactoryHalHidl.h b/media/libaudiohal/DevicesFactoryHalHidl.h
new file mode 100644
index 0000000..e2f1ad1
--- /dev/null
+++ b/media/libaudiohal/DevicesFactoryHalHidl.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_H
+#define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_H
+
+#include <android/hardware/audio/2.0/IDevicesFactory.h>
+#include <media/audiohal/DevicesFactoryHalInterface.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+#include "DeviceHalHidl.h"
+
+using ::android::hardware::audio::V2_0::IDevicesFactory;
+
+namespace android {
+
+class DevicesFactoryHalHidl : public DevicesFactoryHalInterface
+{
+ public:
+ // Opens a device with the specified name. To close the device, it is
+ // necessary to release references to the returned object.
+ virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device);
+
+ private:
+ friend class DevicesFactoryHalHybrid;
+
+ sp<IDevicesFactory> mDevicesFactory;
+
+ static status_t nameFromHal(const char *name, IDevicesFactory::Device *device);
+
+ // Can not be constructed directly by clients.
+ DevicesFactoryHalHidl();
+
+ virtual ~DevicesFactoryHalHidl();
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_H
diff --git a/media/libaudiohal/DevicesFactoryHalHybrid.cpp b/media/libaudiohal/DevicesFactoryHalHybrid.cpp
new file mode 100644
index 0000000..454b03b
--- /dev/null
+++ b/media/libaudiohal/DevicesFactoryHalHybrid.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "DevicesFactoryHalHybrid"
+//#define LOG_NDEBUG 0
+
+#include "DevicesFactoryHalHybrid.h"
+#include "DevicesFactoryHalLocal.h"
+#ifndef USE_LEGACY_LOCAL_AUDIO_HAL
+#include "DevicesFactoryHalHidl.h"
+#endif
+
+namespace android {
+
+// static
+sp<DevicesFactoryHalInterface> DevicesFactoryHalInterface::create() {
+ return new DevicesFactoryHalHybrid();
+}
+
+DevicesFactoryHalHybrid::DevicesFactoryHalHybrid()
+ : mLocalFactory(new DevicesFactoryHalLocal()),
+ mHidlFactory(
+#ifdef USE_LEGACY_LOCAL_AUDIO_HAL
+ nullptr
+#else
+ new DevicesFactoryHalHidl()
+#endif
+ ) {
+}
+
+DevicesFactoryHalHybrid::~DevicesFactoryHalHybrid() {
+}
+
+status_t DevicesFactoryHalHybrid::openDevice(const char *name, sp<DeviceHalInterface> *device) {
+ if (mHidlFactory != 0 && strcmp(AUDIO_HARDWARE_MODULE_ID_A2DP, name) != 0) {
+ return mHidlFactory->openDevice(name, device);
+ }
+ return mLocalFactory->openDevice(name, device);
+}
+
+} // namespace android
diff --git a/media/libaudiohal/DevicesFactoryHalHybrid.h b/media/libaudiohal/DevicesFactoryHalHybrid.h
new file mode 100644
index 0000000..abd57d6
--- /dev/null
+++ b/media/libaudiohal/DevicesFactoryHalHybrid.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HYBRID_H
+#define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HYBRID_H
+
+#include <media/audiohal/DevicesFactoryHalInterface.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+class DevicesFactoryHalHybrid : public DevicesFactoryHalInterface
+{
+ public:
+ // Opens a device with the specified name. To close the device, it is
+ // necessary to release references to the returned object.
+ virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device);
+
+ private:
+ friend class DevicesFactoryHalInterface;
+
+ // Can not be constructed directly by clients.
+ DevicesFactoryHalHybrid();
+
+ virtual ~DevicesFactoryHalHybrid();
+
+ sp<DevicesFactoryHalInterface> mLocalFactory;
+ sp<DevicesFactoryHalInterface> mHidlFactory;
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HYBRID_H
diff --git a/media/libaudiohal/DevicesFactoryHalLocal.cpp b/media/libaudiohal/DevicesFactoryHalLocal.cpp
new file mode 100644
index 0000000..13a9acd
--- /dev/null
+++ b/media/libaudiohal/DevicesFactoryHalLocal.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "DevicesFactoryHalLocal"
+//#define LOG_NDEBUG 0
+
+#include <string.h>
+
+#include <hardware/audio.h>
+#include <utils/Log.h>
+
+#include "DeviceHalLocal.h"
+#include "DevicesFactoryHalLocal.h"
+
+namespace android {
+
+static status_t load_audio_interface(const char *if_name, audio_hw_device_t **dev)
+{
+ const hw_module_t *mod;
+ int rc;
+
+ rc = hw_get_module_by_class(AUDIO_HARDWARE_MODULE_ID, if_name, &mod);
+ if (rc) {
+ ALOGE("%s couldn't load audio hw module %s.%s (%s)", __func__,
+ AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
+ goto out;
+ }
+ rc = audio_hw_device_open(mod, dev);
+ if (rc) {
+ ALOGE("%s couldn't open audio hw device in %s.%s (%s)", __func__,
+ AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
+ goto out;
+ }
+ if ((*dev)->common.version < AUDIO_DEVICE_API_VERSION_MIN) {
+ ALOGE("%s wrong audio hw device version %04x", __func__, (*dev)->common.version);
+ rc = BAD_VALUE;
+ audio_hw_device_close(*dev);
+ goto out;
+ }
+ return OK;
+
+out:
+ *dev = NULL;
+ return rc;
+}
+
+status_t DevicesFactoryHalLocal::openDevice(const char *name, sp<DeviceHalInterface> *device) {
+ audio_hw_device_t *dev;
+ status_t rc = load_audio_interface(name, &dev);
+ if (rc == OK) {
+ *device = new DeviceHalLocal(dev);
+ }
+ return rc;
+}
+
+} // namespace android
diff --git a/media/libaudiohal/DevicesFactoryHalLocal.h b/media/libaudiohal/DevicesFactoryHalLocal.h
new file mode 100644
index 0000000..b9d18ab
--- /dev/null
+++ b/media/libaudiohal/DevicesFactoryHalLocal.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_LOCAL_H
+#define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_LOCAL_H
+
+#include <media/audiohal/DevicesFactoryHalInterface.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+#include "DeviceHalLocal.h"
+
+namespace android {
+
+// Factory that creates audio device HAL wrappers backed by the in-process
+// (libhardware) implementation, as opposed to the HIDL-based factory.
+// Instances are created only by DevicesFactoryHalHybrid.
+class DevicesFactoryHalLocal : public DevicesFactoryHalInterface
+{
+  public:
+    // Opens a device with the specified name. To close the device, it is
+    // necessary to release references to the returned object.
+    virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device);
+
+  private:
+    friend class DevicesFactoryHalHybrid;
+
+    // Can not be constructed directly by clients.
+    DevicesFactoryHalLocal() {}
+
+    virtual ~DevicesFactoryHalLocal() {}
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_LOCAL_H
diff --git a/media/libaudiohal/EffectBufferHalHidl.cpp b/media/libaudiohal/EffectBufferHalHidl.cpp
new file mode 100644
index 0000000..8b5201b
--- /dev/null
+++ b/media/libaudiohal/EffectBufferHalHidl.cpp
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <atomic>
+
+#define LOG_TAG "EffectBufferHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <android/hidl/allocator/1.0/IAllocator.h>
+#include <hidlmemory/mapping.h>
+#include <utils/Log.h>
+
+#include "ConversionHelperHidl.h"
+#include "EffectBufferHalHidl.h"
+
+using ::android::hardware::Return;
+using ::android::hidl::allocator::V1_0::IAllocator;
+
+namespace android {
+
+// static
+uint64_t EffectBufferHalHidl::makeUniqueId() {
+ static std::atomic<uint64_t> counter{1};
+ return counter++;
+}
+
+// static
+status_t EffectBufferHalInterface::allocate(
+ size_t size, sp<EffectBufferHalInterface>* buffer) {
+ return mirror(nullptr, size, buffer);
+}
+
+// static
+status_t EffectBufferHalInterface::mirror(
+ void* external, size_t size, sp<EffectBufferHalInterface>* buffer) {
+ sp<EffectBufferHalInterface> tempBuffer = new EffectBufferHalHidl(size);
+ status_t result = static_cast<EffectBufferHalHidl*>(tempBuffer.get())->init();
+ if (result == OK) {
+ tempBuffer->setExternalData(external);
+ *buffer = tempBuffer;
+ }
+ return result;
+}
+
+EffectBufferHalHidl::EffectBufferHalHidl(size_t size)
+ : mBufferSize(size), mFrameCountChanged(false),
+ mExternalData(nullptr), mAudioBuffer{0, {nullptr}} {
+ mHidlBuffer.id = makeUniqueId();
+ mHidlBuffer.frameCount = 0;
+}
+
+EffectBufferHalHidl::~EffectBufferHalHidl() {
+}
+
+// Allocates the shared memory region via the "ashmem" IAllocator service,
+// maps it into this process, and zero-fills it.
+// Returns NO_INIT if the allocator service is unavailable, NO_MEMORY on
+// allocation/mapping failure, FAILED_TRANSACTION on a dead transport.
+status_t EffectBufferHalHidl::init() {
+    sp<IAllocator> ashmem = IAllocator::getService("ashmem");
+    if (ashmem == 0) {
+        ALOGE("Failed to retrieve ashmem allocator service");
+        return NO_INIT;
+    }
+    status_t retval = NO_MEMORY;
+    Return<void> result = ashmem->allocate(
+            mBufferSize,
+            [&](bool success, const hidl_memory& memory) {
+                if (success) {
+                    mHidlBuffer.data = memory;
+                    retval = OK;
+                }
+            });
+    if (result.isOk() && retval == OK) {
+        mMemory = hardware::mapMemory(mHidlBuffer.data);
+        if (mMemory != 0) {
+            // update()/commit() bracket write access to the mapping.
+            mMemory->update();
+            mAudioBuffer.raw = static_cast<void*>(mMemory->getPointer());
+            memset(mAudioBuffer.raw, 0, mMemory->getSize());
+            mMemory->commit();
+        } else {
+            ALOGE("Failed to map allocated ashmem");
+            retval = NO_MEMORY;
+        }
+    } else {
+        ALOGE("Failed to allocate %d bytes from ashmem", (int)mBufferSize);
+    }
+    return result.isOk() ? retval : FAILED_TRANSACTION;
+}
+
+audio_buffer_t* EffectBufferHalHidl::audioBuffer() {
+    return &mAudioBuffer;
+}
+
+void* EffectBufferHalHidl::externalData() const {
+    return mExternalData;
+}
+
+// Records the new frame count on both the local and the HIDL-visible
+// buffer, and raises the change flag so the effect re-sends its buffers.
+void EffectBufferHalHidl::setFrameCount(size_t frameCount) {
+    mHidlBuffer.frameCount = frameCount;
+    mAudioBuffer.frameCount = frameCount;
+    mFrameCountChanged = true;
+}
+
+// One-shot check: reports whether the frame count changed since the last
+// call and clears the flag.
+bool EffectBufferHalHidl::checkFrameCountChange() {
+    bool result = mFrameCountChanged;
+    mFrameCountChanged = false;
+    return result;
+}
+
+void EffectBufferHalHidl::setExternalData(void* external) {
+    mExternalData = external;
+}
+
+// Copies the full external buffer into shared memory.
+void EffectBufferHalHidl::update() {
+    update(mBufferSize);
+}
+
+// Copies the full shared memory contents back into the external buffer.
+void EffectBufferHalHidl::commit() {
+    commit(mBufferSize);
+}
+
+// External -> shared memory, clamped to the allocated size.
+// No-op when there is no external mirror.
+void EffectBufferHalHidl::update(size_t size) {
+    if (mExternalData == nullptr) return;
+    mMemory->update();
+    if (size > mBufferSize) size = mBufferSize;
+    memcpy(mAudioBuffer.raw, mExternalData, size);
+    mMemory->commit();
+}
+
+// Shared memory -> external, clamped to the allocated size.
+void EffectBufferHalHidl::commit(size_t size) {
+    if (mExternalData == nullptr) return;
+    if (size > mBufferSize) size = mBufferSize;
+    memcpy(mExternalData, mAudioBuffer.raw, size);
+}
+
+} // namespace android
diff --git a/media/libaudiohal/EffectBufferHalHidl.h b/media/libaudiohal/EffectBufferHalHidl.h
new file mode 100644
index 0000000..66a81c2
--- /dev/null
+++ b/media/libaudiohal/EffectBufferHalHidl.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_H
+#define ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_H
+
+#include <android/hardware/audio/effect/2.0/types.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <hidl/HidlSupport.h>
+#include <media/audiohal/EffectBufferHalInterface.h>
+#include <system/audio_effect.h>
+
+using android::hardware::audio::effect::V2_0::AudioBuffer;
+using android::hardware::hidl_memory;
+using android::hidl::memory::V1_0::IMemory;
+
+namespace android {
+
+// Effect buffer backed by ashmem shared memory so that the audio data can
+// be accessed by the effects HAL process. Constructed only through
+// EffectBufferHalInterface::allocate()/mirror().
+class EffectBufferHalHidl : public EffectBufferHalInterface
+{
+  public:
+    virtual audio_buffer_t* audioBuffer();
+    virtual void* externalData() const;
+
+    virtual void setExternalData(void* external);
+    virtual void setFrameCount(size_t frameCount);
+    virtual bool checkFrameCountChange();
+
+    // Synchronize data between the external mirror and shared memory.
+    virtual void update();
+    virtual void commit();
+    virtual void update(size_t size);
+    virtual void commit(size_t size);
+
+    // HIDL representation of this buffer, passed to IEffect::setProcessBuffers.
+    const AudioBuffer& hidlBuffer() const { return mHidlBuffer; }
+
+  private:
+    friend class EffectBufferHalInterface;
+
+    // Generates a process-wide unique buffer id.
+    static uint64_t makeUniqueId();
+
+    const size_t mBufferSize;
+    bool mFrameCountChanged;
+    void* mExternalData;          // client-owned mirror, may be null
+    AudioBuffer mHidlBuffer;
+    sp<IMemory> mMemory;          // mapping of the ashmem region
+    audio_buffer_t mAudioBuffer;
+
+    // Can not be constructed directly by clients.
+    explicit EffectBufferHalHidl(size_t size);
+
+    virtual ~EffectBufferHalHidl();
+
+    // Allocates and maps the shared memory; must be called after construction.
+    status_t init();
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_EFFECT_BUFFER_HAL_HIDL_H
diff --git a/media/libaudiohal/EffectBufferHalLocal.cpp b/media/libaudiohal/EffectBufferHalLocal.cpp
new file mode 100644
index 0000000..7951c8e
--- /dev/null
+++ b/media/libaudiohal/EffectBufferHalLocal.cpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "EffectBufferHalLocal"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+
+#include "EffectBufferHalLocal.h"
+
+namespace android {
+
+// Allocates a buffer of 'size' bytes on the local heap.
+// static
+status_t EffectBufferHalInterface::allocate(
+        size_t size, sp<EffectBufferHalInterface>* buffer) {
+    *buffer = new EffectBufferHalLocal(size);
+    return OK;
+}
+
+// Wraps client-owned memory; no copy is made and no ownership is taken.
+// static
+status_t EffectBufferHalInterface::mirror(
+        void* external, size_t size, sp<EffectBufferHalInterface>* buffer) {
+    *buffer = new EffectBufferHalLocal(external, size);
+    return OK;
+}
+
+// Owning constructor: the buffer memory lives as long as this object.
+EffectBufferHalLocal::EffectBufferHalLocal(size_t size)
+        : mOwnBuffer(new uint8_t[size]),
+          mBufferSize(size), mFrameCountChanged(false),
+          mAudioBuffer{0, {mOwnBuffer.get()}} {
+}
+
+// Non-owning constructor: wraps 'external' without copying it.
+EffectBufferHalLocal::EffectBufferHalLocal(void* external, size_t size)
+        : mOwnBuffer(nullptr),
+          mBufferSize(size), mFrameCountChanged(false),
+          mAudioBuffer{0, {external}} {
+}
+
+EffectBufferHalLocal::~EffectBufferHalLocal() {
+}
+
+audio_buffer_t* EffectBufferHalLocal::audioBuffer() {
+    return &mAudioBuffer;
+}
+
+// For local buffers the "external" data is the buffer itself.
+void* EffectBufferHalLocal::externalData() const {
+    return mAudioBuffer.raw;
+}
+
+void EffectBufferHalLocal::setFrameCount(size_t frameCount) {
+    mAudioBuffer.frameCount = frameCount;
+    mFrameCountChanged = true;
+}
+
+// Only meaningful for mirrored buffers; an owned buffer keeps its own memory.
+void EffectBufferHalLocal::setExternalData(void* external) {
+    ALOGE_IF(mOwnBuffer != nullptr, "Attempt to set external data for allocated buffer");
+    mAudioBuffer.raw = external;
+}
+
+// One-shot check: reports and clears the frame count change flag.
+bool EffectBufferHalLocal::checkFrameCountChange() {
+    bool result = mFrameCountChanged;
+    mFrameCountChanged = false;
+    return result;
+}
+
+// update()/commit() are no-ops: there is no shared-memory copy to sync.
+void EffectBufferHalLocal::update() {
+}
+
+void EffectBufferHalLocal::commit() {
+}
+
+void EffectBufferHalLocal::update(size_t) {
+}
+
+void EffectBufferHalLocal::commit(size_t) {
+}
+
+} // namespace android
diff --git a/media/libaudiohal/EffectBufferHalLocal.h b/media/libaudiohal/EffectBufferHalLocal.h
new file mode 100644
index 0000000..d2b624b
--- /dev/null
+++ b/media/libaudiohal/EffectBufferHalLocal.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_EFFECT_BUFFER_HAL_LOCAL_H
+#define ANDROID_HARDWARE_EFFECT_BUFFER_HAL_LOCAL_H
+
+#include <memory>
+
+#include <media/audiohal/EffectBufferHalInterface.h>
+#include <system/audio_effect.h>
+
+namespace android {
+
+// In-process counterpart of EffectBufferHalHidl: the audio buffer lives in
+// plain process memory, either owned by this object (allocate) or borrowed
+// from the client (mirror). update()/commit() are no-ops since there is no
+// separate shared-memory copy to synchronize.
+class EffectBufferHalLocal : public EffectBufferHalInterface
+{
+  public:
+    virtual audio_buffer_t* audioBuffer();
+    virtual void* externalData() const;
+
+    virtual void setExternalData(void* external);
+    virtual void setFrameCount(size_t frameCount);
+    virtual bool checkFrameCountChange();
+
+    // No-ops: local buffers need no synchronization.
+    virtual void update();
+    virtual void commit();
+    virtual void update(size_t size);
+    virtual void commit(size_t size);
+
+  private:
+    friend class EffectBufferHalInterface;
+
+    std::unique_ptr<uint8_t[]> mOwnBuffer;  // null when mirroring external memory
+    const size_t mBufferSize;
+    bool mFrameCountChanged;
+    audio_buffer_t mAudioBuffer;
+
+    // Can not be constructed directly by clients.
+    explicit EffectBufferHalLocal(size_t size);
+    EffectBufferHalLocal(void* external, size_t size);
+
+    virtual ~EffectBufferHalLocal();
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_EFFECT_BUFFER_HAL_LOCAL_H
diff --git a/media/libaudiohal/EffectHalHidl.cpp b/media/libaudiohal/EffectHalHidl.cpp
new file mode 100644
index 0000000..b49b975
--- /dev/null
+++ b/media/libaudiohal/EffectHalHidl.cpp
@@ -0,0 +1,326 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "EffectHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <hwbinder/IPCThreadState.h>
+#include <media/EffectsFactoryApi.h>
+#include <utils/Log.h>
+
+#include "ConversionHelperHidl.h"
+#include "EffectBufferHalHidl.h"
+#include "EffectHalHidl.h"
+#include "HidlUtils.h"
+
+using ::android::hardware::audio::effect::V2_0::AudioBuffer;
+using ::android::hardware::audio::effect::V2_0::EffectBufferAccess;
+using ::android::hardware::audio::effect::V2_0::EffectConfigParameters;
+using ::android::hardware::audio::effect::V2_0::MessageQueueFlagBits;
+using ::android::hardware::audio::effect::V2_0::Result;
+using ::android::hardware::audio::common::V2_0::AudioChannelMask;
+using ::android::hardware::audio::common::V2_0::AudioFormat;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::MQDescriptorSync;
+using ::android::hardware::Return;
+
+namespace android {
+
+// The effect id identifies this effect to the EffectsFactory wrapper.
+// mEfGroup must start as nullptr: prepareForProcessing() tests it before
+// assignment, so leaving it uninitialized reads an indeterminate pointer.
+EffectHalHidl::EffectHalHidl(const sp<IEffect>& effect, uint64_t effectId)
+        : mEffect(effect), mEffectId(effectId), mBuffersChanged(true), mEfGroup(nullptr) {
+}
+
+// Closes the remote effect and releases the event flag created by
+// prepareForProcessing(); without deleteEventFlag() the EventFlag object
+// would leak for every processed effect.
+EffectHalHidl::~EffectHalHidl() {
+    if (mEffect != 0) {
+        close();
+        mEffect.clear();
+        hardware::IPCThreadState::self()->flushCommands();
+    }
+    if (mEfGroup) {
+        EventFlag::deleteEventFlag(&mEfGroup);
+    }
+}
+
+// Converts a HIDL EffectDescriptor into the legacy effect_descriptor_t.
+// static
+void EffectHalHidl::effectDescriptorToHal(
+        const EffectDescriptor& descriptor, effect_descriptor_t* halDescriptor) {
+    HidlUtils::uuidToHal(descriptor.type, &halDescriptor->type);
+    HidlUtils::uuidToHal(descriptor.uuid, &halDescriptor->uuid);
+    halDescriptor->flags = static_cast<uint32_t>(descriptor.flags);
+    halDescriptor->cpuLoad = descriptor.cpuLoad;
+    halDescriptor->memoryUsage = descriptor.memoryUsage;
+    // Bound the string copies to the fixed-size destination arrays and
+    // always NUL-terminate: the HIDL strings may be longer than the hal
+    // fields, and an unbounded memcpy would overflow name/implementor
+    // (or leave them unterminated when the lengths match exactly).
+    size_t nameLen = descriptor.name.size();
+    if (nameLen >= sizeof(halDescriptor->name)) {
+        nameLen = sizeof(halDescriptor->name) - 1;
+    }
+    memcpy(halDescriptor->name, descriptor.name.data(), nameLen);
+    halDescriptor->name[nameLen] = '\0';
+    size_t implLen = descriptor.implementor.size();
+    if (implLen >= sizeof(halDescriptor->implementor)) {
+        implLen = sizeof(halDescriptor->implementor) - 1;
+    }
+    memcpy(halDescriptor->implementor, descriptor.implementor.data(), implLen);
+    halDescriptor->implementor[implLen] = '\0';
+}
+
+// TODO(mnaganov): These buffer conversion functions should be shared with Effect wrapper
+// via HidlUtils. Move them there when hardware/interfaces will get un-frozen again.
+
+// Converts a legacy buffer_config_t into its HIDL representation.
+// Only the parameters are converted, not the buffer itself.
+// static
+void EffectHalHidl::effectBufferConfigFromHal(
+        const buffer_config_t& halConfig, EffectBufferConfig* config) {
+    config->samplingRateHz = halConfig.samplingRate;
+    config->channels = AudioChannelMask(halConfig.channels);
+    config->format = AudioFormat(halConfig.format);
+    config->accessMode = EffectBufferAccess(halConfig.accessMode);
+    config->mask = EffectConfigParameters(halConfig.mask);
+}
+
+// Converts a HIDL EffectBufferConfig back into a legacy buffer_config_t.
+// Buffer and buffer provider fields are cleared: raw pointers and
+// callbacks can not cross the process boundary.
+// static
+void EffectHalHidl::effectBufferConfigToHal(
+        const EffectBufferConfig& config, buffer_config_t* halConfig) {
+    halConfig->buffer.frameCount = 0;
+    halConfig->buffer.raw = NULL;
+    halConfig->samplingRate = config.samplingRateHz;
+    halConfig->channels = static_cast<uint32_t>(config.channels);
+    halConfig->bufferProvider.cookie = NULL;
+    halConfig->bufferProvider.getBuffer = NULL;
+    halConfig->bufferProvider.releaseBuffer = NULL;
+    halConfig->format = static_cast<uint8_t>(config.format);
+    halConfig->accessMode = static_cast<uint8_t>(config.accessMode);
+    halConfig->mask = static_cast<uint8_t>(config.mask);
+}
+
+// static
+void EffectHalHidl::effectConfigFromHal(const effect_config_t& halConfig, EffectConfig* config) {
+    effectBufferConfigFromHal(halConfig.inputCfg, &config->inputCfg);
+    effectBufferConfigFromHal(halConfig.outputCfg, &config->outputCfg);
+}
+
+// static
+void EffectHalHidl::effectConfigToHal(const EffectConfig& config, effect_config_t* halConfig) {
+    effectBufferConfigToHal(config.inputCfg, &halConfig->inputCfg);
+    effectBufferConfigToHal(config.outputCfg, &halConfig->outputCfg);
+}
+
+// Maps a HIDL Result onto the legacy status_t error space.
+// static
+status_t EffectHalHidl::analyzeResult(const Result& result) {
+    switch (result) {
+        case Result::OK: return OK;
+        case Result::INVALID_ARGUMENTS: return BAD_VALUE;
+        case Result::INVALID_STATE: return NOT_ENOUGH_DATA;
+        case Result::NOT_INITIALIZED: return NO_INIT;
+        case Result::NOT_SUPPORTED: return INVALID_OPERATION;
+        case Result::RESULT_TOO_BIG: return NO_MEMORY;
+        default: return NO_INIT;
+    }
+}
+
+// Sets the input buffer, flagging a re-send of process buffers to the HAL
+// if the underlying audio_buffer_t changed.
+status_t EffectHalHidl::setInBuffer(const sp<EffectBufferHalInterface>& buffer) {
+    if (mInBuffer == 0 || buffer->audioBuffer() != mInBuffer->audioBuffer()) {
+        mBuffersChanged = true;
+    }
+    mInBuffer = buffer;
+    return OK;
+}
+
+// Same as setInBuffer, for the output side.
+status_t EffectHalHidl::setOutBuffer(const sp<EffectBufferHalInterface>& buffer) {
+    if (mOutBuffer == 0 || buffer->audioBuffer() != mOutBuffer->audioBuffer()) {
+        mBuffersChanged = true;
+    }
+    mOutBuffer = buffer;
+    return OK;
+}
+
+status_t EffectHalHidl::process() {
+    return processImpl(static_cast<uint32_t>(MessageQueueFlagBits::REQUEST_PROCESS));
+}
+
+status_t EffectHalHidl::processReverse() {
+    return processImpl(static_cast<uint32_t>(MessageQueueFlagBits::REQUEST_PROCESS_REVERSE));
+}
+
+// Lazily sets up the status message queue and its event flag, used to
+// hand-shake processing requests with the HAL. Called once before the
+// first process()/processReverse().
+// NOTE(review): 'retval' is assigned only inside the HIDL callback; this
+// relies on the transport always invoking the callback when ret.isOk() —
+// confirm against the HIDL contract.
+status_t EffectHalHidl::prepareForProcessing() {
+    std::unique_ptr<StatusMQ> tempStatusMQ;
+    Result retval;
+    Return<void> ret = mEffect->prepareForProcessing(
+            [&](Result r, const MQDescriptorSync<Result>& statusMQ) {
+                retval = r;
+                if (retval == Result::OK) {
+                    tempStatusMQ.reset(new StatusMQ(statusMQ));
+                    if (tempStatusMQ->isValid() && tempStatusMQ->getEventFlagWord()) {
+                        EventFlag::createEventFlag(tempStatusMQ->getEventFlagWord(), &mEfGroup);
+                    }
+                }
+            });
+    if (!ret.isOk() || retval != Result::OK) {
+        return ret.isOk() ? analyzeResult(retval) : FAILED_TRANSACTION;
+    }
+    if (!tempStatusMQ || !tempStatusMQ->isValid() || !mEfGroup) {
+        ALOGE_IF(!tempStatusMQ, "Failed to obtain status message queue for effects");
+        ALOGE_IF(tempStatusMQ && !tempStatusMQ->isValid(),
+                "Status message queue for effects is invalid");
+        ALOGE_IF(!mEfGroup, "Event flag creation for effects failed");
+        return NO_INIT;
+    }
+    mStatusMQ = std::move(tempStatusMQ);
+    return OK;
+}
+
+// Returns true if the buffers need to be re-sent to the HAL.
+// Both checkFrameCountChange() calls are stored into locals on purpose:
+// using '||' directly would short-circuit and leave the second buffer's
+// change flag uncleared.
+bool EffectHalHidl::needToResetBuffers() {
+    if (mBuffersChanged) return true;
+    bool inBufferFrameCountUpdated = mInBuffer->checkFrameCountChange();
+    bool outBufferFrameCountUpdated = mOutBuffer->checkFrameCountChange();
+    return inBufferFrameCountUpdated || outBufferFrameCountUpdated;
+}
+
+// Requests one processing pass from the HAL side via the event flag, then
+// waits for DONE_PROCESSING and reads the status from the message queue.
+// The audio data itself travels through the shared-memory buffers set up
+// by setProcessBuffers(); only memory fences are needed here.
+status_t EffectHalHidl::processImpl(uint32_t mqFlag) {
+    if (mEffect == 0 || mInBuffer == 0 || mOutBuffer == 0) return NO_INIT;
+    status_t status;
+    if (!mStatusMQ && (status = prepareForProcessing()) != OK) {
+        return status;
+    }
+    if (needToResetBuffers() && (status = setProcessBuffers()) != OK) {
+        return status;
+    }
+    // The data is already in the buffers, just need to flush it and wake up the server side.
+    std::atomic_thread_fence(std::memory_order_release);
+    mEfGroup->wake(mqFlag);
+    uint32_t efState = 0;
+retry:
+    status_t ret = mEfGroup->wait(
+            static_cast<uint32_t>(MessageQueueFlagBits::DONE_PROCESSING), &efState);
+    if (efState & static_cast<uint32_t>(MessageQueueFlagBits::DONE_PROCESSING)) {
+        Result retval = Result::NOT_INITIALIZED;
+        mStatusMQ->read(&retval);
+        if (retval == Result::OK || retval == Result::INVALID_STATE) {
+            // Sync back the changed contents of the buffer.
+            std::atomic_thread_fence(std::memory_order_acquire);
+        }
+        return analyzeResult(retval);
+    }
+    if (ret == -EAGAIN || ret == -EINTR) {
+        // Spurious wakeup. This normally retries no more than once.
+        goto retry;
+    }
+    return ret;
+}
+
+// Sends the current in/out shared-memory buffers to the HAL.
+// Note: 'ret == Result::OK' uses Return<Result>'s implicit conversion to
+// the wrapped value, which is only valid after ret.isOk().
+status_t EffectHalHidl::setProcessBuffers() {
+    Return<Result> ret = mEffect->setProcessBuffers(
+            static_cast<EffectBufferHalHidl*>(mInBuffer.get())->hidlBuffer(),
+            static_cast<EffectBufferHalHidl*>(mOutBuffer.get())->hidlBuffer());
+    if (ret.isOk() && ret == Result::OK) {
+        mBuffersChanged = false;
+        return OK;
+    }
+    return ret.isOk() ? analyzeResult(ret) : FAILED_TRANSACTION;
+}
+
+// Forwards an effect command to the HAL. SET_CONFIG / GET_CONFIG (and
+// their reverse variants) are translated into typed HIDL calls;
+// everything else goes through the generic byte-blob command path.
+status_t EffectHalHidl::command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
+        uint32_t *replySize, void *pReplyData) {
+    if (mEffect == 0) return NO_INIT;
+
+    // Special cases.
+    if (cmdCode == EFFECT_CMD_SET_CONFIG || cmdCode == EFFECT_CMD_SET_CONFIG_REVERSE) {
+        return setConfigImpl(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
+    } else if (cmdCode == EFFECT_CMD_GET_CONFIG || cmdCode == EFFECT_CMD_GET_CONFIG_REVERSE) {
+        return getConfigImpl(cmdCode, replySize, pReplyData);
+    }
+
+    // Common case. The command data is wrapped without copying.
+    hidl_vec<uint8_t> hidlData;
+    if (pCmdData != nullptr && cmdSize > 0) {
+        hidlData.setToExternal(reinterpret_cast<uint8_t*>(pCmdData), cmdSize);
+    }
+    status_t status;
+    uint32_t replySizeStub = 0;
+    if (replySize == nullptr || pReplyData == nullptr) replySize = &replySizeStub;
+    Return<void> ret = mEffect->command(cmdCode, hidlData, *replySize,
+            [&](int32_t s, const hidl_vec<uint8_t>& result) {
+                status = s;
+                if (status == 0) {
+                    // Never report more than the caller asked for.
+                    if (*replySize > result.size()) *replySize = result.size();
+                    if (pReplyData != nullptr && *replySize > 0) {
+                        memcpy(pReplyData, &result[0], *replySize);
+                    }
+                }
+            });
+    return ret.isOk() ? status : FAILED_TRANSACTION;
+}
+
+// Fetches the effect descriptor from the HAL and converts it to the
+// legacy effect_descriptor_t layout.
+status_t EffectHalHidl::getDescriptor(effect_descriptor_t *pDescriptor) {
+    if (mEffect == 0) return NO_INIT;
+    Result retval = Result::NOT_INITIALIZED;
+    Return<void> ret = mEffect->getDescriptor(
+            [&](Result r, const EffectDescriptor& result) {
+                retval = r;
+                if (retval == Result::OK) {
+                    effectDescriptorToHal(result, pDescriptor);
+                }
+            });
+    return ret.isOk() ? analyzeResult(retval) : FAILED_TRANSACTION;
+}
+
+// Asks the HAL to release the effect's resources. The HAL reports a
+// state error if already closed.
+status_t EffectHalHidl::close() {
+    if (mEffect == 0) return NO_INIT;
+    Return<Result> ret = mEffect->close();
+    return ret.isOk() ? analyzeResult(ret) : FAILED_TRANSACTION;
+}
+
+// Implements EFFECT_CMD_GET_CONFIG / EFFECT_CMD_GET_CONFIG_REVERSE on top
+// of the typed HIDL getConfig calls. The reply must be exactly one
+// effect_config_t.
+status_t EffectHalHidl::getConfigImpl(
+        uint32_t cmdCode, uint32_t *replySize, void *pReplyData) {
+    if (replySize == NULL || *replySize != sizeof(effect_config_t) || pReplyData == NULL) {
+        return BAD_VALUE;
+    }
+    status_t result = FAILED_TRANSACTION;
+    Return<void> ret;
+    if (cmdCode == EFFECT_CMD_GET_CONFIG) {
+        ret = mEffect->getConfig([&] (Result r, const EffectConfig &hidlConfig) {
+            result = analyzeResult(r);
+            if (r == Result::OK) {
+                effectConfigToHal(hidlConfig, static_cast<effect_config_t*>(pReplyData));
+            }
+        });
+    } else {
+        ret = mEffect->getConfigReverse([&] (Result r, const EffectConfig &hidlConfig) {
+            result = analyzeResult(r);
+            if (r == Result::OK) {
+                effectConfigToHal(hidlConfig, static_cast<effect_config_t*>(pReplyData));
+            }
+        });
+    }
+    if (!ret.isOk()) {
+        result = FAILED_TRANSACTION;
+    }
+    return result;
+}
+
+// Implements EFFECT_CMD_SET_CONFIG / EFFECT_CMD_SET_CONFIG_REVERSE.
+// Buffer provider callbacks can not cross the process boundary, so
+// configs carrying them are logged and the callbacks are dropped. The
+// int32_t reply receives the resulting status, mirroring the legacy
+// command protocol.
+status_t EffectHalHidl::setConfigImpl(
+        uint32_t cmdCode, uint32_t cmdSize, void *pCmdData, uint32_t *replySize, void *pReplyData) {
+    if (pCmdData == NULL || cmdSize != sizeof(effect_config_t) ||
+            replySize == NULL || *replySize != sizeof(int32_t) || pReplyData == NULL) {
+        return BAD_VALUE;
+    }
+    const effect_config_t *halConfig = static_cast<effect_config_t*>(pCmdData);
+    if (halConfig->inputCfg.bufferProvider.getBuffer != NULL ||
+            halConfig->inputCfg.bufferProvider.releaseBuffer != NULL ||
+            halConfig->outputCfg.bufferProvider.getBuffer != NULL ||
+            halConfig->outputCfg.bufferProvider.releaseBuffer != NULL) {
+        ALOGE("Buffer provider callbacks are not supported");
+    }
+    EffectConfig hidlConfig;
+    effectConfigFromHal(*halConfig, &hidlConfig);
+    Return<Result> ret = cmdCode == EFFECT_CMD_SET_CONFIG ?
+            mEffect->setConfig(hidlConfig, nullptr, nullptr) :
+            mEffect->setConfigReverse(hidlConfig, nullptr, nullptr);
+    status_t result = FAILED_TRANSACTION;
+    if (ret.isOk()) {
+        result = analyzeResult(ret);
+        *static_cast<int32_t*>(pReplyData) = result;
+    }
+    return result;
+}
+
+} // namespace android
diff --git a/media/libaudiohal/EffectHalHidl.h b/media/libaudiohal/EffectHalHidl.h
new file mode 100644
index 0000000..6ffdaf1
--- /dev/null
+++ b/media/libaudiohal/EffectHalHidl.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_EFFECT_HAL_HIDL_H
+#define ANDROID_HARDWARE_EFFECT_HAL_HIDL_H
+
+#include <android/hardware/audio/effect/2.0/IEffect.h>
+#include <media/audiohal/EffectHalInterface.h>
+#include <fmq/EventFlag.h>
+#include <fmq/MessageQueue.h>
+#include <system/audio_effect.h>
+
+using ::android::hardware::audio::effect::V2_0::EffectBufferConfig;
+using ::android::hardware::audio::effect::V2_0::EffectConfig;
+using ::android::hardware::audio::effect::V2_0::EffectDescriptor;
+using ::android::hardware::audio::effect::V2_0::IEffect;
+using ::android::hardware::EventFlag;
+using ::android::hardware::MessageQueue;
+
+namespace android {
+
+// Wraps a HIDL IEffect proxy behind the legacy EffectHalInterface.
+// Audio data is exchanged through shared-memory buffers; processing is
+// triggered via a fast message queue + event flag hand-shake.
+class EffectHalHidl : public EffectHalInterface
+{
+  public:
+    // Set the input buffer.
+    virtual status_t setInBuffer(const sp<EffectBufferHalInterface>& buffer);
+
+    // Set the output buffer.
+    virtual status_t setOutBuffer(const sp<EffectBufferHalInterface>& buffer);
+
+    // Effect process function.
+    virtual status_t process();
+
+    // Process reverse stream function. This function is used to pass
+    // a reference stream to the effect engine.
+    virtual status_t processReverse();
+
+    // Send a command and receive a response to/from effect engine.
+    virtual status_t command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
+            uint32_t *replySize, void *pReplyData);
+
+    // Returns the effect descriptor.
+    virtual status_t getDescriptor(effect_descriptor_t *pDescriptor);
+
+    // Free resources on the remote side.
+    virtual status_t close();
+
+    // Whether it's a local implementation.
+    virtual bool isLocal() const { return false; }
+
+    // Identifier assigned by the effects factory.
+    uint64_t effectId() const { return mEffectId; }
+
+    static void effectDescriptorToHal(
+            const EffectDescriptor& descriptor, effect_descriptor_t* halDescriptor);
+
+  private:
+    friend class EffectsFactoryHalHidl;
+    // Status queue used to read back per-pass processing results.
+    typedef MessageQueue<
+            hardware::audio::effect::V2_0::Result, hardware::kSynchronizedReadWrite> StatusMQ;
+
+    sp<IEffect> mEffect;
+    const uint64_t mEffectId;
+    sp<EffectBufferHalInterface> mInBuffer;
+    sp<EffectBufferHalInterface> mOutBuffer;
+    bool mBuffersChanged;                 // buffers must be re-sent to the HAL
+    std::unique_ptr<StatusMQ> mStatusMQ;  // created by prepareForProcessing()
+    EventFlag* mEfGroup;                  // event flag attached to mStatusMQ;
+                                          // see prepareForProcessing()
+
+    static status_t analyzeResult(const hardware::audio::effect::V2_0::Result& result);
+    static void effectBufferConfigFromHal(
+            const buffer_config_t& halConfig, EffectBufferConfig* config);
+    static void effectBufferConfigToHal(
+            const EffectBufferConfig& config, buffer_config_t* halConfig);
+    static void effectConfigFromHal(const effect_config_t& halConfig, EffectConfig* config);
+    static void effectConfigToHal(const EffectConfig& config, effect_config_t* halConfig);
+
+    // Can not be constructed directly by clients.
+    EffectHalHidl(const sp<IEffect>& effect, uint64_t effectId);
+
+    // The destructor automatically releases the effect.
+    virtual ~EffectHalHidl();
+
+    status_t getConfigImpl(uint32_t cmdCode, uint32_t *replySize, void *pReplyData);
+    status_t prepareForProcessing();
+    bool needToResetBuffers();
+    status_t processImpl(uint32_t mqFlag);
+    status_t setConfigImpl(
+            uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
+            uint32_t *replySize, void *pReplyData);
+    status_t setProcessBuffers();
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_EFFECT_HAL_HIDL_H
diff --git a/media/libaudiohal/EffectHalLocal.cpp b/media/libaudiohal/EffectHalLocal.cpp
new file mode 100644
index 0000000..dd465c3
--- /dev/null
+++ b/media/libaudiohal/EffectHalLocal.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "EffectHalLocal"
+//#define LOG_NDEBUG 0
+
+#include <media/EffectsFactoryApi.h>
+#include <utils/Log.h>
+
+#include "EffectHalLocal.h"
+
+namespace android {
+
+// Wraps an in-process effect engine handle obtained from the effects
+// factory. The handle is owned by this object.
+EffectHalLocal::EffectHalLocal(effect_handle_t handle)
+        : mHandle(handle) {
+}
+
+// Releases the engine through the effects factory API.
+EffectHalLocal::~EffectHalLocal() {
+    int status = EffectRelease(mHandle);
+    ALOGW_IF(status, "Error releasing effect %p: %s", mHandle, strerror(-status));
+    mHandle = 0;
+}
+
+status_t EffectHalLocal::setInBuffer(const sp<EffectBufferHalInterface>& buffer) {
+    mInBuffer = buffer;
+    return OK;
+}
+
+status_t EffectHalLocal::setOutBuffer(const sp<EffectBufferHalInterface>& buffer) {
+    mOutBuffer = buffer;
+    return OK;
+}
+
+// Runs one processing pass over the configured in/out buffers.
+status_t EffectHalLocal::process() {
+    if (mInBuffer == nullptr || mOutBuffer == nullptr) {
+        ALOGE_IF(mInBuffer == nullptr, "Input buffer not set");
+        ALOGE_IF(mOutBuffer == nullptr, "Output buffer not set");
+        return NO_INIT;
+    }
+    return (*mHandle)->process(mHandle, mInBuffer->audioBuffer(), mOutBuffer->audioBuffer());
+}
+
+// Reverse-stream processing; optional in the effect API, so report
+// INVALID_OPERATION when the engine does not implement it.
+status_t EffectHalLocal::processReverse() {
+    if ((*mHandle)->process_reverse != NULL) {
+        if (mInBuffer == nullptr || mOutBuffer == nullptr) {
+            ALOGE_IF(mInBuffer == nullptr, "Input buffer not set");
+            ALOGE_IF(mOutBuffer == nullptr, "Output buffer not set");
+            return NO_INIT;
+        }
+        return (*mHandle)->process_reverse(
+                mHandle, mInBuffer->audioBuffer(), mOutBuffer->audioBuffer());
+    } else {
+        return INVALID_OPERATION;
+    }
+}
+
+// Commands and descriptor queries go straight to the engine.
+status_t EffectHalLocal::command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
+        uint32_t *replySize, void *pReplyData) {
+    return (*mHandle)->command(mHandle, cmdCode, cmdSize, pCmdData, replySize, pReplyData);
+}
+
+status_t EffectHalLocal::getDescriptor(effect_descriptor_t *pDescriptor) {
+    return (*mHandle)->get_descriptor(mHandle, pDescriptor);
+}
+
+// Nothing to do: the engine is released in the destructor.
+status_t EffectHalLocal::close() {
+    return OK;
+}
+
+} // namespace android
diff --git a/media/libaudiohal/EffectHalLocal.h b/media/libaudiohal/EffectHalLocal.h
new file mode 100644
index 0000000..693fb50
--- /dev/null
+++ b/media/libaudiohal/EffectHalLocal.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_EFFECT_HAL_LOCAL_H
+#define ANDROID_HARDWARE_EFFECT_HAL_LOCAL_H
+
+#include <hardware/audio_effect.h>
+#include <media/audiohal/EffectHalInterface.h>
+
+namespace android {
+
+// EffectHalInterface implementation that wraps an in-process (non-HIDL)
+// effect engine handle created by the local effects factory. Owns the
+// handle for its lifetime.
+class EffectHalLocal : public EffectHalInterface
+{
+ public:
+ // Set the input buffer.
+ virtual status_t setInBuffer(const sp<EffectBufferHalInterface>& buffer);
+
+ // Set the output buffer.
+ virtual status_t setOutBuffer(const sp<EffectBufferHalInterface>& buffer);
+
+ // Effect process function.
+ virtual status_t process();
+
+ // Process reverse stream function. This function is used to pass
+ // a reference stream to the effect engine.
+ virtual status_t processReverse();
+
+ // Send a command and receive a response to/from effect engine.
+ virtual status_t command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
+ uint32_t *replySize, void *pReplyData);
+
+ // Returns the effect descriptor.
+ virtual status_t getDescriptor(effect_descriptor_t *pDescriptor);
+
+ // Free resources on the remote side.
+ virtual status_t close();
+
+ // Whether it's a local implementation.
+ virtual bool isLocal() const { return true; }
+
+ // Raw engine handle; valid for the lifetime of this object.
+ effect_handle_t handle() const { return mHandle; }
+
+ private:
+ effect_handle_t mHandle;
+ sp<EffectBufferHalInterface> mInBuffer;
+ sp<EffectBufferHalInterface> mOutBuffer;
+
+ friend class EffectsFactoryHalLocal;
+
+ // Can not be constructed directly by clients.
+ explicit EffectHalLocal(effect_handle_t handle);
+
+ // The destructor automatically releases the effect.
+ virtual ~EffectHalLocal();
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_EFFECT_HAL_LOCAL_H
diff --git a/media/libaudiohal/EffectsFactoryHalHidl.cpp b/media/libaudiohal/EffectsFactoryHalHidl.cpp
new file mode 100644
index 0000000..a8081b7
--- /dev/null
+++ b/media/libaudiohal/EffectsFactoryHalHidl.cpp
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "EffectsFactoryHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <cutils/native_handle.h>
+
+#include "ConversionHelperHidl.h"
+#include "EffectHalHidl.h"
+#include "EffectsFactoryHalHidl.h"
+#include "HidlUtils.h"
+
+using ::android::hardware::audio::common::V2_0::Uuid;
+using ::android::hardware::audio::effect::V2_0::IEffect;
+using ::android::hardware::audio::effect::V2_0::Result;
+using ::android::hardware::Return;
+
+namespace android {
+
+// static
+// Factory entry point: this translation unit binds the interface to the
+// HIDL-backed implementation.
+sp<EffectsFactoryHalInterface> EffectsFactoryHalInterface::create() {
+ return new EffectsFactoryHalHidl();
+}
+
+// static
+// True when the UUID equals EFFECT_UUID_NULL (byte-wise comparison).
+bool EffectsFactoryHalInterface::isNullUuid(const effect_uuid_t *pEffectUuid) {
+ return memcmp(pEffectUuid, EFFECT_UUID_NULL, sizeof(effect_uuid_t)) == 0;
+}
+
+// Connects to the IEffectsFactory HIDL service. Deliberately terminates the
+// process when the service is unavailable -- the audio framework cannot
+// operate without it.
+EffectsFactoryHalHidl::EffectsFactoryHalHidl() : ConversionHelperHidl("EffectsFactory") {
+ mEffectsFactory = IEffectsFactory::getService();
+ if (mEffectsFactory == 0) {
+ ALOGE("Failed to obtain IEffectsFactory service, terminating process.");
+ exit(1);
+ }
+}
+
+// Nothing to release explicitly; mEffectsFactory is reference-counted.
+EffectsFactoryHalHidl::~EffectsFactoryHalHidl() {
+}
+
+// Fetches the full descriptor list from the HAL service and caches it in
+// mLastDescriptors. Returns OK on success, NO_INIT on a HAL-side error,
+// or the transport error from processReturn.
+// NOTE(review): on a HAL-side (non-transport) error the previous cache is
+// kept; only a transport error clears it -- confirm this asymmetry is
+// intended.
+status_t EffectsFactoryHalHidl::queryAllDescriptors() {
+ if (mEffectsFactory == 0) return NO_INIT;
+ Result retval = Result::NOT_INITIALIZED;
+ Return<void> ret = mEffectsFactory->getAllDescriptors(
+ [&](Result r, const hidl_vec<EffectDescriptor>& result) {
+ retval = r;
+ if (retval == Result::OK) {
+ mLastDescriptors = result;
+ }
+ });
+ if (ret.isOk()) {
+ return retval == Result::OK ? OK : NO_INIT;
+ }
+ mLastDescriptors.resize(0);
+ return processReturn(__FUNCTION__, ret);
+}
+
+// Re-queries all descriptors from the service and reports how many exist.
+status_t EffectsFactoryHalHidl::queryNumberEffects(uint32_t *pNumEffects) {
+ status_t queryResult = queryAllDescriptors();
+ if (queryResult == OK) {
+ *pNumEffects = mLastDescriptors.size();
+ }
+ return queryResult;
+}
+
+// Returns the index-th descriptor from the local cache, refreshing the
+// cache first if it is empty. NAME_NOT_FOUND when index is out of range.
+status_t EffectsFactoryHalHidl::getDescriptor(
+ uint32_t index, effect_descriptor_t *pDescriptor) {
+ // TODO: We need somehow to track the changes on the server side
+ // or figure out how to convert everybody to query all the descriptors at once.
+ // TODO: check for nullptr
+ if (mLastDescriptors.size() == 0) {
+ status_t queryResult = queryAllDescriptors();
+ if (queryResult != OK) return queryResult;
+ }
+ if (index >= mLastDescriptors.size()) return NAME_NOT_FOUND;
+ EffectHalHidl::effectDescriptorToHal(mLastDescriptors[index], pDescriptor);
+ return OK;
+}
+
+// Looks up a single descriptor by effect UUID directly on the service
+// (bypasses the local cache). INVALID_ARGUMENTS from the HAL is mapped to
+// NAME_NOT_FOUND for framework callers.
+status_t EffectsFactoryHalHidl::getDescriptor(
+ const effect_uuid_t *pEffectUuid, effect_descriptor_t *pDescriptor) {
+ // TODO: check for nullptr
+ if (mEffectsFactory == 0) return NO_INIT;
+ Uuid hidlUuid;
+ HidlUtils::uuidFromHal(*pEffectUuid, &hidlUuid);
+ Result retval = Result::NOT_INITIALIZED;
+ Return<void> ret = mEffectsFactory->getDescriptor(hidlUuid,
+ [&](Result r, const EffectDescriptor& result) {
+ retval = r;
+ if (retval == Result::OK) {
+ EffectHalHidl::effectDescriptorToHal(result, pDescriptor);
+ }
+ });
+ if (ret.isOk()) {
+ if (retval == Result::OK) return OK;
+ else if (retval == Result::INVALID_ARGUMENTS) return NAME_NOT_FOUND;
+ else return NO_INIT;
+ }
+ return processReturn(__FUNCTION__, ret);
+}
+
+// Asks the service to instantiate an effect for the given UUID/session/io
+// and wraps the returned IEffect proxy in an EffectHalHidl. On success
+// *effect owns the new wrapper. INVALID_ARGUMENTS maps to NAME_NOT_FOUND.
+status_t EffectsFactoryHalHidl::createEffect(
+ const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t ioId,
+ sp<EffectHalInterface> *effect) {
+ if (mEffectsFactory == 0) return NO_INIT;
+ Uuid hidlUuid;
+ HidlUtils::uuidFromHal(*pEffectUuid, &hidlUuid);
+ Result retval = Result::NOT_INITIALIZED;
+ Return<void> ret = mEffectsFactory->createEffect(
+ hidlUuid, sessionId, ioId,
+ [&](Result r, const sp<IEffect>& result, uint64_t effectId) {
+ retval = r;
+ if (retval == Result::OK) {
+ *effect = new EffectHalHidl(result, effectId);
+ }
+ });
+ if (ret.isOk()) {
+ if (retval == Result::OK) return OK;
+ else if (retval == Result::INVALID_ARGUMENTS) return NAME_NOT_FOUND;
+ else return NO_INIT;
+ }
+ return processReturn(__FUNCTION__, ret);
+}
+
+// Streams the service's debug dump into the caller-owned fd, passed to the
+// HAL wrapped in a one-fd native handle (handle deleted, fd NOT closed).
+// NOTE(review): native_handle_create can return nullptr on allocation
+// failure -- consider a null check before dereferencing.
+status_t EffectsFactoryHalHidl::dumpEffects(int fd) {
+ if (mEffectsFactory == 0) return NO_INIT;
+ native_handle_t* hidlHandle = native_handle_create(1, 0);
+ hidlHandle->data[0] = fd;
+ Return<void> ret = mEffectsFactory->debugDump(hidlHandle);
+ native_handle_delete(hidlHandle);
+ return processReturn(__FUNCTION__, ret);
+}
+
+} // namespace android
diff --git a/media/libaudiohal/EffectsFactoryHalHidl.h b/media/libaudiohal/EffectsFactoryHalHidl.h
new file mode 100644
index 0000000..e89f042
--- /dev/null
+++ b/media/libaudiohal/EffectsFactoryHalHidl.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_H
+#define ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_H
+
+#include <android/hardware/audio/effect/2.0/IEffectsFactory.h>
+#include <android/hardware/audio/effect/2.0/types.h>
+#include <media/audiohal/EffectsFactoryHalInterface.h>
+
+namespace android {
+
+using ::android::hardware::audio::effect::V2_0::EffectDescriptor;
+using ::android::hardware::audio::effect::V2_0::IEffectsFactory;
+using ::android::hardware::hidl_vec;
+
+// EffectsFactoryHalInterface implementation backed by the
+// IEffectsFactory HIDL service. Caches the last descriptor list queried
+// from the service in mLastDescriptors.
+class EffectsFactoryHalHidl : public EffectsFactoryHalInterface, public ConversionHelperHidl
+{
+ public:
+ // Returns the number of different effects in all loaded libraries.
+ virtual status_t queryNumberEffects(uint32_t *pNumEffects);
+
+ // Returns a descriptor of the next available effect.
+ virtual status_t getDescriptor(uint32_t index,
+ effect_descriptor_t *pDescriptor);
+
+ virtual status_t getDescriptor(const effect_uuid_t *pEffectUuid,
+ effect_descriptor_t *pDescriptor);
+
+ // Creates an effect engine of the specified type.
+ // To release the effect engine, it is necessary to release references
+ // to the returned effect object.
+ virtual status_t createEffect(const effect_uuid_t *pEffectUuid,
+ int32_t sessionId, int32_t ioId,
+ sp<EffectHalInterface> *effect);
+
+ virtual status_t dumpEffects(int fd);
+
+ private:
+ friend class EffectsFactoryHalInterface;
+
+ sp<IEffectsFactory> mEffectsFactory;
+ hidl_vec<EffectDescriptor> mLastDescriptors;
+
+ // Can not be constructed directly by clients.
+ EffectsFactoryHalHidl();
+ virtual ~EffectsFactoryHalHidl();
+
+ // Refreshes mLastDescriptors from the service.
+ status_t queryAllDescriptors();
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_H
diff --git a/media/libaudiohal/EffectsFactoryHalLocal.cpp b/media/libaudiohal/EffectsFactoryHalLocal.cpp
new file mode 100644
index 0000000..bbdef5d
--- /dev/null
+++ b/media/libaudiohal/EffectsFactoryHalLocal.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/EffectsFactoryApi.h>
+
+#include "EffectHalLocal.h"
+#include "EffectsFactoryHalLocal.h"
+
+namespace android {
+
+// static
+// Factory entry point: this translation unit binds the interface to the
+// in-process (non-HIDL) implementation.
+sp<EffectsFactoryHalInterface> EffectsFactoryHalInterface::create() {
+ return new EffectsFactoryHalLocal();
+}
+
+// static
+// Delegates the null-UUID check to the legacy effects factory API.
+bool EffectsFactoryHalInterface::isNullUuid(const effect_uuid_t *pEffectUuid) {
+ return EffectIsNullUuid(pEffectUuid);
+}
+
+// Thin wrapper over the legacy in-process effects factory API.
+status_t EffectsFactoryHalLocal::queryNumberEffects(uint32_t *pNumEffects) {
+ return EffectQueryNumberEffects(pNumEffects);
+}
+
+// Returns the descriptor of the index-th effect via the legacy API.
+status_t EffectsFactoryHalLocal::getDescriptor(
+ uint32_t index, effect_descriptor_t *pDescriptor) {
+ return EffectQueryEffect(index, pDescriptor);
+}
+
+// Looks up a descriptor by effect UUID via the legacy API.
+status_t EffectsFactoryHalLocal::getDescriptor(
+ const effect_uuid_t *pEffectUuid, effect_descriptor_t *pDescriptor) {
+ return EffectGetDescriptor(pEffectUuid, pDescriptor);
+}
+
+// Creates an in-process effect engine and wraps its handle in an
+// EffectHalLocal, which takes ownership (releases it on destruction).
+status_t EffectsFactoryHalLocal::createEffect(
+ const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t ioId,
+ sp<EffectHalInterface> *effect) {
+ effect_handle_t handle;
+ int result = EffectCreate(pEffectUuid, sessionId, ioId, &handle);
+ if (result == 0) {
+ *effect = new EffectHalLocal(handle);
+ }
+ return result;
+}
+
+// Dumps the state of loaded effects to the caller-owned fd.
+status_t EffectsFactoryHalLocal::dumpEffects(int fd) {
+ return EffectDumpEffects(fd);
+}
+
+} // namespace android
diff --git a/media/libaudiohal/EffectsFactoryHalLocal.h b/media/libaudiohal/EffectsFactoryHalLocal.h
new file mode 100644
index 0000000..d5b81be
--- /dev/null
+++ b/media/libaudiohal/EffectsFactoryHalLocal.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_LOCAL_H
+#define ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_LOCAL_H
+
+#include <media/audiohal/EffectsFactoryHalInterface.h>
+
+namespace android {
+
+// EffectsFactoryHalInterface implementation that forwards every call to
+// the legacy in-process effects factory API (EffectsFactoryApi.h).
+class EffectsFactoryHalLocal : public EffectsFactoryHalInterface
+{
+ public:
+ // Returns the number of different effects in all loaded libraries.
+ virtual status_t queryNumberEffects(uint32_t *pNumEffects);
+
+ // Returns a descriptor of the next available effect.
+ virtual status_t getDescriptor(uint32_t index,
+ effect_descriptor_t *pDescriptor);
+
+ virtual status_t getDescriptor(const effect_uuid_t *pEffectUuid,
+ effect_descriptor_t *pDescriptor);
+
+ // Creates an effect engine of the specified type.
+ // To release the effect engine, it is necessary to release references
+ // to the returned effect object.
+ virtual status_t createEffect(const effect_uuid_t *pEffectUuid,
+ int32_t sessionId, int32_t ioId,
+ sp<EffectHalInterface> *effect);
+
+ virtual status_t dumpEffects(int fd);
+
+ private:
+ friend class EffectsFactoryHalInterface;
+
+ // Can not be constructed directly by clients.
+ EffectsFactoryHalLocal() {}
+
+ virtual ~EffectsFactoryHalLocal() {}
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_LOCAL_H
diff --git a/media/libaudiohal/HalDeathHandlerHidl.cpp b/media/libaudiohal/HalDeathHandlerHidl.cpp
new file mode 100644
index 0000000..a742671
--- /dev/null
+++ b/media/libaudiohal/HalDeathHandlerHidl.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "HalDeathHandler"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+
+#include <media/audiohal/hidl/HalDeathHandler.h>
+
+namespace android {
+
+ANDROID_SINGLETON_STATIC_INSTANCE(HalDeathHandler);
+
+// static
+// Returns the process-wide singleton death handler.
+sp<HalDeathHandler> HalDeathHandler::getInstance() {
+ return &Singleton<HalDeathHandler>::getInstance();
+}
+
+// mSelf holds a strong reference to this object -- presumably to pin the
+// singleton alive while it is linked as a HIDL death recipient; confirm
+// against the header's declaration of mSelf.
+HalDeathHandler::HalDeathHandler() : mSelf(this) {
+}
+
+// Nothing to clean up; handlers map and mutex destroy themselves.
+HalDeathHandler::~HalDeathHandler() {
+}
+
+// Registers a handler keyed by cookie to run when the HAL server dies.
+// Thread-safe (guarded by mHandlersLock).
+void HalDeathHandler::registerAtExitHandler(void* cookie, AtExitHandler handler) {
+ std::lock_guard<std::mutex> guard(mHandlersLock);
+ mHandlers.insert({cookie, handler});
+}
+
+// Removes the handler previously registered for this cookie, if any.
+void HalDeathHandler::unregisterAtExitHandler(void* cookie) {
+ std::lock_guard<std::mutex> guard(mHandlersLock);
+ mHandlers.erase(cookie);
+}
+
+// HIDL death notification. Runs every registered handler and then
+// deliberately aborts this process (LOG_ALWAYS_FATAL) so it gets restarted
+// together with the crashed HAL server.
+void HalDeathHandler::serviceDied(uint64_t /*cookie*/, const wp<IBase>& /*who*/) {
+ // No matter which of the service objects has died,
+ // we need to run all the registered handlers and crash our process.
+ std::lock_guard<std::mutex> guard(mHandlersLock);
+ for (const auto& handler : mHandlers) {
+ handler.second();
+ }
+ LOG_ALWAYS_FATAL("HAL server crashed, need to restart");
+}
+
+} // namespace android
diff --git a/media/libaudiohal/StreamHalHidl.cpp b/media/libaudiohal/StreamHalHidl.cpp
new file mode 100644
index 0000000..42785d5
--- /dev/null
+++ b/media/libaudiohal/StreamHalHidl.cpp
@@ -0,0 +1,711 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "StreamHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <android/hardware/audio/2.0/IStreamOutCallback.h>
+#include <hwbinder/IPCThreadState.h>
+#include <mediautils/SchedulingPolicyService.h>
+#include <utils/Log.h>
+
+#include "DeviceHalHidl.h"
+#include "EffectHalHidl.h"
+#include "StreamHalHidl.h"
+
+using ::android::hardware::audio::common::V2_0::AudioChannelMask;
+using ::android::hardware::audio::common::V2_0::AudioFormat;
+using ::android::hardware::audio::common::V2_0::ThreadInfo;
+using ::android::hardware::audio::V2_0::AudioDrain;
+using ::android::hardware::audio::V2_0::IStreamOutCallback;
+using ::android::hardware::audio::V2_0::MessageQueueFlagBits;
+using ::android::hardware::audio::V2_0::MmapBufferInfo;
+using ::android::hardware::audio::V2_0::MmapPosition;
+using ::android::hardware::audio::V2_0::ParameterValue;
+using ::android::hardware::audio::V2_0::Result;
+using ::android::hardware::audio::V2_0::TimeSpec;
+using ::android::hardware::MQDescriptorSync;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ReadCommand = ::android::hardware::audio::V2_0::IStreamIn::ReadCommand;
+
+namespace android {
+
+// Base wrapper around an IStream proxy. mStream is a raw, non-owning
+// pointer; the derived class (e.g. StreamOutHalHidl) holds the strong
+// reference that keeps the proxy alive.
+StreamHalHidl::StreamHalHidl(IStream *stream)
+ : ConversionHelperHidl("Stream"),
+ mStream(stream),
+ mHalThreadPriority(HAL_THREAD_PRIORITY_DEFAULT) {
+}
+
+// Only clears the non-owning pointer; the derived class closes the stream.
+StreamHalHidl::~StreamHalHidl() {
+ mStream = nullptr;
+}
+
+// Returns the stream's sample rate in Hz, or NO_INIT after destruction.
+status_t StreamHalHidl::getSampleRate(uint32_t *rate) {
+ if (!mStream) return NO_INIT;
+ return processReturn("getSampleRate", mStream->getSampleRate(), rate);
+}
+
+// Returns the HAL buffer size in bytes.
+status_t StreamHalHidl::getBufferSize(size_t *size) {
+ if (!mStream) return NO_INIT;
+ return processReturn("getBufferSize", mStream->getBufferSize(), size);
+}
+
+// Returns the stream's channel mask.
+status_t StreamHalHidl::getChannelMask(audio_channel_mask_t *mask) {
+ if (!mStream) return NO_INIT;
+ return processReturn("getChannelMask", mStream->getChannelMask(), mask);
+}
+
+// Returns the stream's audio format.
+status_t StreamHalHidl::getFormat(audio_format_t *format) {
+ if (!mStream) return NO_INIT;
+ return processReturn("getFormat", mStream->getFormat(), format);
+}
+
+// Fetches sample rate, channel mask and format in one HAL round trip,
+// converting the HIDL enums back to the framework's C types.
+status_t StreamHalHidl::getAudioProperties(
+ uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) {
+ if (!mStream) return NO_INIT;
+ Return<void> ret = mStream->getAudioProperties(
+ [&](uint32_t sr, AudioChannelMask m, AudioFormat f) {
+ *sampleRate = sr;
+ *mask = static_cast<audio_channel_mask_t>(m);
+ *format = static_cast<audio_format_t>(f);
+ });
+ return processReturn("getAudioProperties", ret);
+}
+
+// Converts a legacy key=value parameter string to HIDL ParameterValues and
+// forwards them to the stream.
+status_t StreamHalHidl::setParameters(const String8& kvPairs) {
+ if (!mStream) return NO_INIT;
+ hidl_vec<ParameterValue> hidlParams;
+ status_t status = parametersFromHal(kvPairs, &hidlParams);
+ if (status != OK) return status;
+ return processReturn("setParameters", mStream->setParameters(hidlParams));
+}
+
+// Queries the stream for the given parameter keys and converts the result
+// back into a legacy key=value string in *values (cleared first, so it is
+// empty on any failure path).
+status_t StreamHalHidl::getParameters(const String8& keys, String8 *values) {
+ values->clear();
+ if (!mStream) return NO_INIT;
+ hidl_vec<hidl_string> hidlKeys;
+ status_t status = keysFromHal(keys, &hidlKeys);
+ if (status != OK) return status;
+ // Initialize so that retval is never read uninitialized if the transport
+ // call fails before the callback runs (consistent with the other methods
+ // in this library).
+ Result retval = Result::NOT_INITIALIZED;
+ Return<void> ret = mStream->getParameters(
+ hidlKeys,
+ [&](Result r, const hidl_vec<ParameterValue>& parameters) {
+ retval = r;
+ if (retval == Result::OK) {
+ parametersToHal(parameters, values);
+ }
+ });
+ return processReturn("getParameters", ret, retval);
+}
+
+// Attaches a HIDL effect to the stream by its server-side effect id.
+// NOTE(review): the static_cast assumes 'effect' is an EffectHalHidl
+// (isLocal() == false); confirm callers never pass a local effect here.
+status_t StreamHalHidl::addEffect(sp<EffectHalInterface> effect) {
+ if (!mStream) return NO_INIT;
+ return processReturn("addEffect", mStream->addEffect(
+ static_cast<EffectHalHidl*>(effect.get())->effectId()));
+}
+
+// Detaches a HIDL effect from the stream by its server-side effect id.
+// Same unchecked static_cast assumption as addEffect().
+status_t StreamHalHidl::removeEffect(sp<EffectHalInterface> effect) {
+ if (!mStream) return NO_INIT;
+ return processReturn("removeEffect", mStream->removeEffect(
+ static_cast<EffectHalHidl*>(effect.get())->effectId()));
+}
+
+// Puts the stream into standby mode.
+status_t StreamHalHidl::standby() {
+ if (!mStream) return NO_INIT;
+ return processReturn("standby", mStream->standby());
+}
+
+// Streams the HAL's debug dump into the caller-owned fd (wrapped in a
+// one-fd native handle; the handle is deleted, the fd is not closed).
+// NOTE(review): native_handle_create can return nullptr -- consider a
+// null check before dereferencing.
+status_t StreamHalHidl::dump(int fd) {
+ if (!mStream) return NO_INIT;
+ native_handle_t* hidlHandle = native_handle_create(1, 0);
+ hidlHandle->data[0] = fd;
+ Return<void> ret = mStream->debugDump(hidlHandle);
+ native_handle_delete(hidlHandle);
+ return processReturn("dump", ret);
+}
+
+// Starts the stream (used for MMAP streams).
+status_t StreamHalHidl::start() {
+ if (!mStream) return NO_INIT;
+ return processReturn("start", mStream->start());
+}
+
+// Stops the stream (used for MMAP streams).
+status_t StreamHalHidl::stop() {
+ if (!mStream) return NO_INIT;
+ return processReturn("stop", mStream->stop());
+}
+
+// Asks the HAL to create a shared-memory MMAP buffer of at least
+// minSizeFrames and extracts the fd and sizing info into *info.
+// Maps a handle without any fd to NOT_INITIALIZED.
+status_t StreamHalHidl::createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info) {
+ // Guard against use after the stream pointer was cleared, like every
+ // other method of this class.
+ if (!mStream) return NO_INIT;
+ // Initialized so retval is never read uninitialized on transport failure.
+ Result retval = Result::NOT_INITIALIZED;
+ Return<void> ret = mStream->createMmapBuffer(
+ minSizeFrames,
+ [&](Result r, const MmapBufferInfo& hidlInfo) {
+ retval = r;
+ if (retval == Result::OK) {
+ const native_handle *handle = hidlInfo.sharedMemory.handle();
+ if (handle->numFds > 0) {
+ info->shared_memory_fd = handle->data[0];
+ info->buffer_size_frames = hidlInfo.bufferSizeFrames;
+ info->burst_size_frames = hidlInfo.burstSizeFrames;
+ // info->shared_memory_address is not needed in HIDL context
+ info->shared_memory_address = NULL;
+ } else {
+ retval = Result::NOT_INITIALIZED;
+ }
+ }
+ });
+ return processReturn("createMmapBuffer", ret, retval);
+}
+
+// Reads the current MMAP presentation position (frames + timestamp)
+// from the HAL into *position.
+status_t StreamHalHidl::getMmapPosition(struct audio_mmap_position *position) {
+ // Guard against use after the stream pointer was cleared, like every
+ // other method of this class.
+ if (!mStream) return NO_INIT;
+ // Initialized so retval is never read uninitialized on transport failure.
+ Result retval = Result::NOT_INITIALIZED;
+ Return<void> ret = mStream->getMmapPosition(
+ [&](Result r, const MmapPosition& hidlPosition) {
+ retval = r;
+ if (retval == Result::OK) {
+ position->time_nanoseconds = hidlPosition.timeNanoseconds;
+ position->position_frames = hidlPosition.positionFrames;
+ }
+ });
+ return processReturn("getMmapPosition", ret, retval);
+}
+
+// Records the priority to request for the HAL worker thread; applied later
+// by requestHalThreadPriority() once the thread's pid/tid are known.
+status_t StreamHalHidl::setHalThreadPriority(int priority) {
+ mHalThreadPriority = priority;
+ return OK;
+}
+
+// Applies the previously recorded priority to the HAL server's worker
+// thread via the scheduling policy service. Returns true on success or
+// when no priority was requested; failure is logged but non-fatal.
+bool StreamHalHidl::requestHalThreadPriority(pid_t threadPid, pid_t threadId) {
+ if (mHalThreadPriority == HAL_THREAD_PRIORITY_DEFAULT) {
+ return true;
+ }
+ int err = requestPriority(
+ threadPid, threadId,
+ mHalThreadPriority, false /*isForApp*/, true /*asynchronous*/);
+ ALOGE_IF(err, "failed to set priority %d for pid %d tid %d; error %d",
+ mHalThreadPriority, threadPid, threadId, err);
+ // Audio will still work, but latency will be higher and sometimes unacceptable.
+ return err == 0;
+}
+
+namespace {
+
+/* Notes on callback ownership.
+
+This is how (Hw)Binder ownership model looks like. The server implementation
+is owned by Binder framework (via sp<>). Proxies are owned by clients.
+When the last proxy disappears, Binder framework releases the server impl.
+
+Thus, it is not needed to keep any references to StreamOutCallback (this is
+the server impl) -- it will live as long as HAL server holds a strong ref to
+IStreamOutCallback proxy. We clear that reference by calling 'clearCallback'
+from the destructor of StreamOutHalHidl.
+
+The callback only keeps a weak reference to the stream. The stream is owned
+by AudioFlinger.
+
+*/
+
+// Server-side IStreamOutCallback implementation handed to the HAL. Holds
+// only a weak reference to the stream wrapper and silently drops
+// notifications that arrive after the wrapper is gone.
+struct StreamOutCallback : public IStreamOutCallback {
+ StreamOutCallback(const wp<StreamOutHalHidl>& stream) : mStream(stream) {}
+
+ // IStreamOutCallback implementation
+ Return<void> onWriteReady() override {
+ sp<StreamOutHalHidl> stream = mStream.promote();
+ if (stream != 0) {
+ stream->onWriteReady();
+ }
+ return Void();
+ }
+
+ Return<void> onDrainReady() override {
+ sp<StreamOutHalHidl> stream = mStream.promote();
+ if (stream != 0) {
+ stream->onDrainReady();
+ }
+ return Void();
+ }
+
+ Return<void> onError() override {
+ sp<StreamOutHalHidl> stream = mStream.promote();
+ if (stream != 0) {
+ stream->onError();
+ }
+ return Void();
+ }
+
+ private:
+ wp<StreamOutHalHidl> mStream;
+};
+
+} // namespace
+
+// Owns the IStreamOut proxy (mStream); the base class keeps a raw alias.
+// Message queues and the event flag group are created lazily by
+// prepareForWriting() on the first non-empty write.
+StreamOutHalHidl::StreamOutHalHidl(const sp<IStreamOut>& stream)
+ : StreamHalHidl(stream.get()), mStream(stream), mWriterClient(0), mEfGroup(nullptr) {
+}
+
+// Clears the HAL's reference to our callback, closes the stream, flushes
+// pending hwbinder commands, and deletes the event flag group created by
+// prepareForWriting().
+StreamOutHalHidl::~StreamOutHalHidl() {
+ if (mStream != 0) {
+ if (mCallback.unsafe_get()) {
+ processReturn("clearCallback", mStream->clearCallback());
+ }
+ processReturn("close", mStream->close());
+ mStream.clear();
+ }
+ mCallback.clear();
+ hardware::IPCThreadState::self()->flushCommands();
+ if (mEfGroup) {
+ EventFlag::deleteEventFlag(&mEfGroup);
+ }
+}
+
+// Returns the size of one audio frame in bytes.
+status_t StreamOutHalHidl::getFrameSize(size_t *size) {
+ if (mStream == 0) return NO_INIT;
+ return processReturn("getFrameSize", mStream->getFrameSize(), size);
+}
+
+// Returns the stream latency in ms. When called from the thread that
+// writes (and the command queue exists), the request is routed through the
+// HAL writer thread to keep it serialized with writes; otherwise a direct
+// HAL call is made.
+status_t StreamOutHalHidl::getLatency(uint32_t *latency) {
+ if (mStream == 0) return NO_INIT;
+ if (mWriterClient == gettid() && mCommandMQ) {
+ return callWriterThread(
+ WriteCommand::GET_LATENCY, "getLatency", nullptr, 0,
+ [&](const WriteStatus& writeStatus) {
+ *latency = writeStatus.reply.latencyMs;
+ });
+ } else {
+ return processReturn("getLatency", mStream->getLatency(), latency);
+ }
+}
+
+// Sets the left/right channel volumes on the stream.
+status_t StreamOutHalHidl::setVolume(float left, float right) {
+ if (mStream == 0) return NO_INIT;
+ return processReturn("setVolume", mStream->setVolume(left, right));
+}
+
+// Writes audio data to the HAL through the fast message queue, lazily
+// creating the queues sized to the first non-empty request. *written is
+// always set (0 on failure). An initial zero-byte write is tolerated and
+// returns OK without creating the queues.
+status_t StreamOutHalHidl::write(const void *buffer, size_t bytes, size_t *written) {
+ if (mStream == 0) return NO_INIT;
+ *written = 0;
+
+ if (bytes == 0 && !mDataMQ) {
+ // Can't determine the size for the MQ buffer. Wait for a non-empty write request.
+ ALOGW_IF(mCallback.unsafe_get(), "First call to async write with 0 bytes");
+ return OK;
+ }
+
+ status_t status;
+ if (!mDataMQ && (status = prepareForWriting(bytes)) != OK) {
+ return status;
+ }
+
+ return callWriterThread(
+ WriteCommand::WRITE, "write", static_cast<const uint8_t*>(buffer), bytes,
+ [&] (const WriteStatus& writeStatus) {
+ *written = writeStatus.reply.written;
+ // Diagnostics of the cause of b/35813113.
+ ALOGE_IF(*written > bytes,
+ "hal reports more bytes written than asked for: %lld > %lld",
+ (long long)*written, (long long)bytes);
+ });
+}
+
+// Sends one command (plus optional data) to the HAL writer thread over the
+// command/data message queues, wakes the HAL via the event flag, blocks on
+// the NOT_FULL flag for the status reply, and hands the WriteStatus to
+// 'callback' on success. Requires prepareForWriting() to have succeeded
+// (mCommandMQ/mDataMQ/mStatusMQ/mEfGroup valid). Oversized data is
+// truncated to the queue's free space with a warning; a failed data-queue
+// write is logged but the command still proceeds.
+status_t StreamOutHalHidl::callWriterThread(
+ WriteCommand cmd, const char* cmdName,
+ const uint8_t* data, size_t dataSize, StreamOutHalHidl::WriterCallback callback) {
+ if (!mCommandMQ->write(&cmd)) {
+ ALOGE("command message queue write failed for \"%s\"", cmdName);
+ return -EAGAIN;
+ }
+ if (data != nullptr) {
+ size_t availableToWrite = mDataMQ->availableToWrite();
+ if (dataSize > availableToWrite) {
+ ALOGW("truncating write data from %lld to %lld due to insufficient data queue space",
+ (long long)dataSize, (long long)availableToWrite);
+ dataSize = availableToWrite;
+ }
+ if (!mDataMQ->write(data, dataSize)) {
+ ALOGE("data message queue write failed for \"%s\"", cmdName);
+ }
+ }
+ mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY));
+
+ // TODO: Remove manual event flag handling once blocking MQ is implemented. b/33815422
+ uint32_t efState = 0;
+retry:
+ status_t ret = mEfGroup->wait(static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL), &efState);
+ if (efState & static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL)) {
+ WriteStatus writeStatus;
+ writeStatus.retval = Result::NOT_INITIALIZED;
+ if (!mStatusMQ->read(&writeStatus)) {
+ ALOGE("status message read failed for \"%s\"", cmdName);
+ }
+ if (writeStatus.retval == Result::OK) {
+ ret = OK;
+ callback(writeStatus);
+ } else {
+ ret = processReturn(cmdName, writeStatus.retval);
+ }
+ return ret;
+ }
+ if (ret == -EAGAIN || ret == -EINTR) {
+ // Spurious wakeup. This normally retries no more than once.
+ goto retry;
+ }
+ return ret;
+}
+
+// One-time setup of the write path: asks the HAL to create the command,
+// data (sized bufferSize), and status message queues plus its writer
+// thread; validates all three queues and the event flag before committing
+// them to members. Records the calling tid in mWriterClient so later calls
+// from this thread are routed through the writer thread. Returns NO_INIT
+// if any piece failed to initialize.
+status_t StreamOutHalHidl::prepareForWriting(size_t bufferSize) {
+ std::unique_ptr<CommandMQ> tempCommandMQ;
+ std::unique_ptr<DataMQ> tempDataMQ;
+ std::unique_ptr<StatusMQ> tempStatusMQ;
+ Result retval;
+ pid_t halThreadPid, halThreadTid;
+ Return<void> ret = mStream->prepareForWriting(
+ 1, bufferSize,
+ [&](Result r,
+ const CommandMQ::Descriptor& commandMQ,
+ const DataMQ::Descriptor& dataMQ,
+ const StatusMQ::Descriptor& statusMQ,
+ const ThreadInfo& halThreadInfo) {
+ retval = r;
+ if (retval == Result::OK) {
+ tempCommandMQ.reset(new CommandMQ(commandMQ));
+ tempDataMQ.reset(new DataMQ(dataMQ));
+ tempStatusMQ.reset(new StatusMQ(statusMQ));
+ if (tempDataMQ->isValid() && tempDataMQ->getEventFlagWord()) {
+ EventFlag::createEventFlag(tempDataMQ->getEventFlagWord(), &mEfGroup);
+ }
+ halThreadPid = halThreadInfo.pid;
+ halThreadTid = halThreadInfo.tid;
+ }
+ });
+ if (!ret.isOk() || retval != Result::OK) {
+ return processReturn("prepareForWriting", ret, retval);
+ }
+ if (!tempCommandMQ || !tempCommandMQ->isValid() ||
+ !tempDataMQ || !tempDataMQ->isValid() ||
+ !tempStatusMQ || !tempStatusMQ->isValid() ||
+ !mEfGroup) {
+ ALOGE_IF(!tempCommandMQ, "Failed to obtain command message queue for writing");
+ ALOGE_IF(tempCommandMQ && !tempCommandMQ->isValid(),
+ "Command message queue for writing is invalid");
+ ALOGE_IF(!tempDataMQ, "Failed to obtain data message queue for writing");
+ ALOGE_IF(tempDataMQ && !tempDataMQ->isValid(), "Data message queue for writing is invalid");
+ ALOGE_IF(!tempStatusMQ, "Failed to obtain status message queue for writing");
+ ALOGE_IF(tempStatusMQ && !tempStatusMQ->isValid(),
+ "Status message queue for writing is invalid");
+ ALOGE_IF(!mEfGroup, "Event flag creation for writing failed");
+ return NO_INIT;
+ }
+ requestHalThreadPriority(halThreadPid, halThreadTid);
+
+ mCommandMQ = std::move(tempCommandMQ);
+ mDataMQ = std::move(tempDataMQ);
+ mStatusMQ = std::move(tempStatusMQ);
+ mWriterClient = gettid();
+ return OK;
+}
+
+// Reads the number of frames rendered by the DSP into *dspFrames.
+status_t StreamOutHalHidl::getRenderPosition(uint32_t *dspFrames) {
+ if (mStream == 0) return NO_INIT;
+ // Initialized so retval is never read uninitialized on transport failure.
+ Result retval = Result::NOT_INITIALIZED;
+ Return<void> ret = mStream->getRenderPosition(
+ [&](Result r, uint32_t d) {
+ retval = r;
+ if (retval == Result::OK) {
+ *dspFrames = d;
+ }
+ });
+ return processReturn("getRenderPosition", ret, retval);
+}
+
+// Reads the timestamp (in HAL-defined units) of the next write into
+// *timestamp.
+status_t StreamOutHalHidl::getNextWriteTimestamp(int64_t *timestamp) {
+ if (mStream == 0) return NO_INIT;
+ // Initialized so retval is never read uninitialized on transport failure.
+ Result retval = Result::NOT_INITIALIZED;
+ Return<void> ret = mStream->getNextWriteTimestamp(
+ [&](Result r, int64_t t) {
+ retval = r;
+ if (retval == Result::OK) {
+ *timestamp = t;
+ }
+ });
+ // Log label fixed: was "getRenderPosition" (copy-paste).
+ return processReturn("getNextWriteTimestamp", ret, retval);
+}
+
+// Installs a framework callback for async write/drain/error notifications
+// by handing the HAL a new StreamOutCallback bound weakly to this object.
+// mCallback is only set after the HAL accepts the callback.
+status_t StreamOutHalHidl::setCallback(wp<StreamOutHalInterfaceCallback> callback) {
+ if (mStream == 0) return NO_INIT;
+ status_t status = processReturn(
+ "setCallback", mStream->setCallback(new StreamOutCallback(this)));
+ if (status == OK) {
+ mCallback = callback;
+ }
+ return status;
+}
+
+// Queries whether the HAL stream supports pause and resume operations.
+status_t StreamOutHalHidl::supportsPauseAndResume(bool *supportsPause, bool *supportsResume) {
+ if (mStream == 0) return NO_INIT;
+ Return<void> ret = mStream->supportsPauseAndResume(
+ [&](bool p, bool r) {
+ *supportsPause = p;
+ *supportsResume = r;
+ });
+ return processReturn("supportsPauseAndResume", ret);
+}
+
+// Pauses playback on the HAL stream.
+status_t StreamOutHalHidl::pause() {
+ if (mStream == 0) return NO_INIT;
+ return processReturn("pause", mStream->pause());
+}
+
+// Resumes playback on the HAL stream.
+status_t StreamOutHalHidl::resume() {
+ if (mStream == 0) return NO_INIT;
+ // Log label fixed: was "pause" (copy-paste).
+ return processReturn("resume", mStream->resume());
+}
+
+// Queries whether the HAL stream supports the drain operation.
+status_t StreamOutHalHidl::supportsDrain(bool *supportsDrain) {
+ if (mStream == 0) return NO_INIT;
+ return processReturn("supportsDrain", mStream->supportsDrain(), supportsDrain);
+}
+
+status_t StreamOutHalHidl::drain(bool earlyNotify) {
+ if (mStream == 0) return NO_INIT;
+ return processReturn(
+ "drain", mStream->drain(earlyNotify ? AudioDrain::EARLY_NOTIFY : AudioDrain::ALL));
+}
+
+status_t StreamOutHalHidl::flush() {
+ if (mStream == 0) return NO_INIT;
+ return processReturn("flush", mStream->flush());
+}
+
+status_t StreamOutHalHidl::getPresentationPosition(uint64_t *frames, struct timespec *timestamp) {
+ if (mStream == 0) return NO_INIT;
+ if (mWriterClient == gettid() && mCommandMQ) {
+ return callWriterThread(
+ WriteCommand::GET_PRESENTATION_POSITION, "getPresentationPosition", nullptr, 0,
+ [&](const WriteStatus& writeStatus) {
+ *frames = writeStatus.reply.presentationPosition.frames;
+ timestamp->tv_sec = writeStatus.reply.presentationPosition.timeStamp.tvSec;
+ timestamp->tv_nsec = writeStatus.reply.presentationPosition.timeStamp.tvNSec;
+ });
+ } else {
+ Result retval;
+ Return<void> ret = mStream->getPresentationPosition(
+ [&](Result r, uint64_t hidlFrames, const TimeSpec& hidlTimeStamp) {
+ retval = r;
+ if (retval == Result::OK) {
+ *frames = hidlFrames;
+ timestamp->tv_sec = hidlTimeStamp.tvSec;
+ timestamp->tv_nsec = hidlTimeStamp.tvNSec;
+ }
+ });
+ return processReturn("getPresentationPosition", ret, retval);
+ }
+}
+
+void StreamOutHalHidl::onWriteReady() {
+ sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
+ if (callback == 0) return;
+ ALOGV("asyncCallback onWriteReady");
+ callback->onWriteReady();
+}
+
+void StreamOutHalHidl::onDrainReady() {
+ sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
+ if (callback == 0) return;
+ ALOGV("asyncCallback onDrainReady");
+ callback->onDrainReady();
+}
+
+void StreamOutHalHidl::onError() {
+ sp<StreamOutHalInterfaceCallback> callback = mCallback.promote();
+ if (callback == 0) return;
+ ALOGV("asyncCallback onError");
+ callback->onError();
+}
+
+
+StreamInHalHidl::StreamInHalHidl(const sp<IStreamIn>& stream)
+ : StreamHalHidl(stream.get()), mStream(stream), mReaderClient(0), mEfGroup(nullptr) {
+}
+
+StreamInHalHidl::~StreamInHalHidl() {
+ if (mStream != 0) {
+ processReturn("close", mStream->close());
+ mStream.clear();
+ hardware::IPCThreadState::self()->flushCommands();
+ }
+ if (mEfGroup) {
+ EventFlag::deleteEventFlag(&mEfGroup);
+ }
+}
+
+status_t StreamInHalHidl::getFrameSize(size_t *size) {
+ if (mStream == 0) return NO_INIT;
+ return processReturn("getFrameSize", mStream->getFrameSize(), size);
+}
+
+status_t StreamInHalHidl::setGain(float gain) {
+ if (mStream == 0) return NO_INIT;
+ return processReturn("setGain", mStream->setGain(gain));
+}
+
+status_t StreamInHalHidl::read(void *buffer, size_t bytes, size_t *read) {
+ if (mStream == 0) return NO_INIT;
+ *read = 0;
+
+ if (bytes == 0 && !mDataMQ) {
+ // Can't determine the size for the MQ buffer. Wait for a non-empty read request.
+ return OK;
+ }
+
+ status_t status;
+ if (!mDataMQ && (status = prepareForReading(bytes)) != OK) {
+ return status;
+ }
+
+ ReadParameters params;
+ params.command = ReadCommand::READ;
+ params.params.read = bytes;
+ return callReaderThread(params, "read",
+ [&](const ReadStatus& readStatus) {
+ const size_t availToRead = mDataMQ->availableToRead();
+ if (!mDataMQ->read(static_cast<uint8_t*>(buffer), std::min(bytes, availToRead))) {
+ ALOGE("data message queue read failed for \"read\"");
+ }
+ ALOGW_IF(availToRead != readStatus.reply.read,
+ "HAL read report inconsistent: mq = %d, status = %d",
+ (int32_t)availToRead, (int32_t)readStatus.reply.read);
+ *read = readStatus.reply.read;
+ });
+}
+
+status_t StreamInHalHidl::callReaderThread(
+ const ReadParameters& params, const char* cmdName,
+ StreamInHalHidl::ReaderCallback callback) {
+ if (!mCommandMQ->write(&params)) {
+ ALOGW("command message queue write failed");
+ return -EAGAIN;
+ }
+ mEfGroup->wake(static_cast<uint32_t>(MessageQueueFlagBits::NOT_FULL));
+
+ // TODO: Remove manual event flag handling once blocking MQ is implemented. b/33815422
+ uint32_t efState = 0;
+retry:
+ status_t ret = mEfGroup->wait(static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY), &efState);
+ if (efState & static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY)) {
+ ReadStatus readStatus;
+ readStatus.retval = Result::NOT_INITIALIZED;
+ if (!mStatusMQ->read(&readStatus)) {
+ ALOGE("status message read failed for \"%s\"", cmdName);
+ }
+ if (readStatus.retval == Result::OK) {
+ ret = OK;
+ callback(readStatus);
+ } else {
+ ret = processReturn(cmdName, readStatus.retval);
+ }
+ return ret;
+ }
+ if (ret == -EAGAIN || ret == -EINTR) {
+ // Spurious wakeup. This normally retries no more than once.
+ goto retry;
+ }
+ return ret;
+}
+
+status_t StreamInHalHidl::prepareForReading(size_t bufferSize) {
+ std::unique_ptr<CommandMQ> tempCommandMQ;
+ std::unique_ptr<DataMQ> tempDataMQ;
+ std::unique_ptr<StatusMQ> tempStatusMQ;
+ Result retval;
+ pid_t halThreadPid, halThreadTid;
+ Return<void> ret = mStream->prepareForReading(
+ 1, bufferSize,
+ [&](Result r,
+ const CommandMQ::Descriptor& commandMQ,
+ const DataMQ::Descriptor& dataMQ,
+ const StatusMQ::Descriptor& statusMQ,
+ const ThreadInfo& halThreadInfo) {
+ retval = r;
+ if (retval == Result::OK) {
+ tempCommandMQ.reset(new CommandMQ(commandMQ));
+ tempDataMQ.reset(new DataMQ(dataMQ));
+ tempStatusMQ.reset(new StatusMQ(statusMQ));
+ if (tempDataMQ->isValid() && tempDataMQ->getEventFlagWord()) {
+ EventFlag::createEventFlag(tempDataMQ->getEventFlagWord(), &mEfGroup);
+ }
+ halThreadPid = halThreadInfo.pid;
+ halThreadTid = halThreadInfo.tid;
+ }
+ });
+ if (!ret.isOk() || retval != Result::OK) {
+ return processReturn("prepareForReading", ret, retval);
+ }
+ if (!tempCommandMQ || !tempCommandMQ->isValid() ||
+ !tempDataMQ || !tempDataMQ->isValid() ||
+ !tempStatusMQ || !tempStatusMQ->isValid() ||
+ !mEfGroup) {
+ ALOGE_IF(!tempCommandMQ, "Failed to obtain command message queue for reading");
+ ALOGE_IF(tempCommandMQ && !tempCommandMQ->isValid(),
+ "Command message queue for reading is invalid");
+ ALOGE_IF(!tempDataMQ, "Failed to obtain data message queue for reading");
+ ALOGE_IF(tempDataMQ && !tempDataMQ->isValid(), "Data message queue for reading is invalid");
+ ALOGE_IF(!tempStatusMQ, "Failed to obtain status message queue for reading");
+ ALOGE_IF(tempStatusMQ && !tempStatusMQ->isValid(),
+ "Status message queue for reading is invalid");
+ ALOGE_IF(!mEfGroup, "Event flag creation for reading failed");
+ return NO_INIT;
+ }
+ requestHalThreadPriority(halThreadPid, halThreadTid);
+
+ mCommandMQ = std::move(tempCommandMQ);
+ mDataMQ = std::move(tempDataMQ);
+ mStatusMQ = std::move(tempStatusMQ);
+ mReaderClient = gettid();
+ return OK;
+}
+
+status_t StreamInHalHidl::getInputFramesLost(uint32_t *framesLost) {
+ if (mStream == 0) return NO_INIT;
+ return processReturn("getInputFramesLost", mStream->getInputFramesLost(), framesLost);
+}
+
+status_t StreamInHalHidl::getCapturePosition(int64_t *frames, int64_t *time) {
+ if (mStream == 0) return NO_INIT;
+ if (mReaderClient == gettid() && mCommandMQ) {
+ ReadParameters params;
+ params.command = ReadCommand::GET_CAPTURE_POSITION;
+ return callReaderThread(params, "getCapturePosition",
+ [&](const ReadStatus& readStatus) {
+ *frames = readStatus.reply.capturePosition.frames;
+ *time = readStatus.reply.capturePosition.time;
+ });
+ } else {
+ Result retval;
+ Return<void> ret = mStream->getCapturePosition(
+ [&](Result r, uint64_t hidlFrames, uint64_t hidlTime) {
+ retval = r;
+ if (retval == Result::OK) {
+ *frames = hidlFrames;
+ *time = hidlTime;
+ }
+ });
+ return processReturn("getCapturePosition", ret, retval);
+ }
+}
+
+} // namespace android
diff --git a/media/libaudiohal/StreamHalHidl.h b/media/libaudiohal/StreamHalHidl.h
new file mode 100644
index 0000000..a7df276
--- /dev/null
+++ b/media/libaudiohal/StreamHalHidl.h
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_STREAM_HAL_HIDL_H
+#define ANDROID_HARDWARE_STREAM_HAL_HIDL_H
+
+#include <atomic>
+
+#include <android/hardware/audio/2.0/IStream.h>
+#include <android/hardware/audio/2.0/IStreamIn.h>
+#include <android/hardware/audio/2.0/IStreamOut.h>
+#include <fmq/EventFlag.h>
+#include <fmq/MessageQueue.h>
+#include <media/audiohal/StreamHalInterface.h>
+
+#include "ConversionHelperHidl.h"
+
+using ::android::hardware::audio::V2_0::IStream;
+using ::android::hardware::audio::V2_0::IStreamIn;
+using ::android::hardware::audio::V2_0::IStreamOut;
+using ::android::hardware::EventFlag;
+using ::android::hardware::MessageQueue;
+using ::android::hardware::Return;
+using ReadParameters = ::android::hardware::audio::V2_0::IStreamIn::ReadParameters;
+using ReadStatus = ::android::hardware::audio::V2_0::IStreamIn::ReadStatus;
+using WriteCommand = ::android::hardware::audio::V2_0::IStreamOut::WriteCommand;
+using WriteStatus = ::android::hardware::audio::V2_0::IStreamOut::WriteStatus;
+
+namespace android {
+
+class DeviceHalHidl;
+
+class StreamHalHidl : public virtual StreamHalInterface, public ConversionHelperHidl
+{
+ public:
+ // Return the sampling rate in Hz - eg. 44100.
+ virtual status_t getSampleRate(uint32_t *rate);
+
+ // Return size of input/output buffer in bytes for this stream - eg. 4800.
+ virtual status_t getBufferSize(size_t *size);
+
+ // Return the channel mask.
+ virtual status_t getChannelMask(audio_channel_mask_t *mask);
+
+ // Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
+ virtual status_t getFormat(audio_format_t *format);
+
+ // Convenience method.
+ virtual status_t getAudioProperties(
+ uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format);
+
+ // Set audio stream parameters.
+ virtual status_t setParameters(const String8& kvPairs);
+
+ // Get audio stream parameters.
+ virtual status_t getParameters(const String8& keys, String8 *values);
+
+ // Add or remove the effect on the stream.
+ virtual status_t addEffect(sp<EffectHalInterface> effect);
+ virtual status_t removeEffect(sp<EffectHalInterface> effect);
+
+ // Put the audio hardware input/output into standby mode.
+ virtual status_t standby();
+
+ virtual status_t dump(int fd);
+
+ // Start a stream operating in mmap mode.
+ virtual status_t start();
+
+ // Stop a stream operating in mmap mode.
+ virtual status_t stop();
+
+ // Retrieve information on the data buffer in mmap mode.
+ virtual status_t createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info);
+
+ // Get current read/write position in the mmap buffer
+ virtual status_t getMmapPosition(struct audio_mmap_position *position);
+
+ // Set the priority of the thread that interacts with the HAL
+ // (must match the priority of the audioflinger's thread that calls 'read' / 'write')
+ virtual status_t setHalThreadPriority(int priority);
+
+ protected:
+ // Subclasses can not be constructed directly by clients.
+ explicit StreamHalHidl(IStream *stream);
+
+ // The destructor automatically closes the stream.
+ virtual ~StreamHalHidl();
+
+ bool requestHalThreadPriority(pid_t threadPid, pid_t threadId);
+
+ private:
+ const int HAL_THREAD_PRIORITY_DEFAULT = -1;
+ IStream *mStream;
+ int mHalThreadPriority;
+};
+
+class StreamOutHalHidl : public StreamOutHalInterface, public StreamHalHidl {
+ public:
+ // Return the frame size (number of bytes per sample) of a stream.
+ virtual status_t getFrameSize(size_t *size);
+
+ // Return the audio hardware driver estimated latency in milliseconds.
+ virtual status_t getLatency(uint32_t *latency);
+
+ // Use this method in situations where audio mixing is done in the hardware.
+ virtual status_t setVolume(float left, float right);
+
+ // Write audio buffer to driver.
+ virtual status_t write(const void *buffer, size_t bytes, size_t *written);
+
+ // Return the number of audio frames written by the audio dsp to DAC since
+ // the output has exited standby.
+ virtual status_t getRenderPosition(uint32_t *dspFrames);
+
+ // Get the local time at which the next write to the audio driver will be presented.
+ virtual status_t getNextWriteTimestamp(int64_t *timestamp);
+
+ // Set the callback for notifying completion of non-blocking write and drain.
+ virtual status_t setCallback(wp<StreamOutHalInterfaceCallback> callback);
+
+ // Returns whether pause and resume operations are supported.
+ virtual status_t supportsPauseAndResume(bool *supportsPause, bool *supportsResume);
+
+ // Notifies to the audio driver to pause playback.
+ virtual status_t pause();
+
+ // Notifies to the audio driver to resume playback following a pause.
+ virtual status_t resume();
+
+ // Returns whether drain operation is supported.
+ virtual status_t supportsDrain(bool *supportsDrain);
+
+ // Requests notification when data buffered by the driver/hardware has been played.
+ virtual status_t drain(bool earlyNotify);
+
+ // Notifies to the audio driver to flush the queued data.
+ virtual status_t flush();
+
+ // Return a recent count of the number of audio frames presented to an external observer.
+ virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp);
+
+ // Methods used by StreamOutCallback (HIDL).
+ void onWriteReady();
+ void onDrainReady();
+ void onError();
+
+ private:
+ friend class DeviceHalHidl;
+ typedef MessageQueue<WriteCommand, hardware::kSynchronizedReadWrite> CommandMQ;
+ typedef MessageQueue<uint8_t, hardware::kSynchronizedReadWrite> DataMQ;
+ typedef MessageQueue<WriteStatus, hardware::kSynchronizedReadWrite> StatusMQ;
+
+ wp<StreamOutHalInterfaceCallback> mCallback;
+ sp<IStreamOut> mStream;
+ std::unique_ptr<CommandMQ> mCommandMQ;
+ std::unique_ptr<DataMQ> mDataMQ;
+ std::unique_ptr<StatusMQ> mStatusMQ;
+ std::atomic<pid_t> mWriterClient;
+ EventFlag* mEfGroup;
+
+ // Can not be constructed directly by clients.
+ StreamOutHalHidl(const sp<IStreamOut>& stream);
+
+ virtual ~StreamOutHalHidl();
+
+ using WriterCallback = std::function<void(const WriteStatus& writeStatus)>;
+ status_t callWriterThread(
+ WriteCommand cmd, const char* cmdName,
+ const uint8_t* data, size_t dataSize, WriterCallback callback);
+ status_t prepareForWriting(size_t bufferSize);
+};
+
+class StreamInHalHidl : public StreamInHalInterface, public StreamHalHidl {
+ public:
+ // Return the frame size (number of bytes per sample) of a stream.
+ virtual status_t getFrameSize(size_t *size);
+
+ // Set the input gain for the audio driver.
+ virtual status_t setGain(float gain);
+
+ // Read audio buffer in from driver.
+ virtual status_t read(void *buffer, size_t bytes, size_t *read);
+
+ // Return the amount of input frames lost in the audio driver.
+ virtual status_t getInputFramesLost(uint32_t *framesLost);
+
+ // Return a recent count of the number of audio frames received and
+ // the clock time associated with that frame count.
+ virtual status_t getCapturePosition(int64_t *frames, int64_t *time);
+
+ private:
+ friend class DeviceHalHidl;
+ typedef MessageQueue<ReadParameters, hardware::kSynchronizedReadWrite> CommandMQ;
+ typedef MessageQueue<uint8_t, hardware::kSynchronizedReadWrite> DataMQ;
+ typedef MessageQueue<ReadStatus, hardware::kSynchronizedReadWrite> StatusMQ;
+
+ sp<IStreamIn> mStream;
+ std::unique_ptr<CommandMQ> mCommandMQ;
+ std::unique_ptr<DataMQ> mDataMQ;
+ std::unique_ptr<StatusMQ> mStatusMQ;
+ std::atomic<pid_t> mReaderClient;
+ EventFlag* mEfGroup;
+
+ // Can not be constructed directly by clients.
+ StreamInHalHidl(const sp<IStreamIn>& stream);
+
+ virtual ~StreamInHalHidl();
+
+ using ReaderCallback = std::function<void(const ReadStatus& readStatus)>;
+ status_t callReaderThread(
+ const ReadParameters& params, const char* cmdName, ReaderCallback callback);
+ status_t prepareForReading(size_t bufferSize);
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_STREAM_HAL_HIDL_H
diff --git a/media/libaudiohal/StreamHalLocal.cpp b/media/libaudiohal/StreamHalLocal.cpp
new file mode 100644
index 0000000..05800a0
--- /dev/null
+++ b/media/libaudiohal/StreamHalLocal.cpp
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "StreamHalLocal"
+//#define LOG_NDEBUG 0
+
+#include <hardware/audio.h>
+#include <utils/Log.h>
+
+#include "DeviceHalLocal.h"
+#include "EffectHalLocal.h"
+#include "StreamHalLocal.h"
+
+namespace android {
+
+StreamHalLocal::StreamHalLocal(audio_stream_t *stream, sp<DeviceHalLocal> device)
+ : mDevice(device), mStream(stream) {
+}
+
+StreamHalLocal::~StreamHalLocal() {
+ mStream = 0;
+ mDevice.clear();
+}
+
+status_t StreamHalLocal::getSampleRate(uint32_t *rate) {
+ *rate = mStream->get_sample_rate(mStream);
+ return OK;
+}
+
+status_t StreamHalLocal::getBufferSize(size_t *size) {
+ *size = mStream->get_buffer_size(mStream);
+ return OK;
+}
+
+status_t StreamHalLocal::getChannelMask(audio_channel_mask_t *mask) {
+ *mask = mStream->get_channels(mStream);
+ return OK;
+}
+
+status_t StreamHalLocal::getFormat(audio_format_t *format) {
+ *format = mStream->get_format(mStream);
+ return OK;
+}
+
+status_t StreamHalLocal::getAudioProperties(
+ uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) {
+ *sampleRate = mStream->get_sample_rate(mStream);
+ *mask = mStream->get_channels(mStream);
+ *format = mStream->get_format(mStream);
+ return OK;
+}
+
+status_t StreamHalLocal::setParameters(const String8& kvPairs) {
+ return mStream->set_parameters(mStream, kvPairs.string());
+}
+
+status_t StreamHalLocal::getParameters(const String8& keys, String8 *values) {
+ char *halValues = mStream->get_parameters(mStream, keys.string());
+ if (halValues != NULL) {
+ values->setTo(halValues);
+ free(halValues);
+ } else {
+ values->clear();
+ }
+ return OK;
+}
+
+status_t StreamHalLocal::addEffect(sp<EffectHalInterface> effect) {
+ LOG_ALWAYS_FATAL_IF(!effect->isLocal(), "Only local effects can be added for a local stream");
+ return mStream->add_audio_effect(mStream,
+ static_cast<EffectHalLocal*>(effect.get())->handle());
+}
+
+status_t StreamHalLocal::removeEffect(sp<EffectHalInterface> effect) {
+ LOG_ALWAYS_FATAL_IF(!effect->isLocal(), "Only local effects can be removed for a local stream");
+ return mStream->remove_audio_effect(mStream,
+ static_cast<EffectHalLocal*>(effect.get())->handle());
+}
+
+status_t StreamHalLocal::standby() {
+ return mStream->standby(mStream);
+}
+
+status_t StreamHalLocal::dump(int fd) {
+ return mStream->dump(mStream, fd);
+}
+
+status_t StreamHalLocal::setHalThreadPriority(int) {
+ // Don't need to do anything as local hal is executed by audioflinger directly
+ // on the same thread.
+ return OK;
+}
+
+StreamOutHalLocal::StreamOutHalLocal(audio_stream_out_t *stream, sp<DeviceHalLocal> device)
+ : StreamHalLocal(&stream->common, device), mStream(stream) {
+}
+
+StreamOutHalLocal::~StreamOutHalLocal() {
+ mCallback.clear();
+ mDevice->closeOutputStream(mStream);
+ mStream = 0;
+}
+
+status_t StreamOutHalLocal::getFrameSize(size_t *size) {
+ *size = audio_stream_out_frame_size(mStream);
+ return OK;
+}
+
+status_t StreamOutHalLocal::getLatency(uint32_t *latency) {
+ *latency = mStream->get_latency(mStream);
+ return OK;
+}
+
+status_t StreamOutHalLocal::setVolume(float left, float right) {
+ if (mStream->set_volume == NULL) return INVALID_OPERATION;
+ return mStream->set_volume(mStream, left, right);
+}
+
+status_t StreamOutHalLocal::write(const void *buffer, size_t bytes, size_t *written) {
+ ssize_t writeResult = mStream->write(mStream, buffer, bytes);
+ if (writeResult > 0) {
+ *written = writeResult;
+ return OK;
+ } else {
+ *written = 0;
+ return writeResult;
+ }
+}
+
+status_t StreamOutHalLocal::getRenderPosition(uint32_t *dspFrames) {
+ return mStream->get_render_position(mStream, dspFrames);
+}
+
+status_t StreamOutHalLocal::getNextWriteTimestamp(int64_t *timestamp) {
+ if (mStream->get_next_write_timestamp == NULL) return INVALID_OPERATION;
+ return mStream->get_next_write_timestamp(mStream, timestamp);
+}
+
+status_t StreamOutHalLocal::setCallback(wp<StreamOutHalInterfaceCallback> callback) {
+ if (mStream->set_callback == NULL) return INVALID_OPERATION;
+ status_t result = mStream->set_callback(mStream, StreamOutHalLocal::asyncCallback, this);
+ if (result == OK) {
+ mCallback = callback;
+ }
+ return result;
+}
+
+// static
+int StreamOutHalLocal::asyncCallback(stream_callback_event_t event, void*, void *cookie) {
+ // We act as if we gave a wp<StreamOutHalLocal> to HAL. This way we should handle
+ // correctly the case when the callback is invoked while StreamOutHalLocal's destructor is
+ // already running, because the destructor is invoked after the refcount has been atomically
+ // decremented.
+ wp<StreamOutHalLocal> weakSelf(static_cast<StreamOutHalLocal*>(cookie));
+ sp<StreamOutHalLocal> self = weakSelf.promote();
+ if (self == 0) return 0;
+ sp<StreamOutHalInterfaceCallback> callback = self->mCallback.promote();
+ if (callback == 0) return 0;
+ ALOGV("asyncCallback() event %d", event);
+ switch (event) {
+ case STREAM_CBK_EVENT_WRITE_READY:
+ callback->onWriteReady();
+ break;
+ case STREAM_CBK_EVENT_DRAIN_READY:
+ callback->onDrainReady();
+ break;
+ case STREAM_CBK_EVENT_ERROR:
+ callback->onError();
+ break;
+ default:
+ ALOGW("asyncCallback() unknown event %d", event);
+ break;
+ }
+ return 0;
+}
+
+status_t StreamOutHalLocal::supportsPauseAndResume(bool *supportsPause, bool *supportsResume) {
+ *supportsPause = mStream->pause != NULL;
+ *supportsResume = mStream->resume != NULL;
+ return OK;
+}
+
+status_t StreamOutHalLocal::pause() {
+ if (mStream->pause == NULL) return INVALID_OPERATION;
+ return mStream->pause(mStream);
+}
+
+status_t StreamOutHalLocal::resume() {
+ if (mStream->resume == NULL) return INVALID_OPERATION;
+ return mStream->resume(mStream);
+}
+
+status_t StreamOutHalLocal::supportsDrain(bool *supportsDrain) {
+ *supportsDrain = mStream->drain != NULL;
+ return OK;
+}
+
+status_t StreamOutHalLocal::drain(bool earlyNotify) {
+ if (mStream->drain == NULL) return INVALID_OPERATION;
+ return mStream->drain(mStream, earlyNotify ? AUDIO_DRAIN_EARLY_NOTIFY : AUDIO_DRAIN_ALL);
+}
+
+status_t StreamOutHalLocal::flush() {
+ if (mStream->flush == NULL) return INVALID_OPERATION;
+ return mStream->flush(mStream);
+}
+
+status_t StreamOutHalLocal::getPresentationPosition(uint64_t *frames, struct timespec *timestamp) {
+ if (mStream->get_presentation_position == NULL) return INVALID_OPERATION;
+ return mStream->get_presentation_position(mStream, frames, timestamp);
+}
+
+status_t StreamOutHalLocal::start() {
+ if (mStream->start == NULL) return INVALID_OPERATION;
+ return mStream->start(mStream);
+}
+
+status_t StreamOutHalLocal::stop() {
+ if (mStream->stop == NULL) return INVALID_OPERATION;
+ return mStream->stop(mStream);
+}
+
+status_t StreamOutHalLocal::createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info) {
+ if (mStream->create_mmap_buffer == NULL) return INVALID_OPERATION;
+ return mStream->create_mmap_buffer(mStream, minSizeFrames, info);
+}
+
+status_t StreamOutHalLocal::getMmapPosition(struct audio_mmap_position *position) {
+ if (mStream->get_mmap_position == NULL) return INVALID_OPERATION;
+ return mStream->get_mmap_position(mStream, position);
+}
+
+StreamInHalLocal::StreamInHalLocal(audio_stream_in_t *stream, sp<DeviceHalLocal> device)
+ : StreamHalLocal(&stream->common, device), mStream(stream) {
+}
+
+StreamInHalLocal::~StreamInHalLocal() {
+ mDevice->closeInputStream(mStream);
+ mStream = 0;
+}
+
+status_t StreamInHalLocal::getFrameSize(size_t *size) {
+ *size = audio_stream_in_frame_size(mStream);
+ return OK;
+}
+
+status_t StreamInHalLocal::setGain(float gain) {
+ return mStream->set_gain(mStream, gain);
+}
+
+status_t StreamInHalLocal::read(void *buffer, size_t bytes, size_t *read) {
+ ssize_t readResult = mStream->read(mStream, buffer, bytes);
+ if (readResult > 0) {
+ *read = readResult;
+ return OK;
+ } else {
+ *read = 0;
+ return readResult;
+ }
+}
+
+status_t StreamInHalLocal::getInputFramesLost(uint32_t *framesLost) {
+ *framesLost = mStream->get_input_frames_lost(mStream);
+ return OK;
+}
+
+status_t StreamInHalLocal::getCapturePosition(int64_t *frames, int64_t *time) {
+ if (mStream->get_capture_position == NULL) return INVALID_OPERATION;
+ return mStream->get_capture_position(mStream, frames, time);
+}
+
+status_t StreamInHalLocal::start() {
+ if (mStream->start == NULL) return INVALID_OPERATION;
+ return mStream->start(mStream);
+}
+
+status_t StreamInHalLocal::stop() {
+ if (mStream->stop == NULL) return INVALID_OPERATION;
+ return mStream->stop(mStream);
+}
+
+status_t StreamInHalLocal::createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info) {
+ if (mStream->create_mmap_buffer == NULL) return INVALID_OPERATION;
+ return mStream->create_mmap_buffer(mStream, minSizeFrames, info);
+}
+
+status_t StreamInHalLocal::getMmapPosition(struct audio_mmap_position *position) {
+ if (mStream->get_mmap_position == NULL) return INVALID_OPERATION;
+ return mStream->get_mmap_position(mStream, position);
+}
+
+} // namespace android
diff --git a/media/libaudiohal/StreamHalLocal.h b/media/libaudiohal/StreamHalLocal.h
new file mode 100644
index 0000000..8c96c1f
--- /dev/null
+++ b/media/libaudiohal/StreamHalLocal.h
@@ -0,0 +1,206 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_STREAM_HAL_LOCAL_H
+#define ANDROID_HARDWARE_STREAM_HAL_LOCAL_H
+
+#include <media/audiohal/StreamHalInterface.h>
+
+namespace android {
+
+class DeviceHalLocal;
+
+class StreamHalLocal : public virtual StreamHalInterface
+{
+ public:
+ // Return the sampling rate in Hz - eg. 44100.
+ virtual status_t getSampleRate(uint32_t *rate);
+
+ // Return size of input/output buffer in bytes for this stream - eg. 4800.
+ virtual status_t getBufferSize(size_t *size);
+
+ // Return the channel mask.
+ virtual status_t getChannelMask(audio_channel_mask_t *mask);
+
+ // Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
+ virtual status_t getFormat(audio_format_t *format);
+
+ // Convenience method.
+ virtual status_t getAudioProperties(
+ uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format);
+
+ // Set audio stream parameters.
+ virtual status_t setParameters(const String8& kvPairs);
+
+ // Get audio stream parameters.
+ virtual status_t getParameters(const String8& keys, String8 *values);
+
+ // Add or remove the effect on the stream.
+ virtual status_t addEffect(sp<EffectHalInterface> effect);
+ virtual status_t removeEffect(sp<EffectHalInterface> effect);
+
+ // Put the audio hardware input/output into standby mode.
+ virtual status_t standby();
+
+ virtual status_t dump(int fd);
+
+ // Start a stream operating in mmap mode.
+ virtual status_t start() = 0;
+
+ // Stop a stream operating in mmap mode.
+ virtual status_t stop() = 0;
+
+ // Retrieve information on the data buffer in mmap mode.
+ virtual status_t createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info) = 0;
+
+ // Get current read/write position in the mmap buffer
+ virtual status_t getMmapPosition(struct audio_mmap_position *position) = 0;
+
+ // Set the priority of the thread that interacts with the HAL
+ // (must match the priority of the audioflinger's thread that calls 'read' / 'write')
+ virtual status_t setHalThreadPriority(int priority);
+
+ protected:
+ // Subclasses can not be constructed directly by clients.
+ StreamHalLocal(audio_stream_t *stream, sp<DeviceHalLocal> device);
+
+ // The destructor automatically closes the stream.
+ virtual ~StreamHalLocal();
+
+ sp<DeviceHalLocal> mDevice;
+
+ private:
+ audio_stream_t *mStream;
+};
+
+class StreamOutHalLocal : public StreamOutHalInterface, public StreamHalLocal {
+ public:
+ // Return the frame size (number of bytes per sample) of a stream.
+ virtual status_t getFrameSize(size_t *size);
+
+ // Return the audio hardware driver estimated latency in milliseconds.
+ virtual status_t getLatency(uint32_t *latency);
+
+ // Use this method in situations where audio mixing is done in the hardware.
+ virtual status_t setVolume(float left, float right);
+
+ // Write audio buffer to driver.
+ virtual status_t write(const void *buffer, size_t bytes, size_t *written);
+
+ // Return the number of audio frames written by the audio dsp to DAC since
+ // the output has exited standby.
+ virtual status_t getRenderPosition(uint32_t *dspFrames);
+
+ // Get the local time at which the next write to the audio driver will be presented.
+ virtual status_t getNextWriteTimestamp(int64_t *timestamp);
+
+ // Set the callback for notifying completion of non-blocking write and drain.
+ virtual status_t setCallback(wp<StreamOutHalInterfaceCallback> callback);
+
+ // Returns whether pause and resume operations are supported.
+ virtual status_t supportsPauseAndResume(bool *supportsPause, bool *supportsResume);
+
+ // Notifies to the audio driver to pause playback.
+ virtual status_t pause();
+
+ // Notifies to the audio driver to resume playback following a pause.
+ virtual status_t resume();
+
+ // Returns whether drain operation is supported.
+ virtual status_t supportsDrain(bool *supportsDrain);
+
+ // Requests notification when data buffered by the driver/hardware has been played.
+ virtual status_t drain(bool earlyNotify);
+
+ // Notifies to the audio driver to flush the queued data.
+ virtual status_t flush();
+
+ // Return a recent count of the number of audio frames presented to an external observer.
+ virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp);
+
+ // Start a stream operating in mmap mode.
+ virtual status_t start();
+
+ // Stop a stream operating in mmap mode.
+ virtual status_t stop();
+
+ // Retrieve information on the data buffer in mmap mode.
+ virtual status_t createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info);
+
+ // Get current read/write position in the mmap buffer
+ virtual status_t getMmapPosition(struct audio_mmap_position *position);
+
+ private:
+ audio_stream_out_t *mStream;
+ wp<StreamOutHalInterfaceCallback> mCallback;
+
+ friend class DeviceHalLocal;
+
+ // Can not be constructed directly by clients.
+ StreamOutHalLocal(audio_stream_out_t *stream, sp<DeviceHalLocal> device);
+
+ virtual ~StreamOutHalLocal();
+
+ static int asyncCallback(stream_callback_event_t event, void *param, void *cookie);
+};
+
+class StreamInHalLocal : public StreamInHalInterface, public StreamHalLocal {
+ public:
+ // Return the frame size (number of bytes per sample) of a stream.
+ virtual status_t getFrameSize(size_t *size);
+
+ // Set the input gain for the audio driver.
+ virtual status_t setGain(float gain);
+
+ // Read audio buffer in from driver.
+ virtual status_t read(void *buffer, size_t bytes, size_t *read);
+
+ // Return the amount of input frames lost in the audio driver.
+ virtual status_t getInputFramesLost(uint32_t *framesLost);
+
+ // Return a recent count of the number of audio frames received and
+ // the clock time associated with that frame count.
+ virtual status_t getCapturePosition(int64_t *frames, int64_t *time);
+
+ // Start a stream operating in mmap mode.
+ virtual status_t start();
+
+ // Stop a stream operating in mmap mode.
+ virtual status_t stop();
+
+ // Retrieve information on the data buffer in mmap mode.
+ virtual status_t createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info);
+
+ // Get current read/write position in the mmap buffer
+ virtual status_t getMmapPosition(struct audio_mmap_position *position);
+
+ private:
+ audio_stream_in_t *mStream;
+
+ friend class DeviceHalLocal;
+
+ // Can not be constructed directly by clients.
+ StreamInHalLocal(audio_stream_in_t *stream, sp<DeviceHalLocal> device);
+
+ virtual ~StreamInHalLocal();
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_STREAM_HAL_LOCAL_H
diff --git a/media/libaudiohal/include/DeviceHalInterface.h b/media/libaudiohal/include/DeviceHalInterface.h
new file mode 100644
index 0000000..caf01be
--- /dev/null
+++ b/media/libaudiohal/include/DeviceHalInterface.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_DEVICE_HAL_INTERFACE_H
+#define ANDROID_HARDWARE_DEVICE_HAL_INTERFACE_H
+
+#include <system/audio.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+#include <utils/String8.h>
+
+namespace android {
+
+class StreamInHalInterface;
+class StreamOutHalInterface;
+
+class DeviceHalInterface : public RefBase
+{
+ public:
+ // Sets the value of 'devices' to a bitmask of 1 or more values of audio_devices_t.
+ virtual status_t getSupportedDevices(uint32_t *devices) = 0;
+
+ // Check to see if the audio hardware interface has been initialized.
+ virtual status_t initCheck() = 0;
+
+ // Set the audio volume of a voice call. Range is between 0.0 and 1.0.
+ virtual status_t setVoiceVolume(float volume) = 0;
+
+ // Set the audio volume for all audio activities other than voice call.
+ virtual status_t setMasterVolume(float volume) = 0;
+
+ // Get the current master volume value for the HAL.
+ virtual status_t getMasterVolume(float *volume) = 0;
+
+ // Called when the audio mode changes.
+ virtual status_t setMode(audio_mode_t mode) = 0;
+
+ // Muting control.
+ virtual status_t setMicMute(bool state) = 0;
+ virtual status_t getMicMute(bool *state) = 0;
+ virtual status_t setMasterMute(bool state) = 0;
+ virtual status_t getMasterMute(bool *state) = 0;
+
+ // Set global audio parameters.
+ virtual status_t setParameters(const String8& kvPairs) = 0;
+
+ // Get global audio parameters.
+ virtual status_t getParameters(const String8& keys, String8 *values) = 0;
+
+ // Returns audio input buffer size according to parameters passed.
+ virtual status_t getInputBufferSize(const struct audio_config *config,
+ size_t *size) = 0;
+
+ // Creates and opens the audio hardware output stream. The stream is closed
+ // by releasing all references to the returned object.
+ virtual status_t openOutputStream(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ audio_output_flags_t flags,
+ struct audio_config *config,
+ const char *address,
+ sp<StreamOutHalInterface> *outStream) = 0;
+
+ // Creates and opens the audio hardware input stream. The stream is closed
+ // by releasing all references to the returned object.
+ virtual status_t openInputStream(
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ struct audio_config *config,
+ audio_input_flags_t flags,
+ const char *address,
+ audio_source_t source,
+ sp<StreamInHalInterface> *inStream) = 0;
+
+ // Returns whether createAudioPatch and releaseAudioPatch operations are supported.
+ virtual status_t supportsAudioPatches(bool *supportsPatches) = 0;
+
+ // Creates an audio patch between several source and sink ports.
+ virtual status_t createAudioPatch(
+ unsigned int num_sources,
+ const struct audio_port_config *sources,
+ unsigned int num_sinks,
+ const struct audio_port_config *sinks,
+ audio_patch_handle_t *patch) = 0;
+
+ // Releases an audio patch.
+ virtual status_t releaseAudioPatch(audio_patch_handle_t patch) = 0;
+
+ // Fills the list of supported attributes for a given audio port.
+ virtual status_t getAudioPort(struct audio_port *port) = 0;
+
+ // Set audio port configuration.
+ virtual status_t setAudioPortConfig(const struct audio_port_config *config) = 0;
+
+ virtual status_t dump(int fd) = 0;
+
+ protected:
+ // Subclasses can not be constructed directly by clients.
+ DeviceHalInterface() {}
+
+ // The destructor automatically closes the device.
+ virtual ~DeviceHalInterface() {}
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_DEVICE_HAL_INTERFACE_H
diff --git a/media/libaudiohal/include/DevicesFactoryHalInterface.h b/media/libaudiohal/include/DevicesFactoryHalInterface.h
new file mode 100644
index 0000000..14af384
--- /dev/null
+++ b/media/libaudiohal/include/DevicesFactoryHalInterface.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_INTERFACE_H
+#define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_INTERFACE_H
+
+#include <media/audiohal/DeviceHalInterface.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+class DevicesFactoryHalInterface : public RefBase
+{
+ public:
+ // Opens a device with the specified name. To close the device, it is
+ // necessary to release references to the returned object.
+ virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device) = 0;
+
+ static sp<DevicesFactoryHalInterface> create();
+
+ protected:
+ // Subclasses can not be constructed directly by clients.
+ DevicesFactoryHalInterface() {}
+
+ virtual ~DevicesFactoryHalInterface() {}
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_INTERFACE_H
diff --git a/media/libaudiohal/include/EffectBufferHalInterface.h b/media/libaudiohal/include/EffectBufferHalInterface.h
new file mode 100644
index 0000000..e862f6e
--- /dev/null
+++ b/media/libaudiohal/include/EffectBufferHalInterface.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_EFFECT_BUFFER_HAL_INTERFACE_H
+#define ANDROID_HARDWARE_EFFECT_BUFFER_HAL_INTERFACE_H
+
+#include <system/audio_effect.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+// Abstraction for an audio buffer. It may be a "mirror" for
+// a buffer that the effect chain doesn't own, or a buffer owned by
+// the effect chain.
+class EffectBufferHalInterface : public RefBase
+{
+ public:
+ virtual audio_buffer_t* audioBuffer() = 0;
+ virtual void* externalData() const = 0;
+ // To be used when interacting with the code that doesn't know about
+ // "mirrored" buffers.
+ virtual void* ptr() {
+ return externalData() != nullptr ? externalData() : audioBuffer()->raw;
+ }
+
+ virtual void setExternalData(void* external) = 0;
+ virtual void setFrameCount(size_t frameCount) = 0;
+ virtual bool checkFrameCountChange() = 0; // returns whether frame count has been updated
+ // since the last call to this method
+
+ virtual void update() = 0; // copies data from the external buffer, noop for allocated buffers
+ virtual void commit() = 0; // copies data to the external buffer, noop for allocated buffers
+ virtual void update(size_t size) = 0; // copies partial data from external buffer
+ virtual void commit(size_t size) = 0; // copies partial data to external buffer
+
+ static status_t allocate(size_t size, sp<EffectBufferHalInterface>* buffer);
+ static status_t mirror(void* external, size_t size, sp<EffectBufferHalInterface>* buffer);
+
+ protected:
+ // Subclasses can not be constructed directly by clients.
+ EffectBufferHalInterface() {}
+
+ virtual ~EffectBufferHalInterface() {}
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_EFFECT_BUFFER_HAL_INTERFACE_H
diff --git a/media/libaudiohal/include/EffectHalInterface.h b/media/libaudiohal/include/EffectHalInterface.h
new file mode 100644
index 0000000..92622aa
--- /dev/null
+++ b/media/libaudiohal/include/EffectHalInterface.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_EFFECT_HAL_INTERFACE_H
+#define ANDROID_HARDWARE_EFFECT_HAL_INTERFACE_H
+
+#include <media/audiohal/EffectBufferHalInterface.h>
+#include <system/audio_effect.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+class EffectHalInterface : public RefBase
+{
+ public:
+ // Set the input buffer.
+ virtual status_t setInBuffer(const sp<EffectBufferHalInterface>& buffer) = 0;
+
+ // Set the output buffer.
+ virtual status_t setOutBuffer(const sp<EffectBufferHalInterface>& buffer) = 0;
+
+ // Effect process function. Takes input samples as specified
+ // in input buffer descriptor and output processed samples as specified
+ // in output buffer descriptor.
+ virtual status_t process() = 0;
+
+ // Process reverse stream function. This function is used to pass
+ // a reference stream to the effect engine.
+ virtual status_t processReverse() = 0;
+
+ // Send a command and receive a response to/from effect engine.
+ virtual status_t command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
+ uint32_t *replySize, void *pReplyData) = 0;
+
+ // Returns the effect descriptor.
+ virtual status_t getDescriptor(effect_descriptor_t *pDescriptor) = 0;
+
+ // Free resources on the remote side.
+ virtual status_t close() = 0;
+
+ // Whether it's a local implementation.
+ virtual bool isLocal() const = 0;
+
+ protected:
+ // Subclasses can not be constructed directly by clients.
+ EffectHalInterface() {}
+
+ // The destructor automatically releases the effect.
+ virtual ~EffectHalInterface() {}
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_EFFECT_HAL_INTERFACE_H
diff --git a/media/libaudiohal/include/EffectsFactoryHalInterface.h b/media/libaudiohal/include/EffectsFactoryHalInterface.h
new file mode 100644
index 0000000..a616e86
--- /dev/null
+++ b/media/libaudiohal/include/EffectsFactoryHalInterface.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_INTERFACE_H
+#define ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_INTERFACE_H
+
+#include <media/audiohal/EffectHalInterface.h>
+#include <system/audio_effect.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+class EffectsFactoryHalInterface : public RefBase
+{
+ public:
+ // Returns the number of different effects in all loaded libraries.
+ virtual status_t queryNumberEffects(uint32_t *pNumEffects) = 0;
+
+ // Returns a descriptor of the next available effect.
+ virtual status_t getDescriptor(uint32_t index,
+ effect_descriptor_t *pDescriptor) = 0;
+
+ virtual status_t getDescriptor(const effect_uuid_t *pEffectUuid,
+ effect_descriptor_t *pDescriptor) = 0;
+
+ // Creates an effect engine of the specified type.
+ // To release the effect engine, it is necessary to release references
+ // to the returned effect object.
+ virtual status_t createEffect(const effect_uuid_t *pEffectUuid,
+ int32_t sessionId, int32_t ioId,
+ sp<EffectHalInterface> *effect) = 0;
+
+ virtual status_t dumpEffects(int fd) = 0;
+
+ static sp<EffectsFactoryHalInterface> create();
+
+ // Helper function to compare effect uuid to EFFECT_UUID_NULL.
+ static bool isNullUuid(const effect_uuid_t *pEffectUuid);
+
+ protected:
+ // Subclasses can not be constructed directly by clients.
+ EffectsFactoryHalInterface() {}
+
+ virtual ~EffectsFactoryHalInterface() {}
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_INTERFACE_H
diff --git a/media/libaudiohal/include/StreamHalInterface.h b/media/libaudiohal/include/StreamHalInterface.h
new file mode 100644
index 0000000..7419c34
--- /dev/null
+++ b/media/libaudiohal/include/StreamHalInterface.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_STREAM_HAL_INTERFACE_H
+#define ANDROID_HARDWARE_STREAM_HAL_INTERFACE_H
+
+#include <media/audiohal/EffectHalInterface.h>
+#include <system/audio.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+#include <utils/String8.h>
+
+namespace android {
+
+class StreamHalInterface : public virtual RefBase
+{
+ public:
+ // Return the sampling rate in Hz - eg. 44100.
+ virtual status_t getSampleRate(uint32_t *rate) = 0;
+
+ // Return size of input/output buffer in bytes for this stream - eg. 4800.
+ virtual status_t getBufferSize(size_t *size) = 0;
+
+ // Return the channel mask.
+ virtual status_t getChannelMask(audio_channel_mask_t *mask) = 0;
+
+ // Return the audio format - e.g. AUDIO_FORMAT_PCM_16_BIT.
+ virtual status_t getFormat(audio_format_t *format) = 0;
+
+ // Convenience method.
+ virtual status_t getAudioProperties(
+ uint32_t *sampleRate, audio_channel_mask_t *mask, audio_format_t *format) = 0;
+
+ // Set audio stream parameters.
+ virtual status_t setParameters(const String8& kvPairs) = 0;
+
+ // Get audio stream parameters.
+ virtual status_t getParameters(const String8& keys, String8 *values) = 0;
+
+    // Return the frame size (number of bytes per frame) of a stream.
+ virtual status_t getFrameSize(size_t *size) = 0;
+
+ // Add or remove the effect on the stream.
+ virtual status_t addEffect(sp<EffectHalInterface> effect) = 0;
+ virtual status_t removeEffect(sp<EffectHalInterface> effect) = 0;
+
+ // Put the audio hardware input/output into standby mode.
+ virtual status_t standby() = 0;
+
+ virtual status_t dump(int fd) = 0;
+
+ // Start a stream operating in mmap mode.
+ virtual status_t start() = 0;
+
+ // Stop a stream operating in mmap mode.
+ virtual status_t stop() = 0;
+
+ // Retrieve information on the data buffer in mmap mode.
+ virtual status_t createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info) = 0;
+
+ // Get current read/write position in the mmap buffer
+ virtual status_t getMmapPosition(struct audio_mmap_position *position) = 0;
+
+ // Set the priority of the thread that interacts with the HAL
+ // (must match the priority of the audioflinger's thread that calls 'read' / 'write')
+ virtual status_t setHalThreadPriority(int priority) = 0;
+
+ protected:
+ // Subclasses can not be constructed directly by clients.
+ StreamHalInterface() {}
+
+ // The destructor automatically closes the stream.
+ virtual ~StreamHalInterface() {}
+};
+
+class StreamOutHalInterfaceCallback : public virtual RefBase {
+ public:
+ virtual void onWriteReady() {}
+ virtual void onDrainReady() {}
+ virtual void onError() {}
+
+ protected:
+ StreamOutHalInterfaceCallback() {}
+ virtual ~StreamOutHalInterfaceCallback() {}
+};
+
+class StreamOutHalInterface : public virtual StreamHalInterface {
+ public:
+ // Return the audio hardware driver estimated latency in milliseconds.
+ virtual status_t getLatency(uint32_t *latency) = 0;
+
+ // Use this method in situations where audio mixing is done in the hardware.
+ virtual status_t setVolume(float left, float right) = 0;
+
+ // Write audio buffer to driver.
+ virtual status_t write(const void *buffer, size_t bytes, size_t *written) = 0;
+
+ // Return the number of audio frames written by the audio dsp to DAC since
+ // the output has exited standby.
+ virtual status_t getRenderPosition(uint32_t *dspFrames) = 0;
+
+ // Get the local time at which the next write to the audio driver will be presented.
+ virtual status_t getNextWriteTimestamp(int64_t *timestamp) = 0;
+
+ // Set the callback for notifying completion of non-blocking write and drain.
+ // The callback must be owned by someone else. The output stream does not own it
+ // to avoid strong pointer loops.
+ virtual status_t setCallback(wp<StreamOutHalInterfaceCallback> callback) = 0;
+
+ // Returns whether pause and resume operations are supported.
+ virtual status_t supportsPauseAndResume(bool *supportsPause, bool *supportsResume) = 0;
+
+    // Notifies to the audio driver to pause playback.
+ virtual status_t pause() = 0;
+
+ // Notifies to the audio driver to resume playback following a pause.
+ virtual status_t resume() = 0;
+
+ // Returns whether drain operation is supported.
+ virtual status_t supportsDrain(bool *supportsDrain) = 0;
+
+ // Requests notification when data buffered by the driver/hardware has been played.
+ virtual status_t drain(bool earlyNotify) = 0;
+
+ // Notifies to the audio driver to flush the queued data.
+ virtual status_t flush() = 0;
+
+ // Return a recent count of the number of audio frames presented to an external observer.
+ virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp) = 0;
+
+ protected:
+ virtual ~StreamOutHalInterface() {}
+};
+
+class StreamInHalInterface : public virtual StreamHalInterface {
+ public:
+ // Set the input gain for the audio driver.
+ virtual status_t setGain(float gain) = 0;
+
+ // Read audio buffer in from driver.
+ virtual status_t read(void *buffer, size_t bytes, size_t *read) = 0;
+
+ // Return the amount of input frames lost in the audio driver.
+ virtual status_t getInputFramesLost(uint32_t *framesLost) = 0;
+
+ // Return a recent count of the number of audio frames received and
+ // the clock time associated with that frame count.
+ virtual status_t getCapturePosition(int64_t *frames, int64_t *time) = 0;
+
+ protected:
+ virtual ~StreamInHalInterface() {}
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_STREAM_HAL_INTERFACE_H
diff --git a/media/libaudiohal/include/hidl/HalDeathHandler.h b/media/libaudiohal/include/hidl/HalDeathHandler.h
new file mode 100644
index 0000000..c9b7084
--- /dev/null
+++ b/media/libaudiohal/include/hidl/HalDeathHandler.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_HIDL_HAL_DEATH_HANDLER_H
+#define ANDROID_HARDWARE_HIDL_HAL_DEATH_HANDLER_H
+
+#include <functional>
+#include <mutex>
+#include <unordered_map>
+
+#include <hidl/HidlSupport.h>
+#include <utils/Singleton.h>
+
+using android::hardware::hidl_death_recipient;
+using android::hidl::base::V1_0::IBase;
+
+namespace android {
+
+class HalDeathHandler : public hidl_death_recipient, private Singleton<HalDeathHandler> {
+ public:
+ typedef std::function<void()> AtExitHandler;
+
+ // Note that the exit handler gets called using a thread from
+ // RPC threadpool, thus it needs to be thread-safe.
+ void registerAtExitHandler(void* cookie, AtExitHandler handler);
+ void unregisterAtExitHandler(void* cookie);
+
+ // hidl_death_recipient
+ virtual void serviceDied(uint64_t cookie, const wp<IBase>& who);
+
+ // Used both for (un)registering handlers, and for passing to
+ // '(un)linkToDeath'.
+ static sp<HalDeathHandler> getInstance();
+
+ private:
+ friend class Singleton<HalDeathHandler>;
+ typedef std::unordered_map<void*, AtExitHandler> Handlers;
+
+ HalDeathHandler();
+ virtual ~HalDeathHandler();
+
+ sp<HalDeathHandler> mSelf; // Allows the singleton instance to live forever.
+ std::mutex mHandlersLock;
+ Handlers mHandlers;
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_HIDL_HAL_DEATH_HANDLER_H
diff --git a/media/libaudioprocessing/Android.mk b/media/libaudioprocessing/Android.mk
new file mode 100644
index 0000000..c850984
--- /dev/null
+++ b/media/libaudioprocessing/Android.mk
@@ -0,0 +1,39 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+ AudioMixer.cpp.arm \
+ AudioResampler.cpp.arm \
+ AudioResamplerCubic.cpp.arm \
+ AudioResamplerSinc.cpp.arm \
+ AudioResamplerDyn.cpp.arm \
+ BufferProviders.cpp \
+ RecordBufferConverter.cpp \
+
+LOCAL_C_INCLUDES := \
+ $(TOP) \
+ $(call include-path-for, audio-utils) \
+ $(LOCAL_PATH)/include \
+
+LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/include
+
+LOCAL_SHARED_LIBRARIES := \
+ libaudiohal \
+ libaudioutils \
+ libcutils \
+ liblog \
+ libnbaio \
+ libsonic \
+ libutils \
+
+LOCAL_MODULE := libaudioprocessing
+
+LOCAL_CFLAGS := -Werror -Wall
+
+# uncomment to disable NEON on architectures that actually do support NEON, for benchmarking
+#LOCAL_CFLAGS += -DUSE_NEON=false
+
+include $(BUILD_SHARED_LIBRARY)
+
+include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/media/libaudioprocessing/AudioMixer.cpp b/media/libaudioprocessing/AudioMixer.cpp
new file mode 100644
index 0000000..ae1be09
--- /dev/null
+++ b/media/libaudioprocessing/AudioMixer.cpp
@@ -0,0 +1,2090 @@
+/*
+**
+** Copyright 2007, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "AudioMixer"
+//#define LOG_NDEBUG 0
+
+#include <stdint.h>
+#include <string.h>
+#include <stdlib.h>
+#include <math.h>
+#include <sys/types.h>
+
+#include <utils/Errors.h>
+#include <utils/Log.h>
+
+#include <cutils/compiler.h>
+#include <utils/Debug.h>
+
+#include <system/audio.h>
+
+#include <audio_utils/primitives.h>
+#include <audio_utils/format.h>
+#include <media/AudioMixer.h>
+
+#include "AudioMixerOps.h"
+
+// The FCC_2 macro refers to the Fixed Channel Count of 2 for the legacy integer mixer.
+#ifndef FCC_2
+#define FCC_2 2
+#endif
+
+// Look for MONO_HACK for any Mono hack involving legacy mono channel to
+// stereo channel conversion.
+
+/* VERY_VERY_VERBOSE_LOGGING will show exactly which process hook and track hook is
+ * being used. This is a considerable amount of log spam, so don't enable unless you
+ * are verifying the hook based code.
+ */
+//#define VERY_VERY_VERBOSE_LOGGING
+#ifdef VERY_VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+//define ALOGVV printf // for test-mixer.cpp
+#else
+#define ALOGVV(a...) do { } while (0)
+#endif
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
+#endif
+
+// TODO: Move these macro/inlines to a header file.
+template <typename T>
+static inline
+T max(const T& x, const T& y) {
+ return x > y ? x : y;
+}
+
+// Set kUseNewMixer to true to use the new mixer engine always. Otherwise the
+// original code will be used for stereo sinks, the new mixer for multichannel.
+static const bool kUseNewMixer = true;
+
+// Set kUseFloat to true to allow floating input into the mixer engine.
+// If kUseNewMixer is false, this is ignored or may be overridden internally
+// because of downmix/upmix support.
+static const bool kUseFloat = true;
+
+// Set to default copy buffer size in frames for input processing.
+static const size_t kCopyBufferFrameCount = 256;
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+template <typename T>
+T min(const T& a, const T& b)
+{
+ return a < b ? a : b;
+}
+
+// ----------------------------------------------------------------------------
+
+// Ensure mConfiguredNames bitmask is initialized properly on all architectures.
+// The value of 1 << x is undefined in C when x >= 32.
+
+AudioMixer::AudioMixer(size_t frameCount, uint32_t sampleRate, uint32_t maxNumTracks)
+ : mTrackNames(0), mConfiguredNames((maxNumTracks >= 32 ? 0 : 1 << maxNumTracks) - 1),
+ mSampleRate(sampleRate)
+{
+ ALOG_ASSERT(maxNumTracks <= MAX_NUM_TRACKS, "maxNumTracks %u > MAX_NUM_TRACKS %u",
+ maxNumTracks, MAX_NUM_TRACKS);
+
+ // AudioMixer is not yet capable of more than 32 active track inputs
+ ALOG_ASSERT(32 >= MAX_NUM_TRACKS, "bad MAX_NUM_TRACKS %d", MAX_NUM_TRACKS);
+
+ pthread_once(&sOnceControl, &sInitRoutine);
+
+ mState.enabledTracks= 0;
+ mState.needsChanged = 0;
+ mState.frameCount = frameCount;
+ mState.hook = process__nop;
+ mState.outputTemp = NULL;
+ mState.resampleTemp = NULL;
+ mState.mLog = &mDummyLog;
+ // mState.reserved
+
+ // FIXME Most of the following initialization is probably redundant since
+ // tracks[i] should only be referenced if (mTrackNames & (1 << i)) != 0
+ // and mTrackNames is initially 0. However, leave it here until that's verified.
+ track_t* t = mState.tracks;
+ for (unsigned i=0 ; i < MAX_NUM_TRACKS ; i++) {
+ t->resampler = NULL;
+ t->downmixerBufferProvider = NULL;
+ t->mReformatBufferProvider = NULL;
+ t->mTimestretchBufferProvider = NULL;
+ t++;
+ }
+
+}
+
+AudioMixer::~AudioMixer()
+{
+ track_t* t = mState.tracks;
+ for (unsigned i=0 ; i < MAX_NUM_TRACKS ; i++) {
+ delete t->resampler;
+ delete t->downmixerBufferProvider;
+ delete t->mReformatBufferProvider;
+ delete t->mTimestretchBufferProvider;
+ t++;
+ }
+ delete [] mState.outputTemp;
+ delete [] mState.resampleTemp;
+}
+
+void AudioMixer::setLog(NBLog::Writer *log)
+{
+ mState.mLog = log;
+}
+
+static inline audio_format_t selectMixerInFormat(audio_format_t inputFormat __unused) {
+ return kUseFloat && kUseNewMixer ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
+}
+
+int AudioMixer::getTrackName(audio_channel_mask_t channelMask,
+ audio_format_t format, int sessionId)
+{
+ if (!isValidPcmTrackFormat(format)) {
+ ALOGE("AudioMixer::getTrackName invalid format (%#x)", format);
+ return -1;
+ }
+ uint32_t names = (~mTrackNames) & mConfiguredNames;
+ if (names != 0) {
+ int n = __builtin_ctz(names);
+ ALOGV("add track (%d)", n);
+ // assume default parameters for the track, except where noted below
+ track_t* t = &mState.tracks[n];
+ t->needs = 0;
+
+ // Integer volume.
+ // Currently integer volume is kept for the legacy integer mixer.
+ // Will be removed when the legacy mixer path is removed.
+ t->volume[0] = UNITY_GAIN_INT;
+ t->volume[1] = UNITY_GAIN_INT;
+ t->prevVolume[0] = UNITY_GAIN_INT << 16;
+ t->prevVolume[1] = UNITY_GAIN_INT << 16;
+ t->volumeInc[0] = 0;
+ t->volumeInc[1] = 0;
+ t->auxLevel = 0;
+ t->auxInc = 0;
+ t->prevAuxLevel = 0;
+
+ // Floating point volume.
+ t->mVolume[0] = UNITY_GAIN_FLOAT;
+ t->mVolume[1] = UNITY_GAIN_FLOAT;
+ t->mPrevVolume[0] = UNITY_GAIN_FLOAT;
+ t->mPrevVolume[1] = UNITY_GAIN_FLOAT;
+ t->mVolumeInc[0] = 0.;
+ t->mVolumeInc[1] = 0.;
+ t->mAuxLevel = 0.;
+ t->mAuxInc = 0.;
+ t->mPrevAuxLevel = 0.;
+
+ // no initialization needed
+ // t->frameCount
+ t->channelCount = audio_channel_count_from_out_mask(channelMask);
+ t->enabled = false;
+ ALOGV_IF(audio_channel_mask_get_bits(channelMask) != AUDIO_CHANNEL_OUT_STEREO,
+ "Non-stereo channel mask: %d\n", channelMask);
+ t->channelMask = channelMask;
+ t->sessionId = sessionId;
+ // setBufferProvider(name, AudioBufferProvider *) is required before enable(name)
+ t->bufferProvider = NULL;
+ t->buffer.raw = NULL;
+ // no initialization needed
+ // t->buffer.frameCount
+ t->hook = NULL;
+ t->in = NULL;
+ t->resampler = NULL;
+ t->sampleRate = mSampleRate;
+ // setParameter(name, TRACK, MAIN_BUFFER, mixBuffer) is required before enable(name)
+ t->mainBuffer = NULL;
+ t->auxBuffer = NULL;
+ t->mInputBufferProvider = NULL;
+ t->mReformatBufferProvider = NULL;
+ t->downmixerBufferProvider = NULL;
+ t->mPostDownmixReformatBufferProvider = NULL;
+ t->mTimestretchBufferProvider = NULL;
+ t->mMixerFormat = AUDIO_FORMAT_PCM_16_BIT;
+ t->mFormat = format;
+ t->mMixerInFormat = selectMixerInFormat(format);
+ t->mDownmixRequiresFormat = AUDIO_FORMAT_INVALID; // no format required
+ t->mMixerChannelMask = audio_channel_mask_from_representation_and_bits(
+ AUDIO_CHANNEL_REPRESENTATION_POSITION, AUDIO_CHANNEL_OUT_STEREO);
+ t->mMixerChannelCount = audio_channel_count_from_out_mask(t->mMixerChannelMask);
+ t->mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
+ // Check the downmixing (or upmixing) requirements.
+ status_t status = t->prepareForDownmix();
+ if (status != OK) {
+ ALOGE("AudioMixer::getTrackName invalid channelMask (%#x)", channelMask);
+ return -1;
+ }
+ // prepareForDownmix() may change mDownmixRequiresFormat
+ ALOGVV("mMixerFormat:%#x mMixerInFormat:%#x\n", t->mMixerFormat, t->mMixerInFormat);
+ t->prepareForReformat();
+ mTrackNames |= 1 << n;
+ return TRACK0 + n;
+ }
+ ALOGE("AudioMixer::getTrackName out of available tracks");
+ return -1;
+}
+
+void AudioMixer::invalidateState(uint32_t mask)
+{
+ if (mask != 0) {
+ mState.needsChanged |= mask;
+ mState.hook = process__validate;
+ }
+ }
+
+// Called when channel masks have changed for a track name
+// TODO: Fix DownmixerBufferProvider not to (possibly) change mixer input format,
+// which will simplify this logic.
+//
+// Returns true if either mask actually changed (the caller must then
+// invalidate the mixer state), false if both masks are unchanged.
+// On change, rebuilds the downmixer, the reformatters (if the downmixer's
+// required format changed), and the resampler (if the mixer channel count
+// changed), in that order.
+bool AudioMixer::setChannelMasks(int name,
+ audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask) {
+ track_t &track = mState.tracks[name];
+
+ if (trackChannelMask == track.channelMask
+ && mixerChannelMask == track.mMixerChannelMask) {
+ return false; // no need to change
+ }
+ // always recompute for both channel masks even if only one has changed.
+ const uint32_t trackChannelCount = audio_channel_count_from_out_mask(trackChannelMask);
+ const uint32_t mixerChannelCount = audio_channel_count_from_out_mask(mixerChannelMask);
+ const bool mixerChannelCountChanged = track.mMixerChannelCount != mixerChannelCount;
+
+ ALOG_ASSERT((trackChannelCount <= MAX_NUM_CHANNELS_TO_DOWNMIX)
+ && trackChannelCount
+ && mixerChannelCount);
+ track.channelMask = trackChannelMask;
+ track.channelCount = trackChannelCount;
+ track.mMixerChannelMask = mixerChannelMask;
+ track.mMixerChannelCount = mixerChannelCount;
+
+ // channel masks have changed, does this track need a downmixer?
+ // update to try using our desired format (if we aren't already using it)
+ const audio_format_t prevDownmixerFormat = track.mDownmixRequiresFormat;
+ const status_t status = mState.tracks[name].prepareForDownmix();
+ ALOGE_IF(status != OK,
+ "prepareForDownmix error %d, track channel mask %#x, mixer channel mask %#x",
+ status, track.channelMask, track.mMixerChannelMask);
+
+ if (prevDownmixerFormat != track.mDownmixRequiresFormat) {
+ track.prepareForReformat(); // because of downmixer, track format may change!
+ }
+
+ if (track.resampler && mixerChannelCountChanged) {
+ // resampler channels may have changed.
+ const uint32_t resetToSampleRate = track.sampleRate;
+ delete track.resampler;
+ track.resampler = NULL;
+ track.sampleRate = mSampleRate; // without resampler, track rate is device sample rate.
+ // recreate the resampler with updated format, channels, saved sampleRate.
+ track.setResampler(resetToSampleRate /*trackSampleRate*/, mSampleRate /*devSampleRate*/);
+ }
+ return true;
+}
+
+// Tears down the track's downmixer, if any: resets the post-downmix
+// reformatter first (so it releases buffers obtained from the downmixer),
+// clears mDownmixRequiresFormat, then deletes the downmixer and relinks
+// the buffer-provider chain.
+void AudioMixer::track_t::unprepareForDownmix() {
+ ALOGV("AudioMixer::unprepareForDownmix(%p)", this);
+
+ if (mPostDownmixReformatBufferProvider != nullptr) {
+ // release any buffers held by the mPostDownmixReformatBufferProvider
+ // before deallocating the downmixerBufferProvider.
+ mPostDownmixReformatBufferProvider->reset();
+ }
+
+ mDownmixRequiresFormat = AUDIO_FORMAT_INVALID;
+ if (downmixerBufferProvider != NULL) {
+ // this track had previously been configured with a downmixer, delete it
+ ALOGV(" deleting old downmixer");
+ delete downmixerBufferProvider;
+ downmixerBufferProvider = NULL;
+ reconfigureBufferProviders();
+ } else {
+ ALOGV(" nothing to do, no downmixer to delete");
+ }
+}
+
+// Configures channel-mask conversion (downmix or upmix) for this track.
+// No conversion is installed when the masks already match or for the
+// internally-handled mono -> stereo case. Otherwise the effect-based
+// DownmixerBufferProvider is tried first (position masks only, PCM 16 bit),
+// and RemixBufferProvider is used as the always-successful fallback.
+// Returns NO_ERROR; on the effect path, mDownmixRequiresFormat is set to
+// PCM 16 so prepareForReformat() can insert the needed conversions.
+status_t AudioMixer::track_t::prepareForDownmix()
+{
+ ALOGV("AudioMixer::prepareForDownmix(%p) with mask 0x%x",
+ this, channelMask);
+
+ // discard the previous downmixer if there was one
+ unprepareForDownmix();
+ // MONO_HACK Only remix (upmix or downmix) if the track and mixer/device channel masks
+ // are not the same and not handled internally, as mono -> stereo currently is.
+ if (channelMask == mMixerChannelMask
+ || (channelMask == AUDIO_CHANNEL_OUT_MONO
+ && mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO)) {
+ return NO_ERROR;
+ }
+ // DownmixerBufferProvider is only used for position masks.
+ if (audio_channel_mask_get_representation(channelMask)
+ == AUDIO_CHANNEL_REPRESENTATION_POSITION
+ && DownmixerBufferProvider::isMultichannelCapable()) {
+ DownmixerBufferProvider* pDbp = new DownmixerBufferProvider(channelMask,
+ mMixerChannelMask,
+ AUDIO_FORMAT_PCM_16_BIT /* TODO: use mMixerInFormat, now only PCM 16 */,
+ sampleRate, sessionId, kCopyBufferFrameCount);
+
+ if (pDbp->isValid()) { // if constructor completed properly
+ mDownmixRequiresFormat = AUDIO_FORMAT_PCM_16_BIT; // PCM 16 bit required for downmix
+ downmixerBufferProvider = pDbp;
+ reconfigureBufferProviders();
+ return NO_ERROR;
+ }
+ delete pDbp;
+ }
+
+ // Effect downmixer does not accept the channel conversion. Let's use our remixer.
+ RemixBufferProvider* pRbp = new RemixBufferProvider(channelMask,
+ mMixerChannelMask, mMixerInFormat, kCopyBufferFrameCount);
+ // Remix always finds a conversion whereas Downmixer effect above may fail.
+ downmixerBufferProvider = pRbp;
+ reconfigureBufferProviders();
+ return NO_ERROR;
+}
+
+// Deletes both reformatting providers (pre-downmix and post-downmix), if
+// present, and relinks the buffer-provider chain when anything was removed.
+void AudioMixer::track_t::unprepareForReformat() {
+    ALOGV("AudioMixer::unprepareForReformat(%p)", this);
+    bool removedAny = false;
+    if (mReformatBufferProvider != NULL) {
+        delete mReformatBufferProvider;
+        mReformatBufferProvider = NULL;
+        removedAny = true;
+    }
+    if (mPostDownmixReformatBufferProvider != NULL) {
+        delete mPostDownmixReformatBufferProvider;
+        mPostDownmixReformatBufferProvider = NULL;
+        removedAny = true;
+    }
+    if (!removedAny) {
+        return; // nothing deleted, chain is unchanged
+    }
+    reconfigureBufferProviders();
+}
+
+// Installs format-conversion providers as needed:
+// - mReformatBufferProvider converts the track format to the downmixer's
+//   required format (or straight to mMixerInFormat when no downmixer format
+//   is required);
+// - mPostDownmixReformatBufferProvider then converts the downmixer output
+//   to mMixerInFormat.
+// Always returns NO_ERROR.
+status_t AudioMixer::track_t::prepareForReformat()
+{
+ ALOGV("AudioMixer::prepareForReformat(%p) with format %#x", this, mFormat);
+ // discard previous reformatters
+ unprepareForReformat();
+ // only configure reformatters as needed
+ const audio_format_t targetFormat = mDownmixRequiresFormat != AUDIO_FORMAT_INVALID
+ ? mDownmixRequiresFormat : mMixerInFormat;
+ bool requiresReconfigure = false;
+ if (mFormat != targetFormat) {
+ mReformatBufferProvider = new ReformatBufferProvider(
+ audio_channel_count_from_out_mask(channelMask),
+ mFormat,
+ targetFormat,
+ kCopyBufferFrameCount);
+ requiresReconfigure = true;
+ }
+ if (targetFormat != mMixerInFormat) {
+ mPostDownmixReformatBufferProvider = new ReformatBufferProvider(
+ audio_channel_count_from_out_mask(mMixerChannelMask),
+ targetFormat,
+ mMixerInFormat,
+ kCopyBufferFrameCount);
+ requiresReconfigure = true;
+ }
+ if (requiresReconfigure) {
+ reconfigureBufferProviders();
+ }
+ return NO_ERROR;
+}
+
+// Rebuilds the provider chain consumed by the mix hooks. Fixed order:
+//   mInputBufferProvider -> reformat -> downmix -> post-downmix reformat
+//   -> timestretch
+// with absent stages skipped; 'bufferProvider' is left pointing at the
+// head of the chain (the last provider linked below).
+void AudioMixer::track_t::reconfigureBufferProviders()
+{
+ bufferProvider = mInputBufferProvider;
+ if (mReformatBufferProvider) {
+ mReformatBufferProvider->setBufferProvider(bufferProvider);
+ bufferProvider = mReformatBufferProvider;
+ }
+ if (downmixerBufferProvider) {
+ downmixerBufferProvider->setBufferProvider(bufferProvider);
+ bufferProvider = downmixerBufferProvider;
+ }
+ if (mPostDownmixReformatBufferProvider) {
+ mPostDownmixReformatBufferProvider->setBufferProvider(bufferProvider);
+ bufferProvider = mPostDownmixReformatBufferProvider;
+ }
+ if (mTimestretchBufferProvider) {
+ mTimestretchBufferProvider->setBufferProvider(bufferProvider);
+ bufferProvider = mTimestretchBufferProvider;
+ }
+}
+
+// Frees all per-track resources (resampler, downmixer, reformatters,
+// timestretch provider), disables the track if needed, and returns the
+// name to the pool of available tracks. Out-of-range names are fatal.
+void AudioMixer::deleteTrackName(int name)
+{
+ ALOGV("AudioMixer::deleteTrackName(%d)", name);
+ name -= TRACK0;
+ LOG_ALWAYS_FATAL_IF(name < 0 || name >= (int)MAX_NUM_TRACKS, "bad track name %d", name);
+ ALOGV("deleteTrackName(%d)", name);
+ track_t& track(mState.tracks[ name ]);
+ if (track.enabled) {
+ track.enabled = false;
+ invalidateState(1<<name);
+ }
+ // delete the resampler
+ delete track.resampler;
+ track.resampler = NULL;
+ // delete the downmixer
+ mState.tracks[name].unprepareForDownmix();
+ // delete the reformatter
+ mState.tracks[name].unprepareForReformat();
+ // delete the timestretch provider
+ delete track.mTimestretchBufferProvider;
+ track.mTimestretchBufferProvider = NULL;
+ mTrackNames &= ~(1<<name);
+}
+
+// Enables track 'name' for mixing; on the disabled -> enabled transition the
+// mixer state is invalidated so hooks are recomputed. Already-enabled tracks
+// are left untouched.
+void AudioMixer::enable(int name)
+{
+    name -= TRACK0;
+    ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name);
+    track_t& track = mState.tracks[name];
+
+    if (track.enabled) {
+        return; // nothing to do
+    }
+    track.enabled = true;
+    ALOGV("enable(%d)", name);
+    invalidateState(1 << name);
+}
+
+// Disables track 'name'; on the enabled -> disabled transition the mixer
+// state is invalidated so hooks are recomputed. Already-disabled tracks are
+// left untouched.
+void AudioMixer::disable(int name)
+{
+    name -= TRACK0;
+    ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name);
+    track_t& track = mState.tracks[name];
+
+    if (!track.enabled) {
+        return; // nothing to do
+    }
+    track.enabled = false;
+    ALOGV("disable(%d)", name);
+    invalidateState(1 << name);
+}
+
+/* Sets the volume ramp variables for the AudioMixer.
+ *
+ * The volume ramp variables are used to transition from the previous
+ * volume to the set volume. ramp controls the duration of the transition.
+ * Its value is typically one state framecount period, but may also be 0,
+ * meaning "immediate."
+ *
+ * FIXME: 1) Volume ramp is enabled only if there is a nonzero integer increment
+ * even if there is a nonzero floating point increment (in that case, the volume
+ * change is immediate). This restriction should be changed when the legacy mixer
+ * is removed (see #2).
+ * FIXME: 2) Integer volume variables are used for Legacy mixing and should be removed
+ * when no longer needed.
+ *
+ * @param newVolume set volume target in floating point [0.0, 1.0].
+ * @param ramp number of frames to increment over. if ramp is 0, the volume
+ * should be set immediately. Currently ramp should not exceed 65535 (frames).
+ * @param pIntSetVolume pointer to the U4.12 integer target volume, set on return.
+ * @param pIntPrevVolume pointer to the U4.28 integer previous volume, set on return.
+ * @param pIntVolumeInc pointer to the U4.28 increment per output audio frame, set on return.
+ * @param pSetVolume pointer to the float target volume, set on return.
+ * @param pPrevVolume pointer to the float previous volume, set on return.
+ * @param pVolumeInc pointer to the float increment per output audio frame, set on return.
+ * @return true if the volume has changed, false if volume is same.
+ */
+static inline bool setVolumeRampVariables(float newVolume, int32_t ramp,
+ int16_t *pIntSetVolume, int32_t *pIntPrevVolume, int32_t *pIntVolumeInc,
+ float *pSetVolume, float *pPrevVolume, float *pVolumeInc) {
+ // check floating point volume to see if it is identical to the previously
+ // set volume.
+ // We do not use a tolerance here (and reject changes too small)
+ // as it may be confusing to use a different value than the one set.
+ // If the resulting volume is too small to ramp, it is a direct set of the volume.
+ if (newVolume == *pSetVolume) {
+ return false;
+ }
+ // Sanitize the requested volume: negatives, NaN, and subnormals are flushed
+ // to zero; anything above unity gain (including +inf) is clamped to unity.
+ if (newVolume < 0) {
+ newVolume = 0; // should not have negative volumes
+ } else {
+ switch (fpclassify(newVolume)) {
+ case FP_SUBNORMAL:
+ case FP_NAN:
+ newVolume = 0;
+ break;
+ case FP_ZERO:
+ break; // zero volume is fine
+ case FP_INFINITE:
+ // Infinite volume could be handled consistently since
+ // floating point math saturates at infinities,
+ // but we limit volume to unity gain float.
+ // ramp = 0; break;
+ //
+ newVolume = AudioMixer::UNITY_GAIN_FLOAT;
+ break;
+ case FP_NORMAL:
+ default:
+ // Floating point does not have problems with overflow wrap
+ // that integer has. However, we limit the volume to
+ // unity gain here.
+ // TODO: Revisit the volume limitation and perhaps parameterize.
+ if (newVolume > AudioMixer::UNITY_GAIN_FLOAT) {
+ newVolume = AudioMixer::UNITY_GAIN_FLOAT;
+ }
+ break;
+ }
+ }
+
+ // set floating point volume ramp
+ if (ramp != 0) {
+ // when the ramp completes, *pPrevVolume is set to *pSetVolume, so there
+ // is no computational mismatch; hence equality is checked here.
+ ALOGD_IF(*pPrevVolume != *pSetVolume, "previous float ramp hasn't finished,"
+ " prev:%f set_to:%f", *pPrevVolume, *pSetVolume);
+ const float inc = (newVolume - *pPrevVolume) / ramp; // could be inf, nan, subnormal
+ const float maxv = max(newVolume, *pPrevVolume); // could be inf, cannot be nan, subnormal
+
+ if (isnormal(inc) // inc must be a normal number (no subnormals, infinite, nan)
+ && maxv + inc != maxv) { // inc must make forward progress
+ *pVolumeInc = inc;
+ // ramp is set now.
+ // Note: if newVolume is 0, then near the end of the ramp,
+ // it may be possible that the ramped volume may be subnormal or
+ // temporarily negative by a small amount or subnormal due to floating
+ // point inaccuracies.
+ } else {
+ ramp = 0; // ramp not allowed
+ }
+ }
+
+ // compute and check integer volume, no need to check negative values
+ // The integer volume is limited to "unity_gain" to avoid wrapping and other
+ // audio artifacts, so it never reaches the range limit of U4.28.
+ // We safely use signed 16 and 32 bit integers here.
+ const float scaledVolume = newVolume * AudioMixer::UNITY_GAIN_INT; // not neg, subnormal, nan
+ const int32_t intVolume = (scaledVolume >= (float)AudioMixer::UNITY_GAIN_INT) ?
+ AudioMixer::UNITY_GAIN_INT : (int32_t)scaledVolume;
+
+ // set integer volume ramp
+ if (ramp != 0) {
+ // integer volume is U4.12 (to use 16 bit multiplies), but ramping uses U4.28.
+ // when the ramp completes, *pIntPrevVolume is set to *pIntSetVolume << 16, so there
+ // is no computational mismatch; hence equality is checked here.
+ ALOGD_IF(*pIntPrevVolume != *pIntSetVolume << 16, "previous int ramp hasn't finished,"
+ " prev:%d set_to:%d", *pIntPrevVolume, *pIntSetVolume << 16);
+ const int32_t inc = ((intVolume << 16) - *pIntPrevVolume) / ramp;
+
+ if (inc != 0) { // inc must make forward progress
+ *pIntVolumeInc = inc;
+ } else {
+ ramp = 0; // ramp not allowed
+ }
+ }
+
+ // if no ramp, or ramp not allowed, then clear float and integer increments
+ if (ramp == 0) {
+ *pVolumeInc = 0;
+ *pPrevVolume = newVolume;
+ *pIntVolumeInc = 0;
+ *pIntPrevVolume = intVolume << 16;
+ }
+ *pSetVolume = newVolume;
+ *pIntSetVolume = intVolume;
+ return true;
+}
+
+/* Sets a per-track mixer parameter.
+ *
+ * @param name   track name (TRACK0-based index into mState.tracks).
+ * @param target parameter group: TRACK, RESAMPLE, VOLUME/RAMP_VOLUME,
+ *               or TIMESTRETCH.
+ * @param param  the specific parameter within the group.
+ * @param value  depending on the parameter, either an integer smuggled in
+ *               the pointer value itself (valueInt) or a real pointer to
+ *               the value (valueBuf / float* / AudioPlaybackRate*).
+ *
+ * Any change that affects processing invalidates the mixer state for this
+ * track. Unknown target/param combinations are fatal (LOG_ALWAYS_FATAL).
+ */
+void AudioMixer::setParameter(int name, int target, int param, void *value)
+{
+ name -= TRACK0;
+ ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name);
+ track_t& track = mState.tracks[name];
+
+ // 'value' is interpreted per-parameter: as an int or as a buffer pointer.
+ int valueInt = static_cast<int>(reinterpret_cast<uintptr_t>(value));
+ int32_t *valueBuf = reinterpret_cast<int32_t*>(value);
+
+ switch (target) {
+
+ case TRACK:
+ switch (param) {
+ case CHANNEL_MASK: {
+ const audio_channel_mask_t trackChannelMask =
+ static_cast<audio_channel_mask_t>(valueInt);
+ if (setChannelMasks(name, trackChannelMask, track.mMixerChannelMask)) {
+ ALOGV("setParameter(TRACK, CHANNEL_MASK, %x)", trackChannelMask);
+ invalidateState(1 << name);
+ }
+ } break;
+ case MAIN_BUFFER:
+ if (track.mainBuffer != valueBuf) {
+ track.mainBuffer = valueBuf;
+ ALOGV("setParameter(TRACK, MAIN_BUFFER, %p)", valueBuf);
+ invalidateState(1 << name);
+ }
+ break;
+ case AUX_BUFFER:
+ if (track.auxBuffer != valueBuf) {
+ track.auxBuffer = valueBuf;
+ ALOGV("setParameter(TRACK, AUX_BUFFER, %p)", valueBuf);
+ invalidateState(1 << name);
+ }
+ break;
+ case FORMAT: {
+ audio_format_t format = static_cast<audio_format_t>(valueInt);
+ if (track.mFormat != format) {
+ ALOG_ASSERT(audio_is_linear_pcm(format), "Invalid format %#x", format);
+ track.mFormat = format;
+ ALOGV("setParameter(TRACK, FORMAT, %#x)", format);
+ // track format changed: rebuild the reformatting providers.
+ track.prepareForReformat();
+ invalidateState(1 << name);
+ }
+ } break;
+ // FIXME do we want to support setting the downmix type from AudioFlinger?
+ // for a specific track? or per mixer?
+ /* case DOWNMIX_TYPE:
+ break */
+ case MIXER_FORMAT: {
+ audio_format_t format = static_cast<audio_format_t>(valueInt);
+ if (track.mMixerFormat != format) {
+ track.mMixerFormat = format;
+ ALOGV("setParameter(TRACK, MIXER_FORMAT, %#x)", format);
+ }
+ } break;
+ case MIXER_CHANNEL_MASK: {
+ const audio_channel_mask_t mixerChannelMask =
+ static_cast<audio_channel_mask_t>(valueInt);
+ if (setChannelMasks(name, track.channelMask, mixerChannelMask)) {
+ ALOGV("setParameter(TRACK, MIXER_CHANNEL_MASK, %#x)", mixerChannelMask);
+ invalidateState(1 << name);
+ }
+ } break;
+ default:
+ LOG_ALWAYS_FATAL("setParameter track: bad param %d", param);
+ }
+ break;
+
+ case RESAMPLE:
+ switch (param) {
+ case SAMPLE_RATE:
+ ALOG_ASSERT(valueInt > 0, "bad sample rate %d", valueInt);
+ if (track.setResampler(uint32_t(valueInt), mSampleRate)) {
+ ALOGV("setParameter(RESAMPLE, SAMPLE_RATE, %u)",
+ uint32_t(valueInt));
+ invalidateState(1 << name);
+ }
+ break;
+ case RESET:
+ track.resetResampler();
+ invalidateState(1 << name);
+ break;
+ case REMOVE:
+ // drop the resampler; track then runs at the device sample rate.
+ delete track.resampler;
+ track.resampler = NULL;
+ track.sampleRate = mSampleRate;
+ invalidateState(1 << name);
+ break;
+ default:
+ LOG_ALWAYS_FATAL("setParameter resample: bad param %d", param);
+ }
+ break;
+
+ case RAMP_VOLUME:
+ case VOLUME:
+ // RAMP_VOLUME ramps over one frameCount period; VOLUME is immediate.
+ switch (param) {
+ case AUXLEVEL:
+ if (setVolumeRampVariables(*reinterpret_cast<float*>(value),
+ target == RAMP_VOLUME ? mState.frameCount : 0,
+ &track.auxLevel, &track.prevAuxLevel, &track.auxInc,
+ &track.mAuxLevel, &track.mPrevAuxLevel, &track.mAuxInc)) {
+ ALOGV("setParameter(%s, AUXLEVEL: %04x)",
+ target == VOLUME ? "VOLUME" : "RAMP_VOLUME", track.auxLevel);
+ invalidateState(1 << name);
+ }
+ break;
+ default:
+ if ((unsigned)param >= VOLUME0 && (unsigned)param < VOLUME0 + MAX_NUM_VOLUMES) {
+ if (setVolumeRampVariables(*reinterpret_cast<float*>(value),
+ target == RAMP_VOLUME ? mState.frameCount : 0,
+ &track.volume[param - VOLUME0], &track.prevVolume[param - VOLUME0],
+ &track.volumeInc[param - VOLUME0],
+ &track.mVolume[param - VOLUME0], &track.mPrevVolume[param - VOLUME0],
+ &track.mVolumeInc[param - VOLUME0])) {
+ ALOGV("setParameter(%s, VOLUME%d: %04x)",
+ target == VOLUME ? "VOLUME" : "RAMP_VOLUME", param - VOLUME0,
+ track.volume[param - VOLUME0]);
+ invalidateState(1 << name);
+ }
+ } else {
+ LOG_ALWAYS_FATAL("setParameter volume: bad param %d", param);
+ }
+ }
+ break;
+ case TIMESTRETCH:
+ switch (param) {
+ case PLAYBACK_RATE: {
+ const AudioPlaybackRate *playbackRate =
+ reinterpret_cast<AudioPlaybackRate*>(value);
+ ALOGW_IF(!isAudioPlaybackRateValid(*playbackRate),
+ "bad parameters speed %f, pitch %f",playbackRate->mSpeed,
+ playbackRate->mPitch);
+ if (track.setPlaybackRate(*playbackRate)) {
+ ALOGV("setParameter(TIMESTRETCH, PLAYBACK_RATE, STRETCH_MODE, FALLBACK_MODE "
+ "%f %f %d %d",
+ playbackRate->mSpeed,
+ playbackRate->mPitch,
+ playbackRate->mStretchMode,
+ playbackRate->mFallbackMode);
+ // invalidateState(1 << name);
+ }
+ } break;
+ default:
+ LOG_ALWAYS_FATAL("setParameter timestretch: bad param %d", param);
+ }
+ break;
+
+ default:
+ LOG_ALWAYS_FATAL("setParameter: bad target %d", target);
+ }
+}
+
+// Configures resampling for this track.
+// Returns true if the track sample rate changed (the caller must then
+// invalidate the mixer state), false otherwise. A resampler is created
+// lazily on the first rate mismatch; once one exists, later rate changes
+// only update 'sampleRate' (the resampler object is reused).
+bool AudioMixer::track_t::setResampler(uint32_t trackSampleRate, uint32_t devSampleRate)
+{
+ if (trackSampleRate != devSampleRate || resampler != NULL) {
+ if (sampleRate != trackSampleRate) {
+ sampleRate = trackSampleRate;
+ if (resampler == NULL) {
+ ALOGV("Creating resampler from track %d Hz to device %d Hz",
+ trackSampleRate, devSampleRate);
+ AudioResampler::src_quality quality;
+ // force lowest quality level resampler if use case isn't music or video
+ // FIXME this is flawed for dynamic sample rates, as we choose the resampler
+ // quality level based on the initial ratio, but that could change later.
+ // Should have a way to distinguish tracks with static ratios vs. dynamic ratios.
+ if (isMusicRate(trackSampleRate)) {
+ quality = AudioResampler::DEFAULT_QUALITY;
+ } else {
+ quality = AudioResampler::DYN_LOW_QUALITY;
+ }
+
+ // TODO: Remove MONO_HACK. Resampler sees #channels after the downmixer
+ // but if none exists, it is the channel count (1 for mono).
+ const int resamplerChannelCount = downmixerBufferProvider != NULL
+ ? mMixerChannelCount : channelCount;
+ ALOGVV("Creating resampler:"
+ " format(%#x) channels(%d) devSampleRate(%u) quality(%d)\n",
+ mMixerInFormat, resamplerChannelCount, devSampleRate, quality);
+ resampler = AudioResampler::create(
+ mMixerInFormat,
+ resamplerChannelCount,
+ devSampleRate, quality);
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+// Applies a timestretch playback rate (speed/pitch) to the track.
+// Returns true if the rate was changed, false if it was a no-op.
+// Before a TimestretchBufferProvider exists, speed/pitch deltas below the
+// AUDIO_TIMESTRETCH_*_MIN_DELTA thresholds are ignored (avoids creating a
+// provider for near-unity rates); once one exists, only exact equality is
+// treated as a no-op.
+bool AudioMixer::track_t::setPlaybackRate(const AudioPlaybackRate &playbackRate)
+{
+ if ((mTimestretchBufferProvider == NULL &&
+ fabs(playbackRate.mSpeed - mPlaybackRate.mSpeed) < AUDIO_TIMESTRETCH_SPEED_MIN_DELTA &&
+ fabs(playbackRate.mPitch - mPlaybackRate.mPitch) < AUDIO_TIMESTRETCH_PITCH_MIN_DELTA) ||
+ isAudioPlaybackRateEqual(playbackRate, mPlaybackRate)) {
+ return false;
+ }
+ mPlaybackRate = playbackRate;
+ if (mTimestretchBufferProvider == NULL) {
+ // TODO: Remove MONO_HACK. Resampler sees #channels after the downmixer
+ // but if none exists, it is the channel count (1 for mono).
+ const int timestretchChannelCount = downmixerBufferProvider != NULL
+ ? mMixerChannelCount : channelCount;
+ mTimestretchBufferProvider = new TimestretchBufferProvider(timestretchChannelCount,
+ mMixerInFormat, sampleRate, playbackRate);
+ reconfigureBufferProviders();
+ } else {
+ reinterpret_cast<TimestretchBufferProvider*>(mTimestretchBufferProvider)
+ ->setPlaybackRate(playbackRate);
+ }
+ return true;
+}
+
+/* Checks to see if the volume ramp has completed and clears the increment
+ * variables appropriately.
+ *
+ * FIXME: There is code to handle int/float ramp variable switchover should it not
+ * complete within a mixer buffer processing call, but it is preferred to avoid switchover
+ * due to precision issues. The switchover code is included for legacy code purposes
+ * and can be removed once the integer volume is removed.
+ *
+ * It is not sufficient to clear only the volumeInc integer variable because
+ * if one channel requires ramping, all channels are ramped.
+ *
+ * There is a bit of duplicated code here, but it keeps backward compatibility.
+ */
+inline void AudioMixer::track_t::adjustVolumeRamp(bool aux, bool useFloat)
+{
+ // In both branches below: reaching (or overshooting) the target volume
+ // snaps BOTH the integer and float states to the target and clears both
+ // increments; otherwise the inactive representation is kept mirrored.
+ if (useFloat) {
+ for (uint32_t i = 0; i < MAX_NUM_VOLUMES; i++) {
+ if ((mVolumeInc[i] > 0 && mPrevVolume[i] + mVolumeInc[i] >= mVolume[i]) ||
+ (mVolumeInc[i] < 0 && mPrevVolume[i] + mVolumeInc[i] <= mVolume[i])) {
+ volumeInc[i] = 0;
+ prevVolume[i] = volume[i] << 16;
+ mVolumeInc[i] = 0.;
+ mPrevVolume[i] = mVolume[i];
+ } else {
+ //ALOGV("ramp: %f %f %f", mVolume[i], mPrevVolume[i], mVolumeInc[i]);
+ prevVolume[i] = u4_28_from_float(mPrevVolume[i]);
+ }
+ }
+ } else {
+ for (uint32_t i = 0; i < MAX_NUM_VOLUMES; i++) {
+ if (((volumeInc[i]>0) && (((prevVolume[i]+volumeInc[i])>>16) >= volume[i])) ||
+ ((volumeInc[i]<0) && (((prevVolume[i]+volumeInc[i])>>16) <= volume[i]))) {
+ volumeInc[i] = 0;
+ prevVolume[i] = volume[i] << 16;
+ mVolumeInc[i] = 0.;
+ mPrevVolume[i] = mVolume[i];
+ } else {
+ //ALOGV("ramp: %d %d %d", volume[i] << 16, prevVolume[i], volumeInc[i]);
+ mPrevVolume[i] = float_from_u4_28(prevVolume[i]);
+ }
+ }
+ }
+ /* TODO: aux is always integer regardless of output buffer type */
+ if (aux) {
+ if (((auxInc>0) && (((prevAuxLevel+auxInc)>>16) >= auxLevel)) ||
+ ((auxInc<0) && (((prevAuxLevel+auxInc)>>16) <= auxLevel))) {
+ auxInc = 0;
+ prevAuxLevel = auxLevel << 16;
+ mAuxInc = 0.;
+ mPrevAuxLevel = mAuxLevel;
+ } else {
+ //ALOGV("aux ramp: %d %d %d", auxLevel << 16, prevAuxLevel, auxInc);
+ }
+ }
+}
+
+// Returns the number of frames the named track has obtained from its buffer
+// provider but not yet released; returns 0 for an out-of-range name.
+size_t AudioMixer::getUnreleasedFrames(int name) const
+{
+    const uint32_t index = uint32_t(name - TRACK0);
+    if (index >= MAX_NUM_TRACKS) {
+        return 0;
+    }
+    return mState.tracks[index].getUnreleasedFrames();
+}
+
+// Sets the external (input) buffer provider for a track and relinks the
+// processing chain. A no-op when the provider is unchanged.
+// NOTE(review): only the FIRST non-NULL provider in the else-if chain below
+// is reset() here -- presumably resetting the most-upstream provider is
+// sufficient to release held buffers; confirm against the
+// PassthruBufferProvider implementations.
+void AudioMixer::setBufferProvider(int name, AudioBufferProvider* bufferProvider)
+{
+ name -= TRACK0;
+ ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name);
+
+ if (mState.tracks[name].mInputBufferProvider == bufferProvider) {
+ return; // don't reset any buffer providers if identical.
+ }
+ if (mState.tracks[name].mReformatBufferProvider != NULL) {
+ mState.tracks[name].mReformatBufferProvider->reset();
+ } else if (mState.tracks[name].downmixerBufferProvider != NULL) {
+ mState.tracks[name].downmixerBufferProvider->reset();
+ } else if (mState.tracks[name].mPostDownmixReformatBufferProvider != NULL) {
+ mState.tracks[name].mPostDownmixReformatBufferProvider->reset();
+ } else if (mState.tracks[name].mTimestretchBufferProvider != NULL) {
+ mState.tracks[name].mTimestretchBufferProvider->reset();
+ }
+
+ mState.tracks[name].mInputBufferProvider = bufferProvider;
+ mState.tracks[name].reconfigureBufferProviders();
+}
+
+
+// Mixes one buffer's worth of audio by dispatching to the currently
+// selected processing hook (installed by process__validate()).
+void AudioMixer::process()
+{
+ mState.hook(&mState);
+}
+
+
+/* Revalidation hook, run after tracks are enabled/disabled or parameters
+ * change: recomputes enabledTracks, each track's 'needs' flags and per-track
+ * hook, allocates/frees the temp buffers used when resampling, selects the
+ * overall process hook, runs it once (completing any pending volume ramps),
+ * then downgrades to faster hooks where possible (all-muted or the
+ * optimized one-track case).
+ */
+void AudioMixer::process__validate(state_t* state)
+{
+ ALOGW_IF(!state->needsChanged,
+ "in process__validate() but nothing's invalid");
+
+ uint32_t changed = state->needsChanged;
+ state->needsChanged = 0; // clear the validation flag
+
+ // recompute which tracks are enabled / disabled
+ uint32_t enabled = 0;
+ uint32_t disabled = 0;
+ while (changed) {
+ const int i = 31 - __builtin_clz(changed);
+ const uint32_t mask = 1<<i;
+ changed &= ~mask;
+ track_t& t = state->tracks[i];
+ (t.enabled ? enabled : disabled) |= mask;
+ }
+ state->enabledTracks &= ~disabled;
+ state->enabledTracks |= enabled;
+
+ // compute everything we need...
+ int countActiveTracks = 0;
+ // TODO: fix all16BitsStereoNoResample logic to
+ // either properly handle muted tracks (it should ignore them)
+ // or remove altogether as an obsolete optimization.
+ bool all16BitsStereoNoResample = true;
+ bool resampling = false;
+ bool volumeRamp = false;
+ uint32_t en = state->enabledTracks;
+ while (en) {
+ const int i = 31 - __builtin_clz(en);
+ en &= ~(1<<i);
+
+ countActiveTracks++;
+ track_t& t = state->tracks[i];
+ uint32_t n = 0;
+ // FIXME can overflow (mask is only 3 bits)
+ n |= NEEDS_CHANNEL_1 + t.channelCount - 1;
+ if (t.doesResample()) {
+ n |= NEEDS_RESAMPLE;
+ }
+ if (t.auxLevel != 0 && t.auxBuffer != NULL) {
+ n |= NEEDS_AUX;
+ }
+
+ if (t.volumeInc[0]|t.volumeInc[1]) {
+ volumeRamp = true;
+ } else if (!t.doesResample() && t.volumeRL == 0) {
+ n |= NEEDS_MUTE;
+ }
+ t.needs = n;
+
+ // pick the per-track hook from the computed needs.
+ if (n & NEEDS_MUTE) {
+ t.hook = track__nop;
+ } else {
+ if (n & NEEDS_AUX) {
+ all16BitsStereoNoResample = false;
+ }
+ if (n & NEEDS_RESAMPLE) {
+ all16BitsStereoNoResample = false;
+ resampling = true;
+ t.hook = getTrackHook(TRACKTYPE_RESAMPLE, t.mMixerChannelCount,
+ t.mMixerInFormat, t.mMixerFormat);
+ ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
+ "Track %d needs downmix + resample", i);
+ } else {
+ if ((n & NEEDS_CHANNEL_COUNT__MASK) == NEEDS_CHANNEL_1){
+ t.hook = getTrackHook(
+ (t.mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO // TODO: MONO_HACK
+ && t.channelMask == AUDIO_CHANNEL_OUT_MONO)
+ ? TRACKTYPE_NORESAMPLEMONO : TRACKTYPE_NORESAMPLE,
+ t.mMixerChannelCount,
+ t.mMixerInFormat, t.mMixerFormat);
+ all16BitsStereoNoResample = false;
+ }
+ if ((n & NEEDS_CHANNEL_COUNT__MASK) >= NEEDS_CHANNEL_2){
+ t.hook = getTrackHook(TRACKTYPE_NORESAMPLE, t.mMixerChannelCount,
+ t.mMixerInFormat, t.mMixerFormat);
+ ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
+ "Track %d needs downmix", i);
+ }
+ }
+ }
+ }
+
+ // select the processing hooks
+ state->hook = process__nop;
+ if (countActiveTracks > 0) {
+ if (resampling) {
+ // resampling path needs intermediate accumulation buffers.
+ if (!state->outputTemp) {
+ state->outputTemp = new int32_t[MAX_NUM_CHANNELS * state->frameCount];
+ }
+ if (!state->resampleTemp) {
+ state->resampleTemp = new int32_t[MAX_NUM_CHANNELS * state->frameCount];
+ }
+ state->hook = process__genericResampling;
+ } else {
+ if (state->outputTemp) {
+ delete [] state->outputTemp;
+ state->outputTemp = NULL;
+ }
+ if (state->resampleTemp) {
+ delete [] state->resampleTemp;
+ state->resampleTemp = NULL;
+ }
+ state->hook = process__genericNoResampling;
+ if (all16BitsStereoNoResample && !volumeRamp) {
+ if (countActiveTracks == 1) {
+ const int i = 31 - __builtin_clz(state->enabledTracks);
+ track_t& t = state->tracks[i];
+ if ((t.needs & NEEDS_MUTE) == 0) {
+ // The check prevents a muted track from acquiring a process hook.
+ //
+ // This is dangerous if the track is MONO as that requires
+ // special case handling due to implicit channel duplication.
+ // Stereo or Multichannel should actually be fine here.
+ state->hook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
+ t.mMixerChannelCount, t.mMixerInFormat, t.mMixerFormat);
+ }
+ }
+ }
+ }
+ }
+
+ ALOGV("mixer configuration change: %d activeTracks (%08x) "
+ "all16BitsStereoNoResample=%d, resampling=%d, volumeRamp=%d",
+ countActiveTracks, state->enabledTracks,
+ all16BitsStereoNoResample, resampling, volumeRamp);
+
+ state->hook(state);
+
+ // Now that the volume ramp has been done, set optimal state and
+ // track hooks for subsequent mixer process
+ if (countActiveTracks > 0) {
+ bool allMuted = true;
+ uint32_t en = state->enabledTracks;
+ while (en) {
+ const int i = 31 - __builtin_clz(en);
+ en &= ~(1<<i);
+ track_t& t = state->tracks[i];
+ if (!t.doesResample() && t.volumeRL == 0) {
+ t.needs |= NEEDS_MUTE;
+ t.hook = track__nop;
+ } else {
+ allMuted = false;
+ }
+ }
+ if (allMuted) {
+ state->hook = process__nop;
+ } else if (all16BitsStereoNoResample) {
+ if (countActiveTracks == 1) {
+ const int i = 31 - __builtin_clz(state->enabledTracks);
+ track_t& t = state->tracks[i];
+ // Muted single tracks handled by allMuted above.
+ state->hook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
+ t.mMixerChannelCount, t.mMixerInFormat, t.mMixerFormat);
+ }
+ }
+ }
+}
+
+
+// Mix hook for a track that requires resampling.
+// With an aux send or a volume ramp, resamples at unity gain into 'temp'
+// and applies volume/aux in a second pass; with constant gain and no aux,
+// the resampler applies the volume and mixes directly into 'out'.
+void AudioMixer::track__genericResample(track_t* t, int32_t* out, size_t outFrameCount,
+ int32_t* temp, int32_t* aux)
+{
+ ALOGVV("track__genericResample\n");
+ t->resampler->setSampleRate(t->sampleRate);
+
+ // ramp gain - resample to temp buffer and scale/mix in 2nd step
+ if (aux != NULL) {
+ // always resample with unity gain when sending to auxiliary buffer to be able
+ // to apply send level after resampling
+ t->resampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
+ memset(temp, 0, outFrameCount * t->mMixerChannelCount * sizeof(int32_t));
+ t->resampler->resample(temp, outFrameCount, t->bufferProvider);
+ if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1]|t->auxInc)) {
+ volumeRampStereo(t, out, outFrameCount, temp, aux);
+ } else {
+ volumeStereo(t, out, outFrameCount, temp, aux);
+ }
+ } else {
+ if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1])) {
+ t->resampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
+ memset(temp, 0, outFrameCount * MAX_NUM_CHANNELS * sizeof(int32_t));
+ t->resampler->resample(temp, outFrameCount, t->bufferProvider);
+ volumeRampStereo(t, out, outFrameCount, temp, aux);
+ }
+
+ // constant gain
+ else {
+ t->resampler->setVolume(t->mVolume[0], t->mVolume[1]);
+ t->resampler->resample(out, outFrameCount, t->bufferProvider);
+ }
+ }
+}
+
+// Mix hook for muted tracks: intentionally does nothing.
+void AudioMixer::track__nop(track_t* t __unused, int32_t* out __unused,
+ size_t outFrameCount __unused, int32_t* temp __unused, int32_t* aux __unused)
+{
+}
+
+// Applies a ramped stereo volume to the samples in 'temp' (each sample is
+// right-shifted by 12 before scaling) and accumulates into 'out'. Volume is
+// U4.28 fixed point, stepped per frame by volumeInc; if 'aux' is non-NULL,
+// the L+R sum is also accumulated at the (ramped) aux level. Updates
+// prevVolume/prevAuxLevel and finalizes the ramp via adjustVolumeRamp().
+void AudioMixer::volumeRampStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
+ int32_t* aux)
+{
+ int32_t vl = t->prevVolume[0];
+ int32_t vr = t->prevVolume[1];
+ const int32_t vlInc = t->volumeInc[0];
+ const int32_t vrInc = t->volumeInc[1];
+
+ //ALOGD("[0] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
+ // t, vlInc/65536.0f, vl/65536.0f, t->volume[0],
+ // (vl + vlInc*frameCount)/65536.0f, frameCount);
+
+ // ramp volume
+ if (CC_UNLIKELY(aux != NULL)) {
+ int32_t va = t->prevAuxLevel;
+ const int32_t vaInc = t->auxInc;
+ int32_t l;
+ int32_t r;
+
+ do {
+ l = (*temp++ >> 12);
+ r = (*temp++ >> 12);
+ *out++ += (vl >> 16) * l;
+ *out++ += (vr >> 16) * r;
+ *aux++ += (va >> 17) * (l + r);
+ vl += vlInc;
+ vr += vrInc;
+ va += vaInc;
+ } while (--frameCount);
+ t->prevAuxLevel = va;
+ } else {
+ do {
+ *out++ += (vl >> 16) * (*temp++ >> 12);
+ *out++ += (vr >> 16) * (*temp++ >> 12);
+ vl += vlInc;
+ vr += vrInc;
+ } while (--frameCount);
+ }
+ t->prevVolume[0] = vl;
+ t->prevVolume[1] = vr;
+ t->adjustVolumeRamp(aux != NULL);
+}
+
+// Applies a constant stereo volume to the samples in 'temp' (right-shifted
+// by 12 and truncated to 16 bits) and accumulates into 'out' via mulAdd.
+// If 'aux' is non-NULL, the (L+R)/2 mono mix is accumulated at the constant
+// aux level. No ramp state is touched.
+void AudioMixer::volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
+ int32_t* aux)
+{
+ const int16_t vl = t->volume[0];
+ const int16_t vr = t->volume[1];
+
+ if (CC_UNLIKELY(aux != NULL)) {
+ const int16_t va = t->auxLevel;
+ do {
+ int16_t l = (int16_t)(*temp++ >> 12);
+ int16_t r = (int16_t)(*temp++ >> 12);
+ out[0] = mulAdd(l, vl, out[0]);
+ int16_t a = (int16_t)(((int32_t)l + r) >> 1);
+ out[1] = mulAdd(r, vr, out[1]);
+ out += 2;
+ aux[0] = mulAdd(a, va, aux[0]);
+ aux++;
+ } while (--frameCount);
+ } else {
+ do {
+ int16_t l = (int16_t)(*temp++ >> 12);
+ int16_t r = (int16_t)(*temp++ >> 12);
+ out[0] = mulAdd(l, vl, out[0]);
+ out[1] = mulAdd(r, vr, out[1]);
+ out += 2;
+ } while (--frameCount);
+ }
+}
+
+// Mix hook for a 16-bit stereo track with no resampling. Four code paths:
+// (aux send / no aux) x (ramping gain / constant gain). Reads interleaved
+// int16 samples from t->in, accumulates into 'out' (and 'aux' with the
+// averaged mono mix when present), and advances t->in past the consumed
+// frames. Constant-gain paths use the packed volumeRL pair with mulAddRL.
+void AudioMixer::track__16BitsStereo(track_t* t, int32_t* out, size_t frameCount,
+ int32_t* temp __unused, int32_t* aux)
+{
+ ALOGVV("track__16BitsStereo\n");
+ const int16_t *in = static_cast<const int16_t *>(t->in);
+
+ if (CC_UNLIKELY(aux != NULL)) {
+ int32_t l;
+ int32_t r;
+ // ramp gain
+ if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1]|t->auxInc)) {
+ int32_t vl = t->prevVolume[0];
+ int32_t vr = t->prevVolume[1];
+ int32_t va = t->prevAuxLevel;
+ const int32_t vlInc = t->volumeInc[0];
+ const int32_t vrInc = t->volumeInc[1];
+ const int32_t vaInc = t->auxInc;
+ // ALOGD("[1] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
+ // t, vlInc/65536.0f, vl/65536.0f, t->volume[0],
+ // (vl + vlInc*frameCount)/65536.0f, frameCount);
+
+ do {
+ l = (int32_t)*in++;
+ r = (int32_t)*in++;
+ *out++ += (vl >> 16) * l;
+ *out++ += (vr >> 16) * r;
+ *aux++ += (va >> 17) * (l + r);
+ vl += vlInc;
+ vr += vrInc;
+ va += vaInc;
+ } while (--frameCount);
+
+ t->prevVolume[0] = vl;
+ t->prevVolume[1] = vr;
+ t->prevAuxLevel = va;
+ t->adjustVolumeRamp(true);
+ }
+
+ // constant gain
+ else {
+ const uint32_t vrl = t->volumeRL;
+ const int16_t va = (int16_t)t->auxLevel;
+ do {
+ uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
+ int16_t a = (int16_t)(((int32_t)in[0] + in[1]) >> 1);
+ in += 2;
+ out[0] = mulAddRL(1, rl, vrl, out[0]);
+ out[1] = mulAddRL(0, rl, vrl, out[1]);
+ out += 2;
+ aux[0] = mulAdd(a, va, aux[0]);
+ aux++;
+ } while (--frameCount);
+ }
+ } else {
+ // ramp gain
+ if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1])) {
+ int32_t vl = t->prevVolume[0];
+ int32_t vr = t->prevVolume[1];
+ const int32_t vlInc = t->volumeInc[0];
+ const int32_t vrInc = t->volumeInc[1];
+
+ // ALOGD("[1] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
+ // t, vlInc/65536.0f, vl/65536.0f, t->volume[0],
+ // (vl + vlInc*frameCount)/65536.0f, frameCount);
+
+ do {
+ *out++ += (vl >> 16) * (int32_t) *in++;
+ *out++ += (vr >> 16) * (int32_t) *in++;
+ vl += vlInc;
+ vr += vrInc;
+ } while (--frameCount);
+
+ t->prevVolume[0] = vl;
+ t->prevVolume[1] = vr;
+ t->adjustVolumeRamp(false);
+ }
+
+ // constant gain
+ else {
+ const uint32_t vrl = t->volumeRL;
+ do {
+ uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
+ in += 2;
+ out[0] = mulAddRL(1, rl, vrl, out[0]);
+ out[1] = mulAddRL(0, rl, vrl, out[1]);
+ out += 2;
+ } while (--frameCount);
+ }
+ }
+ t->in = in;
+}
+
+void AudioMixer::track__16BitsMono(track_t* t, int32_t* out, size_t frameCount,
+ int32_t* temp __unused, int32_t* aux)
+{
+ ALOGVV("track__16BitsMono\n");
+ const int16_t *in = static_cast<int16_t const *>(t->in);
+
+ if (CC_UNLIKELY(aux != NULL)) {
+ // ramp gain
+ if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1]|t->auxInc)) {
+ int32_t vl = t->prevVolume[0];
+ int32_t vr = t->prevVolume[1];
+ int32_t va = t->prevAuxLevel;
+ const int32_t vlInc = t->volumeInc[0];
+ const int32_t vrInc = t->volumeInc[1];
+ const int32_t vaInc = t->auxInc;
+
+ // ALOGD("[2] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
+ // t, vlInc/65536.0f, vl/65536.0f, t->volume[0],
+ // (vl + vlInc*frameCount)/65536.0f, frameCount);
+
+ do {
+ int32_t l = *in++;
+ *out++ += (vl >> 16) * l;
+ *out++ += (vr >> 16) * l;
+ *aux++ += (va >> 16) * l;
+ vl += vlInc;
+ vr += vrInc;
+ va += vaInc;
+ } while (--frameCount);
+
+ t->prevVolume[0] = vl;
+ t->prevVolume[1] = vr;
+ t->prevAuxLevel = va;
+ t->adjustVolumeRamp(true);
+ }
+ // constant gain
+ else {
+ const int16_t vl = t->volume[0];
+ const int16_t vr = t->volume[1];
+ const int16_t va = (int16_t)t->auxLevel;
+ do {
+ int16_t l = *in++;
+ out[0] = mulAdd(l, vl, out[0]);
+ out[1] = mulAdd(l, vr, out[1]);
+ out += 2;
+ aux[0] = mulAdd(l, va, aux[0]);
+ aux++;
+ } while (--frameCount);
+ }
+ } else {
+ // ramp gain
+ if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1])) {
+ int32_t vl = t->prevVolume[0];
+ int32_t vr = t->prevVolume[1];
+ const int32_t vlInc = t->volumeInc[0];
+ const int32_t vrInc = t->volumeInc[1];
+
+ // ALOGD("[2] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
+ // t, vlInc/65536.0f, vl/65536.0f, t->volume[0],
+ // (vl + vlInc*frameCount)/65536.0f, frameCount);
+
+ do {
+ int32_t l = *in++;
+ *out++ += (vl >> 16) * l;
+ *out++ += (vr >> 16) * l;
+ vl += vlInc;
+ vr += vrInc;
+ } while (--frameCount);
+
+ t->prevVolume[0] = vl;
+ t->prevVolume[1] = vr;
+ t->adjustVolumeRamp(false);
+ }
+ // constant gain
+ else {
+ const int16_t vl = t->volume[0];
+ const int16_t vr = t->volume[1];
+ do {
+ int16_t l = *in++;
+ out[0] = mulAdd(l, vl, out[0]);
+ out[1] = mulAdd(l, vr, out[1]);
+ out += 2;
+ } while (--frameCount);
+ }
+ }
+ t->in = in;
+}
+
+// no-op case
+void AudioMixer::process__nop(state_t* state)
+{
+ ALOGVV("process__nop\n");
+ uint32_t e0 = state->enabledTracks;
+ while (e0) {
+ // process by group of tracks with same output buffer to
+ // avoid multiple memset() on same buffer
+ uint32_t e1 = e0, e2 = e0;
+ int i = 31 - __builtin_clz(e1);
+ {
+ track_t& t1 = state->tracks[i];
+ e2 &= ~(1<<i);
+ while (e2) {
+ i = 31 - __builtin_clz(e2);
+ e2 &= ~(1<<i);
+ track_t& t2 = state->tracks[i];
+ if (CC_UNLIKELY(t2.mainBuffer != t1.mainBuffer)) {
+ e1 &= ~(1<<i);
+ }
+ }
+ e0 &= ~(e1);
+
+ memset(t1.mainBuffer, 0, state->frameCount * t1.mMixerChannelCount
+ * audio_bytes_per_sample(t1.mMixerFormat));
+ }
+
+ while (e1) {
+ i = 31 - __builtin_clz(e1);
+ e1 &= ~(1<<i);
+ {
+ track_t& t3 = state->tracks[i];
+ size_t outFrames = state->frameCount;
+ while (outFrames) {
+ t3.buffer.frameCount = outFrames;
+ t3.bufferProvider->getNextBuffer(&t3.buffer);
+ if (t3.buffer.raw == NULL) break;
+ outFrames -= t3.buffer.frameCount;
+ t3.bufferProvider->releaseBuffer(&t3.buffer);
+ }
+ }
+ }
+ }
+}
+
+// generic code without resampling
+void AudioMixer::process__genericNoResampling(state_t* state)
+{
+ ALOGVV("process__genericNoResampling\n");
+ int32_t outTemp[BLOCKSIZE * MAX_NUM_CHANNELS] __attribute__((aligned(32)));
+
+ // acquire each track's buffer
+ uint32_t enabledTracks = state->enabledTracks;
+ uint32_t e0 = enabledTracks;
+ while (e0) {
+ const int i = 31 - __builtin_clz(e0);
+ e0 &= ~(1<<i);
+ track_t& t = state->tracks[i];
+ t.buffer.frameCount = state->frameCount;
+ t.bufferProvider->getNextBuffer(&t.buffer);
+ t.frameCount = t.buffer.frameCount;
+ t.in = t.buffer.raw;
+ }
+
+ e0 = enabledTracks;
+ while (e0) {
+ // process by group of tracks with same output buffer to
+ // optimize cache use
+ uint32_t e1 = e0, e2 = e0;
+ int j = 31 - __builtin_clz(e1);
+ track_t& t1 = state->tracks[j];
+ e2 &= ~(1<<j);
+ while (e2) {
+ j = 31 - __builtin_clz(e2);
+ e2 &= ~(1<<j);
+ track_t& t2 = state->tracks[j];
+ if (CC_UNLIKELY(t2.mainBuffer != t1.mainBuffer)) {
+ e1 &= ~(1<<j);
+ }
+ }
+ e0 &= ~(e1);
+ // this assumes output 16 bits stereo, no resampling
+ int32_t *out = t1.mainBuffer;
+ size_t numFrames = 0;
+ do {
+ memset(outTemp, 0, sizeof(outTemp));
+ e2 = e1;
+ while (e2) {
+ const int i = 31 - __builtin_clz(e2);
+ e2 &= ~(1<<i);
+ track_t& t = state->tracks[i];
+ size_t outFrames = BLOCKSIZE;
+ int32_t *aux = NULL;
+ if (CC_UNLIKELY(t.needs & NEEDS_AUX)) {
+ aux = t.auxBuffer + numFrames;
+ }
+ while (outFrames) {
+ // t.in == NULL can happen if the track was flushed just after having
+ // been enabled for mixing.
+ if (t.in == NULL) {
+ enabledTracks &= ~(1<<i);
+ e1 &= ~(1<<i);
+ break;
+ }
+ size_t inFrames = (t.frameCount > outFrames)?outFrames:t.frameCount;
+ if (inFrames > 0) {
+ t.hook(&t, outTemp + (BLOCKSIZE - outFrames) * t.mMixerChannelCount,
+ inFrames, state->resampleTemp, aux);
+ t.frameCount -= inFrames;
+ outFrames -= inFrames;
+ if (CC_UNLIKELY(aux != NULL)) {
+ aux += inFrames;
+ }
+ }
+ if (t.frameCount == 0 && outFrames) {
+ t.bufferProvider->releaseBuffer(&t.buffer);
+ t.buffer.frameCount = (state->frameCount - numFrames) -
+ (BLOCKSIZE - outFrames);
+ t.bufferProvider->getNextBuffer(&t.buffer);
+ t.in = t.buffer.raw;
+ if (t.in == NULL) {
+ enabledTracks &= ~(1<<i);
+ e1 &= ~(1<<i);
+ break;
+ }
+ t.frameCount = t.buffer.frameCount;
+ }
+ }
+ }
+
+ convertMixerFormat(out, t1.mMixerFormat, outTemp, t1.mMixerInFormat,
+ BLOCKSIZE * t1.mMixerChannelCount);
+ // TODO: fix ugly casting due to choice of out pointer type
+ out = reinterpret_cast<int32_t*>((uint8_t*)out
+ + BLOCKSIZE * t1.mMixerChannelCount
+ * audio_bytes_per_sample(t1.mMixerFormat));
+ numFrames += BLOCKSIZE;
+ } while (numFrames < state->frameCount);
+ }
+
+ // release each track's buffer
+ e0 = enabledTracks;
+ while (e0) {
+ const int i = 31 - __builtin_clz(e0);
+ e0 &= ~(1<<i);
+ track_t& t = state->tracks[i];
+ t.bufferProvider->releaseBuffer(&t.buffer);
+ }
+}
+
+
+// generic code with resampling
+void AudioMixer::process__genericResampling(state_t* state)
+{
+ ALOGVV("process__genericResampling\n");
+ // this const just means that local variable outTemp doesn't change
+ int32_t* const outTemp = state->outputTemp;
+ size_t numFrames = state->frameCount;
+
+ uint32_t e0 = state->enabledTracks;
+ while (e0) {
+ // process by group of tracks with same output buffer
+ // to optimize cache use
+ uint32_t e1 = e0, e2 = e0;
+ int j = 31 - __builtin_clz(e1);
+ track_t& t1 = state->tracks[j];
+ e2 &= ~(1<<j);
+ while (e2) {
+ j = 31 - __builtin_clz(e2);
+ e2 &= ~(1<<j);
+ track_t& t2 = state->tracks[j];
+ if (CC_UNLIKELY(t2.mainBuffer != t1.mainBuffer)) {
+ e1 &= ~(1<<j);
+ }
+ }
+ e0 &= ~(e1);
+ int32_t *out = t1.mainBuffer;
+ memset(outTemp, 0, sizeof(*outTemp) * t1.mMixerChannelCount * state->frameCount);
+ while (e1) {
+ const int i = 31 - __builtin_clz(e1);
+ e1 &= ~(1<<i);
+ track_t& t = state->tracks[i];
+ int32_t *aux = NULL;
+ if (CC_UNLIKELY(t.needs & NEEDS_AUX)) {
+ aux = t.auxBuffer;
+ }
+
+ // this is a little goofy, on the resampling case we don't
+ // acquire/release the buffers because it's done by
+ // the resampler.
+ if (t.needs & NEEDS_RESAMPLE) {
+ t.hook(&t, outTemp, numFrames, state->resampleTemp, aux);
+ } else {
+
+ size_t outFrames = 0;
+
+ while (outFrames < numFrames) {
+ t.buffer.frameCount = numFrames - outFrames;
+ t.bufferProvider->getNextBuffer(&t.buffer);
+ t.in = t.buffer.raw;
+ // t.in == NULL can happen if the track was flushed just after having
+ // been enabled for mixing.
+ if (t.in == NULL) break;
+
+ if (CC_UNLIKELY(aux != NULL)) {
+ aux += outFrames;
+ }
+ t.hook(&t, outTemp + outFrames * t.mMixerChannelCount, t.buffer.frameCount,
+ state->resampleTemp, aux);
+ outFrames += t.buffer.frameCount;
+ t.bufferProvider->releaseBuffer(&t.buffer);
+ }
+ }
+ }
+ convertMixerFormat(out, t1.mMixerFormat,
+ outTemp, t1.mMixerInFormat, numFrames * t1.mMixerChannelCount);
+ }
+}
+
+// one track, 16 bits stereo without resampling is the most common case
+void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state)
+{
+ ALOGVV("process__OneTrack16BitsStereoNoResampling\n");
+ // This method is only called when state->enabledTracks has exactly
+ // one bit set. The asserts below would verify this, but are commented out
+ // since the whole point of this method is to optimize performance.
+ //ALOG_ASSERT(0 != state->enabledTracks, "no tracks enabled");
+ const int i = 31 - __builtin_clz(state->enabledTracks);
+ //ALOG_ASSERT((1 << i) == state->enabledTracks, "more than 1 track enabled");
+ const track_t& t = state->tracks[i];
+
+ AudioBufferProvider::Buffer& b(t.buffer);
+
+ int32_t* out = t.mainBuffer;
+ float *fout = reinterpret_cast<float*>(out);
+ size_t numFrames = state->frameCount;
+
+ const int16_t vl = t.volume[0];
+ const int16_t vr = t.volume[1];
+ const uint32_t vrl = t.volumeRL;
+ while (numFrames) {
+ b.frameCount = numFrames;
+ t.bufferProvider->getNextBuffer(&b);
+ const int16_t *in = b.i16;
+
+ // in == NULL can happen if the track was flushed just after having
+ // been enabled for mixing.
+ if (in == NULL || (((uintptr_t)in) & 3)) {
+ if ( AUDIO_FORMAT_PCM_FLOAT == t.mMixerFormat ) {
+ memset((char*)fout, 0, numFrames
+ * t.mMixerChannelCount * audio_bytes_per_sample(t.mMixerFormat));
+ } else {
+ memset((char*)out, 0, numFrames
+ * t.mMixerChannelCount * audio_bytes_per_sample(t.mMixerFormat));
+ }
+ ALOGE_IF((((uintptr_t)in) & 3),
+ "process__OneTrack16BitsStereoNoResampling: misaligned buffer"
+ " %p track %d, channels %d, needs %08x, volume %08x vfl %f vfr %f",
+ in, i, t.channelCount, t.needs, vrl, t.mVolume[0], t.mVolume[1]);
+ return;
+ }
+ size_t outFrames = b.frameCount;
+
+ switch (t.mMixerFormat) {
+ case AUDIO_FORMAT_PCM_FLOAT:
+ do {
+ uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
+ in += 2;
+ int32_t l = mulRL(1, rl, vrl);
+ int32_t r = mulRL(0, rl, vrl);
+ *fout++ = float_from_q4_27(l);
+ *fout++ = float_from_q4_27(r);
+ // Note: In case of later int16_t sink output,
+ // conversion and clamping is done by memcpy_to_i16_from_float().
+ } while (--outFrames);
+ break;
+ case AUDIO_FORMAT_PCM_16_BIT:
+ if (CC_UNLIKELY(uint32_t(vl) > UNITY_GAIN_INT || uint32_t(vr) > UNITY_GAIN_INT)) {
+ // volume is boosted, so we might need to clamp even though
+ // we process only one track.
+ do {
+ uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
+ in += 2;
+ int32_t l = mulRL(1, rl, vrl) >> 12;
+ int32_t r = mulRL(0, rl, vrl) >> 12;
+ // clamping...
+ l = clamp16(l);
+ r = clamp16(r);
+ *out++ = (r<<16) | (l & 0xFFFF);
+ } while (--outFrames);
+ } else {
+ do {
+ uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
+ in += 2;
+ int32_t l = mulRL(1, rl, vrl) >> 12;
+ int32_t r = mulRL(0, rl, vrl) >> 12;
+ *out++ = (r<<16) | (l & 0xFFFF);
+ } while (--outFrames);
+ }
+ break;
+ default:
+ LOG_ALWAYS_FATAL("bad mixer format: %d", t.mMixerFormat);
+ }
+ numFrames -= b.frameCount;
+ t.bufferProvider->releaseBuffer(&b);
+ }
+}
+
+/*static*/ pthread_once_t AudioMixer::sOnceControl = PTHREAD_ONCE_INIT;
+
+/*static*/ void AudioMixer::sInitRoutine()
+{
+ DownmixerBufferProvider::init(); // for the downmixer
+}
+
+/* TODO: consider whether this level of optimization is necessary.
+ * Perhaps just stick with a single for loop.
+ */
+
+// Needs to derive a compile time constant (constexpr). Could be targeted to go
+// to a MONOVOL mixtype based on MAX_NUM_VOLUMES, but that's an unnecessary complication.
+#define MIXTYPE_MONOVOL(mixtype) ((mixtype) == MIXTYPE_MULTI ? MIXTYPE_MULTI_MONOVOL : \
+ (mixtype) == MIXTYPE_MULTI_SAVEONLY ? MIXTYPE_MULTI_SAVEONLY_MONOVOL : (mixtype))
+
+/* MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27)
+ */
+template <int MIXTYPE,
+ typename TO, typename TI, typename TV, typename TA, typename TAV>
+static void volumeRampMulti(uint32_t channels, TO* out, size_t frameCount,
+ const TI* in, TA* aux, TV *vol, const TV *volinc, TAV *vola, TAV volainc)
+{
+ switch (channels) {
+ case 1:
+ volumeRampMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, volinc, vola, volainc);
+ break;
+ case 2:
+ volumeRampMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, volinc, vola, volainc);
+ break;
+ case 3:
+ volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out,
+ frameCount, in, aux, vol, volinc, vola, volainc);
+ break;
+ case 4:
+ volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out,
+ frameCount, in, aux, vol, volinc, vola, volainc);
+ break;
+ case 5:
+ volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out,
+ frameCount, in, aux, vol, volinc, vola, volainc);
+ break;
+ case 6:
+ volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out,
+ frameCount, in, aux, vol, volinc, vola, volainc);
+ break;
+ case 7:
+ volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out,
+ frameCount, in, aux, vol, volinc, vola, volainc);
+ break;
+ case 8:
+ volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out,
+ frameCount, in, aux, vol, volinc, vola, volainc);
+ break;
+ }
+}
+
+/* MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27)
+ */
+template <int MIXTYPE,
+ typename TO, typename TI, typename TV, typename TA, typename TAV>
+static void volumeMulti(uint32_t channels, TO* out, size_t frameCount,
+ const TI* in, TA* aux, const TV *vol, TAV vola)
+{
+ switch (channels) {
+ case 1:
+ volumeMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, vola);
+ break;
+ case 2:
+ volumeMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, vola);
+ break;
+ case 3:
+ volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out, frameCount, in, aux, vol, vola);
+ break;
+ case 4:
+ volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out, frameCount, in, aux, vol, vola);
+ break;
+ case 5:
+ volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out, frameCount, in, aux, vol, vola);
+ break;
+ case 6:
+ volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out, frameCount, in, aux, vol, vola);
+ break;
+ case 7:
+ volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out, frameCount, in, aux, vol, vola);
+ break;
+ case 8:
+ volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out, frameCount, in, aux, vol, vola);
+ break;
+ }
+}
+
+/* MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * USEFLOATVOL (set to true if float volume is used)
+ * ADJUSTVOL (set to true if volume ramp parameters needs adjustment afterwards)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27)
+ */
+template <int MIXTYPE, bool USEFLOATVOL, bool ADJUSTVOL,
+ typename TO, typename TI, typename TA>
+void AudioMixer::volumeMix(TO *out, size_t outFrames,
+ const TI *in, TA *aux, bool ramp, AudioMixer::track_t *t)
+{
+ if (USEFLOATVOL) {
+ if (ramp) {
+ volumeRampMulti<MIXTYPE>(t->mMixerChannelCount, out, outFrames, in, aux,
+ t->mPrevVolume, t->mVolumeInc, &t->prevAuxLevel, t->auxInc);
+ if (ADJUSTVOL) {
+ t->adjustVolumeRamp(aux != NULL, true);
+ }
+ } else {
+ volumeMulti<MIXTYPE>(t->mMixerChannelCount, out, outFrames, in, aux,
+ t->mVolume, t->auxLevel);
+ }
+ } else {
+ if (ramp) {
+ volumeRampMulti<MIXTYPE>(t->mMixerChannelCount, out, outFrames, in, aux,
+ t->prevVolume, t->volumeInc, &t->prevAuxLevel, t->auxInc);
+ if (ADJUSTVOL) {
+ t->adjustVolumeRamp(aux != NULL);
+ }
+ } else {
+ volumeMulti<MIXTYPE>(t->mMixerChannelCount, out, outFrames, in, aux,
+ t->volume, t->auxLevel);
+ }
+ }
+}
+
+/* This process hook is called when there is a single track without
+ * aux buffer, volume ramp, or resampling.
+ * TODO: Update the hook selection: this can properly handle aux and ramp.
+ *
+ * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27)
+ */
+template <int MIXTYPE, typename TO, typename TI, typename TA>
+void AudioMixer::process_NoResampleOneTrack(state_t* state)
+{
+ ALOGVV("process_NoResampleOneTrack\n");
+ // CLZ is faster than CTZ on ARM, though really not sure if true after 31 - clz.
+ const int i = 31 - __builtin_clz(state->enabledTracks);
+ ALOG_ASSERT((1 << i) == state->enabledTracks, "more than 1 track enabled");
+ track_t *t = &state->tracks[i];
+ const uint32_t channels = t->mMixerChannelCount;
+ TO* out = reinterpret_cast<TO*>(t->mainBuffer);
+ TA* aux = reinterpret_cast<TA*>(t->auxBuffer);
+ const bool ramp = t->needsRamp();
+
+ for (size_t numFrames = state->frameCount; numFrames; ) {
+ AudioBufferProvider::Buffer& b(t->buffer);
+ // get input buffer
+ b.frameCount = numFrames;
+ t->bufferProvider->getNextBuffer(&b);
+ const TI *in = reinterpret_cast<TI*>(b.raw);
+
+ // in == NULL can happen if the track was flushed just after having
+ // been enabled for mixing.
+ if (in == NULL || (((uintptr_t)in) & 3)) {
+ memset(out, 0, numFrames
+ * channels * audio_bytes_per_sample(t->mMixerFormat));
+ ALOGE_IF((((uintptr_t)in) & 3), "process_NoResampleOneTrack: bus error: "
+ "buffer %p track %p, channels %d, needs %#x",
+ in, t, t->channelCount, t->needs);
+ return;
+ }
+
+ const size_t outFrames = b.frameCount;
+ volumeMix<MIXTYPE, is_same<TI, float>::value, false> (
+ out, outFrames, in, aux, ramp, t);
+
+ out += outFrames * channels;
+ if (aux != NULL) {
+ aux += channels;
+ }
+ numFrames -= b.frameCount;
+
+ // release buffer
+ t->bufferProvider->releaseBuffer(&b);
+ }
+ if (ramp) {
+ t->adjustVolumeRamp(aux != NULL, is_same<TI, float>::value);
+ }
+}
+
+/* This track hook is called to do resampling then mixing,
+ * pulling from the track's upstream AudioBufferProvider.
+ *
+ * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27)
+ */
+template <int MIXTYPE, typename TO, typename TI, typename TA>
+void AudioMixer::track__Resample(track_t* t, TO* out, size_t outFrameCount, TO* temp, TA* aux)
+{
+ ALOGVV("track__Resample\n");
+ t->resampler->setSampleRate(t->sampleRate);
+ const bool ramp = t->needsRamp();
+ if (ramp || aux != NULL) {
+ // if ramp: resample with unity gain to temp buffer and scale/mix in 2nd step.
+ // if aux != NULL: resample with unity gain to temp buffer then apply send level.
+
+ t->resampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
+ memset(temp, 0, outFrameCount * t->mMixerChannelCount * sizeof(TO));
+ t->resampler->resample((int32_t*)temp, outFrameCount, t->bufferProvider);
+
+ volumeMix<MIXTYPE, is_same<TI, float>::value, true>(
+ out, outFrameCount, temp, aux, ramp, t);
+
+ } else { // constant volume gain
+ t->resampler->setVolume(t->mVolume[0], t->mVolume[1]);
+ t->resampler->resample((int32_t*)out, outFrameCount, t->bufferProvider);
+ }
+}
+
+/* This track hook is called to mix a track, when no resampling is required.
+ * The input buffer should be present in t->in.
+ *
+ * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27)
+ */
+template <int MIXTYPE, typename TO, typename TI, typename TA>
+void AudioMixer::track__NoResample(track_t* t, TO* out, size_t frameCount,
+ TO* temp __unused, TA* aux)
+{
+ ALOGVV("track__NoResample\n");
+ const TI *in = static_cast<const TI *>(t->in);
+
+ volumeMix<MIXTYPE, is_same<TI, float>::value, true>(
+ out, frameCount, in, aux, t->needsRamp(), t);
+
+ // MIXTYPE_MONOEXPAND reads a single input channel and expands to NCHAN output channels.
+ // MIXTYPE_MULTI reads NCHAN input channels and places to NCHAN output channels.
+ in += (MIXTYPE == MIXTYPE_MONOEXPAND) ? frameCount : frameCount * t->mMixerChannelCount;
+ t->in = in;
+}
+
+/* The Mixer engine generates either int32_t (Q4_27) or float data.
+ * We use this function to convert the engine buffers
+ * to the desired mixer output format, either int16_t (Q.15) or float.
+ */
+void AudioMixer::convertMixerFormat(void *out, audio_format_t mixerOutFormat,
+ void *in, audio_format_t mixerInFormat, size_t sampleCount)
+{
+ switch (mixerInFormat) {
+ case AUDIO_FORMAT_PCM_FLOAT:
+ switch (mixerOutFormat) {
+ case AUDIO_FORMAT_PCM_FLOAT:
+ memcpy(out, in, sampleCount * sizeof(float)); // MEMCPY. TODO optimize out
+ break;
+ case AUDIO_FORMAT_PCM_16_BIT:
+ memcpy_to_i16_from_float((int16_t*)out, (float*)in, sampleCount);
+ break;
+ default:
+ LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
+ break;
+ }
+ break;
+ case AUDIO_FORMAT_PCM_16_BIT:
+ switch (mixerOutFormat) {
+ case AUDIO_FORMAT_PCM_FLOAT:
+ memcpy_to_float_from_q4_27((float*)out, (int32_t*)in, sampleCount);
+ break;
+ case AUDIO_FORMAT_PCM_16_BIT:
+ // two int16_t are produced per iteration
+ ditherAndClamp((int32_t*)out, (int32_t*)in, sampleCount >> 1);
+ break;
+ default:
+ LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
+ break;
+ }
+ break;
+ default:
+ LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
+ break;
+ }
+}
+
+/* Returns the proper track hook to use for mixing the track into the output buffer.
+ */
+AudioMixer::hook_t AudioMixer::getTrackHook(int trackType, uint32_t channelCount,
+ audio_format_t mixerInFormat, audio_format_t mixerOutFormat __unused)
+{
+ if (!kUseNewMixer && channelCount == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) {
+ switch (trackType) {
+ case TRACKTYPE_NOP:
+ return track__nop;
+ case TRACKTYPE_RESAMPLE:
+ return track__genericResample;
+ case TRACKTYPE_NORESAMPLEMONO:
+ return track__16BitsMono;
+ case TRACKTYPE_NORESAMPLE:
+ return track__16BitsStereo;
+ default:
+ LOG_ALWAYS_FATAL("bad trackType: %d", trackType);
+ break;
+ }
+ }
+ LOG_ALWAYS_FATAL_IF(channelCount > MAX_NUM_CHANNELS);
+ switch (trackType) {
+ case TRACKTYPE_NOP:
+ return track__nop;
+ case TRACKTYPE_RESAMPLE:
+ switch (mixerInFormat) {
+ case AUDIO_FORMAT_PCM_FLOAT:
+ return (AudioMixer::hook_t)
+ track__Resample<MIXTYPE_MULTI, float /*TO*/, float /*TI*/, int32_t /*TA*/>;
+ case AUDIO_FORMAT_PCM_16_BIT:
+ return (AudioMixer::hook_t)\
+ track__Resample<MIXTYPE_MULTI, int32_t, int16_t, int32_t>;
+ default:
+ LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
+ break;
+ }
+ break;
+ case TRACKTYPE_NORESAMPLEMONO:
+ switch (mixerInFormat) {
+ case AUDIO_FORMAT_PCM_FLOAT:
+ return (AudioMixer::hook_t)
+ track__NoResample<MIXTYPE_MONOEXPAND, float, float, int32_t>;
+ case AUDIO_FORMAT_PCM_16_BIT:
+ return (AudioMixer::hook_t)
+ track__NoResample<MIXTYPE_MONOEXPAND, int32_t, int16_t, int32_t>;
+ default:
+ LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
+ break;
+ }
+ break;
+ case TRACKTYPE_NORESAMPLE:
+ switch (mixerInFormat) {
+ case AUDIO_FORMAT_PCM_FLOAT:
+ return (AudioMixer::hook_t)
+ track__NoResample<MIXTYPE_MULTI, float, float, int32_t>;
+ case AUDIO_FORMAT_PCM_16_BIT:
+ return (AudioMixer::hook_t)
+ track__NoResample<MIXTYPE_MULTI, int32_t, int16_t, int32_t>;
+ default:
+ LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
+ break;
+ }
+ break;
+ default:
+ LOG_ALWAYS_FATAL("bad trackType: %d", trackType);
+ break;
+ }
+ return NULL;
+}
+
+/* Returns the proper process hook for mixing tracks. Currently works only for
+ * PROCESSTYPE_NORESAMPLEONETRACK, a mix involving one track, no resampling.
+ *
+ * TODO: Due to the special mixing considerations of duplicating to
+ * a stereo output track, the input track cannot be MONO. This should be
+ * prevented by the caller.
+ */
+AudioMixer::process_hook_t AudioMixer::getProcessHook(int processType, uint32_t channelCount,
+ audio_format_t mixerInFormat, audio_format_t mixerOutFormat)
+{
+ if (processType != PROCESSTYPE_NORESAMPLEONETRACK) { // Only NORESAMPLEONETRACK
+ LOG_ALWAYS_FATAL("bad processType: %d", processType);
+ return NULL;
+ }
+ if (!kUseNewMixer && channelCount == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) {
+ return process__OneTrack16BitsStereoNoResampling;
+ }
+ LOG_ALWAYS_FATAL_IF(channelCount > MAX_NUM_CHANNELS);
+ switch (mixerInFormat) {
+ case AUDIO_FORMAT_PCM_FLOAT:
+ switch (mixerOutFormat) {
+ case AUDIO_FORMAT_PCM_FLOAT:
+ return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY,
+ float /*TO*/, float /*TI*/, int32_t /*TA*/>;
+ case AUDIO_FORMAT_PCM_16_BIT:
+ return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY,
+ int16_t, float, int32_t>;
+ default:
+ LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
+ break;
+ }
+ break;
+ case AUDIO_FORMAT_PCM_16_BIT:
+ switch (mixerOutFormat) {
+ case AUDIO_FORMAT_PCM_FLOAT:
+ return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY,
+ float, int16_t, int32_t>;
+ case AUDIO_FORMAT_PCM_16_BIT:
+ return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY,
+ int16_t, int16_t, int32_t>;
+ default:
+ LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
+ break;
+ }
+ break;
+ default:
+ LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
+ break;
+ }
+ return NULL;
+}
+
+// ----------------------------------------------------------------------------
+} // namespace android
diff --git a/services/audioflinger/AudioMixerOps.h b/media/libaudioprocessing/AudioMixerOps.h
similarity index 100%
rename from services/audioflinger/AudioMixerOps.h
rename to media/libaudioprocessing/AudioMixerOps.h
diff --git a/media/libaudioprocessing/AudioResampler.cpp b/media/libaudioprocessing/AudioResampler.cpp
new file mode 100644
index 0000000..c761b38
--- /dev/null
+++ b/media/libaudioprocessing/AudioResampler.cpp
@@ -0,0 +1,787 @@
+/*
+ * Copyright (C) 2007 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioResampler"
+//#define LOG_NDEBUG 0
+
+#include <pthread.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <sys/types.h>
+
+#include <cutils/properties.h>
+#include <log/log.h>
+
+#include <audio_utils/primitives.h>
+#include <media/AudioResampler.h>
+#include "AudioResamplerSinc.h"
+#include "AudioResamplerCubic.h"
+#include "AudioResamplerDyn.h"
+
+#ifdef __arm__
+ // bug 13102576
+ //#define ASM_ARM_RESAMP1 // enable asm optimisation for ResamplerOrder1
+#endif
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+class AudioResamplerOrder1 : public AudioResampler {
+public:
+ AudioResamplerOrder1(int inChannelCount, int32_t sampleRate) :
+ AudioResampler(inChannelCount, sampleRate, LOW_QUALITY), mX0L(0), mX0R(0) {
+ }
+ virtual size_t resample(int32_t* out, size_t outFrameCount,
+ AudioBufferProvider* provider);
+private:
+ // number of bits used in interpolation multiply - 15 bits avoids overflow
+ static const int kNumInterpBits = 15;
+
+ // bits to shift the phase fraction down to avoid overflow
+ static const int kPreInterpShift = kNumPhaseBits - kNumInterpBits;
+
+ void init() {}
+ size_t resampleMono16(int32_t* out, size_t outFrameCount,
+ AudioBufferProvider* provider);
+ size_t resampleStereo16(int32_t* out, size_t outFrameCount,
+ AudioBufferProvider* provider);
+#ifdef ASM_ARM_RESAMP1 // asm optimisation for ResamplerOrder1
+ void AsmMono16Loop(int16_t *in, int32_t* maxOutPt, int32_t maxInIdx,
+ size_t &outputIndex, int32_t* out, size_t &inputIndex, int32_t vl, int32_t vr,
+ uint32_t &phaseFraction, uint32_t phaseIncrement);
+ void AsmStereo16Loop(int16_t *in, int32_t* maxOutPt, int32_t maxInIdx,
+ size_t &outputIndex, int32_t* out, size_t &inputIndex, int32_t vl, int32_t vr,
+ uint32_t &phaseFraction, uint32_t phaseIncrement);
+#endif // ASM_ARM_RESAMP1
+
+ static inline int32_t Interp(int32_t x0, int32_t x1, uint32_t f) {
+ return x0 + (((x1 - x0) * (int32_t)(f >> kPreInterpShift)) >> kNumInterpBits);
+ }
+ static inline void Advance(size_t* index, uint32_t* frac, uint32_t inc) {
+ *frac += inc;
+ *index += (size_t)(*frac >> kNumPhaseBits);
+ *frac &= kPhaseMask;
+ }
+ int mX0L;
+ int mX0R;
+};
+
+/*static*/
+const double AudioResampler::kPhaseMultiplier = 1L << AudioResampler::kNumPhaseBits;
+
+bool AudioResampler::qualityIsSupported(src_quality quality)
+{
+ switch (quality) {
+ case DEFAULT_QUALITY:
+ case LOW_QUALITY:
+ case MED_QUALITY:
+ case HIGH_QUALITY:
+ case VERY_HIGH_QUALITY:
+ case DYN_LOW_QUALITY:
+ case DYN_MED_QUALITY:
+ case DYN_HIGH_QUALITY:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// ----------------------------------------------------------------------------
+
+static pthread_once_t once_control = PTHREAD_ONCE_INIT;
+static AudioResampler::src_quality defaultQuality = AudioResampler::DEFAULT_QUALITY;
+
+void AudioResampler::init_routine()
+{
+ char value[PROPERTY_VALUE_MAX];
+ if (property_get("af.resampler.quality", value, NULL) > 0) {
+ char *endptr;
+ unsigned long l = strtoul(value, &endptr, 0);
+ if (*endptr == '\0') {
+ defaultQuality = (src_quality) l;
+ ALOGD("forcing AudioResampler quality to %d", defaultQuality);
+ if (defaultQuality < DEFAULT_QUALITY || defaultQuality > DYN_HIGH_QUALITY) {
+ defaultQuality = DEFAULT_QUALITY;
+ }
+ }
+ }
+}
+
+uint32_t AudioResampler::qualityMHz(src_quality quality)
+{
+ switch (quality) {
+ default:
+ case DEFAULT_QUALITY:
+ case LOW_QUALITY:
+ return 3;
+ case MED_QUALITY:
+ return 6;
+ case HIGH_QUALITY:
+ return 20;
+ case VERY_HIGH_QUALITY:
+ return 34;
+ case DYN_LOW_QUALITY:
+ return 4;
+ case DYN_MED_QUALITY:
+ return 6;
+ case DYN_HIGH_QUALITY:
+ return 12;
+ }
+}
+
+static const uint32_t maxMHz = 130; // an arbitrary number that permits 3 VHQ, should be tunable
+static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+static uint32_t currentMHz = 0;
+
+AudioResampler* AudioResampler::create(audio_format_t format, int inChannelCount,
+ int32_t sampleRate, src_quality quality) {
+
+ bool atFinalQuality;
+ if (quality == DEFAULT_QUALITY) {
+ // read the resampler default quality property the first time it is needed
+ int ok = pthread_once(&once_control, init_routine);
+ if (ok != 0) {
+ ALOGE("%s pthread_once failed: %d", __func__, ok);
+ }
+ quality = defaultQuality;
+ atFinalQuality = false;
+ } else {
+ atFinalQuality = true;
+ }
+
+ /* if the caller requests DEFAULT_QUALITY and af.resampler.property
+ * has not been set, the target resampler quality is set to DYN_MED_QUALITY,
+ * and allowed to "throttle" down to DYN_LOW_QUALITY if necessary
+ * due to estimated CPU load of having too many active resamplers
+ * (the code below the if).
+ */
+ if (quality == DEFAULT_QUALITY) {
+ quality = DYN_MED_QUALITY;
+ }
+
+ // naive implementation of CPU load throttling doesn't account for whether resampler is active
+ pthread_mutex_lock(&mutex);
+ for (;;) {
+ uint32_t deltaMHz = qualityMHz(quality);
+ uint32_t newMHz = currentMHz + deltaMHz;
+ if ((qualityIsSupported(quality) && newMHz <= maxMHz) || atFinalQuality) {
+ ALOGV("resampler load %u -> %u MHz due to delta +%u MHz from quality %d",
+ currentMHz, newMHz, deltaMHz, quality);
+ currentMHz = newMHz;
+ break;
+ }
+ // not enough CPU available for proposed quality level, so try next lowest level
+ switch (quality) {
+ default:
+ case LOW_QUALITY:
+ atFinalQuality = true;
+ break;
+ case MED_QUALITY:
+ quality = LOW_QUALITY;
+ break;
+ case HIGH_QUALITY:
+ quality = MED_QUALITY;
+ break;
+ case VERY_HIGH_QUALITY:
+ quality = HIGH_QUALITY;
+ break;
+ case DYN_LOW_QUALITY:
+ atFinalQuality = true;
+ break;
+ case DYN_MED_QUALITY:
+ quality = DYN_LOW_QUALITY;
+ break;
+ case DYN_HIGH_QUALITY:
+ quality = DYN_MED_QUALITY;
+ break;
+ }
+ }
+ pthread_mutex_unlock(&mutex);
+
+ AudioResampler* resampler;
+
+ switch (quality) {
+ default:
+ case LOW_QUALITY:
+ ALOGV("Create linear Resampler");
+ LOG_ALWAYS_FATAL_IF(format != AUDIO_FORMAT_PCM_16_BIT);
+ resampler = new AudioResamplerOrder1(inChannelCount, sampleRate);
+ break;
+ case MED_QUALITY:
+ ALOGV("Create cubic Resampler");
+ LOG_ALWAYS_FATAL_IF(format != AUDIO_FORMAT_PCM_16_BIT);
+ resampler = new AudioResamplerCubic(inChannelCount, sampleRate);
+ break;
+ case HIGH_QUALITY:
+ ALOGV("Create HIGH_QUALITY sinc Resampler");
+ LOG_ALWAYS_FATAL_IF(format != AUDIO_FORMAT_PCM_16_BIT);
+ resampler = new AudioResamplerSinc(inChannelCount, sampleRate);
+ break;
+ case VERY_HIGH_QUALITY:
+ ALOGV("Create VERY_HIGH_QUALITY sinc Resampler = %d", quality);
+ LOG_ALWAYS_FATAL_IF(format != AUDIO_FORMAT_PCM_16_BIT);
+ resampler = new AudioResamplerSinc(inChannelCount, sampleRate, quality);
+ break;
+ case DYN_LOW_QUALITY:
+ case DYN_MED_QUALITY:
+ case DYN_HIGH_QUALITY:
+ ALOGV("Create dynamic Resampler = %d", quality);
+ if (format == AUDIO_FORMAT_PCM_FLOAT) {
+ resampler = new AudioResamplerDyn<float, float, float>(inChannelCount,
+ sampleRate, quality);
+ } else {
+ LOG_ALWAYS_FATAL_IF(format != AUDIO_FORMAT_PCM_16_BIT);
+ if (quality == DYN_HIGH_QUALITY) {
+ resampler = new AudioResamplerDyn<int32_t, int16_t, int32_t>(inChannelCount,
+ sampleRate, quality);
+ } else {
+ resampler = new AudioResamplerDyn<int16_t, int16_t, int32_t>(inChannelCount,
+ sampleRate, quality);
+ }
+ }
+ break;
+ }
+
+ // initialize resampler
+ resampler->init();
+ return resampler;
+}
+
+AudioResampler::AudioResampler(int inChannelCount,
+ int32_t sampleRate, src_quality quality) :
+ mChannelCount(inChannelCount),
+ mSampleRate(sampleRate), mInSampleRate(sampleRate), mInputIndex(0),
+ mPhaseFraction(0),
+ mQuality(quality) {
+
+ const int maxChannels = quality < DYN_LOW_QUALITY ? 2 : 8;
+ if (inChannelCount < 1
+ || inChannelCount > maxChannels) {
+ LOG_ALWAYS_FATAL("Unsupported sample format %d quality %d channels",
+ quality, inChannelCount);
+ }
+ if (sampleRate <= 0) {
+ LOG_ALWAYS_FATAL("Unsupported sample rate %d Hz", sampleRate);
+ }
+
+ // initialize common members
+ mVolume[0] = mVolume[1] = 0;
+ mBuffer.frameCount = 0;
+}
+
+AudioResampler::~AudioResampler() {
+ pthread_mutex_lock(&mutex);
+ src_quality quality = getQuality();
+ uint32_t deltaMHz = qualityMHz(quality);
+ int32_t newMHz = currentMHz - deltaMHz;
+ ALOGV("resampler load %u -> %d MHz due to delta -%u MHz from quality %d",
+ currentMHz, newMHz, deltaMHz, quality);
+ LOG_ALWAYS_FATAL_IF(newMHz < 0, "negative resampler load %d MHz", newMHz);
+ currentMHz = newMHz;
+ pthread_mutex_unlock(&mutex);
+}
+
+void AudioResampler::setSampleRate(int32_t inSampleRate) {
+ mInSampleRate = inSampleRate;
+ mPhaseIncrement = (uint32_t)((kPhaseMultiplier * inSampleRate) / mSampleRate);
+}
+
+void AudioResampler::setVolume(float left, float right) {
+ // TODO: Implement anti-zipper filter
+ // convert to U4.12 for internal integer use (round down)
+ // integer volume values are clamped to 0 to UNITY_GAIN.
+ mVolume[0] = u4_12_from_float(clampFloatVol(left));
+ mVolume[1] = u4_12_from_float(clampFloatVol(right));
+}
+
+void AudioResampler::reset() {
+ mInputIndex = 0;
+ mPhaseFraction = 0;
+ mBuffer.frameCount = 0;
+}
+
+// ----------------------------------------------------------------------------
+
+size_t AudioResamplerOrder1::resample(int32_t* out, size_t outFrameCount,
+ AudioBufferProvider* provider) {
+
+ // should never happen, but we overflow if it does
+ // ALOG_ASSERT(outFrameCount < 32767);
+
+ // select the appropriate resampler
+ switch (mChannelCount) {
+ case 1:
+ return resampleMono16(out, outFrameCount, provider);
+ case 2:
+ return resampleStereo16(out, outFrameCount, provider);
+ default:
+ LOG_ALWAYS_FATAL("invalid channel count: %d", mChannelCount);
+ return 0;
+ }
+}
+
+size_t AudioResamplerOrder1::resampleStereo16(int32_t* out, size_t outFrameCount,
+ AudioBufferProvider* provider) {
+
+ int32_t vl = mVolume[0];
+ int32_t vr = mVolume[1];
+
+ size_t inputIndex = mInputIndex;
+ uint32_t phaseFraction = mPhaseFraction;
+ uint32_t phaseIncrement = mPhaseIncrement;
+ size_t outputIndex = 0;
+ size_t outputSampleCount = outFrameCount * 2;
+ size_t inFrameCount = getInFrameCountRequired(outFrameCount);
+
+ // ALOGE("starting resample %d frames, inputIndex=%d, phaseFraction=%d, phaseIncrement=%d",
+ // outFrameCount, inputIndex, phaseFraction, phaseIncrement);
+
+ while (outputIndex < outputSampleCount) {
+
+ // buffer is empty, fetch a new one
+ while (mBuffer.frameCount == 0) {
+ mBuffer.frameCount = inFrameCount;
+ provider->getNextBuffer(&mBuffer);
+ if (mBuffer.raw == NULL) {
+ goto resampleStereo16_exit;
+ }
+
+ // ALOGE("New buffer fetched: %d frames", mBuffer.frameCount);
+ if (mBuffer.frameCount > inputIndex) break;
+
+ inputIndex -= mBuffer.frameCount;
+ mX0L = mBuffer.i16[mBuffer.frameCount*2-2];
+ mX0R = mBuffer.i16[mBuffer.frameCount*2-1];
+ provider->releaseBuffer(&mBuffer);
+ // mBuffer.frameCount == 0 now so we reload a new buffer
+ }
+
+ int16_t *in = mBuffer.i16;
+
+ // handle boundary case
+ while (inputIndex == 0) {
+ // ALOGE("boundary case");
+ out[outputIndex++] += vl * Interp(mX0L, in[0], phaseFraction);
+ out[outputIndex++] += vr * Interp(mX0R, in[1], phaseFraction);
+ Advance(&inputIndex, &phaseFraction, phaseIncrement);
+ if (outputIndex == outputSampleCount) {
+ break;
+ }
+ }
+
+ // process input samples
+ // ALOGE("general case");
+
+#ifdef ASM_ARM_RESAMP1 // asm optimisation for ResamplerOrder1
+ if (inputIndex + 2 < mBuffer.frameCount) {
+ int32_t* maxOutPt;
+ int32_t maxInIdx;
+
+ maxOutPt = out + (outputSampleCount - 2); // 2 because 2 frames per loop
+ maxInIdx = mBuffer.frameCount - 2;
+ AsmStereo16Loop(in, maxOutPt, maxInIdx, outputIndex, out, inputIndex, vl, vr,
+ phaseFraction, phaseIncrement);
+ }
+#endif // ASM_ARM_RESAMP1
+
+ while (outputIndex < outputSampleCount && inputIndex < mBuffer.frameCount) {
+ out[outputIndex++] += vl * Interp(in[inputIndex*2-2],
+ in[inputIndex*2], phaseFraction);
+ out[outputIndex++] += vr * Interp(in[inputIndex*2-1],
+ in[inputIndex*2+1], phaseFraction);
+ Advance(&inputIndex, &phaseFraction, phaseIncrement);
+ }
+
+ // ALOGE("loop done - outputIndex=%d, inputIndex=%d", outputIndex, inputIndex);
+
+ // if done with buffer, save samples
+ if (inputIndex >= mBuffer.frameCount) {
+ inputIndex -= mBuffer.frameCount;
+
+ // ALOGE("buffer done, new input index %d", inputIndex);
+
+ mX0L = mBuffer.i16[mBuffer.frameCount*2-2];
+ mX0R = mBuffer.i16[mBuffer.frameCount*2-1];
+ provider->releaseBuffer(&mBuffer);
+
+ // verify that the releaseBuffer resets the buffer frameCount
+ // ALOG_ASSERT(mBuffer.frameCount == 0);
+ }
+ }
+
+ // ALOGE("output buffer full - outputIndex=%d, inputIndex=%d", outputIndex, inputIndex);
+
+resampleStereo16_exit:
+ // save state
+ mInputIndex = inputIndex;
+ mPhaseFraction = phaseFraction;
+ return outputIndex / 2 /* channels for stereo */;
+}
+
+size_t AudioResamplerOrder1::resampleMono16(int32_t* out, size_t outFrameCount,
+ AudioBufferProvider* provider) {
+
+ int32_t vl = mVolume[0];
+ int32_t vr = mVolume[1];
+
+ size_t inputIndex = mInputIndex;
+ uint32_t phaseFraction = mPhaseFraction;
+ uint32_t phaseIncrement = mPhaseIncrement;
+ size_t outputIndex = 0;
+ size_t outputSampleCount = outFrameCount * 2;
+ size_t inFrameCount = getInFrameCountRequired(outFrameCount);
+
+ // ALOGE("starting resample %d frames, inputIndex=%d, phaseFraction=%d, phaseIncrement=%d",
+ // outFrameCount, inputIndex, phaseFraction, phaseIncrement);
+ while (outputIndex < outputSampleCount) {
+ // buffer is empty, fetch a new one
+ while (mBuffer.frameCount == 0) {
+ mBuffer.frameCount = inFrameCount;
+ provider->getNextBuffer(&mBuffer);
+ if (mBuffer.raw == NULL) {
+ mInputIndex = inputIndex;
+ mPhaseFraction = phaseFraction;
+ goto resampleMono16_exit;
+ }
+ // ALOGE("New buffer fetched: %d frames", mBuffer.frameCount);
+ if (mBuffer.frameCount > inputIndex) break;
+
+ inputIndex -= mBuffer.frameCount;
+ mX0L = mBuffer.i16[mBuffer.frameCount-1];
+ provider->releaseBuffer(&mBuffer);
+ // mBuffer.frameCount == 0 now so we reload a new buffer
+ }
+ int16_t *in = mBuffer.i16;
+
+ // handle boundary case
+ while (inputIndex == 0) {
+ // ALOGE("boundary case");
+ int32_t sample = Interp(mX0L, in[0], phaseFraction);
+ out[outputIndex++] += vl * sample;
+ out[outputIndex++] += vr * sample;
+ Advance(&inputIndex, &phaseFraction, phaseIncrement);
+ if (outputIndex == outputSampleCount) {
+ break;
+ }
+ }
+
+ // process input samples
+ // ALOGE("general case");
+
+#ifdef ASM_ARM_RESAMP1 // asm optimisation for ResamplerOrder1
+ if (inputIndex + 2 < mBuffer.frameCount) {
+ int32_t* maxOutPt;
+ int32_t maxInIdx;
+
+ maxOutPt = out + (outputSampleCount - 2);
+ maxInIdx = (int32_t)mBuffer.frameCount - 2;
+ AsmMono16Loop(in, maxOutPt, maxInIdx, outputIndex, out, inputIndex, vl, vr,
+ phaseFraction, phaseIncrement);
+ }
+#endif // ASM_ARM_RESAMP1
+
+ while (outputIndex < outputSampleCount && inputIndex < mBuffer.frameCount) {
+ int32_t sample = Interp(in[inputIndex-1], in[inputIndex],
+ phaseFraction);
+ out[outputIndex++] += vl * sample;
+ out[outputIndex++] += vr * sample;
+ Advance(&inputIndex, &phaseFraction, phaseIncrement);
+ }
+
+
+ // ALOGE("loop done - outputIndex=%d, inputIndex=%d", outputIndex, inputIndex);
+
+ // if done with buffer, save samples
+ if (inputIndex >= mBuffer.frameCount) {
+ inputIndex -= mBuffer.frameCount;
+
+ // ALOGE("buffer done, new input index %d", inputIndex);
+
+ mX0L = mBuffer.i16[mBuffer.frameCount-1];
+ provider->releaseBuffer(&mBuffer);
+
+ // verify that the releaseBuffer resets the buffer frameCount
+ // ALOG_ASSERT(mBuffer.frameCount == 0);
+ }
+ }
+
+ // ALOGE("output buffer full - outputIndex=%d, inputIndex=%d", outputIndex, inputIndex);
+
+resampleMono16_exit:
+ // save state
+ mInputIndex = inputIndex;
+ mPhaseFraction = phaseFraction;
+ return outputIndex;
+}
+
+#ifdef ASM_ARM_RESAMP1 // asm optimisation for ResamplerOrder1
+
+/*******************************************************************
+*
+* AsmMono16Loop
+* asm optimized monotonic loop version; one loop is 2 frames
+* Input:
+* in : pointer on input samples
+* maxOutPt : pointer on first not filled
+* maxInIdx : index on first not used
+* outputIndex : pointer on current output index
+* out : pointer on output buffer
+* inputIndex : pointer on current input index
+* vl, vr : left and right gain
+* phaseFraction : pointer on current phase fraction
+* phaseIncrement
+* Output:
+* outputIndex :
+* out : updated buffer
+* inputIndex : index of next to use
+* phaseFraction : phase fraction for next interpolation
+*
+*******************************************************************/
+__attribute__((noinline))
+void AudioResamplerOrder1::AsmMono16Loop(int16_t *in, int32_t* maxOutPt, int32_t maxInIdx,
+ size_t &outputIndex, int32_t* out, size_t &inputIndex, int32_t vl, int32_t vr,
+ uint32_t &phaseFraction, uint32_t phaseIncrement)
+{
+ (void)maxOutPt; // remove unused parameter warnings
+ (void)maxInIdx;
+ (void)outputIndex;
+ (void)out;
+ (void)inputIndex;
+ (void)vl;
+ (void)vr;
+ (void)phaseFraction;
+ (void)phaseIncrement;
+ (void)in;
+#define MO_PARAM5 "36" // offset of parameter 5 (outputIndex)
+
+ asm(
+ "stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, lr}\n"
+ // get parameters
+ " ldr r6, [sp, #" MO_PARAM5 " + 20]\n" // &phaseFraction
+ " ldr r6, [r6]\n" // phaseFraction
+ " ldr r7, [sp, #" MO_PARAM5 " + 8]\n" // &inputIndex
+ " ldr r7, [r7]\n" // inputIndex
+ " ldr r8, [sp, #" MO_PARAM5 " + 4]\n" // out
+ " ldr r0, [sp, #" MO_PARAM5 " + 0]\n" // &outputIndex
+ " ldr r0, [r0]\n" // outputIndex
+ " add r8, r8, r0, asl #2\n" // curOut
+ " ldr r9, [sp, #" MO_PARAM5 " + 24]\n" // phaseIncrement
+ " ldr r10, [sp, #" MO_PARAM5 " + 12]\n" // vl
+ " ldr r11, [sp, #" MO_PARAM5 " + 16]\n" // vr
+
+ // r0 pin, x0, Samp
+
+ // r1 in
+ // r2 maxOutPt
+ // r3 maxInIdx
+
+ // r4 x1, i1, i3, Out1
+ // r5 out0
+
+ // r6 frac
+ // r7 inputIndex
+ // r8 curOut
+
+ // r9 inc
+ // r10 vl
+ // r11 vr
+
+ // r12
+ // r13 sp
+ // r14
+
+ // the following loop works on 2 frames
+
+ "1:\n"
+ " cmp r8, r2\n" // curOut - maxCurOut
+ " bcs 2f\n"
+
+#define MO_ONE_FRAME \
+ " add r0, r1, r7, asl #1\n" /* in + inputIndex */\
+ " ldrsh r4, [r0]\n" /* in[inputIndex] */\
+ " ldr r5, [r8]\n" /* out[outputIndex] */\
+ " ldrsh r0, [r0, #-2]\n" /* in[inputIndex-1] */\
+ " bic r6, r6, #0xC0000000\n" /* phaseFraction & ... */\
+ " sub r4, r4, r0\n" /* in[inputIndex] - in[inputIndex-1] */\
+ " mov r4, r4, lsl #2\n" /* <<2 */\
+ " smulwt r4, r4, r6\n" /* (x1-x0)*.. */\
+ " add r6, r6, r9\n" /* phaseFraction + phaseIncrement */\
+ " add r0, r0, r4\n" /* x0 - (..) */\
+ " mla r5, r0, r10, r5\n" /* vl*interp + out[] */\
+ " ldr r4, [r8, #4]\n" /* out[outputIndex+1] */\
+ " str r5, [r8], #4\n" /* out[outputIndex++] = ... */\
+ " mla r4, r0, r11, r4\n" /* vr*interp + out[] */\
+ " add r7, r7, r6, lsr #30\n" /* inputIndex + phaseFraction>>30 */\
+ " str r4, [r8], #4\n" /* out[outputIndex++] = ... */
+
+ MO_ONE_FRAME // frame 1
+ MO_ONE_FRAME // frame 2
+
+ " cmp r7, r3\n" // inputIndex - maxInIdx
+ " bcc 1b\n"
+ "2:\n"
+
+ " bic r6, r6, #0xC0000000\n" // phaseFraction & ...
+ // save modified values
+ " ldr r0, [sp, #" MO_PARAM5 " + 20]\n" // &phaseFraction
+ " str r6, [r0]\n" // phaseFraction
+ " ldr r0, [sp, #" MO_PARAM5 " + 8]\n" // &inputIndex
+ " str r7, [r0]\n" // inputIndex
+ " ldr r0, [sp, #" MO_PARAM5 " + 4]\n" // out
+ " sub r8, r0\n" // curOut - out
+ " asr r8, #2\n" // new outputIndex
+ " ldr r0, [sp, #" MO_PARAM5 " + 0]\n" // &outputIndex
+ " str r8, [r0]\n" // save outputIndex
+
+ " ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc}\n"
+ );
+}
+
+/*******************************************************************
+*
+* AsmStereo16Loop
+* asm optimized stereo loop version; one loop is 2 frames
+* Input:
+* in : pointer on input samples
+* maxOutPt : pointer on first not filled
+* maxInIdx : index on first not used
+* outputIndex : pointer on current output index
+* out : pointer on output buffer
+* inputIndex : pointer on current input index
+* vl, vr : left and right gain
+* phaseFraction : pointer on current phase fraction
+* phaseIncrement
+* Output:
+* outputIndex :
+* out : updated buffer
+* inputIndex : index of next to use
+* phaseFraction : phase fraction for next interpolation
+*
+*******************************************************************/
+__attribute__((noinline))
+void AudioResamplerOrder1::AsmStereo16Loop(int16_t *in, int32_t* maxOutPt, int32_t maxInIdx,
+ size_t &outputIndex, int32_t* out, size_t &inputIndex, int32_t vl, int32_t vr,
+ uint32_t &phaseFraction, uint32_t phaseIncrement)
+{
+ (void)maxOutPt; // remove unused parameter warnings
+ (void)maxInIdx;
+ (void)outputIndex;
+ (void)out;
+ (void)inputIndex;
+ (void)vl;
+ (void)vr;
+ (void)phaseFraction;
+ (void)phaseIncrement;
+ (void)in;
+#define ST_PARAM5 "40" // offset of parameter 5 (outputIndex)
+ asm(
+ "stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r12, lr}\n"
+ // get parameters
+ " ldr r6, [sp, #" ST_PARAM5 " + 20]\n" // &phaseFraction
+ " ldr r6, [r6]\n" // phaseFraction
+ " ldr r7, [sp, #" ST_PARAM5 " + 8]\n" // &inputIndex
+ " ldr r7, [r7]\n" // inputIndex
+ " ldr r8, [sp, #" ST_PARAM5 " + 4]\n" // out
+ " ldr r0, [sp, #" ST_PARAM5 " + 0]\n" // &outputIndex
+ " ldr r0, [r0]\n" // outputIndex
+ " add r8, r8, r0, asl #2\n" // curOut
+ " ldr r9, [sp, #" ST_PARAM5 " + 24]\n" // phaseIncrement
+ " ldr r10, [sp, #" ST_PARAM5 " + 12]\n" // vl
+ " ldr r11, [sp, #" ST_PARAM5 " + 16]\n" // vr
+
+ // r0 pin, x0, Samp
+
+ // r1 in
+ // r2 maxOutPt
+ // r3 maxInIdx
+
+ // r4 x1, i1, i3, out1
+ // r5 out0
+
+ // r6 frac
+ // r7 inputIndex
+ // r8 curOut
+
+ // r9 inc
+ // r10 vl
+ // r11 vr
+
+ // r12 temporary
+ // r13 sp
+ // r14
+
+ "3:\n"
+ " cmp r8, r2\n" // curOut - maxCurOut
+ " bcs 4f\n"
+
+#define ST_ONE_FRAME \
+ " bic r6, r6, #0xC0000000\n" /* phaseFraction & ... */\
+\
+ " add r0, r1, r7, asl #2\n" /* in + 2*inputIndex */\
+\
+ " ldrsh r4, [r0]\n" /* in[2*inputIndex] */\
+ " ldr r5, [r8]\n" /* out[outputIndex] */\
+ " ldrsh r12, [r0, #-4]\n" /* in[2*inputIndex-2] */\
+ " sub r4, r4, r12\n" /* in[2*InputIndex] - in[2*InputIndex-2] */\
+ " mov r4, r4, lsl #2\n" /* <<2 */\
+ " smulwt r4, r4, r6\n" /* (x1-x0)*.. */\
+ " add r12, r12, r4\n" /* x0 - (..) */\
+ " mla r5, r12, r10, r5\n" /* vl*interp + out[] */\
+ " ldr r4, [r8, #4]\n" /* out[outputIndex+1] */\
+ " str r5, [r8], #4\n" /* out[outputIndex++] = ... */\
+\
+ " ldrsh r12, [r0, #+2]\n" /* in[2*inputIndex+1] */\
+ " ldrsh r0, [r0, #-2]\n" /* in[2*inputIndex-1] */\
+ " sub r12, r12, r0\n" /* in[2*inputIndex+1] - in[2*inputIndex-1] */\
+ " mov r12, r12, lsl #2\n" /* <<2 */\
+ " smulwt r12, r12, r6\n" /* (x1-x0)*.. */\
+ " add r12, r0, r12\n" /* x0 - (..) */\
+ " mla r4, r12, r11, r4\n" /* vr*interp + out[] */\
+ " str r4, [r8], #4\n" /* out[outputIndex++] = ... */\
+\
+ " add r6, r6, r9\n" /* phaseFraction + phaseIncrement */\
+ " add r7, r7, r6, lsr #30\n" /* inputIndex + phaseFraction>>30 */
+
+ ST_ONE_FRAME // frame 1
+ ST_ONE_FRAME // frame 2
+
+ " cmp r7, r3\n" // inputIndex - maxInIdx
+ " bcc 3b\n"
+ "4:\n"
+
+ " bic r6, r6, #0xC0000000\n" // phaseFraction & ...
+ // save modified values
+ " ldr r0, [sp, #" ST_PARAM5 " + 20]\n" // &phaseFraction
+ " str r6, [r0]\n" // phaseFraction
+ " ldr r0, [sp, #" ST_PARAM5 " + 8]\n" // &inputIndex
+ " str r7, [r0]\n" // inputIndex
+ " ldr r0, [sp, #" ST_PARAM5 " + 4]\n" // out
+ " sub r8, r0\n" // curOut - out
+ " asr r8, #2\n" // new outputIndex
+ " ldr r0, [sp, #" ST_PARAM5 " + 0]\n" // &outputIndex
+ " str r8, [r0]\n" // save outputIndex
+
+ " ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r12, pc}\n"
+ );
+}
+
+#endif // ASM_ARM_RESAMP1
+
+
+// ----------------------------------------------------------------------------
+
+} // namespace android
diff --git a/media/libaudioprocessing/AudioResamplerCubic.cpp b/media/libaudioprocessing/AudioResamplerCubic.cpp
new file mode 100644
index 0000000..9bcd8e1
--- /dev/null
+++ b/media/libaudioprocessing/AudioResamplerCubic.cpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) 2007 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioResamplerCubic"
+
+#include <stdint.h>
+#include <string.h>
+#include <sys/types.h>
+
+#include <log/log.h>
+
+#include "AudioResamplerCubic.h"
+
+namespace android {
+// ----------------------------------------------------------------------------
+
+void AudioResamplerCubic::init() {
+ memset(&left, 0, sizeof(state));
+ memset(&right, 0, sizeof(state));
+}
+
+size_t AudioResamplerCubic::resample(int32_t* out, size_t outFrameCount,
+ AudioBufferProvider* provider) {
+
+ // should never happen, but we overflow if it does
+ // ALOG_ASSERT(outFrameCount < 32767);
+
+ // select the appropriate resampler
+ switch (mChannelCount) {
+ case 1:
+ return resampleMono16(out, outFrameCount, provider);
+ case 2:
+ return resampleStereo16(out, outFrameCount, provider);
+ default:
+ LOG_ALWAYS_FATAL("invalid channel count: %d", mChannelCount);
+ return 0;
+ }
+}
+
+size_t AudioResamplerCubic::resampleStereo16(int32_t* out, size_t outFrameCount,
+ AudioBufferProvider* provider) {
+
+ int32_t vl = mVolume[0];
+ int32_t vr = mVolume[1];
+
+ size_t inputIndex = mInputIndex;
+ uint32_t phaseFraction = mPhaseFraction;
+ uint32_t phaseIncrement = mPhaseIncrement;
+ size_t outputIndex = 0;
+ size_t outputSampleCount = outFrameCount * 2;
+ size_t inFrameCount = getInFrameCountRequired(outFrameCount);
+
+ // fetch first buffer
+ if (mBuffer.frameCount == 0) {
+ mBuffer.frameCount = inFrameCount;
+ provider->getNextBuffer(&mBuffer);
+ if (mBuffer.raw == NULL) {
+ return 0;
+ }
+ // ALOGW("New buffer: offset=%p, frames=%d", mBuffer.raw, mBuffer.frameCount);
+ }
+ int16_t *in = mBuffer.i16;
+
+ while (outputIndex < outputSampleCount) {
+ int32_t x;
+
+ // calculate output sample
+ x = phaseFraction >> kPreInterpShift;
+ out[outputIndex++] += vl * interp(&left, x);
+ out[outputIndex++] += vr * interp(&right, x);
+ // out[outputIndex++] += vr * in[inputIndex*2];
+
+ // increment phase
+ phaseFraction += phaseIncrement;
+ uint32_t indexIncrement = (phaseFraction >> kNumPhaseBits);
+ phaseFraction &= kPhaseMask;
+
+ // time to fetch another sample
+ while (indexIncrement--) {
+
+ inputIndex++;
+ if (inputIndex == mBuffer.frameCount) {
+ inputIndex = 0;
+ provider->releaseBuffer(&mBuffer);
+ mBuffer.frameCount = inFrameCount;
+ provider->getNextBuffer(&mBuffer);
+ if (mBuffer.raw == NULL) {
+ goto save_state; // ugly, but efficient
+ }
+ in = mBuffer.i16;
+ // ALOGW("New buffer: offset=%p, frames=%d", mBuffer.raw, mBuffer.frameCount);
+ }
+
+ // advance sample state
+ advance(&left, in[inputIndex*2]);
+ advance(&right, in[inputIndex*2+1]);
+ }
+ }
+
+save_state:
+ // ALOGW("Done: index=%d, fraction=%u", inputIndex, phaseFraction);
+ mInputIndex = inputIndex;
+ mPhaseFraction = phaseFraction;
+ return outputIndex / 2 /* channels for stereo */;
+}
+
+size_t AudioResamplerCubic::resampleMono16(int32_t* out, size_t outFrameCount,
+ AudioBufferProvider* provider) {
+
+ int32_t vl = mVolume[0];
+ int32_t vr = mVolume[1];
+
+ size_t inputIndex = mInputIndex;
+ uint32_t phaseFraction = mPhaseFraction;
+ uint32_t phaseIncrement = mPhaseIncrement;
+ size_t outputIndex = 0;
+ size_t outputSampleCount = outFrameCount * 2;
+ size_t inFrameCount = getInFrameCountRequired(outFrameCount);
+
+ // fetch first buffer
+ if (mBuffer.frameCount == 0) {
+ mBuffer.frameCount = inFrameCount;
+ provider->getNextBuffer(&mBuffer);
+ if (mBuffer.raw == NULL) {
+ return 0;
+ }
+ // ALOGW("New buffer: offset=%p, frames=%d", mBuffer.raw, mBuffer.frameCount);
+ }
+ int16_t *in = mBuffer.i16;
+
+ while (outputIndex < outputSampleCount) {
+ int32_t sample;
+ int32_t x;
+
+ // calculate output sample
+ x = phaseFraction >> kPreInterpShift;
+ sample = interp(&left, x);
+ out[outputIndex++] += vl * sample;
+ out[outputIndex++] += vr * sample;
+
+ // increment phase
+ phaseFraction += phaseIncrement;
+ uint32_t indexIncrement = (phaseFraction >> kNumPhaseBits);
+ phaseFraction &= kPhaseMask;
+
+ // time to fetch another sample
+ while (indexIncrement--) {
+
+ inputIndex++;
+ if (inputIndex == mBuffer.frameCount) {
+ inputIndex = 0;
+ provider->releaseBuffer(&mBuffer);
+ mBuffer.frameCount = inFrameCount;
+ provider->getNextBuffer(&mBuffer);
+ if (mBuffer.raw == NULL) {
+ goto save_state; // ugly, but efficient
+ }
+ // ALOGW("New buffer: offset=%p, frames=%d", mBuffer.raw, mBuffer.frameCount);
+ in = mBuffer.i16;
+ }
+
+ // advance sample state
+ advance(&left, in[inputIndex]);
+ }
+ }
+
+save_state:
+ // ALOGW("Done: index=%d, fraction=%u", inputIndex, phaseFraction);
+ mInputIndex = inputIndex;
+ mPhaseFraction = phaseFraction;
+ return outputIndex;
+}
+
+// ----------------------------------------------------------------------------
+} // namespace android
diff --git a/media/libaudioprocessing/AudioResamplerCubic.h b/media/libaudioprocessing/AudioResamplerCubic.h
new file mode 100644
index 0000000..defaf33
--- /dev/null
+++ b/media/libaudioprocessing/AudioResamplerCubic.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2007 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_RESAMPLER_CUBIC_H
+#define ANDROID_AUDIO_RESAMPLER_CUBIC_H
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <android/log.h>
+
+#include <media/AudioResampler.h>
+
+namespace android {
+// ----------------------------------------------------------------------------
+
+class AudioResamplerCubic : public AudioResampler {
+public:
+ AudioResamplerCubic(int inChannelCount, int32_t sampleRate) :
+ AudioResampler(inChannelCount, sampleRate, MED_QUALITY) {
+ }
+ virtual size_t resample(int32_t* out, size_t outFrameCount,
+ AudioBufferProvider* provider);
+private:
+ // number of bits used in interpolation multiply - 14 bits avoids overflow
+ static const int kNumInterpBits = 14;
+
+ // bits to shift the phase fraction down to avoid overflow
+ static const int kPreInterpShift = kNumPhaseBits - kNumInterpBits;
+ typedef struct {
+ int32_t a, b, c, y0, y1, y2, y3;
+ } state;
+ void init();
+ size_t resampleMono16(int32_t* out, size_t outFrameCount,
+ AudioBufferProvider* provider);
+ size_t resampleStereo16(int32_t* out, size_t outFrameCount,
+ AudioBufferProvider* provider);
+ static inline int32_t interp(state* p, int32_t x) {
+ return (((((p->a * x >> 14) + p->b) * x >> 14) + p->c) * x >> 14) + p->y1;
+ }
+ static inline void advance(state* p, int16_t in) {
+ p->y0 = p->y1;
+ p->y1 = p->y2;
+ p->y2 = p->y3;
+ p->y3 = in;
+ p->a = (3 * (p->y1 - p->y2) - p->y0 + p->y3) >> 1;
+ p->b = (p->y2 << 1) + p->y0 - (((5 * p->y1 + p->y3)) >> 1);
+ p->c = (p->y2 - p->y0) >> 1;
+ }
+ state left, right;
+};
+
+// ----------------------------------------------------------------------------
+} // namespace android
+
+#endif /*ANDROID_AUDIO_RESAMPLER_CUBIC_H*/
diff --git a/media/libaudioprocessing/AudioResamplerDyn.cpp b/media/libaudioprocessing/AudioResamplerDyn.cpp
new file mode 100644
index 0000000..8f7b982
--- /dev/null
+++ b/media/libaudioprocessing/AudioResamplerDyn.cpp
@@ -0,0 +1,633 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioResamplerDyn"
+//#define LOG_NDEBUG 0
+
+#include <malloc.h>
+#include <string.h>
+#include <stdlib.h>
+#include <dlfcn.h>
+#include <math.h>
+
+#include <cutils/compiler.h>
+#include <cutils/properties.h>
+#include <utils/Debug.h>
+#include <utils/Log.h>
+#include <audio_utils/primitives.h>
+
+#include "AudioResamplerFirOps.h" // USE_NEON, USE_SSE and USE_INLINE_ASSEMBLY defined here
+#include "AudioResamplerFirProcess.h"
+#include "AudioResamplerFirProcessNeon.h"
+#include "AudioResamplerFirProcessSSE.h"
+#include "AudioResamplerFirGen.h" // requires math.h
+#include "AudioResamplerDyn.h"
+
+//#define DEBUG_RESAMPLER
+
+namespace android {
+
+/*
+ * InBuffer is a type agnostic input buffer.
+ *
+ * Layout of the state buffer for halfNumCoefs=8.
+ *
+ * [rrrrrrppppppppnnnnnnnnrrrrrrrrrrrrrrrrrrr.... rrrrrrr]
+ * S I R
+ *
+ * S = mState
+ * I = mImpulse
+ * R = mRingFull
+ * p = past samples, convoluted with the (p)ositive side of sinc()
+ * n = future samples, convoluted with the (n)egative side of sinc()
+ * r = extra space for implementing the ring buffer
+ */
+
+// Default-construct with no storage; resize() allocates on first use.
+template<typename TC, typename TI, typename TO>
+AudioResamplerDyn<TC, TI, TO>::InBuffer::InBuffer()
+    : mState(NULL), mImpulse(NULL), mRingFull(NULL), mStateCount(0)
+{
+}
+
+// Destructor releases the state buffer (init() frees and nulls everything).
+template<typename TC, typename TI, typename TO>
+AudioResamplerDyn<TC, TI, TO>::InBuffer::~InBuffer()
+{
+    init();
+}
+
+// Frees the state buffer and resets all pointers/counters to the
+// default-constructed state. Safe to call repeatedly (free(NULL) is a no-op).
+template<typename TC, typename TI, typename TO>
+void AudioResamplerDyn<TC, TI, TO>::InBuffer::init()
+{
+    free(mState);
+    mState = NULL;
+    mImpulse = NULL;
+    mRingFull = NULL;
+    mStateCount = 0;
+}
+
+// resizes the state buffer to accommodate the appropriate filter length
+template<typename TC, typename TI, typename TO>
+void AudioResamplerDyn<TC, TI, TO>::InBuffer::resize(int CHANNELS, int halfNumCoefs)
+{
+ // calculate desired state size
+ size_t stateCount = halfNumCoefs * CHANNELS * 2 * kStateSizeMultipleOfFilterLength;
+
+ // check if buffer needs resizing
+ if (mState
+ && stateCount == mStateCount
+ && mRingFull-mState == (ssize_t) (mStateCount-halfNumCoefs*CHANNELS)) {
+ return;
+ }
+
+ // create new buffer
+ TI* state = NULL;
+ (void)posix_memalign(reinterpret_cast<void**>(&state), 32, stateCount*sizeof(*state));
+ memset(state, 0, stateCount*sizeof(*state));
+
+ // attempt to preserve state
+ if (mState) {
+ TI* srcLo = mImpulse - halfNumCoefs*CHANNELS;
+ TI* srcHi = mImpulse + halfNumCoefs*CHANNELS;
+ TI* dst = state;
+
+ if (srcLo < mState) {
+ dst += mState-srcLo;
+ srcLo = mState;
+ }
+ if (srcHi > mState + mStateCount) {
+ srcHi = mState + mStateCount;
+ }
+ memcpy(dst, srcLo, (srcHi - srcLo) * sizeof(*srcLo));
+ free(mState);
+ }
+
+ // set class member vars
+ mState = state;
+ mStateCount = stateCount;
+ mImpulse = state + halfNumCoefs*CHANNELS; // actually one sample greater than needed
+ mRingFull = state + mStateCount - halfNumCoefs*CHANNELS;
+}
+
+// copy in the input data into the head (impulse+halfNumCoefs) of the buffer.
+template<typename TC, typename TI, typename TO>
+template<int CHANNELS>
+void AudioResamplerDyn<TC, TI, TO>::InBuffer::readAgain(TI*& impulse, const int halfNumCoefs,
+ const TI* const in, const size_t inputIndex)
+{
+ TI* head = impulse + halfNumCoefs*CHANNELS;
+ for (size_t i=0 ; i<CHANNELS ; i++) {
+ head[i] = in[inputIndex*CHANNELS + i];
+ }
+}
+
+// advance the impulse pointer, and load in data into the head (impulse+halfNumCoefs)
+template<typename TC, typename TI, typename TO>
+template<int CHANNELS>
+void AudioResamplerDyn<TC, TI, TO>::InBuffer::readAdvance(TI*& impulse, const int halfNumCoefs,
+ const TI* const in, const size_t inputIndex)
+{
+ impulse += CHANNELS;
+
+ if (CC_UNLIKELY(impulse >= mRingFull)) {
+ const size_t shiftDown = mRingFull - mState - halfNumCoefs*CHANNELS;
+ memcpy(mState, mState+shiftDown, halfNumCoefs*CHANNELS*2*sizeof(TI));
+ impulse -= shiftDown;
+ }
+ readAgain<CHANNELS>(impulse, halfNumCoefs, in, inputIndex);
+}
+
+// Zero the resampler history so stale samples do not leak into the next
+// buffer (used after an underrun / end of stream).
+template<typename TC, typename TI, typename TO>
+void AudioResamplerDyn<TC, TI, TO>::InBuffer::reset()
+{
+    // clear resampler state
+    if (mState != nullptr) {
+        memset(mState, 0, mStateCount * sizeof(TI));
+    }
+}
+
+// Record the filter bank geometry: L polyphases, halfNumCoefs taps per side,
+// and mShift, the right shift that maps the phase fraction to a polyphase
+// index (kNumPhaseBits minus the bit width of the effective phase count).
+template<typename TC, typename TI, typename TO>
+void AudioResamplerDyn<TC, TI, TO>::Constants::set(
+        int L, int halfNumCoefs, int inSampleRate, int outSampleRate)
+{
+    int bits = 0;
+    // when downsampling by 2x or more, scale the phase count by the
+    // conversion ratio; otherwise use L-1 so the bit count covers 0..L-1.
+    int lscale = inSampleRate/outSampleRate < 2 ? L - 1 :
+            static_cast<int>(static_cast<uint64_t>(L)*inSampleRate/outSampleRate);
+    for (int i=lscale; i; ++bits, i>>=1)
+        ;
+    mL = L;
+    mShift = kNumPhaseBits - bits;
+    mHalfNumCoefs = halfNumCoefs;
+}
+
+// Construct with a placeholder 1:1 filter; real filters are designed lazily
+// by setSampleRate() (mInSampleRate is forced to 0 so the first call runs).
+template<typename TC, typename TI, typename TO>
+AudioResamplerDyn<TC, TI, TO>::AudioResamplerDyn(
+        int inChannelCount, int32_t sampleRate, src_quality quality)
+    : AudioResampler(inChannelCount, sampleRate, quality),
+      mResampleFunc(0), mFilterSampleRate(0), mFilterQuality(DEFAULT_QUALITY),
+      mCoefBuffer(NULL)
+{
+    mVolumeSimd[0] = mVolumeSimd[1] = 0;
+    // The AudioResampler base class assumes we are always ready for 1:1 resampling.
+    // We reset mInSampleRate to 0, so setSampleRate() will calculate filters for
+    // setSampleRate() for 1:1. (May be removed if precalculated filters are used.)
+    mInSampleRate = 0;
+    mConstants.set(128, 8, mSampleRate, mSampleRate); // TODO: set better
+}
+
+// Release the coefficient buffer; InBuffer frees its own state.
+template<typename TC, typename TI, typename TO>
+AudioResamplerDyn<TC, TI, TO>::~AudioResamplerDyn()
+{
+    free(mCoefBuffer);
+}
+
+// Reset so the next setSampleRate() regenerates the filter and the input
+// history buffer is released.
+template<typename TC, typename TI, typename TO>
+void AudioResamplerDyn<TC, TI, TO>::init()
+{
+    mFilterSampleRate = 0; // always trigger new filter generation
+    mInBuffer.init();
+}
+
+// Store per-channel volume for SIMD processing: float output types keep the
+// value as-is, integer output types convert to clamped U4.28 fixed point.
+template<typename TC, typename TI, typename TO>
+void AudioResamplerDyn<TC, TI, TO>::setVolume(float left, float right)
+{
+    AudioResampler::setVolume(left, right);
+    if (is_same<TO, float>::value || is_same<TO, double>::value) {
+        mVolumeSimd[0] = static_cast<TO>(left);
+        mVolumeSimd[1] = static_cast<TO>(right);
+    } else {  // integer requires scaling to U4_28 (rounding down)
+        // integer volumes are clamped to 0 to UNITY_GAIN so there
+        // are no issues with signed overflow.
+        mVolumeSimd[0] = u4_28_from_float(clampFloatVol(left));
+        mVolumeSimd[1] = u4_28_from_float(clampFloatVol(right));
+    }
+}
+
+// small local helpers (avoid pulling in <algorithm>/std::max here)
+template<typename T> T max(T a, T b) {return a > b ? a : b;}
+
+template<typename T> T absdiff(T a, T b) {return a > b ? a - b : b - a;}
+
+// Design a Kaiser-windowed polyphase FIR for the given conversion ratio and
+// install it in c.mFirCoefs, replacing any previously owned coefficient
+// buffer. tbwCheat > 1 trades a little aliasing for a wider passband.
+template<typename TC, typename TI, typename TO>
+void AudioResamplerDyn<TC, TI, TO>::createKaiserFir(Constants &c,
+        double stopBandAtten, int inSampleRate, int outSampleRate, double tbwCheat)
+{
+    TC* buf = NULL;
+    static const double atten = 0.9998; // to avoid ripple overflow
+    double fcr;                         // cutoff frequency ratio
+    double tbw = firKaiserTbw(c.mHalfNumCoefs, stopBandAtten); // transition bandwidth
+
+    // (mL+1) phases of mHalfNumCoefs coefficients, 32-byte aligned for SIMD
+    (void)posix_memalign(reinterpret_cast<void**>(&buf), 32, (c.mL+1)*c.mHalfNumCoefs*sizeof(TC));
+    if (inSampleRate < outSampleRate) { // upsample
+        fcr = max(0.5*tbwCheat - tbw/2, tbw/2);
+    } else { // downsample
+        fcr = max(0.5*tbwCheat*outSampleRate/inSampleRate - tbw/2, tbw/2);
+    }
+    // create and set filter
+    firKaiserGen(buf, c.mL, c.mHalfNumCoefs, stopBandAtten, fcr, atten);
+    c.mFirCoefs = buf;
+    // free the old coefficient buffer only after the new one is installed
+    if (mCoefBuffer) {
+        free(mCoefBuffer);
+    }
+    mCoefBuffer = buf;
+#ifdef DEBUG_RESAMPLER
+    // print basic filter stats
+    printf("L:%d  hnc:%d  stopBandAtten:%lf  fcr:%lf  atten:%lf  tbw:%lf\n",
+            c.mL, c.mHalfNumCoefs, stopBandAtten, fcr, atten, tbw);
+    // test the filter and report results
+    double fp = (fcr - tbw/2)/c.mL;
+    double fs = (fcr + tbw/2)/c.mL;
+    double passMin, passMax, passRipple;
+    double stopMax, stopRipple;
+    testFir(buf, c.mL, c.mHalfNumCoefs, fp, fs, /*passSteps*/ 1000, /*stopSteps*/ 100000,
+            passMin, passMax, passRipple, stopMax, stopRipple);
+    printf("passband(%lf, %lf): %.8lf %.8lf %.8lf\n", 0., fp, passMin, passMax, passRipple);
+    printf("stopband(%lf, %lf): %.8lf %.3lf\n", fs, 0.5, stopMax, stopRipple);
+#endif
+}
+
+// recursive gcd. Using objdump, it appears the tail recursion is converted to a while loop.
+static int gcd(int n, int m)
+{
+    if (m == 0) {
+        return n;
+    }
+    return gcd(m, n % m);
+}
+
+// Decide whether the existing filter (designed for filterSampleRate) is
+// still acceptable after an input-rate change from prevSampleRate to
+// newSampleRate, avoiding an expensive filter redesign for small changes.
+static bool isClose(int32_t newSampleRate, int32_t prevSampleRate,
+        int32_t filterSampleRate, int32_t outSampleRate)
+{
+
+    // different upsampling ratios do not need a filter change.
+    if (filterSampleRate != 0
+            && filterSampleRate < outSampleRate
+            && newSampleRate < outSampleRate)
+        return true;
+
+    // check design criteria again if downsampling is detected.
+    int pdiff = absdiff(newSampleRate, prevSampleRate);
+    int adiff = absdiff(newSampleRate, filterSampleRate);
+
+    // allow up to 6% relative change increments.                   (>>4 == 6.25%)
+    // allow up to 12% absolute change increments (from filter design)  (>>3 == 12.5%)
+    return pdiff < prevSampleRate>>4 && adiff < filterSampleRate>>3;
+}
+
+// Change the input sample rate. Designs a new Kaiser filter when the quality
+// setting changed or the rate moved too far from the current design
+// (isClose()), then rescales the phase state and selects the per-channel
+// resample member function (locked vs interpolated phase).
+template<typename TC, typename TI, typename TO>
+void AudioResamplerDyn<TC, TI, TO>::setSampleRate(int32_t inSampleRate)
+{
+    if (mInSampleRate == inSampleRate) {
+        return;
+    }
+    int32_t oldSampleRate = mInSampleRate;
+    uint32_t oldPhaseWrapLimit = mConstants.mL << mConstants.mShift;
+    bool useS32 = false;
+
+    mInSampleRate = inSampleRate;
+
+    // TODO: Add precalculated Equiripple filters
+
+    if (mFilterQuality != getQuality() ||
+            !isClose(inSampleRate, oldSampleRate, mFilterSampleRate, mSampleRate)) {
+        mFilterSampleRate = inSampleRate;
+        mFilterQuality = getQuality();
+
+        // Begin Kaiser Filter computation
+        //
+        // The quantization floor for S16 is about 96db - 10*log_10(#length) + 3dB.
+        // Keep the stop band attenuation no greater than 84-85dB for 32 length S16 filters
+        //
+        // For s32 we keep the stop band attenuation at the same as 16b resolution, about
+        // 96-98dB
+        //
+
+        double stopBandAtten;
+        double tbwCheat = 1.; // how much we "cheat" into aliasing
+        int halfLength;
+        // quality determines coefficient width and filter length; longer
+        // filters are used for higher downsampling ratios.
+        if (mFilterQuality == DYN_HIGH_QUALITY) {
+            // 32b coefficients, 64 length
+            useS32 = true;
+            stopBandAtten = 98.;
+            if (inSampleRate >= mSampleRate * 4) {
+                halfLength = 48;
+            } else if (inSampleRate >= mSampleRate * 2) {
+                halfLength = 40;
+            } else {
+                halfLength = 32;
+            }
+        } else if (mFilterQuality == DYN_LOW_QUALITY) {
+            // 16b coefficients, 16-32 length
+            useS32 = false;
+            stopBandAtten = 80.;
+            if (inSampleRate >= mSampleRate * 4) {
+                halfLength = 24;
+            } else if (inSampleRate >= mSampleRate * 2) {
+                halfLength = 16;
+            } else {
+                halfLength = 8;
+            }
+            if (inSampleRate <= mSampleRate) {
+                tbwCheat = 1.05;
+            } else {
+                tbwCheat = 1.03;
+            }
+        } else { // DYN_MED_QUALITY
+            // 16b coefficients, 32-64 length
+            // note: > 64 length filters with 16b coefs can have quantization noise problems
+            useS32 = false;
+            stopBandAtten = 84.;
+            if (inSampleRate >= mSampleRate * 4) {
+                halfLength = 32;
+            } else if (inSampleRate >= mSampleRate * 2) {
+                halfLength = 24;
+            } else {
+                halfLength = 16;
+            }
+            if (inSampleRate <= mSampleRate) {
+                tbwCheat = 1.03;
+            } else {
+                tbwCheat = 1.01;
+            }
+        }
+
+        // determine the number of polyphases in the filterbank.
+        // for 16b, it is desirable to have 2^(16/2) = 256 phases.
+        // https://ccrma.stanford.edu/~jos/resample/Relation_Interpolation_Error_Quantization.html
+        //
+        // We are a bit more lax on this.
+
+        int phases = mSampleRate / gcd(mSampleRate, inSampleRate);
+
+        // TODO: Once dynamic sample rate change is an option, the code below
+        // should be modified to execute only when dynamic sample rate change is enabled.
+        //
+        // as above, #phases less than 63 is too few phases for accurate linear interpolation.
+        // we increase the phases to compensate, but more phases means more memory per
+        // filter and more time to compute the filter.
+        //
+        // if we know that the filter will be used for dynamic sample rate changes,
+        // that would allow us skip this part for fixed sample rate resamplers.
+        //
+        while (phases<63) {
+            phases *= 2; // this code only needed to support dynamic rate changes
+        }
+
+        if (phases>=256) {  // too many phases, always interpolate
+            phases = 127;
+        }
+
+        // create the filter
+        mConstants.set(phases, halfLength, inSampleRate, mSampleRate);
+        createKaiserFir(mConstants, stopBandAtten,
+                inSampleRate, mSampleRate, tbwCheat);
+    } // End Kaiser filter
+
+    // update phase and state based on the new filter.
+    const Constants& c(mConstants);
+    mInBuffer.resize(mChannelCount, c.mHalfNumCoefs);
+    const uint32_t phaseWrapLimit = c.mL << c.mShift;
+    // try to preserve as much of the phase fraction as possible for on-the-fly changes
+    mPhaseFraction = static_cast<unsigned long long>(mPhaseFraction)
+            * phaseWrapLimit / oldPhaseWrapLimit;
+    mPhaseFraction %= phaseWrapLimit; // should not do anything, but just in case.
+    mPhaseIncrement = static_cast<uint32_t>(static_cast<uint64_t>(phaseWrapLimit)
+            * inSampleRate / mSampleRate);
+
+    // determine which resampler to use
+    // check if locked phase (works only if mPhaseIncrement has no "fractional phase bits")
+    int locked = (mPhaseIncrement << (sizeof(mPhaseIncrement)*8 - c.mShift)) == 0;
+    if (locked) {
+        mPhaseFraction = mPhaseFraction >> c.mShift << c.mShift; // remove fractional phase
+    }
+
+    // stride is the minimum number of filter coefficients processed per loop iteration.
+    // We currently only allow a stride of 16 to match with SIMD processing.
+    // This means that the filter length must be a multiple of 16,
+    // or half the filter length (mHalfNumCoefs) must be a multiple of 8.
+    //
+    // Note: A stride of 2 is achieved with non-SIMD processing.
+    // NOTE(review): the fatal below rejects stride 2 outright, so the
+    // "stride of 2" note applies only inside fir<>, not here — confirm that
+    // all quality settings above always yield mHalfNumCoefs % 8 == 0.
+    int stride = ((c.mHalfNumCoefs & 7) == 0) ? 16 : 2;
+    LOG_ALWAYS_FATAL_IF(stride < 16, "Resampler stride must be 16 or more");
+    LOG_ALWAYS_FATAL_IF(mChannelCount < 1 || mChannelCount > 8,
+            "Resampler channels(%d) must be between 1 to 8", mChannelCount);
+    // stride 16 (falls back to stride 2 for machines that do not support NEON)
+    if (locked) {
+        switch (mChannelCount) {
+        case 1:
+            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<1, true, 16>;
+            break;
+        case 2:
+            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<2, true, 16>;
+            break;
+        case 3:
+            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<3, true, 16>;
+            break;
+        case 4:
+            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<4, true, 16>;
+            break;
+        case 5:
+            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<5, true, 16>;
+            break;
+        case 6:
+            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<6, true, 16>;
+            break;
+        case 7:
+            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<7, true, 16>;
+            break;
+        case 8:
+            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<8, true, 16>;
+            break;
+        }
+    } else {
+        switch (mChannelCount) {
+        case 1:
+            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<1, false, 16>;
+            break;
+        case 2:
+            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<2, false, 16>;
+            break;
+        case 3:
+            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<3, false, 16>;
+            break;
+        case 4:
+            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<4, false, 16>;
+            break;
+        case 5:
+            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<5, false, 16>;
+            break;
+        case 6:
+            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<6, false, 16>;
+            break;
+        case 7:
+            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<7, false, 16>;
+            break;
+        case 8:
+            mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<8, false, 16>;
+            break;
+        }
+    }
+#ifdef DEBUG_RESAMPLER
+    printf("channels:%d  %s  stride:%d  %s  coef:%d  shift:%d\n",
+            mChannelCount, locked ? "locked" : "interpolated",
+            stride, useS32 ? "S32" : "S16", 2*c.mHalfNumCoefs, c.mShift);
+#endif
+}
+
+// Public entry point: forwards to the member function selected by
+// setSampleRate() (channel count / locked phase specialization).
+template<typename TC, typename TI, typename TO>
+size_t AudioResamplerDyn<TC, TI, TO>::resample(int32_t* out, size_t outFrameCount,
+            AudioBufferProvider* provider)
+{
+    return (this->*mResampleFunc)(reinterpret_cast<TO*>(out), outFrameCount, provider);
+}
+
+// Core polyphase resampling loop. Pulls input through the AudioBufferProvider,
+// maintains the ring-buffered history in mInBuffer, and emits outFrameCount
+// frames (or fewer on underrun). Returns the number of frames produced.
+template<typename TC, typename TI, typename TO>
+template<int CHANNELS, bool LOCKED, int STRIDE>
+size_t AudioResamplerDyn<TC, TI, TO>::resample(TO* out, size_t outFrameCount,
+        AudioBufferProvider* provider)
+{
+    // TODO Mono -> Mono is not supported. OUTPUT_CHANNELS reflects minimum of stereo out.
+    const int OUTPUT_CHANNELS = (CHANNELS < 2) ? 2 : CHANNELS;
+    const Constants& c(mConstants);
+    const TC* const coefs = mConstants.mFirCoefs;
+    TI* impulse = mInBuffer.getImpulse();
+    size_t inputIndex = 0;
+    uint32_t phaseFraction = mPhaseFraction;
+    const uint32_t phaseIncrement = mPhaseIncrement;
+    size_t outputIndex = 0;
+    size_t outputSampleCount = outFrameCount * OUTPUT_CHANNELS;
+    const uint32_t phaseWrapLimit = c.mL << c.mShift;
+    // number of input frames needed to produce outFrameCount output frames
+    size_t inFrameCount = (phaseIncrement * (uint64_t)outFrameCount + phaseFraction)
+            / phaseWrapLimit;
+    // sanity check that inFrameCount is in signed 32 bit integer range.
+    // (inFrameCount is size_t, so the lower bound is trivially true)
+    ALOG_ASSERT(0 <= inFrameCount && inFrameCount < (1U << 31));
+
+    //ALOGV("inFrameCount:%d  outFrameCount:%d"
+    //        "  phaseIncrement:%u  phaseFraction:%u  phaseWrapLimit:%u",
+    //        inFrameCount, outFrameCount, phaseIncrement, phaseFraction, phaseWrapLimit);
+
+    // NOTE: be very careful when modifying the code here. register
+    // pressure is very high and a small change might cause the compiler
+    // to generate far less efficient code.
+    // Always sanity check the result with objdump or test-resample.
+
+    // the following logic is a bit convoluted to keep the main processing loop
+    // as tight as possible with register allocation.
+    while (outputIndex < outputSampleCount) {
+        //ALOGV("LOOP: inFrameCount:%d  outputIndex:%d  outFrameCount:%d"
+        //        "  phaseFraction:%u  phaseWrapLimit:%u",
+        //        inFrameCount, outputIndex, outFrameCount, phaseFraction, phaseWrapLimit);
+
+        // check inputIndex overflow
+        ALOG_ASSERT(inputIndex <= mBuffer.frameCount, "inputIndex%zu > frameCount%zu",
+                inputIndex, mBuffer.frameCount);
+        // Buffer is empty, fetch a new one if necessary (inFrameCount > 0).
+        // We may not fetch a new buffer if the existing data is sufficient.
+        while (mBuffer.frameCount == 0 && inFrameCount > 0) {
+            mBuffer.frameCount = inFrameCount;
+            provider->getNextBuffer(&mBuffer);
+            if (mBuffer.raw == NULL) {
+                // We are either at the end of playback or in an underrun situation.
+                // Reset buffer to prevent pop noise at the next buffer.
+                mInBuffer.reset();
+                goto resample_exit;
+            }
+            inFrameCount -= mBuffer.frameCount;
+            // consume any whole input frames the accumulated phase already owes
+            if (phaseFraction >= phaseWrapLimit) { // read in data
+                mInBuffer.template readAdvance<CHANNELS>(
+                        impulse, c.mHalfNumCoefs,
+                        reinterpret_cast<TI*>(mBuffer.raw), inputIndex);
+                inputIndex++;
+                phaseFraction -= phaseWrapLimit;
+                while (phaseFraction >= phaseWrapLimit) {
+                    if (inputIndex >= mBuffer.frameCount) {
+                        inputIndex = 0;
+                        provider->releaseBuffer(&mBuffer);
+                        break;
+                    }
+                    mInBuffer.template readAdvance<CHANNELS>(
+                            impulse, c.mHalfNumCoefs,
+                            reinterpret_cast<TI*>(mBuffer.raw), inputIndex);
+                    inputIndex++;
+                    phaseFraction -= phaseWrapLimit;
+                }
+            }
+        }
+        const TI* const in = reinterpret_cast<const TI*>(mBuffer.raw);
+        const size_t frameCount = mBuffer.frameCount;
+        const int coefShift = c.mShift;
+        const int halfNumCoefs = c.mHalfNumCoefs;
+        const TO* const volumeSimd = mVolumeSimd;
+
+        // main processing loop
+        while (CC_LIKELY(outputIndex < outputSampleCount)) {
+            // caution: fir() is inlined and may be large.
+            // output will be loaded with the appropriate values
+            //
+            // from the input samples in impulse[-halfNumCoefs+1]... impulse[halfNumCoefs]
+            // from the polyphase filter of (phaseFraction / phaseWrapLimit) in coefs.
+            //
+            //ALOGV("LOOP2: inFrameCount:%d  outputIndex:%d  outFrameCount:%d"
+            //        "  phaseFraction:%u  phaseWrapLimit:%u",
+            //        inFrameCount, outputIndex, outFrameCount, phaseFraction, phaseWrapLimit);
+            ALOG_ASSERT(phaseFraction < phaseWrapLimit);
+            fir<CHANNELS, LOCKED, STRIDE>(
+                    &out[outputIndex],
+                    phaseFraction, phaseWrapLimit,
+                    coefShift, halfNumCoefs, coefs,
+                    impulse, volumeSimd);
+
+            outputIndex += OUTPUT_CHANNELS;
+
+            // advance phase; consume one input frame per full wrap
+            phaseFraction += phaseIncrement;
+            while (phaseFraction >= phaseWrapLimit) {
+                if (inputIndex >= frameCount) {
+                    goto done;  // need a new buffer
+                }
+                mInBuffer.template readAdvance<CHANNELS>(impulse, halfNumCoefs, in, inputIndex);
+                inputIndex++;
+                phaseFraction -= phaseWrapLimit;
+            }
+        }
+done:
+        // We arrive here when we're finished or when the input buffer runs out.
+        // Regardless we need to release the input buffer if we've acquired it.
+        if (inputIndex > 0) {  // we've acquired a buffer (alternatively could check frameCount)
+            ALOG_ASSERT(inputIndex == frameCount, "inputIndex(%zu) != frameCount(%zu)",
+                    inputIndex, frameCount);  // must have been fully read.
+            inputIndex = 0;
+            provider->releaseBuffer(&mBuffer);
+            ALOG_ASSERT(mBuffer.frameCount == 0);
+        }
+    }
+
+resample_exit:
+    // inputIndex must be zero in all three cases:
+    // (1) the buffer was never acquired; (2) the buffer was
+    // released at "done:"; or (3) getNextBuffer() failed.
+    ALOG_ASSERT(inputIndex == 0, "Releasing: inputindex:%zu frameCount:%zu  phaseFraction:%u",
+            inputIndex, mBuffer.frameCount, phaseFraction);
+    ALOG_ASSERT(mBuffer.frameCount == 0); // there must be no frames in the buffer
+    mInBuffer.setImpulse(impulse);
+    mPhaseFraction = phaseFraction;
+    return outputIndex / OUTPUT_CHANNELS;
+}
+
+/* instantiate templates used by AudioResampler::create */
+template class AudioResamplerDyn<float, float, float>;
+template class AudioResamplerDyn<int16_t, int16_t, int32_t>;
+template class AudioResamplerDyn<int32_t, int16_t, int32_t>;
+
+// ----------------------------------------------------------------------------
+} // namespace android
diff --git a/media/libaudioprocessing/AudioResamplerDyn.h b/media/libaudioprocessing/AudioResamplerDyn.h
new file mode 100644
index 0000000..1840fc7
--- /dev/null
+++ b/media/libaudioprocessing/AudioResamplerDyn.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_RESAMPLER_DYN_H
+#define ANDROID_AUDIO_RESAMPLER_DYN_H
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <android/log.h>
+
+#include <media/AudioResampler.h>
+
+namespace android {
+
+/* AudioResamplerDyn
+ *
+ * This class template is used for floating point and integer resamplers.
+ *
+ * Type variables:
+ * TC = filter coefficient type (one of int16_t, int32_t, or float)
+ * TI = input data type (one of int16_t or float)
+ * TO = output data type (one of int32_t or float)
+ *
+ * For integer input data types TI, the coefficient type TC is either int16_t or int32_t.
+ * For float input data types TI, the coefficient type TC is float.
+ */
+
+template<typename TC, typename TI, typename TO>
+class AudioResamplerDyn: public AudioResampler {
+public:
+    AudioResamplerDyn(int inChannelCount,
+            int32_t sampleRate, src_quality quality);
+
+    virtual ~AudioResamplerDyn();
+
+    // resets filter state; next setSampleRate() regenerates the filter
+    virtual void init();
+
+    // designs/updates the polyphase filter for the new input rate
+    virtual void setSampleRate(int32_t inSampleRate);
+
+    virtual void setVolume(float left, float right);
+
+    virtual size_t resample(int32_t* out, size_t outFrameCount,
+            AudioBufferProvider* provider);
+
+private:
+
+    class Constants { // stores the filter constants.
+    public:
+        Constants() :
+            mL(0), mShift(0), mHalfNumCoefs(0), mFirCoefs(NULL)
+        {}
+        void set(int L, int halfNumCoefs,
+                int inSampleRate, int outSampleRate);
+
+        int mL;            // interpolation phases in the filter.
+        int mShift;        // right shift to get polyphase index
+        unsigned int mHalfNumCoefs; // filter half #coefs
+        const TC* mFirCoefs;        // polyphase filter bank
+    };
+
+    class InBuffer { // buffer management for input type TI
+    public:
+        InBuffer();
+        ~InBuffer();
+        void init();
+
+        void resize(int CHANNELS, int halfNumCoefs);
+
+        // used for direct management of the mImpulse pointer
+        inline TI* getImpulse() {
+            return mImpulse;
+        }
+
+        inline void setImpulse(TI *impulse) {
+            mImpulse = impulse;
+        }
+
+        // refresh the newest frame at the head without advancing
+        template<int CHANNELS>
+        inline void readAgain(TI*& impulse, const int halfNumCoefs,
+                const TI* const in, const size_t inputIndex);
+
+        // advance the impulse pointer and load the next input frame
+        template<int CHANNELS>
+        inline void readAdvance(TI*& impulse, const int halfNumCoefs,
+                const TI* const in, const size_t inputIndex);
+
+        // zero the history (used after underrun / end of stream)
+        void reset();
+
+    private:
+        // tuning parameter guidelines: 2 <= multiple <= 8
+        static const int kStateSizeMultipleOfFilterLength = 4;
+
+        // in general, mRingFull = mState + mStateSize - halfNumCoefs*CHANNELS.
+        TI* mState;      // base pointer for the input buffer storage
+        TI* mImpulse;    // current location of the impulse response (centered)
+        TI* mRingFull;   // mState <= mImpulse < mRingFull
+        size_t mStateCount; // size of state in units of TI.
+    };
+
+    void createKaiserFir(Constants &c, double stopBandAtten,
+            int inSampleRate, int outSampleRate, double tbwCheat);
+
+    template<int CHANNELS, bool LOCKED, int STRIDE>
+    size_t resample(TO* out, size_t outFrameCount, AudioBufferProvider* provider);
+
+    // define a pointer to member function type for resample
+    typedef size_t (AudioResamplerDyn<TC, TI, TO>::*resample_ABP_t)(TO* out,
+            size_t outFrameCount, AudioBufferProvider* provider);
+
+    // data - the contiguous storage and layout of these is important.
+    InBuffer mInBuffer;
+    Constants mConstants;        // current set of coefficient parameters
+    TO __attribute__ ((aligned (8))) mVolumeSimd[2]; // must be aligned or NEON may crash
+    resample_ABP_t mResampleFunc; // called function for resampling
+    int32_t mFilterSampleRate;    // designed filter sample rate.
+    src_quality mFilterQuality;   // designed filter quality.
+    void* mCoefBuffer;            // if a filter is created, this is not null
+};
+
+} // namespace android
+
+#endif /*ANDROID_AUDIO_RESAMPLER_DYN_H*/
diff --git a/services/audioflinger/AudioResamplerFirGen.h b/media/libaudioprocessing/AudioResamplerFirGen.h
similarity index 100%
rename from services/audioflinger/AudioResamplerFirGen.h
rename to media/libaudioprocessing/AudioResamplerFirGen.h
diff --git a/media/libaudioprocessing/AudioResamplerFirOps.h b/media/libaudioprocessing/AudioResamplerFirOps.h
new file mode 100644
index 0000000..2e4cee3
--- /dev/null
+++ b/media/libaudioprocessing/AudioResamplerFirOps.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_RESAMPLER_FIR_OPS_H
+#define ANDROID_AUDIO_RESAMPLER_FIR_OPS_H
+
+namespace android {
+
+#if defined(__arm__) && !defined(__thumb__)
+#define USE_INLINE_ASSEMBLY (true)
+#else
+#define USE_INLINE_ASSEMBLY (false)
+#endif
+
+#if defined(__aarch64__) || defined(__ARM_NEON__)
+#ifndef USE_NEON
+#define USE_NEON (true)
+#endif
+#else
+#define USE_NEON (false)
+#endif
+#if USE_NEON
+#include <arm_neon.h>
+#endif
+
+#if defined(__SSSE3__) // Should be supported in x86 ABI for both 32 & 64-bit.
+#define USE_SSE (true)
+#include <tmmintrin.h>
+#else
+#define USE_SSE (false)
+#endif
+
+// minimal local is_same trait (avoids depending on <type_traits> here)
+template<typename T, typename U>
+struct is_same
+{
+    static const bool value = false;
+};
+
+template<typename T>
+struct is_same<T, T>  // partial specialization
+{
+    static const bool value = true;
+};
+
+// Multiply 'in' by the 16-bit left (low half, left != 0) or right (high half)
+// sample packed in vRL, returning the top 32 bits of the 48-bit product
+// (equivalent to ARM smultb/smultt).
+static inline
+int32_t mulRL(int left, int32_t in, uint32_t vRL)
+{
+#if USE_INLINE_ASSEMBLY
+    int32_t out;
+    if (left) {
+        asm( "smultb %[out], %[in], %[vRL] \n"
+             : [out]"=r"(out)
+             : [in]"%r"(in), [vRL]"r"(vRL)
+             : );
+    } else {
+        asm( "smultt %[out], %[in], %[vRL] \n"
+             : [out]"=r"(out)
+             : [in]"%r"(in), [vRL]"r"(vRL)
+             : );
+    }
+    return out;
+#else
+    int16_t v = left ? static_cast<int16_t>(vRL) : static_cast<int16_t>(vRL>>16);
+    return static_cast<int32_t>((static_cast<int64_t>(in) * v) >> 16);
+#endif
+}
+
+// a + in*v, 16x16 multiply-accumulate (ARM smlabb)
+static inline
+int32_t mulAdd(int16_t in, int16_t v, int32_t a)
+{
+#if USE_INLINE_ASSEMBLY
+    int32_t out;
+    asm( "smlabb %[out], %[v], %[in], %[a] \n"
+         : [out]"=r"(out)
+         : [in]"%r"(in), [v]"r"(v), [a]"r"(a)
+         : );
+    return out;
+#else
+    return a + v * in;
+#endif
+}
+
+// a + (v*in)>>16, 32x16 multiply-accumulate keeping the top bits (ARM smlawb)
+static inline
+int32_t mulAdd(int16_t in, int32_t v, int32_t a)
+{
+#if USE_INLINE_ASSEMBLY
+    int32_t out;
+    asm( "smlawb %[out], %[v], %[in], %[a] \n"
+         : [out]"=r"(out)
+         : [in]"%r"(in), [v]"r"(v), [a]"r"(a)
+         : );
+    return out;
+#else
+    return a + static_cast<int32_t>((static_cast<int64_t>(v) * in) >> 16);
+#endif
+}
+
+// a + (v*in)>>32, 32x32 multiply-accumulate keeping the top word (ARM smmla)
+static inline
+int32_t mulAdd(int32_t in, int32_t v, int32_t a)
+{
+#if USE_INLINE_ASSEMBLY
+    int32_t out;
+    asm( "smmla %[out], %[v], %[in], %[a] \n"
+         : [out]"=r"(out)
+         : [in]"%r"(in), [v]"r"(v), [a]"r"(a)
+         : );
+    return out;
+#else
+    return a + static_cast<int32_t>((static_cast<int64_t>(v) * in) >> 32);
+#endif
+}
+
+// a + v*sample, where sample is the 16-bit left (low half) or right (high
+// half) channel packed in inRL. 16-bit coefficient variant.
+static inline
+int32_t mulAddRL(int left, uint32_t inRL, int16_t v, int32_t a)
+{
+#if 0 // USE_INLINE_ASSEMBLY Seems to fail with Clang b/34110890
+    int32_t out;
+    if (left) {
+        asm( "smlabb %[out], %[v], %[inRL], %[a] \n"
+             : [out]"=r"(out)
+             : [inRL]"%r"(inRL), [v]"r"(v), [a]"r"(a)
+             : );
+    } else {
+        asm( "smlabt %[out], %[v], %[inRL], %[a] \n"
+             : [out]"=r"(out)
+             : [inRL]"%r"(inRL), [v]"r"(v), [a]"r"(a)
+             : );
+    }
+    return out;
+#else
+    int16_t s = left ? static_cast<int16_t>(inRL) : static_cast<int16_t>(inRL>>16);
+    return a + v * s;
+#endif
+}
+
+// a + (v*sample)>>16, packed-sample variant with a 32-bit coefficient.
+static inline
+int32_t mulAddRL(int left, uint32_t inRL, int32_t v, int32_t a)
+{
+#if 0 // USE_INLINE_ASSEMBLY Seems to fail with Clang b/34110890
+    int32_t out;
+    if (left) {
+        asm( "smlawb %[out], %[v], %[inRL], %[a] \n"
+             : [out]"=r"(out)
+             : [inRL]"%r"(inRL), [v]"r"(v), [a]"r"(a)
+             : );
+    } else {
+        asm( "smlawt %[out], %[v], %[inRL], %[a] \n"
+             : [out]"=r"(out)
+             : [inRL]"%r"(inRL), [v]"r"(v), [a]"r"(a)
+             : );
+    }
+    return out;
+#else
+    int16_t s = left ? static_cast<int16_t>(inRL) : static_cast<int16_t>(inRL>>16);
+    return a + static_cast<int32_t>((static_cast<int64_t>(v) * s) >> 16);
+#endif
+}
+
+} // namespace android
+
+#endif /*ANDROID_AUDIO_RESAMPLER_FIR_OPS_H*/
diff --git a/media/libaudioprocessing/AudioResamplerFirProcess.h b/media/libaudioprocessing/AudioResamplerFirProcess.h
new file mode 100644
index 0000000..a741677
--- /dev/null
+++ b/media/libaudioprocessing/AudioResamplerFirProcess.h
@@ -0,0 +1,439 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_H
+#define ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_H
+
+namespace android {
+
+// depends on AudioResamplerFirOps.h
+
+/* variant for input type TI = int16_t input samples */
+template<typename TC>
+static inline
+void mac(int32_t& l, int32_t& r, TC coef, const int16_t* samples)
+{
+ uint32_t rl = *reinterpret_cast<const uint32_t*>(samples);
+ l = mulAddRL(1, rl, coef, l);
+ r = mulAddRL(0, rl, coef, r);
+}
+
+template<typename TC>
+static inline
+void mac(int32_t& l, TC coef, const int16_t* samples)
+{
+ l = mulAdd(samples[0], coef, l);
+}
+
+/* variant for input type TI = float input samples */
+template<typename TC>
+static inline
+void mac(float& l, float& r, TC coef, const float* samples)
+{
+ l += *samples++ * coef;
+ r += *samples * coef;
+}
+
+template<typename TC>
+static inline
+void mac(float& l, TC coef, const float* samples)
+{
+ l += *samples * coef;
+}
+
+/* variant for output type TO = int32_t output samples */
+static inline
+int32_t volumeAdjust(int32_t value, int32_t volume)
+{
+ return 2 * mulRL(0, value, volume); // Note: only use top 16b
+}
+
+/* variant for output type TO = float output samples */
+static inline
+float volumeAdjust(float value, float volume)
+{
+ return value * volume;
+}
+
+/*
+ * Helper template functions for loop unrolling accumulator operations.
+ *
+ * Unrolling the loops achieves about 2x gain.
+ * Using a recursive template rather than an array of TO[] for the accumulator
+ * values is an additional 10-20% gain.
+ */
+
+template<int CHANNELS, typename TO>
+class Accumulator : public Accumulator<CHANNELS-1, TO> // recursive
+{
+public:
+ inline void clear() {
+ value = 0;
+ Accumulator<CHANNELS-1, TO>::clear();
+ }
+ template<typename TC, typename TI>
+ inline void acc(TC coef, const TI*& data) {
+ mac(value, coef, data++);
+ Accumulator<CHANNELS-1, TO>::acc(coef, data);
+ }
+ inline void volume(TO*& out, TO gain) {
+ *out++ = volumeAdjust(value, gain);
+ Accumulator<CHANNELS-1, TO>::volume(out, gain);
+ }
+
+ TO value; // one per recursive inherited base class
+};
+
+template<typename TO>
+class Accumulator<0, TO> {
+public:
+ inline void clear() {
+ }
+ template<typename TC, typename TI>
+ inline void acc(TC coef __unused, const TI*& data __unused) {
+ }
+ inline void volume(TO*& out __unused, TO gain __unused) {
+ }
+};
+
+template<typename TC, typename TINTERP>
+inline
+TC interpolate(TC coef_0, TC coef_1, TINTERP lerp)
+{
+ return lerp * (coef_1 - coef_0) + coef_0;
+}
+
+template<>
+inline
+int16_t interpolate<int16_t, uint32_t>(int16_t coef_0, int16_t coef_1, uint32_t lerp)
+{ // in some CPU architectures 16b x 16b multiplies are faster.
+ return (static_cast<int16_t>(lerp) * static_cast<int16_t>(coef_1 - coef_0) >> 15) + coef_0;
+}
+
+template<>
+inline
+int32_t interpolate<int32_t, uint32_t>(int32_t coef_0, int32_t coef_1, uint32_t lerp)
+{
+ return (lerp * static_cast<int64_t>(coef_1 - coef_0) >> 31) + coef_0;
+}
+
+/* class scope for passing in functions into templates */
+struct InterpCompute {
+ template<typename TC, typename TINTERP>
+ static inline
+ TC interpolatep(TC coef_0, TC coef_1, TINTERP lerp) {
+ return interpolate(coef_0, coef_1, lerp);
+ }
+
+ template<typename TC, typename TINTERP>
+ static inline
+ TC interpolaten(TC coef_0, TC coef_1, TINTERP lerp) {
+ return interpolate(coef_0, coef_1, lerp);
+ }
+};
+
+struct InterpNull {
+ template<typename TC, typename TINTERP>
+ static inline
+ TC interpolatep(TC coef_0, TC coef_1 __unused, TINTERP lerp __unused) {
+ return coef_0;
+ }
+
+ template<typename TC, typename TINTERP>
+ static inline
+ TC interpolaten(TC coef_0 __unused, TC coef_1, TINTERP lerp __unused) {
+ return coef_1;
+ }
+};
+
+/*
+ * Calculates a single output frame (two samples).
+ *
+ * The Process*() functions compute both the positive half FIR dot product and
+ * the negative half FIR dot product, accumulates, and then applies the volume.
+ *
+ * Use fir() to compute the proper coefficient pointers for a polyphase
+ * filter bank.
+ *
+ * ProcessBase() is the fundamental processing template function.
+ *
+ * ProcessL() calls ProcessBase() with TFUNC = InterpNull, for fixed/locked phase.
+ * Process() calls ProcessBase() with TFUNC = InterpCompute, for interpolated phase.
+ */
+
+template <int CHANNELS, int STRIDE, typename TFUNC, typename TC, typename TI, typename TO,
+ typename TINTERP>
+static inline
+void ProcessBase(TO* const out,
+ size_t count,
+ const TC* coefsP,
+ const TC* coefsN,
+ const TI* sP,
+ const TI* sN,
+ TINTERP lerpP,
+ const TO* const volumeLR)
+{
+ static_assert(CHANNELS > 0, "CHANNELS must be > 0");
+
+ if (CHANNELS > 2) {
+ // TO accum[CHANNELS];
+ Accumulator<CHANNELS, TO> accum;
+
+ // for (int j = 0; j < CHANNELS; ++j) accum[j] = 0;
+ accum.clear();
+ for (size_t i = 0; i < count; ++i) {
+ TC c = TFUNC::interpolatep(coefsP[0], coefsP[count], lerpP);
+
+ // for (int j = 0; j < CHANNELS; ++j) mac(accum[j], c, sP + j);
+ const TI *tmp_data = sP; // tmp_ptr seems to work better
+ accum.acc(c, tmp_data);
+
+ coefsP++;
+ sP -= CHANNELS;
+ c = TFUNC::interpolaten(coefsN[count], coefsN[0], lerpP);
+
+ // for (int j = 0; j < CHANNELS; ++j) mac(accum[j], c, sN + j);
+ tmp_data = sN; // tmp_ptr seems faster than directly using sN
+ accum.acc(c, tmp_data);
+
+ coefsN++;
+ sN += CHANNELS;
+ }
+ // for (int j = 0; j < CHANNELS; ++j) out[j] += volumeAdjust(accum[j], volumeLR[0]);
+ TO *tmp_out = out; // may remove if const out definition changes.
+ accum.volume(tmp_out, volumeLR[0]);
+ } else if (CHANNELS == 2) {
+ TO l = 0;
+ TO r = 0;
+ for (size_t i = 0; i < count; ++i) {
+ mac(l, r, TFUNC::interpolatep(coefsP[0], coefsP[count], lerpP), sP);
+ coefsP++;
+ sP -= CHANNELS;
+ mac(l, r, TFUNC::interpolaten(coefsN[count], coefsN[0], lerpP), sN);
+ coefsN++;
+ sN += CHANNELS;
+ }
+ out[0] += volumeAdjust(l, volumeLR[0]);
+ out[1] += volumeAdjust(r, volumeLR[1]);
+ } else { /* CHANNELS == 1 */
+ TO l = 0;
+ for (size_t i = 0; i < count; ++i) {
+ mac(l, TFUNC::interpolatep(coefsP[0], coefsP[count], lerpP), sP);
+ coefsP++;
+ sP -= CHANNELS;
+ mac(l, TFUNC::interpolaten(coefsN[count], coefsN[0], lerpP), sN);
+ coefsN++;
+ sN += CHANNELS;
+ }
+ out[0] += volumeAdjust(l, volumeLR[0]);
+ out[1] += volumeAdjust(l, volumeLR[1]);
+ }
+}
+
+/* Calculates a single output frame from a polyphase resampling filter.
+ * See Process() for parameter details.
+ */
+template <int CHANNELS, int STRIDE, typename TC, typename TI, typename TO>
+static inline
+void ProcessL(TO* const out,
+ int count,
+ const TC* coefsP,
+ const TC* coefsN,
+ const TI* sP,
+ const TI* sN,
+ const TO* const volumeLR)
+{
+ ProcessBase<CHANNELS, STRIDE, InterpNull>(out, count, coefsP, coefsN, sP, sN, 0, volumeLR);
+}
+
+/*
+ * Calculates a single output frame from a polyphase resampling filter,
+ * with filter phase interpolation.
+ *
+ * @param out should point to the output buffer with space for at least one output frame.
+ *
+ * @param count should be half the size of the total filter length (halfNumCoefs), as we
+ * use symmetry in filter coefficients to evaluate two dot products.
+ *
+ * @param coefsP is one phase of the polyphase filter bank of size halfNumCoefs, corresponding
+ * to the positive sP.
+ *
+ * @param coefsN is one phase of the polyphase filter bank of size halfNumCoefs, corresponding
+ * to the negative sN.
+ *
+ * @param coefsP1 is the next phase of coefsP (used for interpolation).
+ *
+ * @param coefsN1 is the next phase of coefsN (used for interpolation).
+ *
+ * @param sP is the positive half of the coefficients (as viewed by a convolution),
+ * starting at the original samples pointer and decrementing (by CHANNELS).
+ *
+ * @param sN is the negative half of the samples (as viewed by a convolution),
+ * starting at the original samples pointer + CHANNELS and incrementing (by CHANNELS).
+ *
+ * @param lerpP The fractional siting between the polyphase indices is given by the bits
+ * below coefShift. See fir() for details.
+ *
+ * @param volumeLR is a pointer to an array of two 32 bit volume values, one per stereo channel,
+ * expressed as a S32 integer or float. A negative value inverts the channel 180 degrees.
+ * The pointer volumeLR should be aligned to a minimum of 8 bytes.
+ * A typical value for volume is 0x1000 to align to a unity gain output of 20.12.
+ */
+template <int CHANNELS, int STRIDE, typename TC, typename TI, typename TO, typename TINTERP>
+static inline
+void Process(TO* const out,
+ int count,
+ const TC* coefsP,
+ const TC* coefsN,
+ const TC* coefsP1 __unused,
+ const TC* coefsN1 __unused,
+ const TI* sP,
+ const TI* sN,
+ TINTERP lerpP,
+ const TO* const volumeLR)
+{
+ ProcessBase<CHANNELS, STRIDE, InterpCompute>(out, count, coefsP, coefsN, sP, sN, lerpP,
+ volumeLR);
+}
+
+/*
+ * Calculates a single output frame from input sample pointer.
+ *
+ * This sets up the params for the accelerated Process() and ProcessL()
+ * functions to do the appropriate dot products.
+ *
+ * @param out should point to the output buffer with space for at least one output frame.
+ *
+ * @param phase is the fractional distance between input frames for interpolation:
+ * phase >= 0 && phase < phaseWrapLimit. It can be thought of as a rational fraction
+ * of phase/phaseWrapLimit.
+ *
+ * @param phaseWrapLimit is #polyphases<<coefShift, where #polyphases is the number of polyphases
+ * in the polyphase filter. Likewise, #polyphases can be obtained as (phaseWrapLimit>>coefShift).
+ *
+ * @param coefShift gives the bit alignment of the polyphase index in the phase parameter.
+ *
+ * @param halfNumCoefs is the half the number of coefficients per polyphase filter. Since the
+ * overall filterbank is odd-length symmetric, only halfNumCoefs need be stored.
+ *
+ * @param coefs is the polyphase filter bank, starting at from polyphase index 0, and ranging to
+ * and including the #polyphases. Each polyphase of the filter has half-length halfNumCoefs
+ * (due to symmetry). The total size of the filter bank in coefficients is
+ * (#polyphases+1)*halfNumCoefs.
+ *
+ * The filter bank coefs should be aligned to a minimum of 16 bytes (preferably to cache line).
+ *
+ * The coefs should be attenuated (to compensate for passband ripple)
+ * if storing back into the native format.
+ *
+ * @param samples are unaligned input samples. The position is in the "middle" of the
+ * sample array with respect to the FIR filter:
+ * the negative half of the filter is dot product from samples+1 to samples+halfNumCoefs;
+ * the positive half of the filter is dot product from samples to samples-halfNumCoefs+1.
+ *
+ * @param volumeLR is a pointer to an array of two 32 bit volume values, one per stereo channel,
+ * expressed as a S32 integer or float. A negative value inverts the channel 180 degrees.
+ * The pointer volumeLR should be aligned to a minimum of 8 bytes.
+ * A typical value for volume is 0x1000 to align to a unity gain output of 20.12.
+ *
+ * In between calls to filterCoefficient, the phase is incremented by phaseIncrement, where
+ * phaseIncrement is calculated as inputSampling * phaseWrapLimit / outputSampling.
+ *
+ * The filter polyphase index is given by indexP = phase >> coefShift. Due to
+ * odd length symmetric filter, the polyphase index of the negative half depends on
+ * whether interpolation is used.
+ *
+ * The fractional siting between the polyphase indices is given by the bits below coefShift:
+ *
+ * lerpP = phase << 32 - coefShift >> 1; // for 32 bit unsigned phase multiply
+ * lerpP = phase << 32 - coefShift >> 17; // for 16 bit unsigned phase multiply
+ *
+ * For integer types, this is expressed as:
+ *
+ * lerpP = phase << sizeof(phase)*8 - coefShift
+ * >> (sizeof(phase)-sizeof(*coefs))*8 + 1;
+ *
+ * For floating point, lerpP is the fractional phase scaled to [0.0, 1.0):
+ *
+ * lerpP = (phase << 32 - coefShift) / (1 << 32); // floating point equivalent
+ */
+
+template<int CHANNELS, bool LOCKED, int STRIDE, typename TC, typename TI, typename TO>
+static inline
+void fir(TO* const out,
+ const uint32_t phase, const uint32_t phaseWrapLimit,
+ const int coefShift, const int halfNumCoefs, const TC* const coefs,
+ const TI* const samples, const TO* const volumeLR)
+{
+ // NOTE: be very careful when modifying the code here. register
+ // pressure is very high and a small change might cause the compiler
+ // to generate far less efficient code.
+ // Always sanity check the result with objdump or test-resample.
+
+ if (LOCKED) {
+ // locked polyphase (no interpolation)
+ // Compute the polyphase filter index on the positive and negative side.
+ uint32_t indexP = phase >> coefShift;
+ uint32_t indexN = (phaseWrapLimit - phase) >> coefShift;
+ const TC* coefsP = coefs + indexP*halfNumCoefs;
+ const TC* coefsN = coefs + indexN*halfNumCoefs;
+ const TI* sP = samples;
+ const TI* sN = samples + CHANNELS;
+
+ // dot product filter.
+ ProcessL<CHANNELS, STRIDE>(out,
+ halfNumCoefs, coefsP, coefsN, sP, sN, volumeLR);
+ } else {
+ // interpolated polyphase
+ // Compute the polyphase filter index on the positive and negative side.
+ uint32_t indexP = phase >> coefShift;
+ uint32_t indexN = (phaseWrapLimit - phase - 1) >> coefShift; // one's complement.
+ const TC* coefsP = coefs + indexP*halfNumCoefs;
+ const TC* coefsN = coefs + indexN*halfNumCoefs;
+ const TC* coefsP1 = coefsP + halfNumCoefs;
+ const TC* coefsN1 = coefsN + halfNumCoefs;
+ const TI* sP = samples;
+ const TI* sN = samples + CHANNELS;
+
+ // Interpolation fraction lerpP derived by shifting all the way up and down
+ // to clear the appropriate bits and align to the appropriate level
+ // for the integer multiply. The constants should resolve in compile time.
+ //
+ // The interpolated filter coefficient is derived as follows for the pos/neg half:
+ //
+ // interpolated[P] = index[P]*lerpP + index[P+1]*(1-lerpP)
+ // interpolated[N] = index[N+1]*lerpP + index[N]*(1-lerpP)
+
+ // on-the-fly interpolated dot product filter
+ if (is_same<TC, float>::value || is_same<TC, double>::value) {
+ static const TC scale = 1. / (65536. * 65536.); // scale phase bits to [0.0, 1.0)
+ TC lerpP = TC(phase << (sizeof(phase)*8 - coefShift)) * scale;
+
+ Process<CHANNELS, STRIDE>(out,
+ halfNumCoefs, coefsP, coefsN, coefsP1, coefsN1, sP, sN, lerpP, volumeLR);
+ } else {
+ uint32_t lerpP = phase << (sizeof(phase)*8 - coefShift)
+ >> ((sizeof(phase)-sizeof(*coefs))*8 + 1);
+
+ Process<CHANNELS, STRIDE>(out,
+ halfNumCoefs, coefsP, coefsN, coefsP1, coefsN1, sP, sN, lerpP, volumeLR);
+ }
+ }
+}
+
+} // namespace android
+
+#endif /*ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_H*/
diff --git a/media/libaudioprocessing/AudioResamplerFirProcessNeon.h b/media/libaudioprocessing/AudioResamplerFirProcessNeon.h
new file mode 100644
index 0000000..c335050
--- /dev/null
+++ b/media/libaudioprocessing/AudioResamplerFirProcessNeon.h
@@ -0,0 +1,1214 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_NEON_H
+#define ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_NEON_H
+
+namespace android {
+
+// depends on AudioResamplerFirOps.h, AudioResamplerFirProcess.h
+
+#if USE_NEON
+
+// use intrinsics if inline arm32 assembly is not possible
+#if !USE_INLINE_ASSEMBLY
+#define USE_INTRINSIC
+#endif
+
+// following intrinsics available only on ARM 64 bit ACLE
+#ifndef __aarch64__
+#undef vld1q_f32_x2
+#undef vld1q_s32_x2
+#endif
+
+#define TO_STRING2(x) #x
+#define TO_STRING(x) TO_STRING2(x)
+// uncomment to print GCC version, may be relevant for intrinsic optimizations
+/* #pragma message ("GCC version: " TO_STRING(__GNUC__) \
+ "." TO_STRING(__GNUC_MINOR__) \
+ "." TO_STRING(__GNUC_PATCHLEVEL__)) */
+
+//
+// NEON specializations are enabled for Process() and ProcessL() in AudioResamplerFirProcess.h
+//
+// Two variants are presented here:
+// ARM NEON inline assembly which appears up to 10-15% faster than intrinsics (gcc 4.9) for arm32.
+// ARM NEON intrinsics which can also be used by arm64 and x86/64 with NEON header.
+//
+
+// Macros to save a mono/stereo accumulator sample in q0 (and q4) as stereo out.
+// These are only used for inline assembly.
+#define ASSEMBLY_ACCUMULATE_MONO \
+ "vld1.s32 {d2}, [%[vLR]:64] \n"/* (1) load volumes */\
+ "vld1.s32 {d3}, %[out] \n"/* (2) unaligned load the output */\
+ "vpadd.s32 d0, d0, d1 \n"/* (1) add all 4 partial sums */\
+ "vpadd.s32 d0, d0, d0 \n"/* (1+4d) and replicate L/R */\
+ "vqrdmulh.s32 d0, d0, d2 \n"/* (2+3d) apply volume */\
+ "vqadd.s32 d3, d3, d0 \n"/* (1+4d) accumulate result (saturating) */\
+ "vst1.s32 {d3}, %[out] \n"/* (2+2d) store result */
+
+#define ASSEMBLY_ACCUMULATE_STEREO \
+ "vld1.s32 {d2}, [%[vLR]:64] \n"/* (1) load volumes*/\
+ "vld1.s32 {d3}, %[out] \n"/* (2) unaligned load the output*/\
+ "vpadd.s32 d0, d0, d1 \n"/* (1) add all 4 partial sums from q0*/\
+ "vpadd.s32 d8, d8, d9 \n"/* (1) add all 4 partial sums from q4*/\
+ "vpadd.s32 d0, d0, d8 \n"/* (1+4d) combine into L/R*/\
+ "vqrdmulh.s32 d0, d0, d2 \n"/* (2+3d) apply volume*/\
+ "vqadd.s32 d3, d3, d0 \n"/* (1+4d) accumulate result (saturating)*/\
+ "vst1.s32 {d3}, %[out] \n"/* (2+2d)store result*/
+
+template <int CHANNELS, int STRIDE, bool FIXED>
+static inline void ProcessNeonIntrinsic(int32_t* out,
+ int count,
+ const int16_t* coefsP,
+ const int16_t* coefsN,
+ const int16_t* sP,
+ const int16_t* sN,
+ const int32_t* volumeLR,
+ uint32_t lerpP,
+ const int16_t* coefsP1,
+ const int16_t* coefsN1)
+{
+ ALOG_ASSERT(count > 0 && (count & 7) == 0); // multiple of 8
+ static_assert(CHANNELS == 1 || CHANNELS == 2, "CHANNELS must be 1 or 2");
+
+ sP -= CHANNELS*((STRIDE>>1)-1);
+ coefsP = (const int16_t*)__builtin_assume_aligned(coefsP, 16);
+ coefsN = (const int16_t*)__builtin_assume_aligned(coefsN, 16);
+
+ int16x4_t interp;
+ if (!FIXED) {
+ interp = vdup_n_s16(lerpP);
+ //interp = (int16x4_t)vset_lane_s32 ((int32x2_t)lerpP, interp, 0);
+ coefsP1 = (const int16_t*)__builtin_assume_aligned(coefsP1, 16);
+ coefsN1 = (const int16_t*)__builtin_assume_aligned(coefsN1, 16);
+ }
+ int32x4_t accum, accum2;
+ // warning uninitialized if we use veorq_s32
+ // (alternative to below) accum = veorq_s32(accum, accum);
+ accum = vdupq_n_s32(0);
+ if (CHANNELS == 2) {
+ // (alternative to below) accum2 = veorq_s32(accum2, accum2);
+ accum2 = vdupq_n_s32(0);
+ }
+ do {
+ int16x8_t posCoef = vld1q_s16(coefsP);
+ coefsP += 8;
+ int16x8_t negCoef = vld1q_s16(coefsN);
+ coefsN += 8;
+ if (!FIXED) { // interpolate
+ int16x8_t posCoef1 = vld1q_s16(coefsP1);
+ coefsP1 += 8;
+ int16x8_t negCoef1 = vld1q_s16(coefsN1);
+ coefsN1 += 8;
+
+ posCoef1 = vsubq_s16(posCoef1, posCoef);
+ negCoef = vsubq_s16(negCoef, negCoef1);
+
+ posCoef1 = vqrdmulhq_lane_s16(posCoef1, interp, 0);
+ negCoef = vqrdmulhq_lane_s16(negCoef, interp, 0);
+
+ posCoef = vaddq_s16(posCoef, posCoef1);
+ negCoef = vaddq_s16(negCoef, negCoef1);
+ }
+ switch (CHANNELS) {
+ case 1: {
+ int16x8_t posSamp = vld1q_s16(sP);
+ int16x8_t negSamp = vld1q_s16(sN);
+ sN += 8;
+ posSamp = vrev64q_s16(posSamp);
+
+ // dot product
+ accum = vmlal_s16(accum, vget_low_s16(posSamp), vget_high_s16(posCoef)); // reversed
+ accum = vmlal_s16(accum, vget_high_s16(posSamp), vget_low_s16(posCoef)); // reversed
+ accum = vmlal_s16(accum, vget_low_s16(negSamp), vget_low_s16(negCoef));
+ accum = vmlal_s16(accum, vget_high_s16(negSamp), vget_high_s16(negCoef));
+ sP -= 8;
+ } break;
+ case 2: {
+ int16x8x2_t posSamp = vld2q_s16(sP);
+ int16x8x2_t negSamp = vld2q_s16(sN);
+ sN += 16;
+ posSamp.val[0] = vrev64q_s16(posSamp.val[0]);
+ posSamp.val[1] = vrev64q_s16(posSamp.val[1]);
+
+ // dot product
+ accum = vmlal_s16(accum, vget_low_s16(posSamp.val[0]), vget_high_s16(posCoef)); // r
+ accum = vmlal_s16(accum, vget_high_s16(posSamp.val[0]), vget_low_s16(posCoef)); // r
+ accum2 = vmlal_s16(accum2, vget_low_s16(posSamp.val[1]), vget_high_s16(posCoef)); // r
+ accum2 = vmlal_s16(accum2, vget_high_s16(posSamp.val[1]), vget_low_s16(posCoef)); // r
+ accum = vmlal_s16(accum, vget_low_s16(negSamp.val[0]), vget_low_s16(negCoef));
+ accum = vmlal_s16(accum, vget_high_s16(negSamp.val[0]), vget_high_s16(negCoef));
+ accum2 = vmlal_s16(accum2, vget_low_s16(negSamp.val[1]), vget_low_s16(negCoef));
+ accum2 = vmlal_s16(accum2, vget_high_s16(negSamp.val[1]), vget_high_s16(negCoef));
+ sP -= 16;
+ } break;
+ }
+ } while (count -= 8);
+
+ // multiply by volume and save
+ volumeLR = (const int32_t*)__builtin_assume_aligned(volumeLR, 8);
+ int32x2_t vLR = vld1_s32(volumeLR);
+ int32x2_t outSamp = vld1_s32(out);
+ // combine and funnel down accumulator
+ int32x2_t outAccum = vpadd_s32(vget_low_s32(accum), vget_high_s32(accum));
+ if (CHANNELS == 1) {
+ // duplicate accum to both L and R
+ outAccum = vpadd_s32(outAccum, outAccum);
+ } else if (CHANNELS == 2) {
+ // accum2 contains R, fold in
+ int32x2_t outAccum2 = vpadd_s32(vget_low_s32(accum2), vget_high_s32(accum2));
+ outAccum = vpadd_s32(outAccum, outAccum2);
+ }
+ outAccum = vqrdmulh_s32(outAccum, vLR);
+ outSamp = vqadd_s32(outSamp, outAccum);
+ vst1_s32(out, outSamp);
+}
+
+template <int CHANNELS, int STRIDE, bool FIXED>
+static inline void ProcessNeonIntrinsic(int32_t* out,
+ int count,
+ const int32_t* coefsP,
+ const int32_t* coefsN,
+ const int16_t* sP,
+ const int16_t* sN,
+ const int32_t* volumeLR,
+ uint32_t lerpP,
+ const int32_t* coefsP1,
+ const int32_t* coefsN1)
+{
+ ALOG_ASSERT(count > 0 && (count & 7) == 0); // multiple of 8
+ static_assert(CHANNELS == 1 || CHANNELS == 2, "CHANNELS must be 1 or 2");
+
+ sP -= CHANNELS*((STRIDE>>1)-1);
+ coefsP = (const int32_t*)__builtin_assume_aligned(coefsP, 16);
+ coefsN = (const int32_t*)__builtin_assume_aligned(coefsN, 16);
+
+ int32x2_t interp;
+ if (!FIXED) {
+ interp = vdup_n_s32(lerpP);
+ coefsP1 = (const int32_t*)__builtin_assume_aligned(coefsP1, 16);
+ coefsN1 = (const int32_t*)__builtin_assume_aligned(coefsN1, 16);
+ }
+ int32x4_t accum, accum2;
+ // warning uninitialized if we use veorq_s32
+ // (alternative to below) accum = veorq_s32(accum, accum);
+ accum = vdupq_n_s32(0);
+ if (CHANNELS == 2) {
+ // (alternative to below) accum2 = veorq_s32(accum2, accum2);
+ accum2 = vdupq_n_s32(0);
+ }
+ do {
+#ifdef vld1q_s32_x2
+ int32x4x2_t posCoef = vld1q_s32_x2(coefsP);
+ coefsP += 8;
+ int32x4x2_t negCoef = vld1q_s32_x2(coefsN);
+ coefsN += 8;
+#else
+ int32x4x2_t posCoef;
+ posCoef.val[0] = vld1q_s32(coefsP);
+ coefsP += 4;
+ posCoef.val[1] = vld1q_s32(coefsP);
+ coefsP += 4;
+ int32x4x2_t negCoef;
+ negCoef.val[0] = vld1q_s32(coefsN);
+ coefsN += 4;
+ negCoef.val[1] = vld1q_s32(coefsN);
+ coefsN += 4;
+#endif
+ if (!FIXED) { // interpolate
+#ifdef vld1q_s32_x2
+ int32x4x2_t posCoef1 = vld1q_s32_x2(coefsP1);
+ coefsP1 += 8;
+ int32x4x2_t negCoef1 = vld1q_s32_x2(coefsN1);
+ coefsN1 += 8;
+#else
+ int32x4x2_t posCoef1;
+ posCoef1.val[0] = vld1q_s32(coefsP1);
+ coefsP1 += 4;
+ posCoef1.val[1] = vld1q_s32(coefsP1);
+ coefsP1 += 4;
+ int32x4x2_t negCoef1;
+ negCoef1.val[0] = vld1q_s32(coefsN1);
+ coefsN1 += 4;
+ negCoef1.val[1] = vld1q_s32(coefsN1);
+ coefsN1 += 4;
+#endif
+
+ posCoef1.val[0] = vsubq_s32(posCoef1.val[0], posCoef.val[0]);
+ posCoef1.val[1] = vsubq_s32(posCoef1.val[1], posCoef.val[1]);
+ negCoef.val[0] = vsubq_s32(negCoef.val[0], negCoef1.val[0]);
+ negCoef.val[1] = vsubq_s32(negCoef.val[1], negCoef1.val[1]);
+
+ posCoef1.val[0] = vqrdmulhq_lane_s32(posCoef1.val[0], interp, 0);
+ posCoef1.val[1] = vqrdmulhq_lane_s32(posCoef1.val[1], interp, 0);
+ negCoef.val[0] = vqrdmulhq_lane_s32(negCoef.val[0], interp, 0);
+ negCoef.val[1] = vqrdmulhq_lane_s32(negCoef.val[1], interp, 0);
+
+ posCoef.val[0] = vaddq_s32(posCoef.val[0], posCoef1.val[0]);
+ posCoef.val[1] = vaddq_s32(posCoef.val[1], posCoef1.val[1]);
+ negCoef.val[0] = vaddq_s32(negCoef.val[0], negCoef1.val[0]);
+ negCoef.val[1] = vaddq_s32(negCoef.val[1], negCoef1.val[1]);
+ }
+ switch (CHANNELS) {
+ case 1: {
+ int16x8_t posSamp = vld1q_s16(sP);
+ int16x8_t negSamp = vld1q_s16(sN);
+ sN += 8;
+ posSamp = vrev64q_s16(posSamp);
+
+ int32x4_t posSamp0 = vshll_n_s16(vget_low_s16(posSamp), 15);
+ int32x4_t posSamp1 = vshll_n_s16(vget_high_s16(posSamp), 15);
+ int32x4_t negSamp0 = vshll_n_s16(vget_low_s16(negSamp), 15);
+ int32x4_t negSamp1 = vshll_n_s16(vget_high_s16(negSamp), 15);
+
+ // dot product
+ posSamp0 = vqrdmulhq_s32(posSamp0, posCoef.val[1]); // reversed
+ posSamp1 = vqrdmulhq_s32(posSamp1, posCoef.val[0]); // reversed
+ negSamp0 = vqrdmulhq_s32(negSamp0, negCoef.val[0]);
+ negSamp1 = vqrdmulhq_s32(negSamp1, negCoef.val[1]);
+
+ accum = vaddq_s32(accum, posSamp0);
+ negSamp0 = vaddq_s32(negSamp0, negSamp1);
+ accum = vaddq_s32(accum, posSamp1);
+ accum = vaddq_s32(accum, negSamp0);
+
+ sP -= 8;
+ } break;
+ case 2: {
+ int16x8x2_t posSamp = vld2q_s16(sP);
+ int16x8x2_t negSamp = vld2q_s16(sN);
+ sN += 16;
+ posSamp.val[0] = vrev64q_s16(posSamp.val[0]);
+ posSamp.val[1] = vrev64q_s16(posSamp.val[1]);
+
+ // left
+ int32x4_t posSamp0 = vshll_n_s16(vget_low_s16(posSamp.val[0]), 15);
+ int32x4_t posSamp1 = vshll_n_s16(vget_high_s16(posSamp.val[0]), 15);
+ int32x4_t negSamp0 = vshll_n_s16(vget_low_s16(negSamp.val[0]), 15);
+ int32x4_t negSamp1 = vshll_n_s16(vget_high_s16(negSamp.val[0]), 15);
+
+ // dot product
+ posSamp0 = vqrdmulhq_s32(posSamp0, posCoef.val[1]); // reversed
+ posSamp1 = vqrdmulhq_s32(posSamp1, posCoef.val[0]); // reversed
+ negSamp0 = vqrdmulhq_s32(negSamp0, negCoef.val[0]);
+ negSamp1 = vqrdmulhq_s32(negSamp1, negCoef.val[1]);
+
+ accum = vaddq_s32(accum, posSamp0);
+ negSamp0 = vaddq_s32(negSamp0, negSamp1);
+ accum = vaddq_s32(accum, posSamp1);
+ accum = vaddq_s32(accum, negSamp0);
+
+ // right
+ posSamp0 = vshll_n_s16(vget_low_s16(posSamp.val[1]), 15);
+ posSamp1 = vshll_n_s16(vget_high_s16(posSamp.val[1]), 15);
+ negSamp0 = vshll_n_s16(vget_low_s16(negSamp.val[1]), 15);
+ negSamp1 = vshll_n_s16(vget_high_s16(negSamp.val[1]), 15);
+
+ // dot product
+ posSamp0 = vqrdmulhq_s32(posSamp0, posCoef.val[1]); // reversed
+ posSamp1 = vqrdmulhq_s32(posSamp1, posCoef.val[0]); // reversed
+ negSamp0 = vqrdmulhq_s32(negSamp0, negCoef.val[0]);
+ negSamp1 = vqrdmulhq_s32(negSamp1, negCoef.val[1]);
+
+ accum2 = vaddq_s32(accum2, posSamp0);
+ negSamp0 = vaddq_s32(negSamp0, negSamp1);
+ accum2 = vaddq_s32(accum2, posSamp1);
+ accum2 = vaddq_s32(accum2, negSamp0);
+
+ sP -= 16;
+ } break;
+ }
+ } while (count -= 8);
+
+ // multiply by volume and save
+ volumeLR = (const int32_t*)__builtin_assume_aligned(volumeLR, 8);
+ int32x2_t vLR = vld1_s32(volumeLR);
+ int32x2_t outSamp = vld1_s32(out);
+ // combine and funnel down accumulator
+ int32x2_t outAccum = vpadd_s32(vget_low_s32(accum), vget_high_s32(accum));
+ if (CHANNELS == 1) {
+ // duplicate accum to both L and R
+ outAccum = vpadd_s32(outAccum, outAccum);
+ } else if (CHANNELS == 2) {
+ // accum2 contains R, fold in
+ int32x2_t outAccum2 = vpadd_s32(vget_low_s32(accum2), vget_high_s32(accum2));
+ outAccum = vpadd_s32(outAccum, outAccum2);
+ }
+ outAccum = vqrdmulh_s32(outAccum, vLR);
+ outSamp = vqadd_s32(outSamp, outAccum);
+ vst1_s32(out, outSamp);
+}
+
+template <int CHANNELS, int STRIDE, bool FIXED>
+static inline void ProcessNeonIntrinsic(float* out,
+ int count,
+ const float* coefsP,
+ const float* coefsN,
+ const float* sP,
+ const float* sN,
+ const float* volumeLR,
+ float lerpP,
+ const float* coefsP1,
+ const float* coefsN1)
+{
+ ALOG_ASSERT(count > 0 && (count & 7) == 0); // multiple of 8
+ static_assert(CHANNELS == 1 || CHANNELS == 2, "CHANNELS must be 1 or 2");
+
+ sP -= CHANNELS*((STRIDE>>1)-1);
+ coefsP = (const float*)__builtin_assume_aligned(coefsP, 16);
+ coefsN = (const float*)__builtin_assume_aligned(coefsN, 16);
+
+ float32x2_t interp;
+ if (!FIXED) {
+ interp = vdup_n_f32(lerpP);
+ coefsP1 = (const float*)__builtin_assume_aligned(coefsP1, 16);
+ coefsN1 = (const float*)__builtin_assume_aligned(coefsN1, 16);
+ }
+ float32x4_t accum, accum2;
+ // warning uninitialized if we use veorq_s32
+ // (alternative to below) accum = veorq_s32(accum, accum);
+ accum = vdupq_n_f32(0);
+ if (CHANNELS == 2) {
+ // (alternative to below) accum2 = veorq_s32(accum2, accum2);
+ accum2 = vdupq_n_f32(0);
+ }
+ do {
+#ifdef vld1q_f32_x2
+ float32x4x2_t posCoef = vld1q_f32_x2(coefsP);
+ coefsP += 8;
+ float32x4x2_t negCoef = vld1q_f32_x2(coefsN);
+ coefsN += 8;
+#else
+ float32x4x2_t posCoef;
+ posCoef.val[0] = vld1q_f32(coefsP);
+ coefsP += 4;
+ posCoef.val[1] = vld1q_f32(coefsP);
+ coefsP += 4;
+ float32x4x2_t negCoef;
+ negCoef.val[0] = vld1q_f32(coefsN);
+ coefsN += 4;
+ negCoef.val[1] = vld1q_f32(coefsN);
+ coefsN += 4;
+#endif
+ if (!FIXED) { // interpolate
+#ifdef vld1q_f32_x2
+ float32x4x2_t posCoef1 = vld1q_f32_x2(coefsP1);
+ coefsP1 += 8;
+ float32x4x2_t negCoef1 = vld1q_f32_x2(coefsN1);
+ coefsN1 += 8;
+#else
+ float32x4x2_t posCoef1;
+ posCoef1.val[0] = vld1q_f32(coefsP1);
+ coefsP1 += 4;
+ posCoef1.val[1] = vld1q_f32(coefsP1);
+ coefsP1 += 4;
+ float32x4x2_t negCoef1;
+ negCoef1.val[0] = vld1q_f32(coefsN1);
+ coefsN1 += 4;
+ negCoef1.val[1] = vld1q_f32(coefsN1);
+ coefsN1 += 4;
+#endif
+ posCoef1.val[0] = vsubq_f32(posCoef1.val[0], posCoef.val[0]);
+ posCoef1.val[1] = vsubq_f32(posCoef1.val[1], posCoef.val[1]);
+ negCoef.val[0] = vsubq_f32(negCoef.val[0], negCoef1.val[0]);
+ negCoef.val[1] = vsubq_f32(negCoef.val[1], negCoef1.val[1]);
+
+ posCoef.val[0] = vmlaq_lane_f32(posCoef.val[0], posCoef1.val[0], interp, 0);
+ posCoef.val[1] = vmlaq_lane_f32(posCoef.val[1], posCoef1.val[1], interp, 0);
+ negCoef.val[0] = vmlaq_lane_f32(negCoef1.val[0], negCoef.val[0], interp, 0); // rev
+ negCoef.val[1] = vmlaq_lane_f32(negCoef1.val[1], negCoef.val[1], interp, 0); // rev
+ }
+ switch (CHANNELS) {
+ case 1: {
+#ifdef vld1q_f32_x2
+ float32x4x2_t posSamp = vld1q_f32_x2(sP);
+ float32x4x2_t negSamp = vld1q_f32_x2(sN);
+ sN += 8;
+ sP -= 8;
+#else
+ float32x4x2_t posSamp;
+ posSamp.val[0] = vld1q_f32(sP);
+ sP += 4;
+ posSamp.val[1] = vld1q_f32(sP);
+ sP -= 12;
+ float32x4x2_t negSamp;
+ negSamp.val[0] = vld1q_f32(sN);
+ sN += 4;
+ negSamp.val[1] = vld1q_f32(sN);
+ sN += 4;
+#endif
+ // effectively we want a vrev128q_f32()
+ posSamp.val[0] = vrev64q_f32(posSamp.val[0]);
+ posSamp.val[1] = vrev64q_f32(posSamp.val[1]);
+ posSamp.val[0] = vcombine_f32(
+ vget_high_f32(posSamp.val[0]), vget_low_f32(posSamp.val[0]));
+ posSamp.val[1] = vcombine_f32(
+ vget_high_f32(posSamp.val[1]), vget_low_f32(posSamp.val[1]));
+
+ accum = vmlaq_f32(accum, posSamp.val[0], posCoef.val[1]);
+ accum = vmlaq_f32(accum, posSamp.val[1], posCoef.val[0]);
+ accum = vmlaq_f32(accum, negSamp.val[0], negCoef.val[0]);
+ accum = vmlaq_f32(accum, negSamp.val[1], negCoef.val[1]);
+ } break;
+ case 2: {
+ float32x4x2_t posSamp0 = vld2q_f32(sP);
+ sP += 8;
+ float32x4x2_t negSamp0 = vld2q_f32(sN);
+ sN += 8;
+ posSamp0.val[0] = vrev64q_f32(posSamp0.val[0]);
+ posSamp0.val[1] = vrev64q_f32(posSamp0.val[1]);
+ posSamp0.val[0] = vcombine_f32(
+ vget_high_f32(posSamp0.val[0]), vget_low_f32(posSamp0.val[0]));
+ posSamp0.val[1] = vcombine_f32(
+ vget_high_f32(posSamp0.val[1]), vget_low_f32(posSamp0.val[1]));
+
+ float32x4x2_t posSamp1 = vld2q_f32(sP);
+ sP -= 24;
+ float32x4x2_t negSamp1 = vld2q_f32(sN);
+ sN += 8;
+ posSamp1.val[0] = vrev64q_f32(posSamp1.val[0]);
+ posSamp1.val[1] = vrev64q_f32(posSamp1.val[1]);
+ posSamp1.val[0] = vcombine_f32(
+ vget_high_f32(posSamp1.val[0]), vget_low_f32(posSamp1.val[0]));
+ posSamp1.val[1] = vcombine_f32(
+ vget_high_f32(posSamp1.val[1]), vget_low_f32(posSamp1.val[1]));
+
+ // Note: speed is affected by accumulation order.
+ // Also, speed appears slower using vmul/vadd instead of vmla for
+ // stereo case, comparable for mono.
+
+ accum = vmlaq_f32(accum, negSamp0.val[0], negCoef.val[0]);
+ accum = vmlaq_f32(accum, negSamp1.val[0], negCoef.val[1]);
+ accum2 = vmlaq_f32(accum2, negSamp0.val[1], negCoef.val[0]);
+ accum2 = vmlaq_f32(accum2, negSamp1.val[1], negCoef.val[1]);
+
+ accum = vmlaq_f32(accum, posSamp0.val[0], posCoef.val[1]); // reversed
+ accum = vmlaq_f32(accum, posSamp1.val[0], posCoef.val[0]); // reversed
+ accum2 = vmlaq_f32(accum2, posSamp0.val[1], posCoef.val[1]); // reversed
+ accum2 = vmlaq_f32(accum2, posSamp1.val[1], posCoef.val[0]); // reversed
+ } break;
+ }
+ } while (count -= 8);
+
+ // multiply by volume and save
+ volumeLR = (const float*)__builtin_assume_aligned(volumeLR, 8);
+ float32x2_t vLR = vld1_f32(volumeLR);
+ float32x2_t outSamp = vld1_f32(out);
+ // combine and funnel down accumulator
+ float32x2_t outAccum = vpadd_f32(vget_low_f32(accum), vget_high_f32(accum));
+ if (CHANNELS == 1) {
+ // duplicate accum to both L and R
+ outAccum = vpadd_f32(outAccum, outAccum);
+ } else if (CHANNELS == 2) {
+ // accum2 contains R, fold in
+ float32x2_t outAccum2 = vpadd_f32(vget_low_f32(accum2), vget_high_f32(accum2));
+ outAccum = vpadd_f32(outAccum, outAccum2);
+ }
+ outSamp = vmla_f32(outSamp, outAccum, vLR);
+ vst1_f32(out, outSamp);
+}
+
+template <>
+inline void ProcessL<1, 16>(int32_t* const out,
+ int count,
+ const int16_t* coefsP,
+ const int16_t* coefsN,
+ const int16_t* sP,
+ const int16_t* sN,
+ const int32_t* const volumeLR)
+{
+#ifdef USE_INTRINSIC
+ ProcessNeonIntrinsic<1, 16, true>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+ 0 /*lerpP*/, NULL /*coefsP1*/, NULL /*coefsN1*/);
+#else
+ const int CHANNELS = 1; // template specialization does not preserve params
+ const int STRIDE = 16;
+ sP -= CHANNELS*((STRIDE>>1)-1);
+ asm (
+ "veor q0, q0, q0 \n"// (0 - combines+) accumulator = 0
+
+ "1: \n"
+
+ "vld1.16 {q2}, [%[sP]] \n"// (2+0d) load 8 16-bits mono samples
+ "vld1.16 {q3}, [%[sN]]! \n"// (2) load 8 16-bits mono samples
+ "vld1.16 {q8}, [%[coefsP0]:128]! \n"// (1) load 8 16-bits coefs
+ "vld1.16 {q10}, [%[coefsN0]:128]! \n"// (1) load 8 16-bits coefs
+
+ "vrev64.16 q2, q2 \n"// (1) reverse s3, s2, s1, s0, s7, s6, s5, s4
+
+ // reordering the vmlal to do d6, d7 before d4, d5 is slower(?)
+ "vmlal.s16 q0, d4, d17 \n"// (1+0d) multiply (reversed)samples by coef
+ "vmlal.s16 q0, d5, d16 \n"// (1) multiply (reversed)samples by coef
+ "vmlal.s16 q0, d6, d20 \n"// (1) multiply neg samples
+ "vmlal.s16 q0, d7, d21 \n"// (1) multiply neg samples
+
+ // moving these ARM instructions before neon above seems to be slower
+ "subs %[count], %[count], #8 \n"// (1) update loop counter
+ "sub %[sP], %[sP], #16 \n"// (0) move pointer to next set of samples
+
+ // sP used after branch (warning)
+ "bne 1b \n"// loop
+
+ ASSEMBLY_ACCUMULATE_MONO
+
+ : [out] "=Uv" (out[0]),
+ [count] "+r" (count),
+ [coefsP0] "+r" (coefsP),
+ [coefsN0] "+r" (coefsN),
+ [sP] "+r" (sP),
+ [sN] "+r" (sN)
+ : [vLR] "r" (volumeLR)
+ : "cc", "memory",
+ "q0", "q1", "q2", "q3",
+ "q8", "q10"
+ );
+#endif
+}
+
+template <>
+inline void ProcessL<2, 16>(int32_t* const out,
+ int count,
+ const int16_t* coefsP,
+ const int16_t* coefsN,
+ const int16_t* sP,
+ const int16_t* sN,
+ const int32_t* const volumeLR)
+{
+#ifdef USE_INTRINSIC
+ ProcessNeonIntrinsic<2, 16, true>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+ 0 /*lerpP*/, NULL /*coefsP1*/, NULL /*coefsN1*/);
+#else
+ const int CHANNELS = 2; // template specialization does not preserve params
+ const int STRIDE = 16;
+ sP -= CHANNELS*((STRIDE>>1)-1);
+ asm (
+ "veor q0, q0, q0 \n"// (1) acc_L = 0
+ "veor q4, q4, q4 \n"// (0 combines+) acc_R = 0
+
+ "1: \n"
+
+ "vld2.16 {q2, q3}, [%[sP]] \n"// (3+0d) load 8 16-bits stereo frames
+ "vld2.16 {q5, q6}, [%[sN]]! \n"// (3) load 8 16-bits stereo frames
+ "vld1.16 {q8}, [%[coefsP0]:128]! \n"// (1) load 8 16-bits coefs
+ "vld1.16 {q10}, [%[coefsN0]:128]! \n"// (1) load 8 16-bits coefs
+
+ "vrev64.16 q2, q2 \n"// (1) reverse 8 samples of positive left
+ "vrev64.16 q3, q3 \n"// (0 combines+) reverse positive right
+
+ "vmlal.s16 q0, d4, d17 \n"// (1) multiply (reversed) samples left
+ "vmlal.s16 q0, d5, d16 \n"// (1) multiply (reversed) samples left
+ "vmlal.s16 q4, d6, d17 \n"// (1) multiply (reversed) samples right
+ "vmlal.s16 q4, d7, d16 \n"// (1) multiply (reversed) samples right
+ "vmlal.s16 q0, d10, d20 \n"// (1) multiply samples left
+ "vmlal.s16 q0, d11, d21 \n"// (1) multiply samples left
+ "vmlal.s16 q4, d12, d20 \n"// (1) multiply samples right
+ "vmlal.s16 q4, d13, d21 \n"// (1) multiply samples right
+
+ // moving these ARM before neon seems to be slower
+ "subs %[count], %[count], #8 \n"// (1) update loop counter
+ "sub %[sP], %[sP], #32 \n"// (0) move pointer to next set of samples
+
+ // sP used after branch (warning)
+ "bne 1b \n"// loop
+
+ ASSEMBLY_ACCUMULATE_STEREO
+
+ : [out] "=Uv" (out[0]),
+ [count] "+r" (count),
+ [coefsP0] "+r" (coefsP),
+ [coefsN0] "+r" (coefsN),
+ [sP] "+r" (sP),
+ [sN] "+r" (sN)
+ : [vLR] "r" (volumeLR)
+ : "cc", "memory",
+ "q0", "q1", "q2", "q3",
+ "q4", "q5", "q6",
+ "q8", "q10"
+ );
+#endif
+}
+
+template <>
+inline void Process<1, 16>(int32_t* const out,
+ int count,
+ const int16_t* coefsP,
+ const int16_t* coefsN,
+ const int16_t* coefsP1,
+ const int16_t* coefsN1,
+ const int16_t* sP,
+ const int16_t* sN,
+ uint32_t lerpP,
+ const int32_t* const volumeLR)
+{
+#ifdef USE_INTRINSIC
+ ProcessNeonIntrinsic<1, 16, false>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+ lerpP, coefsP1, coefsN1);
+#else
+
+ const int CHANNELS = 1; // template specialization does not preserve params
+ const int STRIDE = 16;
+ sP -= CHANNELS*((STRIDE>>1)-1);
+ asm (
+ "vmov.32 d2[0], %[lerpP] \n"// load the positive phase S32 Q15
+ "veor q0, q0, q0 \n"// (0 - combines+) accumulator = 0
+
+ "1: \n"
+
+ "vld1.16 {q2}, [%[sP]] \n"// (2+0d) load 8 16-bits mono samples
+ "vld1.16 {q3}, [%[sN]]! \n"// (2) load 8 16-bits mono samples
+ "vld1.16 {q8}, [%[coefsP0]:128]! \n"// (1) load 8 16-bits coefs
+ "vld1.16 {q9}, [%[coefsP1]:128]! \n"// (1) load 8 16-bits coefs for interpolation
+ "vld1.16 {q10}, [%[coefsN1]:128]! \n"// (1) load 8 16-bits coefs
+ "vld1.16 {q11}, [%[coefsN0]:128]! \n"// (1) load 8 16-bits coefs for interpolation
+
+ "vsub.s16 q9, q9, q8 \n"// (1) interpolate (step1) 1st set of coefs
+ "vsub.s16 q11, q11, q10 \n"// (1) interpolate (step1) 2nd set of coefs
+
+ "vqrdmulh.s16 q9, q9, d2[0] \n"// (2) interpolate (step2) 1st set of coefs
+ "vqrdmulh.s16 q11, q11, d2[0] \n"// (2) interpolate (step2) 2nd set of coefs
+
+ "vrev64.16 q2, q2 \n"// (1) reverse s3, s2, s1, s0, s7, s6, s5, s4
+
+ "vadd.s16 q8, q8, q9 \n"// (1+2d) interpolate (step3) 1st set
+ "vadd.s16 q10, q10, q11 \n"// (1+1d) interpolate (step3) 2nd set
+
+ // reordering the vmlal to do d6, d7 before d4, d5 is slower(?)
+ "vmlal.s16 q0, d4, d17 \n"// (1+0d) multiply reversed samples by coef
+ "vmlal.s16 q0, d5, d16 \n"// (1) multiply reversed samples by coef
+ "vmlal.s16 q0, d6, d20 \n"// (1) multiply neg samples
+ "vmlal.s16 q0, d7, d21 \n"// (1) multiply neg samples
+
+ // moving these ARM instructions before neon above seems to be slower
+ "subs %[count], %[count], #8 \n"// (1) update loop counter
+ "sub %[sP], %[sP], #16 \n"// (0) move pointer to next set of samples
+
+ // sP used after branch (warning)
+ "bne 1b \n"// loop
+
+ ASSEMBLY_ACCUMULATE_MONO
+
+ : [out] "=Uv" (out[0]),
+ [count] "+r" (count),
+ [coefsP0] "+r" (coefsP),
+ [coefsN0] "+r" (coefsN),
+ [coefsP1] "+r" (coefsP1),
+ [coefsN1] "+r" (coefsN1),
+ [sP] "+r" (sP),
+ [sN] "+r" (sN)
+ : [lerpP] "r" (lerpP),
+ [vLR] "r" (volumeLR)
+ : "cc", "memory",
+ "q0", "q1", "q2", "q3",
+ "q8", "q9", "q10", "q11"
+ );
+#endif
+}
+
+template <>
+inline void Process<2, 16>(int32_t* const out,
+ int count,
+ const int16_t* coefsP,
+ const int16_t* coefsN,
+ const int16_t* coefsP1,
+ const int16_t* coefsN1,
+ const int16_t* sP,
+ const int16_t* sN,
+ uint32_t lerpP,
+ const int32_t* const volumeLR)
+{
+#ifdef USE_INTRINSIC
+ ProcessNeonIntrinsic<2, 16, false>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+ lerpP, coefsP1, coefsN1);
+#else
+ const int CHANNELS = 2; // template specialization does not preserve params
+ const int STRIDE = 16;
+ sP -= CHANNELS*((STRIDE>>1)-1);
+ asm (
+ "vmov.32 d2[0], %[lerpP] \n"// load the positive phase
+ "veor q0, q0, q0 \n"// (1) acc_L = 0
+ "veor q4, q4, q4 \n"// (0 combines+) acc_R = 0
+
+ "1: \n"
+
+ "vld2.16 {q2, q3}, [%[sP]] \n"// (3+0d) load 8 16-bits stereo frames
+ "vld2.16 {q5, q6}, [%[sN]]! \n"// (3) load 8 16-bits stereo frames
+ "vld1.16 {q8}, [%[coefsP0]:128]! \n"// (1) load 8 16-bits coefs
+ "vld1.16 {q9}, [%[coefsP1]:128]! \n"// (1) load 8 16-bits coefs for interpolation
+ "vld1.16 {q10}, [%[coefsN1]:128]! \n"// (1) load 8 16-bits coefs
+ "vld1.16 {q11}, [%[coefsN0]:128]! \n"// (1) load 8 16-bits coefs for interpolation
+
+ "vsub.s16 q9, q9, q8 \n"// (1) interpolate (step1) 1st set of coefs
+ "vsub.s16 q11, q11, q10 \n"// (1) interpolate (step1) 2nd set of coefs
+
+ "vqrdmulh.s16 q9, q9, d2[0] \n"// (2) interpolate (step2) 1st set of coefs
+ "vqrdmulh.s16 q11, q11, d2[0] \n"// (2) interpolate (step2) 2nd set of coefs
+
+ "vrev64.16 q2, q2 \n"// (1) reverse 8 samples of positive left
+ "vrev64.16 q3, q3 \n"// (1) reverse 8 samples of positive right
+
+ "vadd.s16 q8, q8, q9 \n"// (1+1d) interpolate (step3) 1st set
+ "vadd.s16 q10, q10, q11 \n"// (1+1d) interpolate (step3) 2nd set
+
+ "vmlal.s16 q0, d4, d17 \n"// (1) multiply reversed samples left
+ "vmlal.s16 q0, d5, d16 \n"// (1) multiply reversed samples left
+ "vmlal.s16 q4, d6, d17 \n"// (1) multiply reversed samples right
+ "vmlal.s16 q4, d7, d16 \n"// (1) multiply reversed samples right
+ "vmlal.s16 q0, d10, d20 \n"// (1) multiply samples left
+ "vmlal.s16 q0, d11, d21 \n"// (1) multiply samples left
+ "vmlal.s16 q4, d12, d20 \n"// (1) multiply samples right
+ "vmlal.s16 q4, d13, d21 \n"// (1) multiply samples right
+
+ // moving these ARM before neon seems to be slower
+ "subs %[count], %[count], #8 \n"// (1) update loop counter
+ "sub %[sP], %[sP], #32 \n"// (0) move pointer to next set of samples
+
+ // sP used after branch (warning)
+ "bne 1b \n"// loop
+
+ ASSEMBLY_ACCUMULATE_STEREO
+
+ : [out] "=Uv" (out[0]),
+ [count] "+r" (count),
+ [coefsP0] "+r" (coefsP),
+ [coefsN0] "+r" (coefsN),
+ [coefsP1] "+r" (coefsP1),
+ [coefsN1] "+r" (coefsN1),
+ [sP] "+r" (sP),
+ [sN] "+r" (sN)
+ : [lerpP] "r" (lerpP),
+ [vLR] "r" (volumeLR)
+ : "cc", "memory",
+ "q0", "q1", "q2", "q3",
+ "q4", "q5", "q6",
+ "q8", "q9", "q10", "q11"
+ );
+#endif
+}
+
+template <>
+inline void ProcessL<1, 16>(int32_t* const out,
+ int count,
+ const int32_t* coefsP,
+ const int32_t* coefsN,
+ const int16_t* sP,
+ const int16_t* sN,
+ const int32_t* const volumeLR)
+{
+#ifdef USE_INTRINSIC
+ ProcessNeonIntrinsic<1, 16, true>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+ 0 /*lerpP*/, NULL /*coefsP1*/, NULL /*coefsN1*/);
+#else
+ const int CHANNELS = 1; // template specialization does not preserve params
+ const int STRIDE = 16;
+ sP -= CHANNELS*((STRIDE>>1)-1);
+ asm (
+ "veor q0, q0, q0 \n"// result, initialize to 0
+
+ "1: \n"
+
+ "vld1.16 {q2}, [%[sP]] \n"// load 8 16-bits mono samples
+ "vld1.16 {q3}, [%[sN]]! \n"// load 8 16-bits mono samples
+ "vld1.32 {q8, q9}, [%[coefsP0]:128]! \n"// load 8 32-bits coefs
+ "vld1.32 {q10, q11}, [%[coefsN0]:128]! \n"// load 8 32-bits coefs
+
+ "vrev64.16 q2, q2 \n"// reverse 8 samples of the positive side
+
+ "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits
+ "vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits
+
+ "vshll.s16 q14, d6, #15 \n"// extend samples to 31 bits
+ "vshll.s16 q15, d7, #15 \n"// extend samples to 31 bits
+
+ "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples
+ "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples
+ "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples
+ "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples
+
+ "vadd.s32 q0, q0, q12 \n"// accumulate result
+ "vadd.s32 q13, q13, q14 \n"// accumulate result
+ "vadd.s32 q0, q0, q15 \n"// accumulate result
+ "vadd.s32 q0, q0, q13 \n"// accumulate result
+
+ "sub %[sP], %[sP], #16 \n"// move pointer to next set of samples
+ "subs %[count], %[count], #8 \n"// update loop counter
+
+ "bne 1b \n"// loop
+
+ ASSEMBLY_ACCUMULATE_MONO
+
+ : [out] "=Uv" (out[0]),
+ [count] "+r" (count),
+ [coefsP0] "+r" (coefsP),
+ [coefsN0] "+r" (coefsN),
+ [sP] "+r" (sP),
+ [sN] "+r" (sN)
+ : [vLR] "r" (volumeLR)
+ : "cc", "memory",
+ "q0", "q1", "q2", "q3",
+ "q8", "q9", "q10", "q11",
+ "q12", "q13", "q14", "q15"
+ );
+#endif
+}
+
+template <>
+inline void ProcessL<2, 16>(int32_t* const out,
+ int count,
+ const int32_t* coefsP,
+ const int32_t* coefsN,
+ const int16_t* sP,
+ const int16_t* sN,
+ const int32_t* const volumeLR)
+{
+#ifdef USE_INTRINSIC
+ ProcessNeonIntrinsic<2, 16, true>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+ 0 /*lerpP*/, NULL /*coefsP1*/, NULL /*coefsN1*/);
+#else
+ const int CHANNELS = 2; // template specialization does not preserve params
+ const int STRIDE = 16;
+ sP -= CHANNELS*((STRIDE>>1)-1);
+ asm (
+ "veor q0, q0, q0 \n"// result, initialize to 0
+ "veor q4, q4, q4 \n"// result, initialize to 0
+
+ "1: \n"
+
+ "vld2.16 {q2, q3}, [%[sP]] \n"// load 8 16-bits stereo frames
+ "vld2.16 {q5, q6}, [%[sN]]! \n"// load 8 16-bits stereo frames
+ "vld1.32 {q8, q9}, [%[coefsP0]:128]! \n"// load 8 32-bits coefs
+ "vld1.32 {q10, q11}, [%[coefsN0]:128]! \n"// load 8 32-bits coefs
+
+ "vrev64.16 q2, q2 \n"// reverse 8 samples of positive left
+ "vrev64.16 q3, q3 \n"// reverse 8 samples of positive right
+
+ "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits
+ "vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits
+
+ "vshll.s16 q14, d10, #15 \n"// extend samples to 31 bits
+ "vshll.s16 q15, d11, #15 \n"// extend samples to 31 bits
+
+ "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by coef
+ "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by coef
+ "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by coef
+ "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by coef
+
+ "vadd.s32 q0, q0, q12 \n"// accumulate result
+ "vadd.s32 q13, q13, q14 \n"// accumulate result
+ "vadd.s32 q0, q0, q15 \n"// accumulate result
+ "vadd.s32 q0, q0, q13 \n"// accumulate result
+
+ "vshll.s16 q12, d6, #15 \n"// extend samples to 31 bits
+ "vshll.s16 q13, d7, #15 \n"// extend samples to 31 bits
+
+ "vshll.s16 q14, d12, #15 \n"// extend samples to 31 bits
+ "vshll.s16 q15, d13, #15 \n"// extend samples to 31 bits
+
+ "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by coef
+ "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by coef
+ "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by coef
+ "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by coef
+
+ "vadd.s32 q4, q4, q12 \n"// accumulate result
+ "vadd.s32 q13, q13, q14 \n"// accumulate result
+ "vadd.s32 q4, q4, q15 \n"// accumulate result
+ "vadd.s32 q4, q4, q13 \n"// accumulate result
+
+ "subs %[count], %[count], #8 \n"// update loop counter
+ "sub %[sP], %[sP], #32 \n"// move pointer to next set of samples
+
+ "bne 1b \n"// loop
+
+ ASSEMBLY_ACCUMULATE_STEREO
+
+ : [out] "=Uv" (out[0]),
+ [count] "+r" (count),
+ [coefsP0] "+r" (coefsP),
+ [coefsN0] "+r" (coefsN),
+ [sP] "+r" (sP),
+ [sN] "+r" (sN)
+ : [vLR] "r" (volumeLR)
+ : "cc", "memory",
+ "q0", "q1", "q2", "q3",
+ "q4", "q5", "q6",
+ "q8", "q9", "q10", "q11",
+ "q12", "q13", "q14", "q15"
+ );
+#endif
+}
+
+template <>
+inline void Process<1, 16>(int32_t* const out,
+ int count,
+ const int32_t* coefsP,
+ const int32_t* coefsN,
+ const int32_t* coefsP1,
+ const int32_t* coefsN1,
+ const int16_t* sP,
+ const int16_t* sN,
+ uint32_t lerpP,
+ const int32_t* const volumeLR)
+{
+#ifdef USE_INTRINSIC
+ ProcessNeonIntrinsic<1, 16, false>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+ lerpP, coefsP1, coefsN1);
+#else
+ const int CHANNELS = 1; // template specialization does not preserve params
+ const int STRIDE = 16;
+ sP -= CHANNELS*((STRIDE>>1)-1);
+ asm (
+ "vmov.32 d2[0], %[lerpP] \n"// load the positive phase
+ "veor q0, q0, q0 \n"// result, initialize to 0
+
+ "1: \n"
+
+ "vld1.16 {q2}, [%[sP]] \n"// load 8 16-bits mono samples
+ "vld1.16 {q3}, [%[sN]]! \n"// load 8 16-bits mono samples
+ "vld1.32 {q8, q9}, [%[coefsP0]:128]! \n"// load 8 32-bits coefs
+ "vld1.32 {q12, q13}, [%[coefsP1]:128]! \n"// load 8 32-bits coefs
+ "vld1.32 {q10, q11}, [%[coefsN1]:128]! \n"// load 8 32-bits coefs
+ "vld1.32 {q14, q15}, [%[coefsN0]:128]! \n"// load 8 32-bits coefs
+
+ "vsub.s32 q12, q12, q8 \n"// interpolate (step1)
+ "vsub.s32 q13, q13, q9 \n"// interpolate (step1)
+ "vsub.s32 q14, q14, q10 \n"// interpolate (step1)
+ "vsub.s32 q15, q15, q11 \n"// interpolate (step1)
+
+ "vqrdmulh.s32 q12, q12, d2[0] \n"// interpolate (step2)
+ "vqrdmulh.s32 q13, q13, d2[0] \n"// interpolate (step2)
+ "vqrdmulh.s32 q14, q14, d2[0] \n"// interpolate (step2)
+ "vqrdmulh.s32 q15, q15, d2[0] \n"// interpolate (step2)
+
+ "vadd.s32 q8, q8, q12 \n"// interpolate (step3)
+ "vadd.s32 q9, q9, q13 \n"// interpolate (step3)
+ "vadd.s32 q10, q10, q14 \n"// interpolate (step3)
+ "vadd.s32 q11, q11, q15 \n"// interpolate (step3)
+
+ "vrev64.16 q2, q2 \n"// reverse 8 samples of the positive side
+
+ "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits
+ "vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits
+
+ "vshll.s16 q14, d6, #15 \n"// extend samples to 31 bits
+ "vshll.s16 q15, d7, #15 \n"// extend samples to 31 bits
+
+ "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by interpolated coef
+ "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef
+ "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef
+ "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by interpolated coef
+
+ "vadd.s32 q0, q0, q12 \n"// accumulate result
+ "vadd.s32 q13, q13, q14 \n"// accumulate result
+ "vadd.s32 q0, q0, q15 \n"// accumulate result
+ "vadd.s32 q0, q0, q13 \n"// accumulate result
+
+ "sub %[sP], %[sP], #16 \n"// move pointer to next set of samples
+ "subs %[count], %[count], #8 \n"// update loop counter
+
+ "bne 1b \n"// loop
+
+ ASSEMBLY_ACCUMULATE_MONO
+
+ : [out] "=Uv" (out[0]),
+ [count] "+r" (count),
+ [coefsP0] "+r" (coefsP),
+ [coefsN0] "+r" (coefsN),
+ [coefsP1] "+r" (coefsP1),
+ [coefsN1] "+r" (coefsN1),
+ [sP] "+r" (sP),
+ [sN] "+r" (sN)
+ : [lerpP] "r" (lerpP),
+ [vLR] "r" (volumeLR)
+ : "cc", "memory",
+ "q0", "q1", "q2", "q3",
+ "q8", "q9", "q10", "q11",
+ "q12", "q13", "q14", "q15"
+ );
+#endif
+}
+
+template <>
+inline void Process<2, 16>(int32_t* const out,
+ int count,
+ const int32_t* coefsP,
+ const int32_t* coefsN,
+ const int32_t* coefsP1,
+ const int32_t* coefsN1,
+ const int16_t* sP,
+ const int16_t* sN,
+ uint32_t lerpP,
+ const int32_t* const volumeLR)
+{
+#ifdef USE_INTRINSIC
+ ProcessNeonIntrinsic<2, 16, false>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+ lerpP, coefsP1, coefsN1);
+#else
+ const int CHANNELS = 2; // template specialization does not preserve params
+ const int STRIDE = 16;
+ sP -= CHANNELS*((STRIDE>>1)-1);
+ asm (
+ "vmov.32 d2[0], %[lerpP] \n"// load the positive phase
+ "veor q0, q0, q0 \n"// result, initialize to 0
+ "veor q4, q4, q4 \n"// result, initialize to 0
+
+ "1: \n"
+
+ "vld2.16 {q2, q3}, [%[sP]] \n"// load 8 16-bits stereo frames
+ "vld2.16 {q5, q6}, [%[sN]]! \n"// load 8 16-bits stereo frames
+ "vld1.32 {q8, q9}, [%[coefsP0]:128]! \n"// load 8 32-bits coefs
+ "vld1.32 {q12, q13}, [%[coefsP1]:128]! \n"// load 8 32-bits coefs
+ "vld1.32 {q10, q11}, [%[coefsN1]:128]! \n"// load 8 32-bits coefs
+ "vld1.32 {q14, q15}, [%[coefsN0]:128]! \n"// load 8 32-bits coefs
+
+ "vsub.s32 q12, q12, q8 \n"// interpolate (step1)
+ "vsub.s32 q13, q13, q9 \n"// interpolate (step1)
+ "vsub.s32 q14, q14, q10 \n"// interpolate (step1)
+ "vsub.s32 q15, q15, q11 \n"// interpolate (step1)
+
+ "vqrdmulh.s32 q12, q12, d2[0] \n"// interpolate (step2)
+ "vqrdmulh.s32 q13, q13, d2[0] \n"// interpolate (step2)
+ "vqrdmulh.s32 q14, q14, d2[0] \n"// interpolate (step2)
+ "vqrdmulh.s32 q15, q15, d2[0] \n"// interpolate (step2)
+
+ "vadd.s32 q8, q8, q12 \n"// interpolate (step3)
+ "vadd.s32 q9, q9, q13 \n"// interpolate (step3)
+ "vadd.s32 q10, q10, q14 \n"// interpolate (step3)
+ "vadd.s32 q11, q11, q15 \n"// interpolate (step3)
+
+ "vrev64.16 q2, q2 \n"// reverse 8 samples of positive left
+ "vrev64.16 q3, q3 \n"// reverse 8 samples of positive right
+
+ "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits
+ "vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits
+
+ "vshll.s16 q14, d10, #15 \n"// extend samples to 31 bits
+ "vshll.s16 q15, d11, #15 \n"// extend samples to 31 bits
+
+ "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by interpolated coef
+ "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef
+ "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef
+ "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by interpolated coef
+
+ "vadd.s32 q0, q0, q12 \n"// accumulate result
+ "vadd.s32 q13, q13, q14 \n"// accumulate result
+ "vadd.s32 q0, q0, q15 \n"// accumulate result
+ "vadd.s32 q0, q0, q13 \n"// accumulate result
+
+ "vshll.s16 q12, d6, #15 \n"// extend samples to 31 bits
+ "vshll.s16 q13, d7, #15 \n"// extend samples to 31 bits
+
+ "vshll.s16 q14, d12, #15 \n"// extend samples to 31 bits
+ "vshll.s16 q15, d13, #15 \n"// extend samples to 31 bits
+
+ "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by interpolated coef
+ "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef
+ "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef
+ "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by interpolated coef
+
+ "vadd.s32 q4, q4, q12 \n"// accumulate result
+ "vadd.s32 q13, q13, q14 \n"// accumulate result
+ "vadd.s32 q4, q4, q15 \n"// accumulate result
+ "vadd.s32 q4, q4, q13 \n"// accumulate result
+
+ "subs %[count], %[count], #8 \n"// update loop counter
+ "sub %[sP], %[sP], #32 \n"// move pointer to next set of samples
+
+ "bne 1b \n"// loop
+
+ ASSEMBLY_ACCUMULATE_STEREO
+
+ : [out] "=Uv" (out[0]),
+ [count] "+r" (count),
+ [coefsP0] "+r" (coefsP),
+ [coefsN0] "+r" (coefsN),
+ [coefsP1] "+r" (coefsP1),
+ [coefsN1] "+r" (coefsN1),
+ [sP] "+r" (sP),
+ [sN] "+r" (sN)
+ : [lerpP] "r" (lerpP),
+ [vLR] "r" (volumeLR)
+ : "cc", "memory",
+ "q0", "q1", "q2", "q3",
+ "q4", "q5", "q6",
+ "q8", "q9", "q10", "q11",
+ "q12", "q13", "q14", "q15"
+ );
+#endif
+}
+
+template<>
+inline void ProcessL<1, 16>(float* const out,
+ int count,
+ const float* coefsP,
+ const float* coefsN,
+ const float* sP,
+ const float* sN,
+ const float* const volumeLR)
+{
+ ProcessNeonIntrinsic<1, 16, true>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+ 0 /*lerpP*/, NULL /*coefsP1*/, NULL /*coefsN1*/);
+}
+
+template<>
+inline void ProcessL<2, 16>(float* const out,
+ int count,
+ const float* coefsP,
+ const float* coefsN,
+ const float* sP,
+ const float* sN,
+ const float* const volumeLR)
+{
+ ProcessNeonIntrinsic<2, 16, true>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+ 0 /*lerpP*/, NULL /*coefsP1*/, NULL /*coefsN1*/);
+}
+
+template<>
+inline void Process<1, 16>(float* const out,
+ int count,
+ const float* coefsP,
+ const float* coefsN,
+ const float* coefsP1,
+ const float* coefsN1,
+ const float* sP,
+ const float* sN,
+ float lerpP,
+ const float* const volumeLR)
+{
+ ProcessNeonIntrinsic<1, 16, false>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+ lerpP, coefsP1, coefsN1);
+}
+
+template<>
+inline void Process<2, 16>(float* const out,
+ int count,
+ const float* coefsP,
+ const float* coefsN,
+ const float* coefsP1,
+ const float* coefsN1,
+ const float* sP,
+ const float* sN,
+ float lerpP,
+ const float* const volumeLR)
+{
+ ProcessNeonIntrinsic<2, 16, false>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+ lerpP, coefsP1, coefsN1);
+}
+
+#endif //USE_NEON
+
+} // namespace android
+
+#endif /*ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_NEON_H*/
diff --git a/media/libaudioprocessing/AudioResamplerFirProcessSSE.h b/media/libaudioprocessing/AudioResamplerFirProcessSSE.h
new file mode 100644
index 0000000..30233b5
--- /dev/null
+++ b/media/libaudioprocessing/AudioResamplerFirProcessSSE.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_SSE_H
+#define ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_SSE_H
+
+namespace android {
+
+// depends on AudioResamplerFirOps.h, AudioResamplerFirProcess.h
+
+#if USE_SSE
+
+#define TO_STRING2(x) #x
+#define TO_STRING(x) TO_STRING2(x)
+// uncomment to print GCC version, may be relevant for intrinsic optimizations
+/* #pragma message ("GCC version: " TO_STRING(__GNUC__) \
+ "." TO_STRING(__GNUC_MINOR__) \
+ "." TO_STRING(__GNUC_PATCHLEVEL__)) */
+
+//
+// SSEx specializations are enabled for Process() and ProcessL() in AudioResamplerFirProcess.h
+//
+
+template <int CHANNELS, int STRIDE, bool FIXED>
+static inline void ProcessSSEIntrinsic(float* out,
+ int count,
+ const float* coefsP,
+ const float* coefsN,
+ const float* sP,
+ const float* sN,
+ const float* volumeLR,
+ float lerpP,
+ const float* coefsP1,
+ const float* coefsN1)
+{
+ ALOG_ASSERT(count > 0 && (count & 7) == 0); // multiple of 8
+ static_assert(CHANNELS == 1 || CHANNELS == 2, "CHANNELS must be 1 or 2");
+
+ sP -= CHANNELS*(4-1); // adjust sP for a loop iteration of four
+
+ __m128 interp;
+ if (!FIXED) {
+ interp = _mm_set1_ps(lerpP);
+ }
+
+ __m128 accL, accR;
+ accL = _mm_setzero_ps();
+ if (CHANNELS == 2) {
+ accR = _mm_setzero_ps();
+ }
+
+ do {
+ __m128 posCoef = _mm_load_ps(coefsP);
+ __m128 negCoef = _mm_load_ps(coefsN);
+ coefsP += 4;
+ coefsN += 4;
+
+ if (!FIXED) { // interpolate
+ __m128 posCoef1 = _mm_load_ps(coefsP1);
+ __m128 negCoef1 = _mm_load_ps(coefsN1);
+ coefsP1 += 4;
+ coefsN1 += 4;
+
+ // Calculate the final coefficient for interpolation
+ // posCoef = interp * (posCoef1 - posCoef) + posCoef
+ // negCoef = interp * (negCoef - negCoef1) + negCoef1
+ posCoef1 = _mm_sub_ps(posCoef1, posCoef);
+ negCoef = _mm_sub_ps(negCoef, negCoef1);
+
+ posCoef1 = _mm_mul_ps(posCoef1, interp);
+ negCoef = _mm_mul_ps(negCoef, interp);
+
+ posCoef = _mm_add_ps(posCoef1, posCoef);
+ negCoef = _mm_add_ps(negCoef, negCoef1);
+ }
+ switch (CHANNELS) {
+ case 1: {
+ __m128 posSamp = _mm_loadu_ps(sP);
+ __m128 negSamp = _mm_loadu_ps(sN);
+ sP -= 4;
+ sN += 4;
+
+ posSamp = _mm_shuffle_ps(posSamp, posSamp, 0x1B);
+ posSamp = _mm_mul_ps(posSamp, posCoef);
+ negSamp = _mm_mul_ps(negSamp, negCoef);
+
+ accL = _mm_add_ps(accL, posSamp);
+ accL = _mm_add_ps(accL, negSamp);
+ } break;
+ case 2: {
+ __m128 posSamp0 = _mm_loadu_ps(sP);
+ __m128 posSamp1 = _mm_loadu_ps(sP+4);
+ __m128 negSamp0 = _mm_loadu_ps(sN);
+ __m128 negSamp1 = _mm_loadu_ps(sN+4);
+ sP -= 8;
+ sN += 8;
+
+ // deinterleave everything and reverse the positives
+ __m128 posSampL = _mm_shuffle_ps(posSamp1, posSamp0, 0x22);
+ __m128 posSampR = _mm_shuffle_ps(posSamp1, posSamp0, 0x77);
+ __m128 negSampL = _mm_shuffle_ps(negSamp0, negSamp1, 0x88);
+ __m128 negSampR = _mm_shuffle_ps(negSamp0, negSamp1, 0xDD);
+
+ posSampL = _mm_mul_ps(posSampL, posCoef);
+ posSampR = _mm_mul_ps(posSampR, posCoef);
+ negSampL = _mm_mul_ps(negSampL, negCoef);
+ negSampR = _mm_mul_ps(negSampR, negCoef);
+
+ accL = _mm_add_ps(accL, posSampL);
+ accR = _mm_add_ps(accR, posSampR);
+ accL = _mm_add_ps(accL, negSampL);
+ accR = _mm_add_ps(accR, negSampR);
+ } break;
+ }
+ } while (count -= 4);
+
+ // multiply by volume and save
+ __m128 vLR = _mm_setzero_ps();
+ __m128 outSamp;
+ vLR = _mm_loadl_pi(vLR, reinterpret_cast<const __m64*>(volumeLR));
+ outSamp = _mm_loadl_pi(vLR, reinterpret_cast<__m64*>(out));
+
+ // combine and funnel down accumulator
+ __m128 outAccum = _mm_setzero_ps();
+ if (CHANNELS == 1) {
+ // duplicate accL to both L and R
+ outAccum = _mm_add_ps(accL, _mm_movehl_ps(accL, accL));
+ outAccum = _mm_add_ps(outAccum, _mm_shuffle_ps(outAccum, outAccum, 0x11));
+ } else if (CHANNELS == 2) {
+ // accR contains R, fold in
+ outAccum = _mm_hadd_ps(accL, accR);
+ outAccum = _mm_hadd_ps(outAccum, outAccum);
+ }
+
+ outAccum = _mm_mul_ps(outAccum, vLR);
+ outSamp = _mm_add_ps(outSamp, outAccum);
+ _mm_storel_pi(reinterpret_cast<__m64*>(out), outSamp);
+}
+
+template<>
+inline void ProcessL<1, 16>(float* const out,
+ int count,
+ const float* coefsP,
+ const float* coefsN,
+ const float* sP,
+ const float* sN,
+ const float* const volumeLR)
+{
+ ProcessSSEIntrinsic<1, 16, true>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+ 0 /*lerpP*/, NULL /*coefsP1*/, NULL /*coefsN1*/);
+}
+
+template<>
+inline void ProcessL<2, 16>(float* const out,
+ int count,
+ const float* coefsP,
+ const float* coefsN,
+ const float* sP,
+ const float* sN,
+ const float* const volumeLR)
+{
+ ProcessSSEIntrinsic<2, 16, true>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+ 0 /*lerpP*/, NULL /*coefsP1*/, NULL /*coefsN1*/);
+}
+
+template<>
+inline void Process<1, 16>(float* const out,
+ int count,
+ const float* coefsP,
+ const float* coefsN,
+ const float* coefsP1,
+ const float* coefsN1,
+ const float* sP,
+ const float* sN,
+ float lerpP,
+ const float* const volumeLR)
+{
+ ProcessSSEIntrinsic<1, 16, false>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+ lerpP, coefsP1, coefsN1);
+}
+
+template<>
+inline void Process<2, 16>(float* const out,
+ int count,
+ const float* coefsP,
+ const float* coefsN,
+ const float* coefsP1,
+ const float* coefsN1,
+ const float* sP,
+ const float* sN,
+ float lerpP,
+ const float* const volumeLR)
+{
+ ProcessSSEIntrinsic<2, 16, false>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+ lerpP, coefsP1, coefsN1);
+}
+
+#endif //USE_SSE
+
+} // namespace android
+
+#endif /*ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_SSE_H*/
diff --git a/services/audioflinger/AudioResamplerSinc.cpp b/media/libaudioprocessing/AudioResamplerSinc.cpp
similarity index 100%
rename from services/audioflinger/AudioResamplerSinc.cpp
rename to media/libaudioprocessing/AudioResamplerSinc.cpp
diff --git a/media/libaudioprocessing/AudioResamplerSinc.h b/media/libaudioprocessing/AudioResamplerSinc.h
new file mode 100644
index 0000000..f6dcf91
--- /dev/null
+++ b/media/libaudioprocessing/AudioResamplerSinc.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2007 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_RESAMPLER_SINC_H
+#define ANDROID_AUDIO_RESAMPLER_SINC_H
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <android/log.h>
+
+#include <media/AudioResampler.h>
+
+namespace android {
+
+
+typedef const int32_t * (*readCoefficientsFn)(bool upDownSample);
+typedef int32_t (*readResampleFirNumCoeffFn)();
+typedef int32_t (*readResampleFirLerpIntBitsFn)();
+
+// ----------------------------------------------------------------------------
+
+class AudioResamplerSinc : public AudioResampler {
+public:
+ AudioResamplerSinc(int inChannelCount, int32_t sampleRate,
+ src_quality quality = HIGH_QUALITY);
+
+ virtual ~AudioResamplerSinc();
+
+ virtual size_t resample(int32_t* out, size_t outFrameCount,
+ AudioBufferProvider* provider);
+private:
+ void init();
+
+ virtual void setVolume(float left, float right);
+
+ template<int CHANNELS>
+ size_t resample(int32_t* out, size_t outFrameCount,
+ AudioBufferProvider* provider);
+
+ template<int CHANNELS>
+ inline void filterCoefficient(
+ int32_t* out, uint32_t phase, const int16_t *samples, uint32_t vRL);
+
+ template<int CHANNELS>
+ inline void interpolate(
+ int32_t& l, int32_t& r,
+ const int32_t* coefs, size_t offset,
+ int32_t lerp, const int16_t* samples);
+
+ template<int CHANNELS>
+ inline void read(int16_t*& impulse, uint32_t& phaseFraction,
+ const int16_t* in, size_t inputIndex);
+
+ int16_t *mState;
+ int16_t *mImpulse;
+ int16_t *mRingFull;
+ int32_t mVolumeSIMD[2];
+
+ const int32_t * mFirCoefs;
+ static const uint32_t mFirCoefsDown[];
+ static const uint32_t mFirCoefsUp[];
+
+ // ----------------------------------------------------------------------------
+ static const int32_t RESAMPLE_FIR_NUM_COEF = 8;
+ static const int32_t RESAMPLE_FIR_LERP_INT_BITS = 7;
+
+ struct Constants {
+ int coefsBits;
+ int cShift;
+ uint32_t cMask;
+ int pShift;
+ uint32_t pMask;
+ // number of zero-crossing on each side
+ unsigned int halfNumCoefs;
+ };
+
+ static Constants highQualityConstants;
+ static Constants veryHighQualityConstants;
+ const Constants *mConstants; // points to appropriate set of coefficient parameters
+
+ static void init_routine();
+};
+
+// ----------------------------------------------------------------------------
+} // namespace android
+
+#endif /*ANDROID_AUDIO_RESAMPLER_SINC_H*/
diff --git a/services/audioflinger/AudioResamplerSincDown.h b/media/libaudioprocessing/AudioResamplerSincDown.h
similarity index 100%
rename from services/audioflinger/AudioResamplerSincDown.h
rename to media/libaudioprocessing/AudioResamplerSincDown.h
diff --git a/services/audioflinger/AudioResamplerSincUp.h b/media/libaudioprocessing/AudioResamplerSincUp.h
similarity index 100%
rename from services/audioflinger/AudioResamplerSincUp.h
rename to media/libaudioprocessing/AudioResamplerSincUp.h
diff --git a/media/libaudioprocessing/BufferProviders.cpp b/media/libaudioprocessing/BufferProviders.cpp
new file mode 100644
index 0000000..862fef6
--- /dev/null
+++ b/media/libaudioprocessing/BufferProviders.cpp
@@ -0,0 +1,596 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "BufferProvider"
+//#define LOG_NDEBUG 0
+
+#include <audio_utils/primitives.h>
+#include <audio_utils/format.h>
+#include <external/sonic/sonic.h>
+#include <media/audiohal/EffectBufferHalInterface.h>
+#include <media/audiohal/EffectHalInterface.h>
+#include <media/audiohal/EffectsFactoryHalInterface.h>
+#include <media/AudioResamplerPublic.h>
+#include <media/BufferProviders.h>
+#include <system/audio_effects/effect_downmix.h>
+#include <utils/Log.h>
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
+#endif
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+template <typename T>
+static inline T min(const T& a, const T& b)
+{
+ return a < b ? a : b;
+}
+
+CopyBufferProvider::CopyBufferProvider(size_t inputFrameSize,
+ size_t outputFrameSize, size_t bufferFrameCount) :
+ mInputFrameSize(inputFrameSize),
+ mOutputFrameSize(outputFrameSize),
+ mLocalBufferFrameCount(bufferFrameCount),
+ mLocalBufferData(NULL),
+ mConsumed(0)
+{
+ ALOGV("CopyBufferProvider(%p)(%zu, %zu, %zu)", this,
+ inputFrameSize, outputFrameSize, bufferFrameCount);
+ LOG_ALWAYS_FATAL_IF(inputFrameSize < outputFrameSize && bufferFrameCount == 0,
+ "Requires local buffer if inputFrameSize(%zu) < outputFrameSize(%zu)",
+ inputFrameSize, outputFrameSize);
+ if (mLocalBufferFrameCount) {
+ (void)posix_memalign(&mLocalBufferData, 32, mLocalBufferFrameCount * mOutputFrameSize);
+ }
+ mBuffer.frameCount = 0;
+}
+
+CopyBufferProvider::~CopyBufferProvider()
+{
+ ALOGV("~CopyBufferProvider(%p)", this);
+ if (mBuffer.frameCount != 0) {
+ mTrackBufferProvider->releaseBuffer(&mBuffer);
+ }
+ free(mLocalBufferData);
+}
+
+status_t CopyBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer)
+{
+ //ALOGV("CopyBufferProvider(%p)::getNextBuffer(%p (%zu))",
+ // this, pBuffer, pBuffer->frameCount);
+ if (mLocalBufferFrameCount == 0) {
+ status_t res = mTrackBufferProvider->getNextBuffer(pBuffer);
+ if (res == OK) {
+ copyFrames(pBuffer->raw, pBuffer->raw, pBuffer->frameCount);
+ }
+ return res;
+ }
+ if (mBuffer.frameCount == 0) {
+ mBuffer.frameCount = pBuffer->frameCount;
+ status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer);
+ // At one time an upstream buffer provider had
+ // res == OK and mBuffer.frameCount == 0, doesn't seem to happen now 7/18/2014.
+ //
+ // By API spec, if res != OK, then mBuffer.frameCount == 0.
+ // but there may be improper implementations.
+ ALOG_ASSERT(res == OK || mBuffer.frameCount == 0);
+ if (res != OK || mBuffer.frameCount == 0) { // not needed by API spec, but to be safe.
+ pBuffer->raw = NULL;
+ pBuffer->frameCount = 0;
+ return res;
+ }
+ mConsumed = 0;
+ }
+ ALOG_ASSERT(mConsumed < mBuffer.frameCount);
+ size_t count = min(mLocalBufferFrameCount, mBuffer.frameCount - mConsumed);
+ count = min(count, pBuffer->frameCount);
+ pBuffer->raw = mLocalBufferData;
+ pBuffer->frameCount = count;
+ copyFrames(pBuffer->raw, (uint8_t*)mBuffer.raw + mConsumed * mInputFrameSize,
+ pBuffer->frameCount);
+ return OK;
+}
+
+void CopyBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer)
+{
+ //ALOGV("CopyBufferProvider(%p)::releaseBuffer(%p(%zu))",
+ // this, pBuffer, pBuffer->frameCount);
+ if (mLocalBufferFrameCount == 0) {
+ mTrackBufferProvider->releaseBuffer(pBuffer);
+ return;
+ }
+ // LOG_ALWAYS_FATAL_IF(pBuffer->frameCount == 0, "Invalid framecount");
+ mConsumed += pBuffer->frameCount; // TODO: update for efficiency to reuse existing content
+ if (mConsumed != 0 && mConsumed >= mBuffer.frameCount) {
+ mTrackBufferProvider->releaseBuffer(&mBuffer);
+ ALOG_ASSERT(mBuffer.frameCount == 0);
+ }
+ pBuffer->raw = NULL;
+ pBuffer->frameCount = 0;
+}
+
+void CopyBufferProvider::reset()
+{
+ if (mBuffer.frameCount != 0) {
+ mTrackBufferProvider->releaseBuffer(&mBuffer);
+ }
+ mConsumed = 0;
+}
+
+DownmixerBufferProvider::DownmixerBufferProvider(
+ audio_channel_mask_t inputChannelMask,
+ audio_channel_mask_t outputChannelMask, audio_format_t format,
+ uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount) :
+ CopyBufferProvider(
+ audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(inputChannelMask),
+ audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(outputChannelMask),
+ bufferFrameCount) // set bufferFrameCount to 0 to do in-place
+{
+ ALOGV("DownmixerBufferProvider(%p)(%#x, %#x, %#x %u %d %d)",
+ this, inputChannelMask, outputChannelMask, format,
+ sampleRate, sessionId, (int)bufferFrameCount);
+ if (!sIsMultichannelCapable) {
+ ALOGE("DownmixerBufferProvider() error: not multichannel capable");
+ return;
+ }
+ mEffectsFactory = EffectsFactoryHalInterface::create();
+ if (mEffectsFactory == 0) {
+ ALOGE("DownmixerBufferProvider() error: could not obtain the effects factory");
+ return;
+ }
+ if (mEffectsFactory->createEffect(&sDwnmFxDesc.uuid,
+ sessionId,
+ SESSION_ID_INVALID_AND_IGNORED,
+ &mDownmixInterface) != 0) {
+ ALOGE("DownmixerBufferProvider() error creating downmixer effect");
+ mDownmixInterface.clear();
+ mEffectsFactory.clear();
+ return;
+ }
+ // channel input configuration will be overridden per-track
+ mDownmixConfig.inputCfg.channels = inputChannelMask; // FIXME: Should be bits
+ mDownmixConfig.outputCfg.channels = outputChannelMask; // FIXME: should be bits
+ mDownmixConfig.inputCfg.format = format;
+ mDownmixConfig.outputCfg.format = format;
+ mDownmixConfig.inputCfg.samplingRate = sampleRate;
+ mDownmixConfig.outputCfg.samplingRate = sampleRate;
+ mDownmixConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
+ mDownmixConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE;
+ // input and output buffer provider, and frame count will not be used as the downmix effect
+ // process() function is called directly (see DownmixerBufferProvider::getNextBuffer())
+ mDownmixConfig.inputCfg.mask = EFFECT_CONFIG_SMP_RATE | EFFECT_CONFIG_CHANNELS |
+ EFFECT_CONFIG_FORMAT | EFFECT_CONFIG_ACC_MODE;
+ mDownmixConfig.outputCfg.mask = mDownmixConfig.inputCfg.mask;
+
+ mInFrameSize =
+ audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(inputChannelMask);
+ mOutFrameSize =
+ audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(outputChannelMask);
+ status_t status;
+ status = EffectBufferHalInterface::mirror(
+ nullptr, mInFrameSize * bufferFrameCount, &mInBuffer);
+ if (status != 0) {
+ ALOGE("DownmixerBufferProvider() error %d while creating input buffer", status);
+ mDownmixInterface.clear();
+ mEffectsFactory.clear();
+ return;
+ }
+ status = EffectBufferHalInterface::mirror(
+ nullptr, mOutFrameSize * bufferFrameCount, &mOutBuffer);
+ if (status != 0) {
+ ALOGE("DownmixerBufferProvider() error %d while creating output buffer", status);
+ mInBuffer.clear();
+ mDownmixInterface.clear();
+ mEffectsFactory.clear();
+ return;
+ }
+ mDownmixInterface->setInBuffer(mInBuffer);
+ mDownmixInterface->setOutBuffer(mOutBuffer);
+
+ int cmdStatus;
+ uint32_t replySize = sizeof(int);
+
+ // Configure downmixer
+ status = mDownmixInterface->command(
+ EFFECT_CMD_SET_CONFIG /*cmdCode*/, sizeof(effect_config_t) /*cmdSize*/,
+ &mDownmixConfig /*pCmdData*/,
+ &replySize, &cmdStatus /*pReplyData*/);
+ if (status != 0 || cmdStatus != 0) {
+ ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while configuring downmixer",
+ status, cmdStatus);
+ mOutBuffer.clear();
+ mInBuffer.clear();
+ mDownmixInterface.clear();
+ mEffectsFactory.clear();
+ return;
+ }
+
+ // Enable downmixer
+ replySize = sizeof(int);
+ status = mDownmixInterface->command(
+ EFFECT_CMD_ENABLE /*cmdCode*/, 0 /*cmdSize*/, NULL /*pCmdData*/,
+ &replySize, &cmdStatus /*pReplyData*/);
+ if (status != 0 || cmdStatus != 0) {
+ ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while enabling downmixer",
+ status, cmdStatus);
+ mOutBuffer.clear();
+ mInBuffer.clear();
+ mDownmixInterface.clear();
+ mEffectsFactory.clear();
+ return;
+ }
+
+ // Set downmix type
+ // parameter size rounded for padding on 32bit boundary
+ const int psizePadded = ((sizeof(downmix_params_t) - 1)/sizeof(int) + 1) * sizeof(int);
+ const int downmixParamSize =
+ sizeof(effect_param_t) + psizePadded + sizeof(downmix_type_t);
+ effect_param_t * const param = (effect_param_t *) malloc(downmixParamSize);
+ param->psize = sizeof(downmix_params_t);
+ const downmix_params_t downmixParam = DOWNMIX_PARAM_TYPE;
+ memcpy(param->data, &downmixParam, param->psize);
+ const downmix_type_t downmixType = DOWNMIX_TYPE_FOLD;
+ param->vsize = sizeof(downmix_type_t);
+ memcpy(param->data + psizePadded, &downmixType, param->vsize);
+ replySize = sizeof(int);
+ status = mDownmixInterface->command(
+ EFFECT_CMD_SET_PARAM /* cmdCode */, downmixParamSize /* cmdSize */,
+ param /*pCmdData*/, &replySize, &cmdStatus /*pReplyData*/);
+ free(param);
+ if (status != 0 || cmdStatus != 0) {
+ ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while setting downmix type",
+ status, cmdStatus);
+ mOutBuffer.clear();
+ mInBuffer.clear();
+ mDownmixInterface.clear();
+ mEffectsFactory.clear();
+ return;
+ }
+ ALOGV("DownmixerBufferProvider() downmix type set to %d", (int) downmixType);
+}
+
+DownmixerBufferProvider::~DownmixerBufferProvider()
+{
+ ALOGV("~DownmixerBufferProvider (%p)", this);
+ if (mDownmixInterface != 0) {
+ mDownmixInterface->close();
+ }
+}
+
+void DownmixerBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+ mInBuffer->setExternalData(const_cast<void*>(src));
+ mInBuffer->setFrameCount(frames);
+ mInBuffer->update(mInFrameSize * frames);
+ mOutBuffer->setFrameCount(frames);
+ mOutBuffer->setExternalData(dst);
+ if (dst != src) {
+ // Downmix may be accumulating, need to populate the output buffer
+ // with the dst data.
+ mOutBuffer->update(mOutFrameSize * frames);
+ }
+ // may be in-place if src == dst.
+ status_t res = mDownmixInterface->process();
+ if (res == OK) {
+ mOutBuffer->commit(mOutFrameSize * frames);
+ } else {
+ ALOGE("DownmixBufferProvider error %d", res);
+ }
+}
+
+/* call once in a pthread_once handler. */
+/*static*/ status_t DownmixerBufferProvider::init()
+{
+ // find multichannel downmix effect if we have to play multichannel content
+ sp<EffectsFactoryHalInterface> effectsFactory = EffectsFactoryHalInterface::create();
+ if (effectsFactory == 0) {
+ ALOGE("AudioMixer() error: could not obtain the effects factory");
+ return NO_INIT;
+ }
+ uint32_t numEffects = 0;
+ int ret = effectsFactory->queryNumberEffects(&numEffects);
+ if (ret != 0) {
+ ALOGE("AudioMixer() error %d querying number of effects", ret);
+ return NO_INIT;
+ }
+ ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects);
+
+ for (uint32_t i = 0 ; i < numEffects ; i++) {
+ if (effectsFactory->getDescriptor(i, &sDwnmFxDesc) == 0) {
+ ALOGV("effect %d is called %s", i, sDwnmFxDesc.name);
+ if (memcmp(&sDwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
+ ALOGI("found effect \"%s\" from %s",
+ sDwnmFxDesc.name, sDwnmFxDesc.implementor);
+ sIsMultichannelCapable = true;
+ break;
+ }
+ }
+ }
+ ALOGW_IF(!sIsMultichannelCapable, "unable to find downmix effect");
+ return NO_INIT;
+}
+
+/*static*/ bool DownmixerBufferProvider::sIsMultichannelCapable = false;
+/*static*/ effect_descriptor_t DownmixerBufferProvider::sDwnmFxDesc;
+
+RemixBufferProvider::RemixBufferProvider(audio_channel_mask_t inputChannelMask,
+ audio_channel_mask_t outputChannelMask, audio_format_t format,
+ size_t bufferFrameCount) :
+ CopyBufferProvider(
+ audio_bytes_per_sample(format)
+ * audio_channel_count_from_out_mask(inputChannelMask),
+ audio_bytes_per_sample(format)
+ * audio_channel_count_from_out_mask(outputChannelMask),
+ bufferFrameCount),
+ mFormat(format),
+ mSampleSize(audio_bytes_per_sample(format)),
+ mInputChannels(audio_channel_count_from_out_mask(inputChannelMask)),
+ mOutputChannels(audio_channel_count_from_out_mask(outputChannelMask))
+{
+ ALOGV("RemixBufferProvider(%p)(%#x, %#x, %#x) %zu %zu",
+ this, format, inputChannelMask, outputChannelMask,
+ mInputChannels, mOutputChannels);
+ (void) memcpy_by_index_array_initialization_from_channel_mask(
+ mIdxAry, ARRAY_SIZE(mIdxAry), outputChannelMask, inputChannelMask);
+}
+
+void RemixBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+ memcpy_by_index_array(dst, mOutputChannels,
+ src, mInputChannels, mIdxAry, mSampleSize, frames);
+}
+
+ReformatBufferProvider::ReformatBufferProvider(int32_t channelCount,
+ audio_format_t inputFormat, audio_format_t outputFormat,
+ size_t bufferFrameCount) :
+ CopyBufferProvider(
+ channelCount * audio_bytes_per_sample(inputFormat),
+ channelCount * audio_bytes_per_sample(outputFormat),
+ bufferFrameCount),
+ mChannelCount(channelCount),
+ mInputFormat(inputFormat),
+ mOutputFormat(outputFormat)
+{
+ ALOGV("ReformatBufferProvider(%p)(%u, %#x, %#x)",
+ this, channelCount, inputFormat, outputFormat);
+}
+
+void ReformatBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+ memcpy_by_audio_format(dst, mOutputFormat, src, mInputFormat, frames * mChannelCount);
+}
+
+TimestretchBufferProvider::TimestretchBufferProvider(int32_t channelCount,
+ audio_format_t format, uint32_t sampleRate, const AudioPlaybackRate &playbackRate) :
+ mChannelCount(channelCount),
+ mFormat(format),
+ mSampleRate(sampleRate),
+ mFrameSize(channelCount * audio_bytes_per_sample(format)),
+ mLocalBufferFrameCount(0),
+ mLocalBufferData(NULL),
+ mRemaining(0),
+ mSonicStream(sonicCreateStream(sampleRate, mChannelCount)),
+ mFallbackFailErrorShown(false),
+ mAudioPlaybackRateValid(false)
+{
+ LOG_ALWAYS_FATAL_IF(mSonicStream == NULL,
+ "TimestretchBufferProvider can't allocate Sonic stream");
+
+ setPlaybackRate(playbackRate);
+ ALOGV("TimestretchBufferProvider(%p)(%u, %#x, %u %f %f %d %d)",
+ this, channelCount, format, sampleRate, playbackRate.mSpeed,
+ playbackRate.mPitch, playbackRate.mStretchMode, playbackRate.mFallbackMode);
+ mBuffer.frameCount = 0;
+}
+
+TimestretchBufferProvider::~TimestretchBufferProvider()
+{
+ ALOGV("~TimestretchBufferProvider(%p)", this);
+ sonicDestroyStream(mSonicStream);
+ if (mBuffer.frameCount != 0) {
+ mTrackBufferProvider->releaseBuffer(&mBuffer);
+ }
+ free(mLocalBufferData);
+}
+
+status_t TimestretchBufferProvider::getNextBuffer(
+ AudioBufferProvider::Buffer *pBuffer)
+{
+ ALOGV("TimestretchBufferProvider(%p)::getNextBuffer(%p (%zu))",
+ this, pBuffer, pBuffer->frameCount);
+
+ // BYPASS
+ //return mTrackBufferProvider->getNextBuffer(pBuffer);
+
+ // check if previously processed data is sufficient.
+ if (pBuffer->frameCount <= mRemaining) {
+ ALOGV("previous sufficient");
+ pBuffer->raw = mLocalBufferData;
+ return OK;
+ }
+
+ // do we need to resize our buffer?
+ if (pBuffer->frameCount > mLocalBufferFrameCount) {
+ void *newmem;
+ if (posix_memalign(&newmem, 32, pBuffer->frameCount * mFrameSize) == OK) {
+ if (mRemaining != 0) {
+ memcpy(newmem, mLocalBufferData, mRemaining * mFrameSize);
+ }
+ free(mLocalBufferData);
+ mLocalBufferData = newmem;
+ mLocalBufferFrameCount = pBuffer->frameCount;
+ }
+ }
+
+ // need to fetch more data
+ const size_t outputDesired = pBuffer->frameCount - mRemaining;
+ size_t dstAvailable;
+ do {
+ mBuffer.frameCount = mPlaybackRate.mSpeed == AUDIO_TIMESTRETCH_SPEED_NORMAL
+ ? outputDesired : outputDesired * mPlaybackRate.mSpeed + 1;
+
+ status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer);
+
+ ALOG_ASSERT(res == OK || mBuffer.frameCount == 0);
+ if (res != OK || mBuffer.frameCount == 0) { // not needed by API spec, but to be safe.
+ ALOGV("upstream provider cannot provide data");
+ if (mRemaining == 0) {
+ pBuffer->raw = NULL;
+ pBuffer->frameCount = 0;
+ return res;
+ } else { // return partial count
+ pBuffer->raw = mLocalBufferData;
+ pBuffer->frameCount = mRemaining;
+ return OK;
+ }
+ }
+
+ // time-stretch the data
+ dstAvailable = min(mLocalBufferFrameCount - mRemaining, outputDesired);
+ size_t srcAvailable = mBuffer.frameCount;
+ processFrames((uint8_t*)mLocalBufferData + mRemaining * mFrameSize, &dstAvailable,
+ mBuffer.raw, &srcAvailable);
+
+ // release all data consumed
+ mBuffer.frameCount = srcAvailable;
+ mTrackBufferProvider->releaseBuffer(&mBuffer);
+ } while (dstAvailable == 0); // try until we get output data or upstream provider fails.
+
+ // update buffer vars with the actual data processed and return with buffer
+ mRemaining += dstAvailable;
+
+ pBuffer->raw = mLocalBufferData;
+ pBuffer->frameCount = mRemaining;
+
+ return OK;
+}
+
+void TimestretchBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer)
+{
+ ALOGV("TimestretchBufferProvider(%p)::releaseBuffer(%p (%zu))",
+ this, pBuffer, pBuffer->frameCount);
+
+ // BYPASS
+ //return mTrackBufferProvider->releaseBuffer(pBuffer);
+
+ // LOG_ALWAYS_FATAL_IF(pBuffer->frameCount == 0, "Invalid framecount");
+ if (pBuffer->frameCount < mRemaining) {
+ memcpy(mLocalBufferData,
+ (uint8_t*)mLocalBufferData + pBuffer->frameCount * mFrameSize,
+ (mRemaining - pBuffer->frameCount) * mFrameSize);
+ mRemaining -= pBuffer->frameCount;
+ } else if (pBuffer->frameCount == mRemaining) {
+ mRemaining = 0;
+ } else {
+ LOG_ALWAYS_FATAL("Releasing more frames(%zu) than available(%zu)",
+ pBuffer->frameCount, mRemaining);
+ }
+
+ pBuffer->raw = NULL;
+ pBuffer->frameCount = 0;
+}
+
+void TimestretchBufferProvider::reset()
+{
+ mRemaining = 0;
+}
+
+status_t TimestretchBufferProvider::setPlaybackRate(const AudioPlaybackRate &playbackRate)
+{
+ mPlaybackRate = playbackRate;
+ mFallbackFailErrorShown = false;
+ sonicSetSpeed(mSonicStream, mPlaybackRate.mSpeed);
+ //TODO: pitch is ignored for now
+ //TODO: optimize: if parameters are the same, don't do any extra computation.
+
+ mAudioPlaybackRateValid = isAudioPlaybackRateValid(mPlaybackRate);
+ return OK;
+}
+
+void TimestretchBufferProvider::processFrames(void *dstBuffer, size_t *dstFrames,
+ const void *srcBuffer, size_t *srcFrames)
+{
+ ALOGV("processFrames(%zu %zu) remaining(%zu)", *dstFrames, *srcFrames, mRemaining);
+ // Note dstFrames is the required number of frames.
+
+ if (!mAudioPlaybackRateValid) {
+ //fallback mode
+ // Ensure consumption from src is as expected.
+ // TODO: add logic to track "very accurate" consumption related to speed, original sampling
+ // rate, actual frames processed.
+
+ const size_t targetSrc = *dstFrames * mPlaybackRate.mSpeed;
+ if (*srcFrames < targetSrc) { // limit dst frames to that possible
+ *dstFrames = *srcFrames / mPlaybackRate.mSpeed;
+ } else if (*srcFrames > targetSrc + 1) {
+ *srcFrames = targetSrc + 1;
+ }
+ if (*dstFrames > 0) {
+ switch(mPlaybackRate.mFallbackMode) {
+ case AUDIO_TIMESTRETCH_FALLBACK_CUT_REPEAT:
+ if (*dstFrames <= *srcFrames) {
+ size_t copySize = mFrameSize * *dstFrames;
+ memcpy(dstBuffer, srcBuffer, copySize);
+ } else {
+ // cyclically repeat the source.
+ for (size_t count = 0; count < *dstFrames; count += *srcFrames) {
+ size_t remaining = min(*srcFrames, *dstFrames - count);
+ memcpy((uint8_t*)dstBuffer + mFrameSize * count,
+ srcBuffer, mFrameSize * remaining);
+ }
+ }
+ break;
+ case AUDIO_TIMESTRETCH_FALLBACK_DEFAULT:
+ case AUDIO_TIMESTRETCH_FALLBACK_MUTE:
+ memset(dstBuffer,0, mFrameSize * *dstFrames);
+ break;
+ case AUDIO_TIMESTRETCH_FALLBACK_FAIL:
+ default:
+ if(!mFallbackFailErrorShown) {
+ ALOGE("invalid parameters in TimestretchBufferProvider fallbackMode:%d",
+ mPlaybackRate.mFallbackMode);
+ mFallbackFailErrorShown = true;
+ }
+ break;
+ }
+ }
+ } else {
+ switch (mFormat) {
+ case AUDIO_FORMAT_PCM_FLOAT:
+ if (sonicWriteFloatToStream(mSonicStream, (float*)srcBuffer, *srcFrames) != 1) {
+ ALOGE("sonicWriteFloatToStream cannot realloc");
+ *srcFrames = 0; // cannot consume all of srcBuffer
+ }
+ *dstFrames = sonicReadFloatFromStream(mSonicStream, (float*)dstBuffer, *dstFrames);
+ break;
+ case AUDIO_FORMAT_PCM_16_BIT:
+ if (sonicWriteShortToStream(mSonicStream, (short*)srcBuffer, *srcFrames) != 1) {
+ ALOGE("sonicWriteShortToStream cannot realloc");
+ *srcFrames = 0; // cannot consume all of srcBuffer
+ }
+ *dstFrames = sonicReadShortFromStream(mSonicStream, (short*)dstBuffer, *dstFrames);
+ break;
+ default:
+ // could also be caught on construction
+ LOG_ALWAYS_FATAL("invalid format %#x for TimestretchBufferProvider", mFormat);
+ }
+ }
+}
+// ----------------------------------------------------------------------------
+} // namespace android
diff --git a/media/libaudioprocessing/RecordBufferConverter.cpp b/media/libaudioprocessing/RecordBufferConverter.cpp
new file mode 100644
index 0000000..54151f5
--- /dev/null
+++ b/media/libaudioprocessing/RecordBufferConverter.cpp
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "RecordBufferConverter"
+//#define LOG_NDEBUG 0
+
+#include <audio_utils/primitives.h>
+#include <audio_utils/format.h>
+#include <media/AudioMixer.h> // for UNITY_GAIN_FLOAT
+#include <media/AudioResampler.h>
+#include <media/BufferProviders.h>
+#include <media/RecordBufferConverter.h>
+#include <utils/Log.h>
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
+#endif
+
+template <typename T>
+static inline T max(const T& a, const T& b)
+{
+ return a > b ? a : b;
+}
+
+namespace android {
+
+RecordBufferConverter::RecordBufferConverter(
+ audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
+ uint32_t srcSampleRate,
+ audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
+ uint32_t dstSampleRate) :
+ mSrcChannelMask(AUDIO_CHANNEL_INVALID), // updateParameters will set following vars
+ // mSrcFormat
+ // mSrcSampleRate
+ // mDstChannelMask
+ // mDstFormat
+ // mDstSampleRate
+ // mSrcChannelCount
+ // mDstChannelCount
+ // mDstFrameSize
+ mBuf(NULL), mBufFrames(0), mBufFrameSize(0),
+ mResampler(NULL),
+ mIsLegacyDownmix(false),
+ mIsLegacyUpmix(false),
+ mRequiresFloat(false),
+ mInputConverterProvider(NULL)
+{
+ (void)updateParameters(srcChannelMask, srcFormat, srcSampleRate,
+ dstChannelMask, dstFormat, dstSampleRate);
+}
+
+RecordBufferConverter::~RecordBufferConverter() {
+ free(mBuf);
+ delete mResampler;
+ delete mInputConverterProvider;
+}
+
+void RecordBufferConverter::reset() {
+ if (mResampler != NULL) {
+ mResampler->reset();
+ }
+}
+
+size_t RecordBufferConverter::convert(void *dst,
+ AudioBufferProvider *provider, size_t frames)
+{
+ if (mInputConverterProvider != NULL) {
+ mInputConverterProvider->setBufferProvider(provider);
+ provider = mInputConverterProvider;
+ }
+
+ if (mResampler == NULL) {
+ ALOGV("NO RESAMPLING sampleRate:%u mSrcFormat:%#x mDstFormat:%#x",
+ mSrcSampleRate, mSrcFormat, mDstFormat);
+
+ AudioBufferProvider::Buffer buffer;
+ for (size_t i = frames; i > 0; ) {
+ buffer.frameCount = i;
+ status_t status = provider->getNextBuffer(&buffer);
+ if (status != OK || buffer.frameCount == 0) {
+ frames -= i; // cannot fill request.
+ break;
+ }
+ // format convert to destination buffer
+ convertNoResampler(dst, buffer.raw, buffer.frameCount);
+
+ dst = (int8_t*)dst + buffer.frameCount * mDstFrameSize;
+ i -= buffer.frameCount;
+ provider->releaseBuffer(&buffer);
+ }
+ } else {
+ ALOGV("RESAMPLING mSrcSampleRate:%u mDstSampleRate:%u mSrcFormat:%#x mDstFormat:%#x",
+ mSrcSampleRate, mDstSampleRate, mSrcFormat, mDstFormat);
+
+ // reallocate buffer if needed
+ if (mBufFrameSize != 0 && mBufFrames < frames) {
+ free(mBuf);
+ mBufFrames = frames;
+ (void)posix_memalign(&mBuf, 32, mBufFrames * mBufFrameSize);
+ }
+ // resampler accumulates, but we only have one source track
+ memset(mBuf, 0, frames * mBufFrameSize);
+ frames = mResampler->resample((int32_t*)mBuf, frames, provider);
+ // format convert to destination buffer
+ convertResampler(dst, mBuf, frames);
+ }
+ return frames;
+}
+
+status_t RecordBufferConverter::updateParameters(
+ audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
+ uint32_t srcSampleRate,
+ audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
+ uint32_t dstSampleRate)
+{
+ // quick evaluation if there is any change.
+ if (mSrcFormat == srcFormat
+ && mSrcChannelMask == srcChannelMask
+ && mSrcSampleRate == srcSampleRate
+ && mDstFormat == dstFormat
+ && mDstChannelMask == dstChannelMask
+ && mDstSampleRate == dstSampleRate) {
+ return NO_ERROR;
+ }
+
+ ALOGV("RecordBufferConverter updateParameters srcMask:%#x dstMask:%#x"
+ " srcFormat:%#x dstFormat:%#x srcRate:%u dstRate:%u",
+ srcChannelMask, dstChannelMask, srcFormat, dstFormat, srcSampleRate, dstSampleRate);
+ const bool valid =
+ audio_is_input_channel(srcChannelMask)
+ && audio_is_input_channel(dstChannelMask)
+ && audio_is_valid_format(srcFormat) && audio_is_linear_pcm(srcFormat)
+ && audio_is_valid_format(dstFormat) && audio_is_linear_pcm(dstFormat)
+ && (srcSampleRate <= dstSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX)
+ ; // no upsampling checks for now
+ if (!valid) {
+ return BAD_VALUE;
+ }
+
+ mSrcFormat = srcFormat;
+ mSrcChannelMask = srcChannelMask;
+ mSrcSampleRate = srcSampleRate;
+ mDstFormat = dstFormat;
+ mDstChannelMask = dstChannelMask;
+ mDstSampleRate = dstSampleRate;
+
+ // compute derived parameters
+ mSrcChannelCount = audio_channel_count_from_in_mask(srcChannelMask);
+ mDstChannelCount = audio_channel_count_from_in_mask(dstChannelMask);
+ mDstFrameSize = mDstChannelCount * audio_bytes_per_sample(mDstFormat);
+
+ // do we need to resample?
+ delete mResampler;
+ mResampler = NULL;
+ if (mSrcSampleRate != mDstSampleRate) {
+ mResampler = AudioResampler::create(AUDIO_FORMAT_PCM_FLOAT,
+ mSrcChannelCount, mDstSampleRate);
+ mResampler->setSampleRate(mSrcSampleRate);
+ mResampler->setVolume(AudioMixer::UNITY_GAIN_FLOAT, AudioMixer::UNITY_GAIN_FLOAT);
+ }
+
+ // are we running legacy channel conversion modes?
+ mIsLegacyDownmix = (mSrcChannelMask == AUDIO_CHANNEL_IN_STEREO
+ || mSrcChannelMask == AUDIO_CHANNEL_IN_FRONT_BACK)
+ && mDstChannelMask == AUDIO_CHANNEL_IN_MONO;
+ mIsLegacyUpmix = mSrcChannelMask == AUDIO_CHANNEL_IN_MONO
+ && (mDstChannelMask == AUDIO_CHANNEL_IN_STEREO
+ || mDstChannelMask == AUDIO_CHANNEL_IN_FRONT_BACK);
+
+ // do we need to process in float?
+ mRequiresFloat = mResampler != NULL || mIsLegacyDownmix || mIsLegacyUpmix;
+
+ // do we need a staging buffer to convert for destination (we can still optimize this)?
+ // we use mBufFrameSize > 0 to indicate both frame size as well as buffer necessity
+ if (mResampler != NULL) {
+ mBufFrameSize = max(mSrcChannelCount, (uint32_t)FCC_2)
+ * audio_bytes_per_sample(AUDIO_FORMAT_PCM_FLOAT);
+ } else if (mIsLegacyUpmix || mIsLegacyDownmix) { // legacy modes always float
+ mBufFrameSize = mDstChannelCount * audio_bytes_per_sample(AUDIO_FORMAT_PCM_FLOAT);
+ } else if (mSrcChannelMask != mDstChannelMask && mDstFormat != mSrcFormat) {
+ mBufFrameSize = mDstChannelCount * audio_bytes_per_sample(mSrcFormat);
+ } else {
+ mBufFrameSize = 0;
+ }
+ mBufFrames = 0; // force the buffer to be resized.
+
+ // do we need an input converter buffer provider to give us float?
+ delete mInputConverterProvider;
+ mInputConverterProvider = NULL;
+ if (mRequiresFloat && mSrcFormat != AUDIO_FORMAT_PCM_FLOAT) {
+ mInputConverterProvider = new ReformatBufferProvider(
+ audio_channel_count_from_in_mask(mSrcChannelMask),
+ mSrcFormat,
+ AUDIO_FORMAT_PCM_FLOAT,
+ 256 /* provider buffer frame count */);
+ }
+
+ // do we need a remixer to do channel mask conversion
+ if (!mIsLegacyDownmix && !mIsLegacyUpmix && mSrcChannelMask != mDstChannelMask) {
+ (void) memcpy_by_index_array_initialization_from_channel_mask(
+ mIdxAry, ARRAY_SIZE(mIdxAry), mDstChannelMask, mSrcChannelMask);
+ }
+ return NO_ERROR;
+}
+
+void RecordBufferConverter::convertNoResampler(
+ void *dst, const void *src, size_t frames)
+{
+ // src is native type unless there is legacy upmix or downmix, whereupon it is float.
+ if (mBufFrameSize != 0 && mBufFrames < frames) {
+ free(mBuf);
+ mBufFrames = frames;
+ (void)posix_memalign(&mBuf, 32, mBufFrames * mBufFrameSize);
+ }
+ // do we need to do legacy upmix and downmix?
+ if (mIsLegacyUpmix || mIsLegacyDownmix) {
+ void *dstBuf = mBuf != NULL ? mBuf : dst;
+ if (mIsLegacyUpmix) {
+ upmix_to_stereo_float_from_mono_float((float *)dstBuf,
+ (const float *)src, frames);
+ } else /*mIsLegacyDownmix */ {
+ downmix_to_mono_float_from_stereo_float((float *)dstBuf,
+ (const float *)src, frames);
+ }
+ if (mBuf != NULL) {
+ memcpy_by_audio_format(dst, mDstFormat, mBuf, AUDIO_FORMAT_PCM_FLOAT,
+ frames * mDstChannelCount);
+ }
+ return;
+ }
+ // do we need to do channel mask conversion?
+ if (mSrcChannelMask != mDstChannelMask) {
+ void *dstBuf = mBuf != NULL ? mBuf : dst;
+ memcpy_by_index_array(dstBuf, mDstChannelCount,
+ src, mSrcChannelCount, mIdxAry, audio_bytes_per_sample(mSrcFormat), frames);
+ if (dstBuf == dst) {
+ return; // format is the same
+ }
+ }
+ // convert to destination buffer
+ const void *convertBuf = mBuf != NULL ? mBuf : src;
+ memcpy_by_audio_format(dst, mDstFormat, convertBuf, mSrcFormat,
+ frames * mDstChannelCount);
+}
+
+void RecordBufferConverter::convertResampler(
+ void *dst, /*not-a-const*/ void *src, size_t frames)
+{
+ // src buffer format is ALWAYS float when entering this routine
+ if (mIsLegacyUpmix) {
+ ; // mono to stereo already handled by resampler
+ } else if (mIsLegacyDownmix
+ || (mSrcChannelMask == mDstChannelMask && mSrcChannelCount == 1)) {
+ // the resampler outputs stereo for mono input channel (a feature?)
+ // must convert to mono
+ downmix_to_mono_float_from_stereo_float((float *)src,
+ (const float *)src, frames);
+ } else if (mSrcChannelMask != mDstChannelMask) {
+ // convert to mono channel again for channel mask conversion (could be skipped
+ // with further optimization).
+ if (mSrcChannelCount == 1) {
+ downmix_to_mono_float_from_stereo_float((float *)src,
+ (const float *)src, frames);
+ }
+ // convert to destination format (in place, OK as float is larger than other types)
+ if (mDstFormat != AUDIO_FORMAT_PCM_FLOAT) {
+ memcpy_by_audio_format(src, mDstFormat, src, AUDIO_FORMAT_PCM_FLOAT,
+ frames * mSrcChannelCount);
+ }
+ // channel convert and save to dst
+ memcpy_by_index_array(dst, mDstChannelCount,
+ src, mSrcChannelCount, mIdxAry, audio_bytes_per_sample(mDstFormat), frames);
+ return;
+ }
+ // convert to destination format and save to dst
+ memcpy_by_audio_format(dst, mDstFormat, src, AUDIO_FORMAT_PCM_FLOAT,
+ frames * mDstChannelCount);
+}
+
+// ----------------------------------------------------------------------------
+} // namespace android
diff --git a/services/audioflinger/audio-resampler/Android.mk b/media/libaudioprocessing/audio-resampler/Android.mk
similarity index 100%
rename from services/audioflinger/audio-resampler/Android.mk
rename to media/libaudioprocessing/audio-resampler/Android.mk
diff --git a/services/audioflinger/audio-resampler/AudioResamplerCoefficients.cpp b/media/libaudioprocessing/audio-resampler/AudioResamplerCoefficients.cpp
similarity index 100%
rename from services/audioflinger/audio-resampler/AudioResamplerCoefficients.cpp
rename to media/libaudioprocessing/audio-resampler/AudioResamplerCoefficients.cpp
diff --git a/services/audioflinger/audio-resampler/filter_coefficients.h b/media/libaudioprocessing/audio-resampler/filter_coefficients.h
similarity index 100%
rename from services/audioflinger/audio-resampler/filter_coefficients.h
rename to media/libaudioprocessing/audio-resampler/filter_coefficients.h
diff --git a/services/audioflinger/AudioResampler.h b/media/libaudioprocessing/include/AudioResampler.h
similarity index 100%
rename from services/audioflinger/AudioResampler.h
rename to media/libaudioprocessing/include/AudioResampler.h
diff --git a/include/media/AudioResamplerPublic.h b/media/libaudioprocessing/include/AudioResamplerPublic.h
similarity index 100%
rename from include/media/AudioResamplerPublic.h
rename to media/libaudioprocessing/include/AudioResamplerPublic.h
diff --git a/media/libaudioprocessing/tests/Android.mk b/media/libaudioprocessing/tests/Android.mk
new file mode 100644
index 0000000..23e1c3a
--- /dev/null
+++ b/media/libaudioprocessing/tests/Android.mk
@@ -0,0 +1,87 @@
+# Build the unit tests for libaudioprocessing
+
+LOCAL_PATH := $(call my-dir)
+
+#
+# resampler unit test
+#
+include $(CLEAR_VARS)
+
+LOCAL_SHARED_LIBRARIES := \
+ libaudioutils \
+ libaudioprocessing \
+ libcutils \
+ liblog \
+ libutils \
+
+LOCAL_C_INCLUDES := \
+ $(call include-path-for, audio-utils) \
+
+LOCAL_SRC_FILES := \
+ resampler_tests.cpp
+
+LOCAL_MODULE := resampler_tests
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_CFLAGS := -Werror -Wall
+
+include $(BUILD_NATIVE_TEST)
+
+#
+# audio mixer test tool
+#
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+ test-mixer.cpp \
+
+LOCAL_C_INCLUDES := \
+ $(call include-path-for, audio-utils) \
+
+LOCAL_STATIC_LIBRARIES := \
+ libsndfile \
+
+LOCAL_SHARED_LIBRARIES := \
+ libaudioprocessing \
+ libaudioutils \
+ libcutils \
+ liblog \
+ libutils \
+
+LOCAL_MODULE := test-mixer
+
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_CFLAGS := -Werror -Wall
+
+include $(BUILD_EXECUTABLE)
+
+#
+# build audio resampler test tool
+#
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+ test-resampler.cpp \
+
+LOCAL_C_INCLUDES := \
+ $(call include-path-for, audio-utils) \
+
+LOCAL_STATIC_LIBRARIES := \
+ libsndfile \
+
+LOCAL_SHARED_LIBRARIES := \
+ libaudioprocessing \
+ libaudioutils \
+ libcutils \
+ liblog \
+ libutils \
+
+LOCAL_MODULE := test-resampler
+
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_CFLAGS := -Werror -Wall
+
+include $(BUILD_EXECUTABLE)
diff --git a/media/libaudioprocessing/tests/README b/media/libaudioprocessing/tests/README
new file mode 100644
index 0000000..ed7e2ed
--- /dev/null
+++ b/media/libaudioprocessing/tests/README
@@ -0,0 +1,13 @@
+For libsonic dependency:
+pushd $ANDROID_BUILD_TOP/external/sonic
+mm
+popd
+
+To build audio processing library:
+pushd ..
+Optionally uncomment USE_NEON=false in Android.mk
+mm
+popd
+
+Then build here:
+mm
diff --git a/media/libaudioprocessing/tests/build_and_run_all_unit_tests.sh b/media/libaudioprocessing/tests/build_and_run_all_unit_tests.sh
new file mode 100755
index 0000000..704d095
--- /dev/null
+++ b/media/libaudioprocessing/tests/build_and_run_all_unit_tests.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+if [ -z "$ANDROID_BUILD_TOP" ]; then
+ echo "Android build environment not set"
+ exit -1
+fi
+
+# ensure we have mm
+. $ANDROID_BUILD_TOP/build/envsetup.sh
+
+pushd $ANDROID_BUILD_TOP/frameworks/av/media/libaudioprocessing
+pwd
+mm
+
+echo "waiting for device"
+adb root && adb wait-for-device remount
+adb push $OUT/system/lib/libaudioprocessing.so /system/lib
+adb push $OUT/system/lib64/libaudioprocessing.so /system/lib64
+adb push $OUT/data/nativetest/resampler_tests/resampler_tests /data/nativetest/resampler_tests/resampler_tests
+adb push $OUT/data/nativetest64/resampler_tests/resampler_tests /data/nativetest64/resampler_tests/resampler_tests
+
+sh $ANDROID_BUILD_TOP/frameworks/av/media/libaudioprocessing/tests/run_all_unit_tests.sh
+
+popd
diff --git a/media/libaudioprocessing/tests/mixer_to_wav_tests.sh b/media/libaudioprocessing/tests/mixer_to_wav_tests.sh
new file mode 100755
index 0000000..72b02fc
--- /dev/null
+++ b/media/libaudioprocessing/tests/mixer_to_wav_tests.sh
@@ -0,0 +1,145 @@
+#!/bin/bash
+#
+# This script uses test-mixer to generate WAV files
+# for evaluation of the AudioMixer component.
+#
+# Sine and chirp signals are used for input because they
+# show up as clear lines, either horizontal or diagonal,
+# on a spectrogram. This means easy verification of multiple
+# track mixing.
+#
+# After execution, look for created subdirectories like
+# mixer_i_i
+# mixer_i_f
+# mixer_f_f
+#
+# Recommend using a program such as audacity to evaluate
+# the output WAV files, e.g.
+#
+# cd testdir
+# audacity *.wav
+#
+# Using Audacity:
+#
+# Under "Waveform" view mode you can zoom into the
+# start of the WAV file to verify proper ramping.
+#
+# Select "Spectrogram" to verify the lines
+# (sine = horizontal, chirp = diagonal) which should
+# be clear (except for around the start as the volume
+# ramping causes spectral distortion).
+
+if [ -z "$ANDROID_BUILD_TOP" ]; then
+ echo "Android build environment not set"
+ exit -1
+fi
+
+# ensure we have mm
+. $ANDROID_BUILD_TOP/build/envsetup.sh
+
+pushd $ANDROID_BUILD_TOP/frameworks/av/media/libaudioprocessing
+
+# build
+pwd
+mm
+
+# send to device
+echo "waiting for device"
+adb root && adb wait-for-device remount
+adb push $OUT/system/lib/libaudioprocessing.so /system/lib
+adb push $OUT/system/lib64/libaudioprocessing.so /system/lib64
+adb push $OUT/system/bin/test-mixer /system/bin
+
+# createwav creates a series of WAV files testing various
+# mixer settings
+# $1 = flags
+# $2 = directory
+function createwav() {
+# create directory if it doesn't exist
+ if [ ! -d $2 ]; then
+ mkdir $2
+ fi
+
+# Test:
+# process__genericResampling with mixed integer and float track input
+# track__Resample / track__genericResample
+ adb shell test-mixer $1 -s 48000 \
+ -o /sdcard/tm48000grif.wav \
+ sine:2,4000,7520 chirp:2,9200 sine:1,3000,18000 \
+ sine:f,6,6000,19000 chirp:i,4,30000
+ adb pull /sdcard/tm48000grif.wav $2
+
+# Test:
+# process__genericResampling
+# track__Resample / track__genericResample
+ adb shell test-mixer $1 -s 48000 \
+ -o /sdcard/tm48000gr.wav \
+ sine:2,4000,7520 chirp:2,9200 sine:1,3000,18000 \
+ sine:6,6000,19000
+ adb pull /sdcard/tm48000gr.wav $2
+
+# Test:
+# process__genericResample
+# track__Resample / track__genericResample
+# track__NoResample / track__16BitsStereo / track__16BitsMono
+# Aux buffer
+ adb shell test-mixer $1 -c 5 -s 9307 \
+ -a /sdcard/aux9307gra.wav -o /sdcard/tm9307gra.wav \
+ sine:4,1000,3000 sine:1,2000,9307 chirp:3,9307
+ adb pull /sdcard/tm9307gra.wav $2
+ adb pull /sdcard/aux9307gra.wav $2
+
+# Test:
+# process__genericNoResampling
+# track__NoResample / track__16BitsStereo / track__16BitsMono
+ adb shell test-mixer $1 -s 32000 \
+ -o /sdcard/tm32000gnr.wav \
+ sine:2,1000,32000 chirp:2,32000 sine:1,3000,32000
+ adb pull /sdcard/tm32000gnr.wav $2
+
+# Test:
+# process__genericNoResampling
+# track__NoResample / track__16BitsStereo / track__16BitsMono
+# Aux buffer
+ adb shell test-mixer $1 -s 32000 \
+ -a /sdcard/aux32000gnra.wav -o /sdcard/tm32000gnra.wav \
+ sine:2,1000,32000 chirp:2,32000 sine:1,3000,32000
+ adb pull /sdcard/tm32000gnra.wav $2
+ adb pull /sdcard/aux32000gnra.wav $2
+
+# Test:
+# process__NoResampleOneTrack / process__OneTrack16BitsStereoNoResampling
+# Downmixer
+ adb shell test-mixer $1 -s 32000 \
+ -o /sdcard/tm32000nrot.wav \
+ sine:6,1000,32000
+ adb pull /sdcard/tm32000nrot.wav $2
+
+# Test:
+# process__NoResampleOneTrack / OneTrack16BitsStereoNoResampling
+# Aux buffer
+ adb shell test-mixer $1 -s 44100 \
+ -a /sdcard/aux44100nrota.wav -o /sdcard/tm44100nrota.wav \
+ sine:2,2000,44100
+ adb pull /sdcard/tm44100nrota.wav $2
+ adb pull /sdcard/aux44100nrota.wav $2
+}
+
+#
+# Call createwav to generate WAV files in various combinations
+#
+# i_i = integer input track, integer mixer output
+# f_f = float input track, float mixer output
+# i_f = integer input track, float mixer output
+#
+# If the mixer output is float, then the output WAV file is pcm float.
+#
+# TODO: create a "snr" like "diff" to automatically
+# compare files in these directories together.
+#
+
+createwav "" "tests/mixer_i_i"
+createwav "-f -m" "tests/mixer_f_f"
+createwav "-m" "tests/mixer_i_f"
+
+popd
diff --git a/media/libaudioprocessing/tests/resampler_tests.cpp b/media/libaudioprocessing/tests/resampler_tests.cpp
new file mode 100644
index 0000000..a23c000
--- /dev/null
+++ b/media/libaudioprocessing/tests/resampler_tests.cpp
@@ -0,0 +1,486 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "audioflinger_resampler_tests"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <time.h>
+#include <unistd.h>
+
+#include <iostream>
+#include <utility>
+#include <vector>
+
+#include <gtest/gtest.h>
+#include <log/log.h>
+#include <media/AudioBufferProvider.h>
+
+#include <media/AudioResampler.h>
+#include "test_utils.h"
+
+template <typename T>
+static void printData(T *data, size_t size) {
+ const size_t stride = 8;
+ for (size_t i = 0; i < size; ) {
+ for (size_t j = 0; j < stride && i < size; ++j) {
+ std::cout << data[i++] << ' '; // extra space before newline
+ }
+ std::cout << '\n'; // or endl
+ }
+}
+
+void resample(int channels, void *output,
+ size_t outputFrames, const std::vector<size_t> &outputIncr,
+ android::AudioBufferProvider *provider, android::AudioResampler *resampler)
+{
+ for (size_t i = 0, j = 0; i < outputFrames; ) {
+ size_t thisFrames = outputIncr[j++];
+ if (j >= outputIncr.size()) {
+ j = 0;
+ }
+ if (thisFrames == 0 || thisFrames > outputFrames - i) {
+ thisFrames = outputFrames - i;
+ }
+ size_t framesResampled = resampler->resample(
+ (int32_t*) output + channels*i, thisFrames, provider);
+ // we should have enough buffer space, so there is no short count.
+ ASSERT_EQ(thisFrames, framesResampled);
+ i += thisFrames;
+ }
+}
+
+void buffercmp(const void *reference, const void *test,
+ size_t outputFrameSize, size_t outputFrames)
+{
+ for (size_t i = 0; i < outputFrames; ++i) {
+ int check = memcmp((const char*)reference + i * outputFrameSize,
+ (const char*)test + i * outputFrameSize, outputFrameSize);
+ if (check) {
+ ALOGE("Failure at frame %zu", i);
+ ASSERT_EQ(check, 0); /* fails */
+ }
+ }
+}
+
+void testBufferIncrement(size_t channels, bool useFloat,
+ unsigned inputFreq, unsigned outputFreq,
+ enum android::AudioResampler::src_quality quality)
+{
+ const audio_format_t format = useFloat ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
+ // create the provider
+ std::vector<int> inputIncr;
+ SignalProvider provider;
+ if (useFloat) {
+ provider.setChirp<float>(channels,
+ 0., outputFreq/2., outputFreq, outputFreq/2000.);
+ } else {
+ provider.setChirp<int16_t>(channels,
+ 0., outputFreq/2., outputFreq, outputFreq/2000.);
+ }
+ provider.setIncr(inputIncr);
+
+ // calculate the output size
+ size_t outputFrames = ((int64_t) provider.getNumFrames() * outputFreq) / inputFreq;
+ size_t outputFrameSize = (channels == 1 ? 2 : channels) * (useFloat ? sizeof(float) : sizeof(int32_t));
+ size_t outputSize = outputFrameSize * outputFrames;
+ outputSize &= ~7;
+
+ // create the resampler
+ android::AudioResampler* resampler;
+
+ resampler = android::AudioResampler::create(format, channels, outputFreq, quality);
+ resampler->setSampleRate(inputFreq);
+ resampler->setVolume(android::AudioResampler::UNITY_GAIN_FLOAT,
+ android::AudioResampler::UNITY_GAIN_FLOAT);
+
+ // set up the reference run
+ std::vector<size_t> refIncr;
+ refIncr.push_back(outputFrames);
+ void* reference = calloc(outputFrames, outputFrameSize);
+ resample(channels, reference, outputFrames, refIncr, &provider, resampler);
+
+ provider.reset();
+
+#if 0
+ /* this test will fail - API interface issue: reset() does not clear internal buffers */
+ resampler->reset();
+#else
+ delete resampler;
+ resampler = android::AudioResampler::create(format, channels, outputFreq, quality);
+ resampler->setSampleRate(inputFreq);
+ resampler->setVolume(android::AudioResampler::UNITY_GAIN_FLOAT,
+ android::AudioResampler::UNITY_GAIN_FLOAT);
+#endif
+
+ // set up the test run
+ std::vector<size_t> outIncr;
+ outIncr.push_back(1);
+ outIncr.push_back(2);
+ outIncr.push_back(3);
+ void* test = calloc(outputFrames, outputFrameSize);
+ inputIncr.push_back(1);
+ inputIncr.push_back(3);
+ provider.setIncr(inputIncr);
+ resample(channels, test, outputFrames, outIncr, &provider, resampler);
+
+ // check
+ buffercmp(reference, test, outputFrameSize, outputFrames);
+
+ free(reference);
+ free(test);
+ delete resampler;
+}
+
+template <typename T>
+inline double sqr(T v)
+{
+ double dv = static_cast<double>(v);
+ return dv * dv;
+}
+
+template <typename T>
+double signalEnergy(T *start, T *end, unsigned stride)
+{
+ double accum = 0;
+
+ for (T *p = start; p < end; p += stride) {
+ accum += sqr(*p);
+ }
+ unsigned count = (end - start + stride - 1) / stride;
+ return accum / count;
+}
+
+// TI = resampler input type, int16_t or float
+// TO = resampler output type, int32_t or float
+template <typename TI, typename TO>
+void testStopbandDownconversion(size_t channels,
+ unsigned inputFreq, unsigned outputFreq,
+ unsigned passband, unsigned stopband,
+ enum android::AudioResampler::src_quality quality)
+{
+ // create the provider
+ std::vector<int> inputIncr;
+ SignalProvider provider;
+ provider.setChirp<TI>(channels,
+ 0., inputFreq/2., inputFreq, inputFreq/2000.);
+ provider.setIncr(inputIncr);
+
+ // calculate the output size
+ size_t outputFrames = ((int64_t) provider.getNumFrames() * outputFreq) / inputFreq;
+ size_t outputFrameSize = (channels == 1 ? 2 : channels) * sizeof(TO);
+ size_t outputSize = outputFrameSize * outputFrames;
+ outputSize &= ~7;
+
+ // create the resampler
+ android::AudioResampler* resampler;
+
+ resampler = android::AudioResampler::create(
+ is_same<TI, int16_t>::value ? AUDIO_FORMAT_PCM_16_BIT : AUDIO_FORMAT_PCM_FLOAT,
+ channels, outputFreq, quality);
+ resampler->setSampleRate(inputFreq);
+ resampler->setVolume(android::AudioResampler::UNITY_GAIN_FLOAT,
+ android::AudioResampler::UNITY_GAIN_FLOAT);
+
+ // set up the reference run
+ std::vector<size_t> refIncr;
+ refIncr.push_back(outputFrames);
+ void* reference = calloc(outputFrames, outputFrameSize);
+ resample(channels, reference, outputFrames, refIncr, &provider, resampler);
+
+ TO *out = reinterpret_cast<TO *>(reference);
+
+ // check signal energy in passband
+ const unsigned passbandFrame = passband * outputFreq / 1000.;
+ const unsigned stopbandFrame = stopband * outputFreq / 1000.;
+
+ // check each channel separately
+ if (channels == 1) channels = 2; // workaround (mono duplicates output channel)
+
+ for (size_t i = 0; i < channels; ++i) {
+ double passbandEnergy = signalEnergy(out, out + passbandFrame * channels, channels);
+ double stopbandEnergy = signalEnergy(out + stopbandFrame * channels,
+ out + outputFrames * channels, channels);
+ double dbAtten = -10. * log10(stopbandEnergy / passbandEnergy);
+ ASSERT_GT(dbAtten, 60.);
+
+#if 0
+ // internal verification
+ printf("if:%d of:%d pbf:%d sbf:%d sbe: %f pbe: %f db: %.2f\n",
+ provider.getNumFrames(), outputFrames,
+ passbandFrame, stopbandFrame, stopbandEnergy, passbandEnergy, dbAtten);
+ for (size_t i = 0; i < 10; ++i) {
+ std::cout << out[i+passbandFrame*channels] << std::endl;
+ }
+ for (size_t i = 0; i < 10; ++i) {
+ std::cout << out[i+stopbandFrame*channels] << std::endl;
+ }
+#endif
+ }
+
+ free(reference);
+ delete resampler;
+}
+
+/* Buffer increment test
+ *
+ * We compare a reference output, where we consume and process the entire
+ * buffer at a time, and a test output, where we provide small chunks of input
+ * data and process small chunks of output (which may not be equivalent in size).
+ *
+ * Two subtests - fixed phase (3:2 down) and interpolated phase (147:320 up)
+ */
+TEST(audioflinger_resampler, bufferincrement_fixedphase) {
+ // all of these work
+ static const enum android::AudioResampler::src_quality kQualityArray[] = {
+ android::AudioResampler::LOW_QUALITY,
+ android::AudioResampler::MED_QUALITY,
+ android::AudioResampler::HIGH_QUALITY,
+ android::AudioResampler::VERY_HIGH_QUALITY,
+ android::AudioResampler::DYN_LOW_QUALITY,
+ android::AudioResampler::DYN_MED_QUALITY,
+ android::AudioResampler::DYN_HIGH_QUALITY,
+ };
+
+ for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+ testBufferIncrement(2, false, 48000, 32000, kQualityArray[i]);
+ }
+}
+
+TEST(audioflinger_resampler, bufferincrement_interpolatedphase) {
+ // all of these work except low quality
+ static const enum android::AudioResampler::src_quality kQualityArray[] = {
+// android::AudioResampler::LOW_QUALITY,
+ android::AudioResampler::MED_QUALITY,
+ android::AudioResampler::HIGH_QUALITY,
+ android::AudioResampler::VERY_HIGH_QUALITY,
+ android::AudioResampler::DYN_LOW_QUALITY,
+ android::AudioResampler::DYN_MED_QUALITY,
+ android::AudioResampler::DYN_HIGH_QUALITY,
+ };
+
+ for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+ testBufferIncrement(2, false, 22050, 48000, kQualityArray[i]);
+ }
+}
+
+TEST(audioflinger_resampler, bufferincrement_fixedphase_multi) {
+ // only dynamic quality
+ static const enum android::AudioResampler::src_quality kQualityArray[] = {
+ android::AudioResampler::DYN_LOW_QUALITY,
+ android::AudioResampler::DYN_MED_QUALITY,
+ android::AudioResampler::DYN_HIGH_QUALITY,
+ };
+
+ for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+ testBufferIncrement(4, false, 48000, 32000, kQualityArray[i]);
+ }
+}
+
+TEST(audioflinger_resampler, bufferincrement_interpolatedphase_multi_float) {
+ // only dynamic quality
+ static const enum android::AudioResampler::src_quality kQualityArray[] = {
+ android::AudioResampler::DYN_LOW_QUALITY,
+ android::AudioResampler::DYN_MED_QUALITY,
+ android::AudioResampler::DYN_HIGH_QUALITY,
+ };
+
+ for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+ testBufferIncrement(8, true, 22050, 48000, kQualityArray[i]);
+ }
+}
+
+/* Simple aliasing test
+ *
+ * This checks stopband response of the chirp signal to make sure frequencies
+ * are properly suppressed. It uses downsampling because the stopband can be
+ * clearly isolated by input frequencies exceeding the output sample rate (nyquist).
+ */
+TEST(audioflinger_resampler, stopbandresponse_integer) {
+ // not all of these may work (old resamplers fail on downsampling)
+ static const enum android::AudioResampler::src_quality kQualityArray[] = {
+ //android::AudioResampler::LOW_QUALITY,
+ //android::AudioResampler::MED_QUALITY,
+ //android::AudioResampler::HIGH_QUALITY,
+ //android::AudioResampler::VERY_HIGH_QUALITY,
+ android::AudioResampler::DYN_LOW_QUALITY,
+ android::AudioResampler::DYN_MED_QUALITY,
+ android::AudioResampler::DYN_HIGH_QUALITY,
+ };
+
+ // in this test we assume a maximum transition band between 12kHz and 20kHz.
+ // there must be at least 60dB relative attenuation between stopband and passband.
+ for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+ testStopbandDownconversion<int16_t, int32_t>(
+ 2, 48000, 32000, 12000, 20000, kQualityArray[i]);
+ }
+
+ // in this test we assume a maximum transition band between 7kHz and 15kHz.
+ // there must be at least 60dB relative attenuation between stopband and passband.
+ // (the weird ratio triggers interpolative resampling)
+ for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+ testStopbandDownconversion<int16_t, int32_t>(
+ 2, 48000, 22101, 7000, 15000, kQualityArray[i]);
+ }
+}
+
+TEST(audioflinger_resampler, stopbandresponse_integer_mono) {
+ // not all of these may work (old resamplers fail on downsampling)
+ static const enum android::AudioResampler::src_quality kQualityArray[] = {
+ //android::AudioResampler::LOW_QUALITY,
+ //android::AudioResampler::MED_QUALITY,
+ //android::AudioResampler::HIGH_QUALITY,
+ //android::AudioResampler::VERY_HIGH_QUALITY,
+ android::AudioResampler::DYN_LOW_QUALITY,
+ android::AudioResampler::DYN_MED_QUALITY,
+ android::AudioResampler::DYN_HIGH_QUALITY,
+ };
+
+ // in this test we assume a maximum transition band between 12kHz and 20kHz.
+ // there must be at least 60dB relative attenuation between stopband and passband.
+ for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+ testStopbandDownconversion<int16_t, int32_t>(
+ 1, 48000, 32000, 12000, 20000, kQualityArray[i]);
+ }
+
+ // in this test we assume a maximum transition band between 7kHz and 15kHz.
+ // there must be at least 60dB relative attenuation between stopband and passband.
+ // (the weird ratio triggers interpolative resampling)
+ for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+ testStopbandDownconversion<int16_t, int32_t>(
+ 1, 48000, 22101, 7000, 15000, kQualityArray[i]);
+ }
+}
+
+TEST(audioflinger_resampler, stopbandresponse_integer_multichannel) {
+ // not all of these may work (old resamplers fail on downsampling)
+ static const enum android::AudioResampler::src_quality kQualityArray[] = {
+ //android::AudioResampler::LOW_QUALITY,
+ //android::AudioResampler::MED_QUALITY,
+ //android::AudioResampler::HIGH_QUALITY,
+ //android::AudioResampler::VERY_HIGH_QUALITY,
+ android::AudioResampler::DYN_LOW_QUALITY,
+ android::AudioResampler::DYN_MED_QUALITY,
+ android::AudioResampler::DYN_HIGH_QUALITY,
+ };
+
+ // in this test we assume a maximum transition band between 12kHz and 20kHz.
+ // there must be at least 60dB relative attenuation between stopband and passband.
+ for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+ testStopbandDownconversion<int16_t, int32_t>(
+ 8, 48000, 32000, 12000, 20000, kQualityArray[i]);
+ }
+
+ // in this test we assume a maximum transition band between 7kHz and 15kHz.
+ // there must be at least 60dB relative attenuation between stopband and passband.
+ // (the weird ratio triggers interpolative resampling)
+ for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+ testStopbandDownconversion<int16_t, int32_t>(
+ 8, 48000, 22101, 7000, 15000, kQualityArray[i]);
+ }
+}
+
+TEST(audioflinger_resampler, stopbandresponse_float) {
+ // not all of these may work (old resamplers fail on downsampling)
+ static const enum android::AudioResampler::src_quality kQualityArray[] = {
+ //android::AudioResampler::LOW_QUALITY,
+ //android::AudioResampler::MED_QUALITY,
+ //android::AudioResampler::HIGH_QUALITY,
+ //android::AudioResampler::VERY_HIGH_QUALITY,
+ android::AudioResampler::DYN_LOW_QUALITY,
+ android::AudioResampler::DYN_MED_QUALITY,
+ android::AudioResampler::DYN_HIGH_QUALITY,
+ };
+
+ // in this test we assume a maximum transition band between 12kHz and 20kHz.
+ // there must be at least 60dB relative attenuation between stopband and passband.
+ for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+ testStopbandDownconversion<float, float>(
+ 2, 48000, 32000, 12000, 20000, kQualityArray[i]);
+ }
+
+ // in this test we assume a maximum transition band between 7kHz and 15kHz.
+ // there must be at least 60dB relative attenuation between stopband and passband.
+ // (the weird ratio triggers interpolative resampling)
+ for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+ testStopbandDownconversion<float, float>(
+ 2, 48000, 22101, 7000, 15000, kQualityArray[i]);
+ }
+}
+
+TEST(audioflinger_resampler, stopbandresponse_float_mono) {
+ // not all of these may work (old resamplers fail on downsampling)
+ static const enum android::AudioResampler::src_quality kQualityArray[] = {
+ //android::AudioResampler::LOW_QUALITY,
+ //android::AudioResampler::MED_QUALITY,
+ //android::AudioResampler::HIGH_QUALITY,
+ //android::AudioResampler::VERY_HIGH_QUALITY,
+ android::AudioResampler::DYN_LOW_QUALITY,
+ android::AudioResampler::DYN_MED_QUALITY,
+ android::AudioResampler::DYN_HIGH_QUALITY,
+ };
+
+ // in this test we assume a maximum transition band between 12kHz and 20kHz.
+ // there must be at least 60dB relative attenuation between stopband and passband.
+ for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+ testStopbandDownconversion<float, float>(
+ 1, 48000, 32000, 12000, 20000, kQualityArray[i]);
+ }
+
+ // in this test we assume a maximum transition band between 7kHz and 15kHz.
+ // there must be at least 60dB relative attenuation between stopband and passband.
+ // (the weird ratio triggers interpolative resampling)
+ for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+ testStopbandDownconversion<float, float>(
+ 1, 48000, 22101, 7000, 15000, kQualityArray[i]);
+ }
+}
+
+TEST(audioflinger_resampler, stopbandresponse_float_multichannel) {
+ // not all of these may work (old resamplers fail on downsampling)
+ static const enum android::AudioResampler::src_quality kQualityArray[] = {
+ //android::AudioResampler::LOW_QUALITY,
+ //android::AudioResampler::MED_QUALITY,
+ //android::AudioResampler::HIGH_QUALITY,
+ //android::AudioResampler::VERY_HIGH_QUALITY,
+ android::AudioResampler::DYN_LOW_QUALITY,
+ android::AudioResampler::DYN_MED_QUALITY,
+ android::AudioResampler::DYN_HIGH_QUALITY,
+ };
+
+ // in this test we assume a maximum transition band between 12kHz and 20kHz.
+ // there must be at least 60dB relative attenuation between stopband and passband.
+ for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+ testStopbandDownconversion<float, float>(
+ 8, 48000, 32000, 12000, 20000, kQualityArray[i]);
+ }
+
+ // in this test we assume a maximum transition band between 7kHz and 15kHz.
+ // there must be at least 60dB relative attenuation between stopband and passband.
+ // (the weird ratio triggers interpolative resampling)
+ for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
+ testStopbandDownconversion<float, float>(
+ 8, 48000, 22101, 7000, 15000, kQualityArray[i]);
+ }
+}
+
diff --git a/media/libaudioprocessing/tests/run_all_unit_tests.sh b/media/libaudioprocessing/tests/run_all_unit_tests.sh
new file mode 100755
index 0000000..15a94c2
--- /dev/null
+++ b/media/libaudioprocessing/tests/run_all_unit_tests.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+if [ -z "$ANDROID_BUILD_TOP" ]; then
+ echo "Android build environment not set"
+ exit -1
+fi
+
+echo "waiting for device"
+adb root && adb wait-for-device remount
+
+adb shell /data/nativetest/resampler_tests/resampler_tests
+adb shell /data/nativetest64/resampler_tests/resampler_tests
diff --git a/media/libaudioprocessing/tests/test-mixer.cpp b/media/libaudioprocessing/tests/test-mixer.cpp
new file mode 100644
index 0000000..75dbf91
--- /dev/null
+++ b/media/libaudioprocessing/tests/test-mixer.cpp
@@ -0,0 +1,328 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <inttypes.h>
+#include <math.h>
+#include <vector>
+#include <audio_utils/primitives.h>
+#include <audio_utils/sndfile.h>
+#include <media/AudioBufferProvider.h>
+#include <media/AudioMixer.h>
+#include "test_utils.h"
+
+/* Testing is typically through creation of an output WAV file from several
+ * source inputs, to be later analyzed by an audio program such as Audacity.
+ *
+ * Sine or chirp functions are typically more useful as input to the mixer
+ * as they show up as straight lines on a spectrogram if successfully mixed.
+ *
+ * A sample shell script is provided: mixer_to_wave_tests.sh
+ */
+
+using namespace android;
+
+// Print the test-mixer command-line help to stderr.
+// 'name' is the program name (argv[0]) shown in the usage line.
+static void usage(const char* name) {
+ fprintf(stderr, "Usage: %s [-f] [-m] [-c channels]"
+ " [-s sample-rate] [-o <output-file>] [-a <aux-buffer-file>] [-P csv]"
+ " (<input-file> | <command>)+\n", name);
+ fprintf(stderr, " -f enable floating point input track by default\n");
+ fprintf(stderr, " -m enable floating point mixer output\n");
+ fprintf(stderr, " -c number of mixer output channels\n");
+ fprintf(stderr, " -s mixer sample-rate\n");
+ fprintf(stderr, " -o <output-file> WAV file, pcm16 (or float if -m specified)\n");
+ fprintf(stderr, " -a <aux-buffer-file>\n");
+ fprintf(stderr, " -P # frames provided per call to resample() in CSV format\n");
+ fprintf(stderr, " <input-file> is a WAV file\n");
+ fprintf(stderr, " <command> can be 'sine:[(i|f),]<channels>,<frequency>,<samplerate>'\n");
+ fprintf(stderr, " 'chirp:[(i|f),]<channels>,<samplerate>'\n");
+}
+
+// Write an interleaved PCM buffer to a WAV file via libsndfile.
+// 'buffer' holds 'frames' frames of 'channels' interleaved samples, either
+// 32-bit float or 16-bit PCM as indicated by 'isBufferFloat'.
+// A NULL filename is accepted and treated as a no-op (returns 0).
+// Returns EXIT_SUCCESS on success, EXIT_FAILURE if the file cannot be opened.
+static int writeFile(const char *filename, const void *buffer,
+ uint32_t sampleRate, uint32_t channels, size_t frames, bool isBufferFloat) {
+ if (filename == NULL) {
+ return 0; // ok to pass in NULL filename
+ }
+ // write output to file.
+ SF_INFO info;
+ info.frames = 0;
+ info.samplerate = sampleRate;
+ info.channels = channels;
+ info.format = SF_FORMAT_WAV | (isBufferFloat ? SF_FORMAT_FLOAT : SF_FORMAT_PCM_16);
+ printf("saving file:%s channels:%u samplerate:%u frames:%zu\n",
+ filename, info.channels, info.samplerate, frames);
+ SNDFILE *sf = sf_open(filename, SFM_WRITE, &info);
+ if (sf == NULL) {
+ perror(filename);
+ return EXIT_FAILURE;
+ }
+ // write errors are ignored here; this is a test utility
+ if (isBufferFloat) {
+ (void) sf_writef_float(sf, (float*)buffer, frames);
+ } else {
+ (void) sf_writef_short(sf, (short*)buffer, frames);
+ }
+ sf_close(sf);
+ return EXIT_SUCCESS;
+}
+
+// Parse an optional leading sample-format specifier in a signal descriptor:
+// "f," selects float samples, "i," selects int16 samples.
+// *useFloat is written only when a specifier is present; the return value
+// points just past the specifier, or is 's' unchanged when none is present.
+const char *parseFormat(const char *s, bool *useFloat) {
+ if (!strncmp(s, "f,", 2)) {
+ *useFloat = true;
+ return s + 2;
+ }
+ if (!strncmp(s, "i,", 2)) {
+ *useFloat = false;
+ return s + 2;
+ }
+ return s;
+}
+
+int main(int argc, char* argv[]) {
+ const char* const progname = argv[0];
+ bool useInputFloat = false;
+ bool useMixerFloat = false;
+ bool useRamp = true;
+ uint32_t outputSampleRate = 48000;
+ uint32_t outputChannels = 2; // stereo for now
+ std::vector<int> Pvalues;
+ const char* outputFilename = NULL;
+ const char* auxFilename = NULL;
+ std::vector<int32_t> names;
+ std::vector<SignalProvider> providers;
+ std::vector<audio_format_t> formats;
+
+ for (int ch; (ch = getopt(argc, argv, "fmc:s:o:a:P:")) != -1;) {
+ switch (ch) {
+ case 'f':
+ useInputFloat = true;
+ break;
+ case 'm':
+ useMixerFloat = true;
+ break;
+ case 'c':
+ outputChannels = atoi(optarg);
+ break;
+ case 's':
+ outputSampleRate = atoi(optarg);
+ break;
+ case 'o':
+ outputFilename = optarg;
+ break;
+ case 'a':
+ auxFilename = optarg;
+ break;
+ case 'P':
+ if (parseCSV(optarg, Pvalues) < 0) {
+ fprintf(stderr, "incorrect syntax for -P option\n");
+ return EXIT_FAILURE;
+ }
+ break;
+ case '?':
+ default:
+ usage(progname);
+ return EXIT_FAILURE;
+ }
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc == 0) {
+ usage(progname);
+ return EXIT_FAILURE;
+ }
+ if ((unsigned)argc > AudioMixer::MAX_NUM_TRACKS) {
+ fprintf(stderr, "too many tracks: %d > %u", argc, AudioMixer::MAX_NUM_TRACKS);
+ return EXIT_FAILURE;
+ }
+
+ size_t outputFrames = 0;
+
+ // create providers for each track
+ names.resize(argc);
+ providers.resize(argc);
+ formats.resize(argc);
+ for (int i = 0; i < argc; ++i) {
+ static const char chirp[] = "chirp:";
+ static const char sine[] = "sine:";
+ static const double kSeconds = 1;
+ bool useFloat = useInputFloat;
+
+ if (!strncmp(argv[i], chirp, strlen(chirp))) {
+ std::vector<int> v;
+ const char *s = parseFormat(argv[i] + strlen(chirp), &useFloat);
+
+ parseCSV(s, v);
+ if (v.size() == 2) {
+ printf("creating chirp(%d %d)\n", v[0], v[1]);
+ if (useFloat) {
+ providers[i].setChirp<float>(v[0], 0, v[1]/2, v[1], kSeconds);
+ formats[i] = AUDIO_FORMAT_PCM_FLOAT;
+ } else {
+ providers[i].setChirp<int16_t>(v[0], 0, v[1]/2, v[1], kSeconds);
+ formats[i] = AUDIO_FORMAT_PCM_16_BIT;
+ }
+ providers[i].setIncr(Pvalues);
+ } else {
+ fprintf(stderr, "malformed input '%s'\n", argv[i]);
+ }
+ } else if (!strncmp(argv[i], sine, strlen(sine))) {
+ std::vector<int> v;
+ const char *s = parseFormat(argv[i] + strlen(sine), &useFloat);
+
+ parseCSV(s, v);
+ if (v.size() == 3) {
+ printf("creating sine(%d %d %d)\n", v[0], v[1], v[2]);
+ if (useFloat) {
+ providers[i].setSine<float>(v[0], v[1], v[2], kSeconds);
+ formats[i] = AUDIO_FORMAT_PCM_FLOAT;
+ } else {
+ providers[i].setSine<int16_t>(v[0], v[1], v[2], kSeconds);
+ formats[i] = AUDIO_FORMAT_PCM_16_BIT;
+ }
+ providers[i].setIncr(Pvalues);
+ } else {
+ fprintf(stderr, "malformed input '%s'\n", argv[i]);
+ }
+ } else {
+ printf("creating filename(%s)\n", argv[i]);
+ if (useInputFloat) {
+ providers[i].setFile<float>(argv[i]);
+ formats[i] = AUDIO_FORMAT_PCM_FLOAT;
+ } else {
+ providers[i].setFile<short>(argv[i]);
+ formats[i] = AUDIO_FORMAT_PCM_16_BIT;
+ }
+ providers[i].setIncr(Pvalues);
+ }
+ // calculate the number of output frames
+ size_t nframes = (int64_t) providers[i].getNumFrames() * outputSampleRate
+ / providers[i].getSampleRate();
+ if (i == 0 || outputFrames > nframes) { // choose minimum for outputFrames
+ outputFrames = nframes;
+ }
+ }
+
+ // create the output buffer.
+ const size_t outputFrameSize = outputChannels
+ * (useMixerFloat ? sizeof(float) : sizeof(int16_t));
+ const size_t outputSize = outputFrames * outputFrameSize;
+ const audio_channel_mask_t outputChannelMask =
+ audio_channel_out_mask_from_count(outputChannels);
+ void *outputAddr = NULL;
+ (void) posix_memalign(&outputAddr, 32, outputSize);
+ memset(outputAddr, 0, outputSize);
+
+ // create the aux buffer, if needed.
+ const size_t auxFrameSize = sizeof(int32_t); // Q4.27 always
+ const size_t auxSize = outputFrames * auxFrameSize;
+ void *auxAddr = NULL;
+ if (auxFilename) {
+ (void) posix_memalign(&auxAddr, 32, auxSize);
+ memset(auxAddr, 0, auxSize);
+ }
+
+ // create the mixer.
+ const size_t mixerFrameCount = 320; // typical numbers may range from 240 or 960
+ AudioMixer *mixer = new AudioMixer(mixerFrameCount, outputSampleRate);
+ audio_format_t mixerFormat = useMixerFloat
+ ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
+ float f = AudioMixer::UNITY_GAIN_FLOAT / providers.size(); // normalize volume by # tracks
+ static float f0; // zero
+
+ // set up the tracks.
+ for (size_t i = 0; i < providers.size(); ++i) {
+ //printf("track %d out of %d\n", i, providers.size());
+ uint32_t channelMask = audio_channel_out_mask_from_count(providers[i].getNumChannels());
+ int32_t name = mixer->getTrackName(channelMask,
+ formats[i], AUDIO_SESSION_OUTPUT_MIX);
+ ALOG_ASSERT(name >= 0);
+ names[i] = name;
+ mixer->setBufferProvider(name, &providers[i]);
+ mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
+ (void *)outputAddr);
+ mixer->setParameter(
+ name,
+ AudioMixer::TRACK,
+ AudioMixer::MIXER_FORMAT,
+ (void *)(uintptr_t)mixerFormat);
+ mixer->setParameter(
+ name,
+ AudioMixer::TRACK,
+ AudioMixer::FORMAT,
+ (void *)(uintptr_t)formats[i]);
+ mixer->setParameter(
+ name,
+ AudioMixer::TRACK,
+ AudioMixer::MIXER_CHANNEL_MASK,
+ (void *)(uintptr_t)outputChannelMask);
+ mixer->setParameter(
+ name,
+ AudioMixer::TRACK,
+ AudioMixer::CHANNEL_MASK,
+ (void *)(uintptr_t)channelMask);
+ mixer->setParameter(
+ name,
+ AudioMixer::RESAMPLE,
+ AudioMixer::SAMPLE_RATE,
+ (void *)(uintptr_t)providers[i].getSampleRate());
+ if (useRamp) {
+ mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &f0);
+ mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &f0);
+ mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::VOLUME0, &f);
+ mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::VOLUME1, &f);
+ } else {
+ mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &f);
+ mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &f);
+ }
+ if (auxFilename) {
+ mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::AUX_BUFFER,
+ (void *) auxAddr);
+ mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::AUXLEVEL, &f0);
+ mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::AUXLEVEL, &f);
+ }
+ mixer->enable(name);
+ }
+
+ // pump the mixer to process data.
+ size_t i;
+ for (i = 0; i < outputFrames - mixerFrameCount; i += mixerFrameCount) {
+ for (size_t j = 0; j < names.size(); ++j) {
+ mixer->setParameter(names[j], AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
+ (char *) outputAddr + i * outputFrameSize);
+ if (auxFilename) {
+ mixer->setParameter(names[j], AudioMixer::TRACK, AudioMixer::AUX_BUFFER,
+ (char *) auxAddr + i * auxFrameSize);
+ }
+ }
+ mixer->process();
+ }
+ outputFrames = i; // reset output frames to the data actually produced.
+
+ // write to files
+ writeFile(outputFilename, outputAddr,
+ outputSampleRate, outputChannels, outputFrames, useMixerFloat);
+ if (auxFilename) {
+ // Aux buffer is always in q4_27 format for now.
+ // memcpy_to_i16_from_q4_27(), but with stereo frame count (not sample count)
+ ditherAndClamp((int32_t*)auxAddr, (int32_t*)auxAddr, outputFrames >> 1);
+ writeFile(auxFilename, auxAddr, outputSampleRate, 1, outputFrames, false);
+ }
+
+ delete mixer;
+ free(outputAddr);
+ free(auxAddr);
+ return EXIT_SUCCESS;
+}
diff --git a/media/libaudioprocessing/tests/test-resampler.cpp b/media/libaudioprocessing/tests/test-resampler.cpp
new file mode 100644
index 0000000..fbc9326
--- /dev/null
+++ b/media/libaudioprocessing/tests/test-resampler.cpp
@@ -0,0 +1,515 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <time.h>
+#include <math.h>
+#include <audio_utils/primitives.h>
+#include <audio_utils/sndfile.h>
+#include <utils/Vector.h>
+#include <media/AudioBufferProvider.h>
+#include <media/AudioResampler.h>
+
+using namespace android;
+
+static bool gVerbose = false;
+
// Print the test-resampler command-line help to stderr.
// Always returns -1 so callers can write "return usage(progname);".
static int usage(const char* name) {
    fprintf(stderr,"Usage: %s [-p] [-f] [-F] [-v] [-c channels]"
            " [-q {dq|lq|mq|hq|vhq|dlq|dmq|dhq}]"
            " [-i input-sample-rate] [-o output-sample-rate]"
            " [-O csv] [-P csv] [<input-file>]"
            " <output-file>\n", name);
    fprintf(stderr," -p enable profiling\n");
    fprintf(stderr," -f enable filter profiling\n");
    // BUG FIX: this line was missing its trailing '\n', which ran the -F
    // description together with the -v line in the help output.
    fprintf(stderr," -F enable floating point -q {dlq|dmq|dhq} only\n");
    fprintf(stderr," -v verbose : log buffer provider calls\n");
    fprintf(stderr," -c # channels (1-2 for lq|mq|hq; 1-8 for dlq|dmq|dhq)\n");
    fprintf(stderr," -q resampler quality\n");
    fprintf(stderr," dq : default quality\n");
    fprintf(stderr," lq : low quality\n");
    fprintf(stderr," mq : medium quality\n");
    fprintf(stderr," hq : high quality\n");
    fprintf(stderr," vhq : very high quality\n");
    fprintf(stderr," dlq : dynamic low quality\n");
    fprintf(stderr," dmq : dynamic medium quality\n");
    fprintf(stderr," dhq : dynamic high quality\n");
    fprintf(stderr," -i input file sample rate (ignored if input file is specified)\n");
    fprintf(stderr," -o output file sample rate\n");
    fprintf(stderr," -O # frames output per call to resample() in CSV format\n");
    fprintf(stderr," -P # frames provided per call to resample() in CSV format\n");
    return -1;
}
+
+// Convert a list of integers in CSV format to a Vector of those values.
+// Returns the number of elements in the list, or -1 on error.
+int parseCSV(const char *string, Vector<int>& values)
+{
+ // pass 1: count the number of values and do syntax check
+ size_t numValues = 0;
+ bool hadDigit = false;
+ for (const char *p = string; ; ) {
+ switch (*p++) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ hadDigit = true;
+ break;
+ case '\0':
+ if (hadDigit) {
+ // pass 2: allocate and initialize vector of values
+ values.resize(++numValues);
+ values.editItemAt(0) = atoi(p = optarg);
+ for (size_t i = 1; i < numValues; ) {
+ if (*p++ == ',') {
+ values.editItemAt(i++) = atoi(p);
+ }
+ }
+ return numValues;
+ }
+ // fall through
+ case ',':
+ if (hadDigit) {
+ hadDigit = false;
+ numValues++;
+ break;
+ }
+ // fall through
+ default:
+ return -1;
+ }
+ }
+}
+
+// test-resampler entry point: reads (or synthesizes) a PCM signal, pushes it
+// through an AudioResampler at the requested quality/rates, optionally
+// profiles filter generation and resampling speed, and writes a 16-bit WAV.
+int main(int argc, char* argv[]) {
+ const char* const progname = argv[0];
+ bool profileResample = false;
+ bool profileFilter = false;
+ bool useFloat = false;
+ int channels = 1;
+ int input_freq = 0;
+ int output_freq = 0;
+ AudioResampler::src_quality quality = AudioResampler::DEFAULT_QUALITY;
+ Vector<int> Ovalues;
+ Vector<int> Pvalues;
+
+ int ch;
+ while ((ch = getopt(argc, argv, "pfFvc:q:i:o:O:P:")) != -1) {
+ switch (ch) {
+ case 'p':
+ profileResample = true;
+ break;
+ case 'f':
+ profileFilter = true;
+ break;
+ case 'F':
+ useFloat = true;
+ break;
+ case 'v':
+ gVerbose = true;
+ break;
+ case 'c':
+ channels = atoi(optarg);
+ break;
+ case 'q':
+ if (!strcmp(optarg, "dq"))
+ quality = AudioResampler::DEFAULT_QUALITY;
+ else if (!strcmp(optarg, "lq"))
+ quality = AudioResampler::LOW_QUALITY;
+ else if (!strcmp(optarg, "mq"))
+ quality = AudioResampler::MED_QUALITY;
+ else if (!strcmp(optarg, "hq"))
+ quality = AudioResampler::HIGH_QUALITY;
+ else if (!strcmp(optarg, "vhq"))
+ quality = AudioResampler::VERY_HIGH_QUALITY;
+ else if (!strcmp(optarg, "dlq"))
+ quality = AudioResampler::DYN_LOW_QUALITY;
+ else if (!strcmp(optarg, "dmq"))
+ quality = AudioResampler::DYN_MED_QUALITY;
+ else if (!strcmp(optarg, "dhq"))
+ quality = AudioResampler::DYN_HIGH_QUALITY;
+ else {
+ usage(progname);
+ return -1;
+ }
+ break;
+ case 'i':
+ input_freq = atoi(optarg);
+ break;
+ case 'o':
+ output_freq = atoi(optarg);
+ break;
+ case 'O':
+ if (parseCSV(optarg, Ovalues) < 0) {
+ fprintf(stderr, "incorrect syntax for -O option\n");
+ return -1;
+ }
+ break;
+ case 'P':
+ if (parseCSV(optarg, Pvalues) < 0) {
+ fprintf(stderr, "incorrect syntax for -P option\n");
+ return -1;
+ }
+ break;
+ case '?':
+ default:
+ usage(progname);
+ return -1;
+ }
+ }
+
+ // legacy resamplers handle at most stereo; dynamic resamplers up to 8 ch
+ if (channels < 1
+ || channels > (quality < AudioResampler::DYN_LOW_QUALITY ? 2 : 8)) {
+ fprintf(stderr, "invalid number of audio channels %d\n", channels);
+ return -1;
+ }
+ if (useFloat && quality < AudioResampler::DYN_LOW_QUALITY) {
+ fprintf(stderr, "float processing is only possible for dynamic resamplers\n");
+ return -1;
+ }
+
+ argc -= optind;
+ argv += optind;
+
+ const char* file_in = NULL;
+ const char* file_out = NULL;
+ if (argc == 1) {
+ file_out = argv[0];
+ } else if (argc == 2) {
+ file_in = argv[0];
+ file_out = argv[1];
+ } else {
+ usage(progname);
+ return -1;
+ }
+
+ // ----------------------------------------------------------
+
+ // load the input from file, or synthesize a quadratic chirp (sin(pi*k*t^2))
+ size_t input_size;
+ void* input_vaddr;
+ if (argc == 2) {
+ SF_INFO info;
+ info.format = 0;
+ SNDFILE *sf = sf_open(file_in, SFM_READ, &info);
+ if (sf == NULL) {
+ perror(file_in);
+ return EXIT_FAILURE;
+ }
+ input_size = info.frames * info.channels * sizeof(short);
+ input_vaddr = malloc(input_size);
+ (void) sf_readf_short(sf, (short *) input_vaddr, info.frames);
+ sf_close(sf);
+ channels = info.channels;
+ input_freq = info.samplerate;
+ } else {
+ // data for testing is exactly (input sampling rate/1000)/2 seconds
+ // so 44.1khz input is 22.05 seconds
+ double k = 1000; // Hz / s
+ double time = (input_freq / 2) / k;
+ size_t input_frames = size_t(input_freq * time);
+ input_size = channels * sizeof(int16_t) * input_frames;
+ input_vaddr = malloc(input_size);
+ int16_t* in = (int16_t*)input_vaddr;
+ for (size_t i=0 ; i<input_frames ; i++) {
+ double t = double(i) / input_freq;
+ double y = sin(M_PI * k * t * t);
+ int16_t yi = floor(y * 32767.0 + 0.5);
+ for (int j = 0; j < channels; j++) {
+ // each additional channel is progressively attenuated
+ in[i*channels + j] = yi / (1 + j);
+ }
+ }
+ }
+ size_t input_framesize = channels * sizeof(int16_t);
+ size_t input_frames = input_size / input_framesize;
+
+ // For float processing, convert input int16_t to float array
+ if (useFloat) {
+ void *new_vaddr;
+
+ input_framesize = channels * sizeof(float);
+ input_size = input_frames * input_framesize;
+ new_vaddr = malloc(input_size);
+ memcpy_to_float_from_i16(reinterpret_cast<float*>(new_vaddr),
+ reinterpret_cast<int16_t*>(input_vaddr), input_frames * channels);
+ free(input_vaddr);
+ input_vaddr = new_vaddr;
+ }
+
+ // ----------------------------------------------------------
+
+ // Feeds the input buffer to the resampler, optionally limiting each
+ // getNextBuffer() to the frame counts listed in Pvalues (round robin)
+ // to exercise partial-buffer code paths.
+ class Provider: public AudioBufferProvider {
+ const void* mAddr; // base address
+ const size_t mNumFrames; // total frames
+ const size_t mFrameSize; // size of each frame in bytes
+ size_t mNextFrame; // index of next frame to provide
+ size_t mUnrel; // number of frames not yet released
+ const Vector<int> mPvalues; // number of frames provided per call
+ size_t mNextPidx; // index of next entry in mPvalues to use
+ public:
+ Provider(const void* addr, size_t frames, size_t frameSize, const Vector<int>& Pvalues)
+ : mAddr(addr),
+ mNumFrames(frames),
+ mFrameSize(frameSize),
+ mNextFrame(0), mUnrel(0), mPvalues(Pvalues), mNextPidx(0) {
+ }
+ virtual status_t getNextBuffer(Buffer* buffer) {
+ size_t requestedFrames = buffer->frameCount;
+ if (requestedFrames > mNumFrames - mNextFrame) {
+ buffer->frameCount = mNumFrames - mNextFrame;
+ }
+ if (!mPvalues.isEmpty()) {
+ size_t provided = mPvalues[mNextPidx++];
+ // NOTE: this printf is unconditional (not gated on gVerbose)
+ printf("mPvalue[%zu]=%zu not %zu\n", mNextPidx-1, provided, buffer->frameCount);
+ if (provided < buffer->frameCount) {
+ buffer->frameCount = provided;
+ }
+ if (mNextPidx >= mPvalues.size()) {
+ mNextPidx = 0;
+ }
+ }
+ if (gVerbose) {
+ printf("getNextBuffer() requested %zu frames out of %zu frames available,"
+ " and returned %zu frames\n",
+ requestedFrames, (size_t) (mNumFrames - mNextFrame), buffer->frameCount);
+ }
+ mUnrel = buffer->frameCount;
+ if (buffer->frameCount > 0) {
+ buffer->raw = (char *)mAddr + mFrameSize * mNextFrame;
+ return NO_ERROR;
+ } else {
+ buffer->raw = NULL;
+ return NOT_ENOUGH_DATA;
+ }
+ }
+ virtual void releaseBuffer(Buffer* buffer) {
+ if (buffer->frameCount > mUnrel) {
+ fprintf(stderr, "ERROR releaseBuffer() released %zu frames but only %zu available "
+ "to release\n", buffer->frameCount, mUnrel);
+ mNextFrame += mUnrel;
+ mUnrel = 0;
+ } else {
+ if (gVerbose) {
+ printf("releaseBuffer() released %zu frames out of %zu frames available "
+ "to release\n", buffer->frameCount, mUnrel);
+ }
+ mNextFrame += buffer->frameCount;
+ mUnrel -= buffer->frameCount;
+ }
+ buffer->frameCount = 0;
+ buffer->raw = NULL;
+ }
+ void reset() {
+ mNextFrame = 0;
+ }
+ } provider(input_vaddr, input_frames, input_framesize, Pvalues);
+
+ if (gVerbose) {
+ printf("%zu input frames\n", input_frames);
+ }
+
+ audio_format_t format = useFloat ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
+ int output_channels = channels > 2 ? channels : 2; // output is at least stereo samples
+ size_t output_framesize = output_channels * (useFloat ? sizeof(float) : sizeof(int32_t));
+ size_t output_frames = ((int64_t) input_frames * output_freq) / input_freq;
+ size_t output_size = output_frames * output_framesize;
+
+ if (profileFilter) {
+ // Check how fast sample rate changes are that require filter changes.
+ // The delta sample rate changes must indicate a downsampling ratio,
+ // and must be larger than 10% changes.
+ //
+ // On fast devices, filters should be generated between 0.1ms - 1ms.
+ // (single threaded).
+ AudioResampler* resampler = AudioResampler::create(format, channels,
+ 8000, quality);
+ int looplimit = 100;
+ timespec start, end;
+ clock_gettime(CLOCK_MONOTONIC, &start);
+ for (int i = 0; i < looplimit; ++i) {
+ resampler->setSampleRate(9000);
+ resampler->setSampleRate(12000);
+ resampler->setSampleRate(20000);
+ resampler->setSampleRate(30000);
+ }
+ clock_gettime(CLOCK_MONOTONIC, &end);
+ int64_t start_ns = start.tv_sec * 1000000000LL + start.tv_nsec;
+ int64_t end_ns = end.tv_sec * 1000000000LL + end.tv_nsec;
+ int64_t time = end_ns - start_ns;
+ printf("%.2f sample rate changes with filter calculation/sec\n",
+ looplimit * 4 / (time / 1e9));
+
+ // Check how fast sample rate changes are without filter changes.
+ // This should be very fast, probably 0.1us - 1us per sample rate
+ // change.
+ resampler->setSampleRate(1000);
+ looplimit = 1000;
+ clock_gettime(CLOCK_MONOTONIC, &start);
+ for (int i = 0; i < looplimit; ++i) {
+ resampler->setSampleRate(1000+i);
+ }
+ clock_gettime(CLOCK_MONOTONIC, &end);
+ start_ns = start.tv_sec * 1000000000LL + start.tv_nsec;
+ end_ns = end.tv_sec * 1000000000LL + end.tv_nsec;
+ time = end_ns - start_ns;
+ printf("%.2f sample rate changes without filter calculation/sec\n",
+ looplimit / (time / 1e9));
+ resampler->reset();
+ delete resampler;
+ }
+
+ void* output_vaddr = malloc(output_size);
+ AudioResampler* resampler = AudioResampler::create(format, channels,
+ output_freq, quality);
+
+ resampler->setSampleRate(input_freq);
+ resampler->setVolume(AudioResampler::UNITY_GAIN_FLOAT, AudioResampler::UNITY_GAIN_FLOAT);
+
+ if (profileResample) {
+ /*
+ * For profiling on mobile devices, upon experimentation
+ * it is better to run a few trials with a shorter loop limit,
+ * and take the minimum time.
+ *
+ * Long tests can cause CPU temperature to build up and thermal throttling
+ * to reduce CPU frequency.
+ *
+ * For frequency checks (index=0, or 1, etc.):
+ * "cat /sys/devices/system/cpu/cpu${index}/cpufreq/scaling_*_freq"
+ *
+ * For temperature checks (index=0, or 1, etc.):
+ * "cat /sys/class/thermal/thermal_zone${index}/temp"
+ *
+ * Another way to avoid thermal throttling is to fix the CPU frequency
+ * at a lower level which prevents excessive temperatures.
+ */
+ const int trials = 4;
+ const int looplimit = 4;
+ timespec start, end;
+ int64_t time = 0;
+
+ for (int n = 0; n < trials; ++n) {
+ clock_gettime(CLOCK_MONOTONIC, &start);
+ for (int i = 0; i < looplimit; ++i) {
+ resampler->resample((int*) output_vaddr, output_frames, &provider);
+ provider.reset(); // during benchmarking reset only the provider
+ }
+ clock_gettime(CLOCK_MONOTONIC, &end);
+ int64_t start_ns = start.tv_sec * 1000000000LL + start.tv_nsec;
+ int64_t end_ns = end.tv_sec * 1000000000LL + end.tv_nsec;
+ int64_t diff_ns = end_ns - start_ns;
+ if (n == 0 || diff_ns < time) {
+ time = diff_ns; // save the best out of our trials.
+ }
+ }
+ // Mfrms/s is "Millions of output frames per second".
+ printf("quality: %d channels: %d msec: %" PRId64 " Mfrms/s: %.2lf\n",
+ quality, channels, time/1000000, output_frames * looplimit / (time / 1e9) / 1e6);
+ resampler->reset();
+
+ // TODO fix legacy bug: reset does not clear buffers.
+ // delete and recreate resampler here.
+ delete resampler;
+ resampler = AudioResampler::create(format, channels,
+ output_freq, quality);
+ resampler->setSampleRate(input_freq);
+ resampler->setVolume(AudioResampler::UNITY_GAIN_FLOAT, AudioResampler::UNITY_GAIN_FLOAT);
+ }
+
+ // the actual conversion: resample in chunks sized by Ovalues (round robin),
+ // or in a single pass when no -O list was given.
+ memset(output_vaddr, 0, output_size);
+ if (gVerbose) {
+ printf("resample() %zu output frames\n", output_frames);
+ }
+ if (Ovalues.isEmpty()) {
+ Ovalues.push(output_frames);
+ }
+ for (size_t i = 0, j = 0; i < output_frames; ) {
+ size_t thisFrames = Ovalues[j++];
+ if (j >= Ovalues.size()) {
+ j = 0;
+ }
+ if (thisFrames == 0 || thisFrames > output_frames - i) {
+ thisFrames = output_frames - i;
+ }
+ resampler->resample((int*) output_vaddr + output_channels*i, thisFrames, &provider);
+ i += thisFrames;
+ }
+ if (gVerbose) {
+ printf("resample() complete\n");
+ }
+ resampler->reset();
+ if (gVerbose) {
+ printf("reset() complete\n");
+ }
+ delete resampler;
+ resampler = NULL;
+
+ // For float processing, convert output format from float to Q4.27,
+ // which is then converted to int16_t for final storage.
+ if (useFloat) {
+ memcpy_to_q4_27_from_float(reinterpret_cast<int32_t*>(output_vaddr),
+ reinterpret_cast<float*>(output_vaddr), output_frames * output_channels);
+ }
+
+ // mono takes left channel only (out of stereo output pair)
+ // stereo and multichannel preserve all channels.
+ int32_t* out = (int32_t*) output_vaddr;
+ int16_t* convert = (int16_t*) malloc(output_frames * channels * sizeof(int16_t));
+
+ const int volumeShift = 12; // shift requirement for Q4.27 to Q.15
+ // round to half towards zero and saturate at int16 (non-dithered)
+ const int roundVal = (1<<(volumeShift-1)) - 1; // volumePrecision > 0
+
+ for (size_t i = 0; i < output_frames; i++) {
+ for (int j = 0; j < channels; j++) {
+ int32_t s = out[i * output_channels + j] + roundVal; // add offset here
+ if (s < 0) {
+ s = (s + 1) >> volumeShift; // round to 0
+ if (s < -32768) {
+ s = -32768;
+ }
+ } else {
+ s = s >> volumeShift;
+ if (s > 32767) {
+ s = 32767;
+ }
+ }
+ convert[i * channels + j] = int16_t(s);
+ }
+ }
+
+ // write output to disk
+ SF_INFO info;
+ info.frames = 0;
+ info.samplerate = output_freq;
+ info.channels = channels;
+ info.format = SF_FORMAT_WAV | SF_FORMAT_PCM_16;
+ SNDFILE *sf = sf_open(file_out, SFM_WRITE, &info);
+ if (sf == NULL) {
+ perror(file_out);
+ return EXIT_FAILURE;
+ }
+ (void) sf_writef_short(sf, convert, output_frames);
+ sf_close(sf);
+
+ return EXIT_SUCCESS;
+}
diff --git a/services/audioflinger/tests/test_utils.h b/media/libaudioprocessing/tests/test_utils.h
similarity index 100%
rename from services/audioflinger/tests/test_utils.h
rename to media/libaudioprocessing/tests/test_utils.h
diff --git a/include/cpustats/CentralTendencyStatistics.h b/media/libcpustats/include/cpustats/CentralTendencyStatistics.h
similarity index 100%
rename from include/cpustats/CentralTendencyStatistics.h
rename to media/libcpustats/include/cpustats/CentralTendencyStatistics.h
diff --git a/include/cpustats/README.txt b/media/libcpustats/include/cpustats/README.txt
similarity index 100%
rename from include/cpustats/README.txt
rename to media/libcpustats/include/cpustats/README.txt
diff --git a/include/cpustats/ThreadCpuUsage.h b/media/libcpustats/include/cpustats/ThreadCpuUsage.h
similarity index 100%
rename from include/cpustats/ThreadCpuUsage.h
rename to media/libcpustats/include/cpustats/ThreadCpuUsage.h
diff --git a/media/libeffects/data/audio_effects.conf b/media/libeffects/data/audio_effects.conf
index c3c4b67..14a171b 100644
--- a/media/libeffects/data/audio_effects.conf
+++ b/media/libeffects/data/audio_effects.conf
@@ -10,33 +10,33 @@
# the HW and SW effects
#proxy {
- #path /system/lib/soundfx/libeffectproxy.so
+ #path /vendor/lib/soundfx/libeffectproxy.so
#}
# This is the SW implementation library of the effect
#libSW {
- #path /system/lib/soundfx/libswwrapper.so
+ #path /vendor/lib/soundfx/libswwrapper.so
#}
# This is the HW implementation library for the effect
#libHW {
- #path /system/lib/soundfx/libhwwrapper.so
+ #path /vendor/lib/soundfx/libhwwrapper.so
#}
bundle {
- path /system/lib/soundfx/libbundlewrapper.so
+ path /vendor/lib/soundfx/libbundlewrapper.so
}
reverb {
- path /system/lib/soundfx/libreverbwrapper.so
+ path /vendor/lib/soundfx/libreverbwrapper.so
}
visualizer {
- path /system/lib/soundfx/libvisualizer.so
+ path /vendor/lib/soundfx/libvisualizer.so
}
downmix {
- path /system/lib/soundfx/libdownmix.so
+ path /vendor/lib/soundfx/libdownmix.so
}
loudness_enhancer {
- path /system/lib/soundfx/libldnhncr.so
+ path /vendor/lib/soundfx/libldnhncr.so
}
}
@@ -44,7 +44,7 @@
# audio HAL implements support for default software audio pre-processing effects
#
# pre_processing {
-# path /system/lib/soundfx/libaudiopreprocessing.so
+# path /vendor/lib/soundfx/libaudiopreprocessing.so
# }
# list of effects to load. Each effect element must contain a "library" and a "uuid" element.
diff --git a/media/libeffects/downmix/Android.mk b/media/libeffects/downmix/Android.mk
index 8573f7e..73f6ef5 100644
--- a/media/libeffects/downmix/Android.mk
+++ b/media/libeffects/downmix/Android.mk
@@ -21,6 +21,7 @@
$(call include-path-for, audio-utils)
LOCAL_CFLAGS += -fvisibility=hidden
+LOCAL_CFLAGS += -Wall -Werror
LOCAL_HEADER_LIBRARIES += libhardware_headers
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libeffects/downmix/EffectDownmix.c b/media/libeffects/downmix/EffectDownmix.c
index 5b74845..f27d5ca 100644
--- a/media/libeffects/downmix/EffectDownmix.c
+++ b/media/libeffects/downmix/EffectDownmix.c
@@ -392,7 +392,6 @@
downmix_module_t *pDwmModule = (downmix_module_t *) self;
downmix_object_t *pDownmixer;
- int retsize;
if (pDwmModule == NULL || pDwmModule->context.state == DOWNMIX_STATE_UNINITIALIZED) {
return -EINVAL;
diff --git a/media/libeffects/factory/Android.bp b/media/libeffects/factory/Android.bp
index 01554c2..16680bd 100644
--- a/media/libeffects/factory/Android.bp
+++ b/media/libeffects/factory/Android.bp
@@ -9,7 +9,7 @@
// Effect factory library
cc_library_shared {
name: "libeffects",
- vendor_available: true,
+ vendor: true,
srcs: ["EffectsFactory.c"],
shared_libs: [
diff --git a/media/libeffects/factory/EffectsFactory.c b/media/libeffects/factory/EffectsFactory.c
index ba20ac2..37c0bb7 100644
--- a/media/libeffects/factory/EffectsFactory.c
+++ b/media/libeffects/factory/EffectsFactory.c
@@ -29,7 +29,7 @@
#include <cutils/properties.h>
#include <log/log.h>
-#include <audio_effects/audio_effects_conf.h>
+#include <system/audio_effects/audio_effects_conf.h>
static list_elem_t *gEffectList; // list of effect_entry_t: all currently created effects
static list_elem_t *gLibraryList; // list of lib_entry_t: all currently loaded libraries
@@ -48,6 +48,7 @@
static int gCanQueryEffect; // indicates that call to EffectQueryEffect() is valid, i.e. that the list of effects
// was not modified since last call to EffectQueryNumberEffects()
+static list_elem_t *gLibraryFailedList; //list of lib_failed_entry_t: libraries failed to load
/////////////////////////////////////////////////
// Local functions prototypes
@@ -509,34 +510,81 @@
return 0;
}
+#ifdef __LP64__
+// audio_effects.conf always specifies 32 bit lib path: convert to 64 bit path if needed
+static const char *kLibraryPathRoot[] =
+ {"/odm/lib64/soundfx", "/vendor/lib64/soundfx", "/system/lib64/soundfx"};
+#else
+static const char *kLibraryPathRoot[] =
+ {"/odm/lib/soundfx", "/vendor/lib/soundfx", "/system/lib/soundfx"};
+#endif
+
+static const int kLibraryPathRootSize =
+ (sizeof(kLibraryPathRoot) / sizeof(kLibraryPathRoot[0]));
+
+// Resolves the library path passed as lib_path_in: extracts the library name from the
+// "/lib/soundfx/" component and searches the standard effect library directories in order
+// of preference, returning the first accessible path in lib_path_out
+int checkLibraryPath(const char *lib_path_in, char *lib_path_out) {
+ char *str;
+ const char *lib_name;
+ size_t len;
+
+ if (lib_path_in == NULL || lib_path_out == NULL) {
+ return -EINVAL;
+ }
+
+ strlcpy(lib_path_out, lib_path_in, PATH_MAX);
+
+ // Locate the soundfx directory marker in the input path
+ str = strstr(lib_path_out, "/lib/soundfx/");
+ if (str == NULL) {
+ return -EINVAL;
+ }
+
+ // Extract library name from input path
+ len = str - lib_path_out;
+ lib_name = lib_path_in + len + strlen("/lib/soundfx/");
+
+ // Then try with library name and standard path names in order of preference
+ for (int i = 0; i < kLibraryPathRootSize; i++) {
+ char path[PATH_MAX];
+
+ snprintf(path,
+ PATH_MAX,
+ "%s/%s",
+ kLibraryPathRoot[i],
+ lib_name);
+ if (F_OK == access(path, 0)) {
+ strcpy(lib_path_out, path);
+ ALOGW_IF(strncmp(lib_path_out, lib_path_in, PATH_MAX) != 0,
+ "checkLibraryPath() corrected library path %s to %s", lib_path_in, lib_path_out);
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+
+
int loadLibrary(cnode *root, const char *name)
{
cnode *node;
- void *hdl;
+ void *hdl = NULL;
audio_effect_library_t *desc;
list_elem_t *e;
lib_entry_t *l;
char path[PATH_MAX];
- char *str;
- size_t len;
node = config_find(root, PATH_TAG);
if (node == NULL) {
return -EINVAL;
}
- // audio_effects.conf always specifies 32 bit lib path: convert to 64 bit path if needed
- strlcpy(path, node->value, PATH_MAX);
-#ifdef __LP64__
- str = strstr(path, "/lib/");
- if (str == NULL)
- return -EINVAL;
- len = str - path;
- path[len] = '\0';
- strlcat(path, "/lib64/", PATH_MAX);
- strlcat(path, node->value + len + strlen("/lib/"), PATH_MAX);
-#endif
- if (strlen(path) >= PATH_MAX - 1)
- return -EINVAL;
+
+ if (checkLibraryPath((const char *)node->value, path) != 0) {
+ ALOGW("loadLibrary() could not find library %s", path);
+ goto error;
+ }
hdl = dlopen(path, RTLD_NOW);
if (hdl == NULL) {
@@ -584,6 +632,17 @@
if (hdl != NULL) {
dlclose(hdl);
}
+ // add an entry for this failed library to gLibraryFailedList
+ lib_failed_entry_t *fl = malloc(sizeof(lib_failed_entry_t));
+ fl->name = strndup(name, PATH_MAX);
+ fl->path = strndup(path, PATH_MAX);
+
+ list_elem_t *fe = malloc(sizeof(list_elem_t));
+ fe->object = fl;
+ fe->next = gLibraryFailedList;
+ gLibraryFailedList = fe;
+ ALOGV("getLibrary() linked error in library %p for path %s", fl, path);
+
return -EINVAL;
}
@@ -986,16 +1045,31 @@
int EffectDumpEffects(int fd) {
char s[512];
+
+ list_elem_t *fe = gLibraryFailedList;
+ lib_failed_entry_t *fl = NULL;
+
+ dprintf(fd, "Libraries NOT loaded:\n");
+
+ while (fe) {
+ fl = (lib_failed_entry_t *)fe->object;
+ dprintf(fd, " Library %s\n", fl->name);
+ dprintf(fd, " path: %s\n", fl->path);
+ fe = fe->next;
+ }
+
list_elem_t *e = gLibraryList;
lib_entry_t *l = NULL;
effect_descriptor_t *d = NULL;
int found = 0;
int ret = 0;
+ dprintf(fd, "Libraries loaded:\n");
while (e) {
l = (lib_entry_t *)e->object;
list_elem_t *efx = l->effects;
- dprintf(fd, "Library %s\n", l->name);
+ dprintf(fd, " Library %s\n", l->name);
+ dprintf(fd, " path: %s\n", l->path);
if (!efx) {
dprintf(fd, " (no effects)\n");
}
diff --git a/media/libeffects/factory/EffectsFactory.h b/media/libeffects/factory/EffectsFactory.h
index b7936e0..72e0931 100644
--- a/media/libeffects/factory/EffectsFactory.h
+++ b/media/libeffects/factory/EffectsFactory.h
@@ -58,6 +58,11 @@
lib_entry_t *lib;
} effect_entry_t;
+typedef struct lib_failed_entry_s {
+ char *name;
+ char *path;
+} lib_failed_entry_t;
+
// Structure used to store the lib entry
// and the descriptor of the sub effects.
// The library entry is to be stored in case of
@@ -69,6 +74,7 @@
} sub_effect_entry_t;
+
////////////////////////////////////////////////////////////////////////////////
//
// Function: EffectGetSubEffects
diff --git a/media/libeffects/loudness/Android.mk b/media/libeffects/loudness/Android.mk
index a1c4495..712cbd5 100644
--- a/media/libeffects/loudness/Android.mk
+++ b/media/libeffects/loudness/Android.mk
@@ -9,6 +9,7 @@
dsp/core/dynamic_range_compression.cpp
LOCAL_CFLAGS+= -O2 -fvisibility=hidden
+LOCAL_CFLAGS += -Wall -Werror
LOCAL_SHARED_LIBRARIES := \
libcutils \
diff --git a/media/libeffects/loudness/EffectLoudnessEnhancer.cpp b/media/libeffects/loudness/EffectLoudnessEnhancer.cpp
index cf00e60..9d29cf1 100644
--- a/media/libeffects/loudness/EffectLoudnessEnhancer.cpp
+++ b/media/libeffects/loudness/EffectLoudnessEnhancer.cpp
@@ -198,7 +198,6 @@
effect_handle_t *pHandle) {
ALOGV("LELib_Create()");
int ret;
- int i;
if (pHandle == NULL || uuid == NULL) {
return -EINVAL;
@@ -315,7 +314,6 @@
void *pCmdData, uint32_t *replySize, void *pReplyData) {
LoudnessEnhancerContext * pContext = (LoudnessEnhancerContext *)self;
- int retsize;
if (pContext == NULL || pContext->mState == LOUDNESS_ENHANCER_STATE_UNINITIALIZED) {
return -EINVAL;
diff --git a/media/libeffects/lvm/lib/Android.mk b/media/libeffects/lvm/lib/Android.mk
index 85e01e0..83e8288 100644
--- a/media/libeffects/lvm/lib/Android.mk
+++ b/media/libeffects/lvm/lib/Android.mk
@@ -121,6 +121,7 @@
$(LOCAL_PATH)/StereoWidening/lib
LOCAL_CFLAGS += -fvisibility=hidden
+LOCAL_CFLAGS += -Wall -Werror
include $(BUILD_STATIC_LIBRARY)
@@ -179,4 +180,5 @@
$(LOCAL_PATH)/Common/src
LOCAL_CFLAGS += -fvisibility=hidden
+LOCAL_CFLAGS += -Wall -Werror
include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libeffects/lvm/wrapper/Android.mk b/media/libeffects/lvm/wrapper/Android.mk
index 131fa64..efd30fb 100644
--- a/media/libeffects/lvm/wrapper/Android.mk
+++ b/media/libeffects/lvm/wrapper/Android.mk
@@ -11,6 +11,7 @@
Bundle/EffectBundle.cpp
LOCAL_CFLAGS += -fvisibility=hidden
+LOCAL_CFLAGS += -Wall -Werror
LOCAL_MODULE:= libbundlewrapper
@@ -43,6 +44,7 @@
Reverb/EffectReverb.cpp
LOCAL_CFLAGS += -fvisibility=hidden
+LOCAL_CFLAGS += -Wall -Werror
LOCAL_MODULE:= libreverbwrapper
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index c380bdc..df6501b 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -148,7 +148,10 @@
void *pParam,
uint32_t *pValueSize,
void *pValue);
-int Equalizer_setParameter (EffectContext *pContext, void *pParam, void *pValue);
+int Equalizer_setParameter (EffectContext *pContext,
+ void *pParam,
+ uint32_t valueSize,
+ void *pValue);
int Equalizer_getParameter (EffectContext *pContext,
void *pParam,
uint32_t *pValueSize,
@@ -340,8 +343,10 @@
}
delete pContext;
}
- *pHandle = (effect_handle_t)NULL;
+ if (pHandle != NULL)
+ *pHandle = (effect_handle_t)NULL;
} else {
+ if (pHandle != NULL)
*pHandle = (effect_handle_t)pContext;
}
ALOGV("\tEffectCreate end..\n\n");
@@ -501,8 +506,6 @@
//----------------------------------------------------------------------------
int LvmBundle_init(EffectContext *pContext){
- int status;
-
ALOGV("\tLvmBundle_init start");
pContext->config.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
@@ -716,7 +719,6 @@
int frameCount,
EffectContext *pContext){
- LVM_ControlParams_t ActiveParams; /* Current control Parameters */
LVM_ReturnStatus_en LvmStatus = LVM_SUCCESS; /* Function call status */
LVM_INT16 *pOutTmp;
@@ -1040,7 +1042,6 @@
void LvmEffect_free(EffectContext *pContext){
LVM_ReturnStatus_en LvmStatus=LVM_SUCCESS; /* Function call status */
- LVM_ControlParams_t params; /* Control Parameters */
LVM_MemTab_t MemTab;
/* Free the algorithm memory */
@@ -1347,6 +1348,7 @@
case AUDIO_DEVICE_OUT_WIRED_HEADSET:
case AUDIO_DEVICE_OUT_WIRED_HEADPHONE:
case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES:
+ case AUDIO_DEVICE_OUT_USB_HEADSET:
return 0;
default :
return -EINVAL;
@@ -2015,8 +2017,6 @@
int status = 0;
int32_t *pParamTemp = (int32_t *)pParam;
int32_t param = *pParamTemp++;
- int32_t param2;
- char *name;
//ALOGV("\tBassBoost_getParameter start");
@@ -2133,7 +2133,6 @@
int status = 0;
int32_t *pParamTemp = (int32_t *)pParam;
int32_t param = *pParamTemp++;
- char *name;
//ALOGV("\tVirtualizer_getParameter start");
@@ -2290,7 +2289,6 @@
uint32_t *pValueSize,
void *pValue){
int status = 0;
- int bMute = 0;
int32_t *pParamTemp = (int32_t *)pParam;
int32_t param = *pParamTemp++;
int32_t param2;
@@ -2436,6 +2434,13 @@
}
break;
}
+
+ if (*pValueSize < 1) {
+ status = -EINVAL;
+ android_errorWriteLog(0x534e4554, "37536407");
+ break;
+ }
+
name = (char *)pValue;
strncpy(name, EqualizerGetPresetName(param2), *pValueSize - 1);
name[*pValueSize - 1] = 0;
@@ -2473,12 +2478,17 @@
// Inputs:
// pEqualizer - handle to instance data
// pParam - pointer to parameter
+// valueSize - value size
// pValue - pointer to value
+
//
// Outputs:
//
//----------------------------------------------------------------------------
-int Equalizer_setParameter (EffectContext *pContext, void *pParam, void *pValue){
+int Equalizer_setParameter (EffectContext *pContext,
+ void *pParam,
+ uint32_t valueSize,
+ void *pValue) {
int status = 0;
int32_t preset;
int32_t band;
@@ -2490,6 +2500,10 @@
//ALOGV("\tEqualizer_setParameter start");
switch (param) {
case EQ_PARAM_CUR_PRESET:
+ if (valueSize < sizeof(int16_t)) {
+ status = -EINVAL;
+ break;
+ }
preset = (int32_t)(*(uint16_t *)pValue);
//ALOGV("\tEqualizer_setParameter() EQ_PARAM_CUR_PRESET %d", preset);
@@ -2500,6 +2514,10 @@
EqualizerSetPreset(pContext, preset);
break;
case EQ_PARAM_BAND_LEVEL:
+ if (valueSize < sizeof(int16_t)) {
+ status = -EINVAL;
+ break;
+ }
band = *pParamTemp;
level = (int32_t)(*(int16_t *)pValue);
//ALOGV("\tEqualizer_setParameter() EQ_PARAM_BAND_LEVEL band %d, level %d", band, level);
@@ -2515,6 +2533,10 @@
break;
case EQ_PARAM_PROPERTIES: {
//ALOGV("\tEqualizer_setParameter() EQ_PARAM_PROPERTIES");
+ if (valueSize < sizeof(int16_t)) {
+ status = -EINVAL;
+ break;
+ }
int16_t *p = (int16_t *)pValue;
if ((int)p[0] >= EqualizerGetNumPresets()) {
status = -EINVAL;
@@ -2523,6 +2545,13 @@
if (p[0] >= 0) {
EqualizerSetPreset(pContext, (int)p[0]);
} else {
+ if (valueSize < (2 + FIVEBAND_NUMBANDS) * sizeof(int16_t)) {
+ android_errorWriteLog(0x534e4554, "37563371");
+ ALOGE("\tERROR Equalizer_setParameter() EQ_PARAM_PROPERTIES valueSize %d < %d",
+ (int)valueSize, (int)((2 + FIVEBAND_NUMBANDS) * sizeof(int16_t)));
+ status = -EINVAL;
+ break;
+ }
if ((int)p[1] != FIVEBAND_NUMBANDS) {
status = -EINVAL;
break;
@@ -2568,10 +2597,8 @@
uint32_t *pValueSize,
void *pValue){
int status = 0;
- int bMute = 0;
int32_t *pParamTemp = (int32_t *)pParam;
int32_t param = *pParamTemp++;;
- char *name;
//ALOGV("\tVolume_getParameter start");
@@ -2687,8 +2714,8 @@
case VOLUME_PARAM_ENABLESTEREOPOSITION:
positionEnabled = *(uint32_t *)pValue;
- status = VolumeEnableStereoPosition(pContext, positionEnabled);
- status = VolumeSetStereoPosition(pContext, pContext->pBundledContext->positionSaved);
+ (void) VolumeEnableStereoPosition(pContext, positionEnabled);
+ (void) VolumeSetStereoPosition(pContext, pContext->pBundledContext->positionSaved);
//ALOGV("\tVolume_setParameter() VOLUME_PARAM_ENABLESTEREOPOSITION called");
break;
@@ -2901,11 +2928,8 @@
audio_buffer_t *inBuffer,
audio_buffer_t *outBuffer){
EffectContext * pContext = (EffectContext *) self;
- LVM_ReturnStatus_en LvmStatus = LVM_SUCCESS; /* Function call status */
int status = 0;
int processStatus = 0;
- LVM_INT16 *in = (LVM_INT16 *)inBuffer->raw;
- LVM_INT16 *out = (LVM_INT16 *)outBuffer->raw;
//ALOGV("\tEffect_process Start : Enabled = %d Called = %d (%8d %8d %8d)",
//pContext->pBundledContext->NumberEffectsEnabled,pContext->pBundledContext->NumberEffectsCalled,
@@ -3034,7 +3058,6 @@
uint32_t *replySize,
void *pReplyData){
EffectContext * pContext = (EffectContext *) self;
- int retsize;
//ALOGV("\t\nEffect_command start");
@@ -3301,7 +3324,8 @@
*(int *)pReplyData = android::Equalizer_setParameter(pContext,
(void *)p->data,
- p->data + p->psize);
+ p->vsize,
+ p->data + p->psize);
}
if(pContext->EffectType == LVM_VOLUME){
//ALOGV("\tVolume_command cmdCode Case: EFFECT_CMD_SET_PARAM start");
@@ -3433,7 +3457,6 @@
int16_t leftdB, rightdB;
int16_t maxdB, pandB;
int32_t vol_ret[2] = {1<<24,1<<24}; // Apply no volume
- int status = 0;
LVM_ControlParams_t ActiveParams; /* Current control Parameters */
LVM_ReturnStatus_en LvmStatus=LVM_SUCCESS; /* Function call status */
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
index 9459b87..ee604eb 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
@@ -140,7 +140,7 @@
{120001, 460000},
{460001, 1800000},
{1800001, 7000000},
- {7000001, 1}};
+ {7000001, 20000000}};
//Note: If these frequencies change, please update LimitLevel values accordingly.
static const LVM_UINT16 EQNB_5BandPresetsFrequencies[] = {
diff --git a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
index fc82dd1..12a038f 100644
--- a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
+++ b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
@@ -327,6 +327,7 @@
} \
}
+#if 0
//----------------------------------------------------------------------------
// MonoTo2I_32()
//----------------------------------------------------------------------------
@@ -385,6 +386,7 @@
return;
}
+#endif
static inline int16_t clamp16(int32_t sample)
{
@@ -560,7 +562,6 @@
void Reverb_free(ReverbContext *pContext){
LVREV_ReturnStatus_en LvmStatus=LVREV_SUCCESS; /* Function call status */
- LVREV_ControlParams_st params; /* Control Parameters */
LVREV_MemoryTable_st MemTab;
/* Free the algorithm memory */
@@ -709,8 +710,6 @@
//----------------------------------------------------------------------------
int Reverb_init(ReverbContext *pContext){
- int status;
-
ALOGV("\tReverb_init start");
CHECK_ARG(pContext != NULL);
@@ -1543,7 +1542,6 @@
int status = 0;
int32_t *pParamTemp = (int32_t *)pParam;
int32_t param = *pParamTemp++;
- char *name;
t_reverb_settings *pProperties;
//ALOGV("\tReverb_getParameter start");
@@ -1899,7 +1897,6 @@
uint32_t *replySize,
void *pReplyData){
android::ReverbContext * pContext = (android::ReverbContext *) self;
- int retsize;
LVREV_ControlParams_st ActiveParams; /* Current control Parameters */
LVREV_ReturnStatus_en LvmStatus=LVREV_SUCCESS; /* Function call status */
diff --git a/media/libeffects/preprocessing/Android.mk b/media/libeffects/preprocessing/Android.mk
index dff2bc0..0502638 100644
--- a/media/libeffects/preprocessing/Android.mk
+++ b/media/libeffects/preprocessing/Android.mk
@@ -29,6 +29,7 @@
-DWEBRTC_POSIX
LOCAL_CFLAGS += -fvisibility=hidden
+LOCAL_CFLAGS += -Wall -Werror
LOCAL_HEADER_LIBRARIES += libhardware_headers
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libeffects/preprocessing/PreProcessing.cpp b/media/libeffects/preprocessing/PreProcessing.cpp
index f48bac1..f2844ed 100644
--- a/media/libeffects/preprocessing/PreProcessing.cpp
+++ b/media/libeffects/preprocessing/PreProcessing.cpp
@@ -531,6 +531,7 @@
break;
case AUDIO_DEVICE_OUT_WIRED_HEADSET:
case AUDIO_DEVICE_OUT_WIRED_HEADPHONE:
+ case AUDIO_DEVICE_OUT_USB_HEADSET:
default:
break;
}
@@ -933,7 +934,6 @@
int Session_SetConfig(preproc_session_t *session, effect_config_t *config)
{
- uint32_t sr;
uint32_t inCnl = audio_channel_count_from_in_mask(config->inputCfg.channels);
uint32_t outCnl = audio_channel_count_from_in_mask(config->outputCfg.channels);
@@ -1153,7 +1153,6 @@
preproc_session_t *PreProc_GetSession(int32_t procId, int32_t sessionId, int32_t ioId)
{
size_t i;
- int free = -1;
for (i = 0; i < PREPROC_NUM_SESSIONS; i++) {
if (sSessions[i].io == ioId) {
if (sSessions[i].createdMsk & (1 << procId)) {
@@ -1210,7 +1209,6 @@
audio_buffer_t *outBuffer)
{
preproc_effect_t * effect = (preproc_effect_t *)self;
- int status = 0;
if (effect == NULL){
ALOGV("PreProcessingFx_Process() ERROR effect == NULL");
@@ -1402,8 +1400,6 @@
void *pReplyData)
{
preproc_effect_t * effect = (preproc_effect_t *) self;
- int retsize;
- int status;
if (effect == NULL){
return -EINVAL;
@@ -1777,7 +1773,6 @@
audio_buffer_t *outBuffer __unused)
{
preproc_effect_t * effect = (preproc_effect_t *)self;
- int status = 0;
if (effect == NULL){
ALOGW("PreProcessingFx_ProcessReverse() ERROR effect == NULL");
@@ -1926,7 +1921,6 @@
int PreProcessingLib_Release(effect_handle_t interface)
{
- int status;
ALOGV("EffectRelease start %p", interface);
if (PreProc_Init() != 0) {
return sInitStatus;
diff --git a/media/libeffects/proxy/Android.mk b/media/libeffects/proxy/Android.mk
index eeba73f..c4de30d 100644
--- a/media/libeffects/proxy/Android.mk
+++ b/media/libeffects/proxy/Android.mk
@@ -23,6 +23,7 @@
EffectProxy.cpp
LOCAL_CFLAGS+= -fvisibility=hidden
+LOCAL_CFLAGS += -Wall -Werror
LOCAL_SHARED_LIBRARIES := liblog libcutils libutils libdl libeffects
diff --git a/media/libeffects/proxy/EffectProxy.cpp b/media/libeffects/proxy/EffectProxy.cpp
index 0eddc15..42e44f0 100644
--- a/media/libeffects/proxy/EffectProxy.cpp
+++ b/media/libeffects/proxy/EffectProxy.cpp
@@ -47,12 +47,6 @@
};
-static const effect_descriptor_t *const gDescriptors[] =
-{
- &gProxyDescriptor,
-};
-
-
int EffectProxyCreate(const effect_uuid_t *uuid,
int32_t sessionId,
int32_t ioId,
@@ -245,6 +239,11 @@
// pCmdData points to a memory holding effect_offload_param_t structure
if (cmdCode == EFFECT_CMD_OFFLOAD) {
ALOGV("Effect_command() cmdCode = EFFECT_CMD_OFFLOAD");
+ if (replySize == NULL || *replySize < sizeof(int)) {
+ ALOGV("effectsOffload: Effect_command: CMD_OFFLOAD has no reply");
+ android_errorWriteLog(0x534e4554, "32448121");
+ return FAILED_TRANSACTION;
+ }
if (cmdSize == 0 || pCmdData == NULL) {
ALOGV("effectsOffload: Effect_command: CMD_OFFLOAD has no data");
*(int*)pReplyData = FAILED_TRANSACTION;
diff --git a/media/libeffects/visualizer/Android.mk b/media/libeffects/visualizer/Android.mk
index 7ec71c9..70409de 100644
--- a/media/libeffects/visualizer/Android.mk
+++ b/media/libeffects/visualizer/Android.mk
@@ -8,6 +8,7 @@
EffectVisualizer.cpp
LOCAL_CFLAGS+= -O2 -fvisibility=hidden
+LOCAL_CFLAGS += -Wall -Werror
LOCAL_SHARED_LIBRARIES := \
libcutils \
diff --git a/media/libeffects/visualizer/EffectVisualizer.cpp b/media/libeffects/visualizer/EffectVisualizer.cpp
index 6a126ef..0e82339 100644
--- a/media/libeffects/visualizer/EffectVisualizer.cpp
+++ b/media/libeffects/visualizer/EffectVisualizer.cpp
@@ -237,7 +237,6 @@
int32_t /*ioId*/,
effect_handle_t *pHandle) {
int ret;
- int i;
if (pHandle == NULL || uuid == NULL) {
return -EINVAL;
@@ -419,7 +418,6 @@
void *pCmdData, uint32_t *replySize, void *pReplyData) {
VisualizerContext * pContext = (VisualizerContext *)self;
- int retsize;
if (pContext == NULL || pContext->mState == VISUALIZER_STATE_UNINITIALIZED) {
return -EINVAL;
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index 205b3cd..e9b99b4 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -1,20 +1,39 @@
-cc_library_static {
+cc_library_headers {
+ name: "libmedia_headers",
+ vendor_available: true,
+ export_include_dirs: ["include"],
+}
+
+cc_library {
name: "libmedia_helper",
- srcs: ["AudioParameter.cpp"],
+ vendor_available: true,
+ srcs: ["AudioParameter.cpp", "TypeConverter.cpp"],
cflags: [
"-Werror",
"-Wno-error=deprecated-declarations",
"-Wall",
],
+ shared: {
+ shared_libs: ["libutils", "liblog"],
+ },
clang: true,
}
cc_library_shared {
name: "libmedia",
+ aidl: {
+ local_include_dirs: ["aidl"],
+ export_aidl_headers: true,
+ },
+
srcs: [
+ "aidl/android/IGraphicBufferSource.aidl",
+ "aidl/android/IOMXBufferSource.aidl",
+
"IDataSource.cpp",
"IHDCP.cpp",
+ "BufferingSettings.cpp",
"mediaplayer.cpp",
"IMediaCodecList.cpp",
"IMediaCodecService.cpp",
@@ -33,6 +52,7 @@
"IResourceManagerClient.cpp",
"IResourceManagerService.cpp",
"IStreamSource.cpp",
+ "MediaCodecBuffer.cpp",
"MediaCodecInfo.cpp",
"MediaDefs.cpp",
"MediaUtils.cpp",
@@ -40,6 +60,7 @@
"mediarecorder.cpp",
"IMediaMetadataRetriever.cpp",
"mediametadataretriever.cpp",
+ "MidiDeviceInfo.cpp",
"MidiIoWrapper.cpp",
"JetPlayer.cpp",
"IOMX.cpp",
@@ -50,8 +71,14 @@
"MediaProfiles.cpp",
"MediaResource.cpp",
"MediaResourcePolicy.cpp",
+ "OMXBuffer.cpp",
"Visualizer.cpp",
"StringArray.cpp",
+ "omx/1.0/WGraphicBufferSource.cpp",
+ "omx/1.0/WOmx.cpp",
+ "omx/1.0/WOmxBufferSource.cpp",
+ "omx/1.0/WOmxNode.cpp",
+ "omx/1.0/WOmxObserver.cpp",
],
shared_libs: [
@@ -70,18 +97,33 @@
"libdl",
"libaudioutils",
"libaudioclient",
+ "libmedia_helper",
+ "libmediadrm",
+ "libmediametrics",
+ "libbase",
+ "libhidlbase",
+ "libhidltransport",
+ "libhwbinder",
+ "libhidlmemory",
+ "android.hidl.memory@1.0",
+ "android.hidl.token@1.0-utils",
+ "android.hardware.graphics.common@1.0",
+ "android.hardware.graphics.bufferqueue@1.0",
+ "android.hardware.media@1.0",
+ "android.hardware.media.omx@1.0",
],
- header_libs: ["libaudioeffects"],
-
export_shared_lib_headers: [
"libbinder",
"libicuuc",
"libicui18n",
+ "libsonivox",
+ "libmediadrm",
+ "android.hidl.token@1.0-utils",
+ "android.hardware.media.omx@1.0",
+ "android.hidl.memory@1.0",
],
- whole_static_libs: ["libmedia_helper"],
-
// for memory heap analysis
static_libs: [
"libc_malloc_debug_backtrace",
@@ -93,6 +135,10 @@
"frameworks/av/media/libstagefright",
],
+ export_include_dirs: [
+ "aidl",
+ "include",
+ ],
cflags: [
"-Werror",
"-Wno-error=deprecated-declarations",
@@ -104,5 +150,9 @@
"unsigned-integer-overflow",
"signed-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
}
diff --git a/media/libmedia/AudioParameter.cpp b/media/libmedia/AudioParameter.cpp
index 8c8cf45..65fc70b 100644
--- a/media/libmedia/AudioParameter.cpp
+++ b/media/libmedia/AudioParameter.cpp
@@ -19,8 +19,8 @@
#include <utils/Log.h>
-#include <hardware/audio.h>
#include <media/AudioParameter.h>
+#include <system/audio.h>
namespace android {
@@ -32,6 +32,19 @@
const char * const AudioParameter::keyFrameCount = AUDIO_PARAMETER_STREAM_FRAME_COUNT;
const char * const AudioParameter::keyInputSource = AUDIO_PARAMETER_STREAM_INPUT_SOURCE;
const char * const AudioParameter::keyScreenState = AUDIO_PARAMETER_KEY_SCREEN_STATE;
+const char * const AudioParameter::keyBtNrec = AUDIO_PARAMETER_KEY_BT_NREC;
+const char * const AudioParameter::keyHwAvSync = AUDIO_PARAMETER_HW_AV_SYNC;
+const char * const AudioParameter::keyMonoOutput = AUDIO_PARAMETER_MONO_OUTPUT;
+const char * const AudioParameter::keyStreamHwAvSync = AUDIO_PARAMETER_STREAM_HW_AV_SYNC;
+const char * const AudioParameter::keyStreamConnect = AUDIO_PARAMETER_DEVICE_CONNECT;
+const char * const AudioParameter::keyStreamDisconnect = AUDIO_PARAMETER_DEVICE_DISCONNECT;
+const char * const AudioParameter::keyStreamSupportedFormats = AUDIO_PARAMETER_STREAM_SUP_FORMATS;
+const char * const AudioParameter::keyStreamSupportedChannels = AUDIO_PARAMETER_STREAM_SUP_CHANNELS;
+const char * const AudioParameter::keyStreamSupportedSamplingRates =
+ AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES;
+const char * const AudioParameter::valueOn = AUDIO_PARAMETER_VALUE_ON;
+const char * const AudioParameter::valueOff = AUDIO_PARAMETER_VALUE_OFF;
+const char * const AudioParameter::valueListSeparator = AUDIO_PARAMETER_VALUE_LIST_SEPARATOR;
AudioParameter::AudioParameter(const String8& keyValuePairs)
{
@@ -70,15 +83,17 @@
mParameters.clear();
}
-String8 AudioParameter::toString()
+String8 AudioParameter::toStringImpl(bool useValues) const
{
String8 str = String8("");
size_t size = mParameters.size();
for (size_t i = 0; i < size; i++) {
str += mParameters.keyAt(i);
- str += "=";
- str += mParameters.valueAt(i);
+ if (useValues) {
+ str += "=";
+ str += mParameters.valueAt(i);
+ }
if (i < (size - 1)) str += ";";
}
return str;
@@ -95,6 +110,11 @@
}
}
+status_t AudioParameter::addKey(const String8& key)
+{
+ return add(key, String8());
+}
+
status_t AudioParameter::addInt(const String8& key, const int value)
{
char str[12];
@@ -127,7 +147,7 @@
}
}
-status_t AudioParameter::get(const String8& key, String8& value)
+status_t AudioParameter::get(const String8& key, String8& value) const
{
if (mParameters.indexOfKey(key) >= 0) {
value = mParameters.valueFor(key);
@@ -137,7 +157,7 @@
}
}
-status_t AudioParameter::getInt(const String8& key, int& value)
+status_t AudioParameter::getInt(const String8& key, int& value) const
{
String8 str8;
status_t result = get(key, str8);
@@ -153,7 +173,7 @@
return result;
}
-status_t AudioParameter::getFloat(const String8& key, float& value)
+status_t AudioParameter::getFloat(const String8& key, float& value) const
{
String8 str8;
status_t result = get(key, str8);
@@ -169,7 +189,17 @@
return result;
}
-status_t AudioParameter::getAt(size_t index, String8& key, String8& value)
+status_t AudioParameter::getAt(size_t index, String8& key) const
+{
+ if (mParameters.size() > index) {
+ key = mParameters.keyAt(index);
+ return NO_ERROR;
+ } else {
+ return BAD_VALUE;
+ }
+}
+
+status_t AudioParameter::getAt(size_t index, String8& key, String8& value) const
{
if (mParameters.size() > index) {
key = mParameters.keyAt(index);
diff --git a/media/libmedia/BufferingSettings.cpp b/media/libmedia/BufferingSettings.cpp
new file mode 100644
index 0000000..a69497e
--- /dev/null
+++ b/media/libmedia/BufferingSettings.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "BufferingSettings"
+//#define LOG_NDEBUG 0
+
+#include <binder/Parcel.h>
+
+#include <media/BufferingSettings.h>
+
+namespace android {
+
+// static
+bool BufferingSettings::IsValidBufferingMode(int mode) {
+ return (mode >= BUFFERING_MODE_NONE && mode < BUFFERING_MODE_COUNT);
+}
+
+// static
+bool BufferingSettings::IsTimeBasedBufferingMode(int mode) {
+ return (mode == BUFFERING_MODE_TIME_ONLY || mode == BUFFERING_MODE_TIME_THEN_SIZE);
+}
+
+// static
+bool BufferingSettings::IsSizeBasedBufferingMode(int mode) {
+ return (mode == BUFFERING_MODE_SIZE_ONLY || mode == BUFFERING_MODE_TIME_THEN_SIZE);
+}
+
+BufferingSettings::BufferingSettings()
+ : mInitialBufferingMode(BUFFERING_MODE_NONE),
+ mRebufferingMode(BUFFERING_MODE_NONE),
+ mInitialWatermarkMs(kNoWatermark),
+ mInitialWatermarkKB(kNoWatermark),
+ mRebufferingWatermarkLowMs(kNoWatermark),
+ mRebufferingWatermarkHighMs(kNoWatermark),
+ mRebufferingWatermarkLowKB(kNoWatermark),
+ mRebufferingWatermarkHighKB(kNoWatermark) { }
+
+status_t BufferingSettings::readFromParcel(const Parcel* parcel) {
+ if (parcel == nullptr) {
+ return BAD_VALUE;
+ }
+ mInitialBufferingMode = (BufferingMode)parcel->readInt32();
+ mRebufferingMode = (BufferingMode)parcel->readInt32();
+ mInitialWatermarkMs = parcel->readInt32();
+ mInitialWatermarkKB = parcel->readInt32();
+ mRebufferingWatermarkLowMs = parcel->readInt32();
+ mRebufferingWatermarkHighMs = parcel->readInt32();
+ mRebufferingWatermarkLowKB = parcel->readInt32();
+ mRebufferingWatermarkHighKB = parcel->readInt32();
+
+ return OK;
+}
+
+status_t BufferingSettings::writeToParcel(Parcel* parcel) const {
+ if (parcel == nullptr) {
+ return BAD_VALUE;
+ }
+ parcel->writeInt32(mInitialBufferingMode);
+ parcel->writeInt32(mRebufferingMode);
+ parcel->writeInt32(mInitialWatermarkMs);
+ parcel->writeInt32(mInitialWatermarkKB);
+ parcel->writeInt32(mRebufferingWatermarkLowMs);
+ parcel->writeInt32(mRebufferingWatermarkHighMs);
+ parcel->writeInt32(mRebufferingWatermarkLowKB);
+ parcel->writeInt32(mRebufferingWatermarkHighKB);
+
+ return OK;
+}
+
+String8 BufferingSettings::toString() const {
+ String8 s;
+ s.appendFormat("initialMode(%d), rebufferingMode(%d), "
+ "initialMarks(%d ms, %d KB), rebufferingMarks(%d, %d)ms, (%d, %d)KB",
+ mInitialBufferingMode, mRebufferingMode,
+ mInitialWatermarkMs, mInitialWatermarkKB,
+ mRebufferingWatermarkLowMs, mRebufferingWatermarkHighMs,
+ mRebufferingWatermarkLowKB, mRebufferingWatermarkHighKB);
+ return s;
+}
+
+} // namespace android
diff --git a/media/libmedia/IDataSource.cpp b/media/libmedia/IDataSource.cpp
index 7df3b65..31c85af 100644
--- a/media/libmedia/IDataSource.cpp
+++ b/media/libmedia/IDataSource.cpp
@@ -55,8 +55,16 @@
data.writeInterfaceToken(IDataSource::getInterfaceDescriptor());
data.writeInt64(offset);
data.writeInt64(size);
- remote()->transact(READ_AT, data, &reply);
- return reply.readInt64();
+ status_t err = remote()->transact(READ_AT, data, &reply);
+ if (err != OK) {
+ return err;
+ }
+ int64_t value = 0;
+ err = reply.readInt64(&value);
+ if (err != OK) {
+ return err;
+ }
+ return (ssize_t)value;
}
virtual status_t getSize(off64_t* size) {
diff --git a/media/libmedia/IMediaCodecService.cpp b/media/libmedia/IMediaCodecService.cpp
index dcf2b27..2d62419 100644
--- a/media/libmedia/IMediaCodecService.cpp
+++ b/media/libmedia/IMediaCodecService.cpp
@@ -33,7 +33,7 @@
class BpMediaCodecService : public BpInterface<IMediaCodecService>
{
public:
- BpMediaCodecService(const sp<IBinder>& impl)
+ explicit BpMediaCodecService(const sp<IBinder>& impl)
: BpInterface<IMediaCodecService>(impl)
{
}
diff --git a/media/libmedia/IMediaExtractor.cpp b/media/libmedia/IMediaExtractor.cpp
index 4be1118..f08fabb 100644
--- a/media/libmedia/IMediaExtractor.cpp
+++ b/media/libmedia/IMediaExtractor.cpp
@@ -21,6 +21,7 @@
#include <stdint.h>
#include <sys/types.h>
+#include <android/media/ICas.h>
#include <binder/IPCThreadState.h>
#include <binder/Parcel.h>
#include <media/IMediaExtractor.h>
@@ -34,16 +35,16 @@
GETTRACKMETADATA,
GETMETADATA,
FLAGS,
- SETDRMFLAG,
- GETDRMFLAG,
GETDRMTRACKINFO,
+ SETMEDIACAS,
SETUID,
- NAME
+ NAME,
+ GETMETRICS
};
class BpMediaExtractor : public BpInterface<IMediaExtractor> {
public:
- BpMediaExtractor(const sp<IBinder>& impl)
+ explicit BpMediaExtractor(const sp<IBinder>& impl)
: BpInterface<IMediaExtractor>(impl)
{
}
@@ -96,22 +97,40 @@
return NULL;
}
+ virtual status_t getMetrics(Parcel * reply) {
+ Parcel data;
+ data.writeInterfaceToken(BpMediaExtractor::getInterfaceDescriptor());
+ status_t ret = remote()->transact(GETMETRICS, data, reply);
+ if (ret == NO_ERROR) {
+ return OK;
+ }
+ return UNKNOWN_ERROR;
+ }
+
virtual uint32_t flags() const {
ALOGV("flags NOT IMPLEMENTED");
return 0;
}
- virtual void setDrmFlag(bool flag __unused) {
- ALOGV("setDrmFlag NOT IMPLEMENTED");
- }
- virtual bool getDrmFlag() {
- ALOGV("getDrmFlag NOT IMPLEMENTED");
- return false;
- }
virtual char* getDrmTrackInfo(size_t trackID __unused, int *len __unused) {
ALOGV("getDrmTrackInfo NOT IMPLEMENTED");
return NULL;
}
+
+ virtual status_t setMediaCas(const sp<ICas> & cas) {
+ ALOGV("setMediaCas");
+
+ Parcel data, reply;
+ data.writeInterfaceToken(BpMediaExtractor::getInterfaceDescriptor());
+ data.writeStrongBinder(IInterface::asBinder(cas));
+
+ status_t err = remote()->transact(SETMEDIACAS, data, &reply);
+ if (err != NO_ERROR) {
+ return err;
+ }
+ return reply.readInt32();
+ }
+
virtual void setUID(uid_t uid __unused) {
ALOGV("setUID NOT IMPLEMENTED");
}
@@ -178,6 +197,26 @@
}
return UNKNOWN_ERROR;
}
+ case GETMETRICS: {
+ CHECK_INTERFACE(IMediaExtractor, data, reply);
+ status_t ret = getMetrics(reply);
+ return ret;
+ }
+ case SETMEDIACAS: {
+ ALOGV("setMediaCas");
+ CHECK_INTERFACE(IMediaExtractor, data, reply);
+
+ sp<IBinder> casBinder;
+ status_t err = data.readNullableStrongBinder(&casBinder);
+ if (err != NO_ERROR) {
+ ALOGE("Error reading cas from parcel");
+ return err;
+ }
+ sp<ICas> cas = interface_cast<ICas>(casBinder);
+
+ reply->writeInt32(setMediaCas(cas));
+ return OK;
+ }
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/media/libmedia/IMediaExtractorService.cpp b/media/libmedia/IMediaExtractorService.cpp
index d170c22..7c0d08d 100644
--- a/media/libmedia/IMediaExtractorService.cpp
+++ b/media/libmedia/IMediaExtractorService.cpp
@@ -23,17 +23,19 @@
#include <sys/types.h>
#include <binder/Parcel.h>
#include <media/IMediaExtractorService.h>
+#include <media/stagefright/MediaExtractor.h>
namespace android {
enum {
- MAKE_EXTRACTOR = IBinder::FIRST_CALL_TRANSACTION
+ MAKE_EXTRACTOR = IBinder::FIRST_CALL_TRANSACTION,
+ MAKE_IDATA_SOURCE_FD,
};
class BpMediaExtractorService : public BpInterface<IMediaExtractorService>
{
public:
- BpMediaExtractorService(const sp<IBinder>& impl)
+ explicit BpMediaExtractorService(const sp<IBinder>& impl)
: BpInterface<IMediaExtractorService>(impl)
{
}
@@ -52,6 +54,21 @@
return NULL;
}
+ virtual sp<IDataSource> makeIDataSource(int fd, int64_t offset, int64_t length)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaExtractorService::getInterfaceDescriptor());
+ data.writeFileDescriptor(fd);
+ data.writeInt64(offset);
+ data.writeInt64(length);
+ status_t ret = remote()->transact(MAKE_IDATA_SOURCE_FD, data, &reply);
+ ALOGV("fd:%d offset:%lld length:%lld ret:%d",
+ fd, (long long)offset, (long long)length, ret);
+ if (ret == NO_ERROR) {
+ return interface_cast<IDataSource>(reply.readStrongBinder());
+ }
+ return nullptr;
+ }
};
IMPLEMENT_META_INTERFACE(MediaExtractorService, "android.media.IMediaExtractorService");
@@ -80,6 +97,23 @@
reply->writeStrongBinder(IInterface::asBinder(ex));
return NO_ERROR;
}
+
+ case MAKE_IDATA_SOURCE_FD: {
+ CHECK_INTERFACE(IMediaExtractorService, data, reply);
+ const int fd = dup(data.readFileDescriptor()); // -1 fd checked in makeIDataSource
+ const int64_t offset = data.readInt64();
+ const int64_t length = data.readInt64();
+ ALOGV("fd %d offset%lld length:%lld", fd, (long long)offset, (long long)length);
+ sp<IDataSource> source = makeIDataSource(fd, offset, length);
+ reply->writeStrongBinder(IInterface::asBinder(source));
+ // The FileSource closes the descriptor, so if it is not created
+ // we need to close the descriptor explicitly.
+ if (source.get() == nullptr && fd != -1) {
+ close(fd);
+ }
+ return NO_ERROR;
+ }
+
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/media/libmedia/IMediaHTTPConnection.cpp b/media/libmedia/IMediaHTTPConnection.cpp
index e4b717b..1bb8d67 100644
--- a/media/libmedia/IMediaHTTPConnection.cpp
+++ b/media/libmedia/IMediaHTTPConnection.cpp
@@ -124,6 +124,14 @@
ALOGE("got %zu, but memory has %zu", len, mMemory->size());
return ERROR_OUT_OF_RANGE;
}
+ if(buffer == NULL) {
+ ALOGE("readAt got a NULL buffer");
+ return UNKNOWN_ERROR;
+ }
+ if (mMemory->pointer() == NULL) {
+ ALOGE("readAt got a NULL mMemory->pointer()");
+ return UNKNOWN_ERROR;
+ }
memcpy(buffer, mMemory->pointer(), len);
diff --git a/media/libmedia/IMediaPlayer.cpp b/media/libmedia/IMediaPlayer.cpp
index f8345e4..3996227 100644
--- a/media/libmedia/IMediaPlayer.cpp
+++ b/media/libmedia/IMediaPlayer.cpp
@@ -23,6 +23,7 @@
#include <media/AudioResamplerPublic.h>
#include <media/AVSyncSettings.h>
+#include <media/BufferingSettings.h>
#include <media/IDataSource.h>
#include <media/IMediaHTTPService.h>
@@ -40,6 +41,8 @@
SET_DATA_SOURCE_FD,
SET_DATA_SOURCE_STREAM,
SET_DATA_SOURCE_CALLBACK,
+ SET_BUFFERING_SETTINGS,
+ GET_DEFAULT_BUFFERING_SETTINGS,
PREPARE_ASYNC,
START,
STOP,
@@ -67,8 +70,25 @@
SET_RETRANSMIT_ENDPOINT,
GET_RETRANSMIT_ENDPOINT,
SET_NEXT_PLAYER,
+ APPLY_VOLUME_SHAPER,
+ GET_VOLUME_SHAPER_STATE,
+ // Modular DRM
+ PREPARE_DRM,
+ RELEASE_DRM,
};
+// ModDrm helpers
+static void readVector(const Parcel& reply, Vector<uint8_t>& vector) {
+ uint32_t size = reply.readUint32();
+ vector.insertAt((size_t)0, size);
+ reply.read(vector.editArray(), size);
+}
+
+static void writeVector(Parcel& data, Vector<uint8_t> const& vector) {
+ data.writeUint32(vector.size());
+ data.write(vector.array(), vector.size());
+}
+
class BpMediaPlayer: public BpInterface<IMediaPlayer>
{
public:
@@ -148,6 +168,30 @@
return reply.readInt32();
}
+ status_t setBufferingSettings(const BufferingSettings& buffering)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+ buffering.writeToParcel(&data);
+ remote()->transact(SET_BUFFERING_SETTINGS, data, &reply);
+ return reply.readInt32();
+ }
+
+ status_t getDefaultBufferingSettings(BufferingSettings* buffering /* nonnull */)
+ {
+ if (buffering == nullptr) {
+ return BAD_VALUE;
+ }
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+ remote()->transact(GET_DEFAULT_BUFFERING_SETTINGS, data, &reply);
+ status_t err = reply.readInt32();
+ if (err == OK) {
+ err = buffering->readFromParcel(&reply);
+ }
+ return err;
+ }
+
status_t prepareAsync()
{
Parcel data, reply;
@@ -246,11 +290,12 @@
return reply.readInt32();
}
- status_t seekTo(int msec)
+ status_t seekTo(int msec, MediaPlayerSeekMode mode)
{
Parcel data, reply;
data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
data.writeInt32(msec);
+ data.writeInt32(mode);
remote()->transact(SEEK_TO, data, &reply);
return reply.readInt32();
}
@@ -419,6 +464,89 @@
return err;
}
+
+ virtual VolumeShaper::Status applyVolumeShaper(
+ const sp<VolumeShaper::Configuration>& configuration,
+ const sp<VolumeShaper::Operation>& operation) {
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+
+ status_t tmp;
+ status_t status = configuration.get() == nullptr
+ ? data.writeInt32(0)
+ : (tmp = data.writeInt32(1)) != NO_ERROR
+ ? tmp : configuration->writeToParcel(&data);
+ if (status != NO_ERROR) {
+ return VolumeShaper::Status(status);
+ }
+
+ status = operation.get() == nullptr
+ ? status = data.writeInt32(0)
+ : (tmp = data.writeInt32(1)) != NO_ERROR
+ ? tmp : operation->writeToParcel(&data);
+ if (status != NO_ERROR) {
+ return VolumeShaper::Status(status);
+ }
+
+ int32_t remoteVolumeShaperStatus;
+ status = remote()->transact(APPLY_VOLUME_SHAPER, data, &reply);
+ if (status == NO_ERROR) {
+ status = reply.readInt32(&remoteVolumeShaperStatus);
+ }
+ if (status != NO_ERROR) {
+ return VolumeShaper::Status(status);
+ }
+ return VolumeShaper::Status(remoteVolumeShaperStatus);
+ }
+
+ virtual sp<VolumeShaper::State> getVolumeShaperState(int id) {
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+
+ data.writeInt32(id);
+ status_t status = remote()->transact(GET_VOLUME_SHAPER_STATE, data, &reply);
+ if (status != NO_ERROR) {
+ return nullptr;
+ }
+ sp<VolumeShaper::State> state = new VolumeShaper::State();
+ status = state->readFromParcel(reply);
+ if (status != NO_ERROR) {
+ return nullptr;
+ }
+ return state;
+ }
+
+ // Modular DRM
+ status_t prepareDrm(const uint8_t uuid[16], const Vector<uint8_t>& drmSessionId)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+
+ data.write(uuid, 16);
+ writeVector(data, drmSessionId);
+
+ status_t status = remote()->transact(PREPARE_DRM, data, &reply);
+ if (status != OK) {
+ ALOGE("prepareDrm: binder call failed: %d", status);
+ return status;
+ }
+
+ return reply.readInt32();
+ }
+
+ status_t releaseDrm()
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+
+ status_t status = remote()->transact(RELEASE_DRM, data, &reply);
+ if (status != OK) {
+ ALOGE("releaseDrm: binder call failed: %d", status);
+ return status;
+ }
+
+ return reply.readInt32();
+ }
};
IMPLEMENT_META_INTERFACE(MediaPlayer, "android.media.IMediaPlayer");
@@ -496,6 +624,23 @@
reply->writeInt32(setVideoSurfaceTexture(bufferProducer));
return NO_ERROR;
} break;
+ case SET_BUFFERING_SETTINGS: {
+ CHECK_INTERFACE(IMediaPlayer, data, reply);
+ BufferingSettings buffering;
+ buffering.readFromParcel(&data);
+ reply->writeInt32(setBufferingSettings(buffering));
+ return NO_ERROR;
+ } break;
+ case GET_DEFAULT_BUFFERING_SETTINGS: {
+ CHECK_INTERFACE(IMediaPlayer, data, reply);
+ BufferingSettings buffering;
+ status_t err = getDefaultBufferingSettings(&buffering);
+ reply->writeInt32(err);
+ if (err == OK) {
+ buffering.writeToParcel(reply);
+ }
+ return NO_ERROR;
+ } break;
case PREPARE_ASYNC: {
CHECK_INTERFACE(IMediaPlayer, data, reply);
reply->writeInt32(prepareAsync());
@@ -573,7 +718,9 @@
} break;
case SEEK_TO: {
CHECK_INTERFACE(IMediaPlayer, data, reply);
- reply->writeInt32(seekTo(data.readInt32()));
+ int msec = data.readInt32();
+ MediaPlayerSeekMode mode = (MediaPlayerSeekMode)data.readInt32();
+ reply->writeInt32(seekTo(msec, mode));
return NO_ERROR;
} break;
case GET_CURRENT_POSITION: {
@@ -694,6 +841,64 @@
return NO_ERROR;
} break;
+
+ case APPLY_VOLUME_SHAPER: {
+ CHECK_INTERFACE(IMediaPlayer, data, reply);
+ sp<VolumeShaper::Configuration> configuration;
+ sp<VolumeShaper::Operation> operation;
+
+ int32_t present;
+ status_t status = data.readInt32(&present);
+ if (status == NO_ERROR && present != 0) {
+ configuration = new VolumeShaper::Configuration();
+ status = configuration->readFromParcel(data);
+ }
+ if (status == NO_ERROR) {
+ status = data.readInt32(&present);
+ }
+ if (status == NO_ERROR && present != 0) {
+ operation = new VolumeShaper::Operation();
+ status = operation->readFromParcel(data);
+ }
+ if (status == NO_ERROR) {
+ status = (status_t)applyVolumeShaper(configuration, operation);
+ }
+ reply->writeInt32(status);
+ return NO_ERROR;
+ } break;
+ case GET_VOLUME_SHAPER_STATE: {
+ CHECK_INTERFACE(IMediaPlayer, data, reply);
+ int id;
+ status_t status = data.readInt32(&id);
+ if (status == NO_ERROR) {
+ sp<VolumeShaper::State> state = getVolumeShaperState(id);
+ if (state.get() != nullptr) {
+ status = state->writeToParcel(reply);
+ }
+ }
+ return NO_ERROR;
+ } break;
+
+ // Modular DRM
+ case PREPARE_DRM: {
+ CHECK_INTERFACE(IMediaPlayer, data, reply);
+
+ uint8_t uuid[16];
+ data.read(uuid, sizeof(uuid));
+ Vector<uint8_t> drmSessionId;
+ readVector(data, drmSessionId);
+
+ uint32_t result = prepareDrm(uuid, drmSessionId);
+ reply->writeInt32(result);
+ return OK;
+ }
+ case RELEASE_DRM: {
+ CHECK_INTERFACE(IMediaPlayer, data, reply);
+
+ uint32_t result = releaseDrm();
+ reply->writeInt32(result);
+ return OK;
+ }
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp
index a6860e2..5282352 100644
--- a/media/libmedia/IMediaRecorder.cpp
+++ b/media/libmedia/IMediaRecorder.cpp
@@ -29,6 +29,7 @@
#include <media/IMediaRecorder.h>
#include <gui/Surface.h>
#include <gui/IGraphicBufferProducer.h>
+#include <media/stagefright/PersistentSurface.h>
namespace android {
@@ -49,6 +50,7 @@
SET_VIDEO_ENCODER,
SET_AUDIO_ENCODER,
SET_OUTPUT_FILE_FD,
+ SET_NEXT_OUTPUT_FILE_FD,
SET_VIDEO_SIZE,
SET_VIDEO_FRAMERATE,
SET_PARAMETERS,
@@ -57,7 +59,9 @@
SET_LISTENER,
SET_CLIENT_NAME,
PAUSE,
- RESUME
+ RESUME,
+ GET_METRICS,
+
};
class BpMediaRecorder: public BpInterface<IMediaRecorder>
@@ -79,12 +83,12 @@
return reply.readInt32();
}
- status_t setInputSurface(const sp<IGraphicBufferConsumer>& surface)
+ status_t setInputSurface(const sp<PersistentSurface>& surface)
{
ALOGV("setInputSurface(%p)", surface.get());
Parcel data, reply;
data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
- data.writeStrongBinder(IInterface::asBinder(surface));
+ surface->writeToParcel(&data);
remote()->transact(SET_INPUT_SURFACE, data, &reply);
return reply.readInt32();
}
@@ -171,17 +175,24 @@
return reply.readInt32();
}
- status_t setOutputFile(int fd, int64_t offset, int64_t length) {
- ALOGV("setOutputFile(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length);
+ status_t setOutputFile(int fd) {
+ ALOGV("setOutputFile(%d)", fd);
Parcel data, reply;
data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
data.writeFileDescriptor(fd);
- data.writeInt64(offset);
- data.writeInt64(length);
remote()->transact(SET_OUTPUT_FILE_FD, data, &reply);
return reply.readInt32();
}
+ status_t setNextOutputFile(int fd) {
+ ALOGV("setNextOutputFile(%d)", fd);
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
+ data.writeFileDescriptor(fd);
+ remote()->transact(SET_NEXT_OUTPUT_FILE_FD, data, &reply);
+ return reply.readInt32();
+ }
+
status_t setVideoSize(int width, int height)
{
ALOGV("setVideoSize(%dx%d)", width, height);
@@ -252,6 +263,18 @@
return reply.readInt32();
}
+ status_t getMetrics(Parcel* reply)
+ {
+ ALOGV("getMetrics");
+ Parcel data;
+ data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
+ status_t ret = remote()->transact(GET_METRICS, data, reply);
+ if (ret == NO_ERROR) {
+ return OK;
+ }
+ return UNKNOWN_ERROR;
+ }
+
status_t start()
{
ALOGV("start");
@@ -388,6 +411,11 @@
reply->writeInt32(ret);
return NO_ERROR;
} break;
+ case GET_METRICS: {
+ ALOGV("GET_METRICS");
+ status_t ret = getMetrics(reply);
+ return ret;
+ } break;
case SET_VIDEO_SOURCE: {
ALOGV("SET_VIDEO_SOURCE");
CHECK_INTERFACE(IMediaRecorder, data, reply);
@@ -428,9 +456,15 @@
ALOGV("SET_OUTPUT_FILE_FD");
CHECK_INTERFACE(IMediaRecorder, data, reply);
int fd = dup(data.readFileDescriptor());
- int64_t offset = data.readInt64();
- int64_t length = data.readInt64();
- reply->writeInt32(setOutputFile(fd, offset, length));
+ reply->writeInt32(setOutputFile(fd));
+ ::close(fd);
+ return NO_ERROR;
+ } break;
+ case SET_NEXT_OUTPUT_FILE_FD: {
+ ALOGV("SET_NEXT_OUTPUT_FILE_FD");
+ CHECK_INTERFACE(IMediaRecorder, data, reply);
+ int fd = dup(data.readFileDescriptor());
+ reply->writeInt32(setNextOutputFile(fd));
::close(fd);
return NO_ERROR;
} break;
@@ -490,8 +524,8 @@
case SET_INPUT_SURFACE: {
ALOGV("SET_INPUT_SURFACE");
CHECK_INTERFACE(IMediaRecorder, data, reply);
- sp<IGraphicBufferConsumer> surface = interface_cast<IGraphicBufferConsumer>(
- data.readStrongBinder());
+ sp<PersistentSurface> surface = new PersistentSurface();
+ surface->readFromParcel(&data);
reply->writeInt32(setInputSurface(surface));
return NO_ERROR;
} break;
diff --git a/media/libmedia/IMediaSource.cpp b/media/libmedia/IMediaSource.cpp
index 595bad9..724b3a0 100644
--- a/media/libmedia/IMediaSource.cpp
+++ b/media/libmedia/IMediaSource.cpp
@@ -67,7 +67,7 @@
class BpMediaSource : public BpInterface<IMediaSource> {
public:
- BpMediaSource(const sp<IBinder>& impl)
+ explicit BpMediaSource(const sp<IBinder>& impl)
: BpInterface<IMediaSource>(impl), mBuffersSinceStop(0)
{
}
@@ -389,7 +389,7 @@
}
}
if (transferBuf != nullptr) { // Using shared buffers.
- if (!transferBuf->isObserved()) {
+ if (!transferBuf->isObserved() && transferBuf != buf) {
// Transfer buffer must be part of a MediaBufferGroup.
ALOGV("adding shared memory buffer %p to local group", transferBuf);
mGroup->add_buffer(transferBuf);
diff --git a/media/libmedia/IOMX.cpp b/media/libmedia/IOMX.cpp
index beca464..43130eb 100644
--- a/media/libmedia/IOMX.cpp
+++ b/media/libmedia/IOMX.cpp
@@ -25,43 +25,41 @@
#include <media/IOMX.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/openmax/OMX_IndexExt.h>
+#include <media/OMXBuffer.h>
#include <utils/NativeHandle.h>
+#include <gui/IGraphicBufferProducer.h>
+
+#include <omx/1.0/WOmxNode.h>
+#include <android/IGraphicBufferSource.h>
+#include <android/IOMXBufferSource.h>
namespace android {
enum {
CONNECT = IBinder::FIRST_CALL_TRANSACTION,
- LIVES_LOCALLY,
LIST_NODES,
ALLOCATE_NODE,
+ CREATE_INPUT_SURFACE,
FREE_NODE,
SEND_COMMAND,
GET_PARAMETER,
SET_PARAMETER,
GET_CONFIG,
SET_CONFIG,
- GET_STATE,
- ENABLE_NATIVE_BUFFERS,
- USE_BUFFER,
- USE_GRAPHIC_BUFFER,
- CREATE_INPUT_SURFACE,
- CREATE_PERSISTENT_INPUT_SURFACE,
+ SET_PORT_MODE,
SET_INPUT_SURFACE,
- SIGNAL_END_OF_INPUT_STREAM,
- STORE_META_DATA_IN_BUFFERS,
PREPARE_FOR_ADAPTIVE_PLAYBACK,
ALLOC_SECURE_BUFFER,
- ALLOC_BUFFER_WITH_BACKUP,
+ USE_BUFFER,
FREE_BUFFER,
FILL_BUFFER,
EMPTY_BUFFER,
GET_EXTENSION_INDEX,
OBSERVER_ON_MSG,
GET_GRAPHIC_BUFFER_USAGE,
- SET_INTERNAL_OPTION,
- UPDATE_GRAPHIC_BUFFER_IN_META,
CONFIGURE_VIDEO_TUNNEL_MODE,
- UPDATE_NATIVE_HANDLE_IN_META,
+ DISPATCH_MESSAGE,
+ SET_QUIRKS,
};
class BpOMX : public BpInterface<IOMX> {
@@ -70,16 +68,6 @@
: BpInterface<IOMX>(impl) {
}
- virtual bool livesLocally(node_id node, pid_t pid) {
- Parcel data, reply;
- data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- data.writeInt32((int32_t)node);
- data.writeInt32(pid);
- remote()->transact(LIVES_LOCALLY, data, &reply);
-
- return reply.readInt32() != 0;
- }
-
virtual status_t listNodes(List<ComponentInfo> *list) {
list->clear();
@@ -104,8 +92,7 @@
virtual status_t allocateNode(
const char *name, const sp<IOMXObserver> &observer,
- sp<IBinder> *nodeBinder,
- node_id *node) {
+ sp<IOMXNode> *omxNode) {
Parcel data, reply;
data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
data.writeCString(name);
@@ -114,31 +101,58 @@
status_t err = reply.readInt32();
if (err == OK) {
- *node = (node_id)reply.readInt32();
- if (nodeBinder != NULL) {
- *nodeBinder = remote();
- }
+ *omxNode = IOMXNode::asInterface(reply.readStrongBinder());
} else {
- *node = 0;
+ omxNode->clear();
}
return err;
}
- virtual status_t freeNode(node_id node) {
+ virtual status_t createInputSurface(
+ sp<IGraphicBufferProducer> *bufferProducer,
+ sp<IGraphicBufferSource> *bufferSource) {
Parcel data, reply;
+ status_t err;
data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- data.writeInt32((int32_t)node);
+ err = remote()->transact(CREATE_INPUT_SURFACE, data, &reply);
+ if (err != OK) {
+ ALOGW("binder transaction failed: %d", err);
+ return err;
+ }
+
+ err = reply.readInt32();
+ if (err != OK) {
+ return err;
+ }
+
+ *bufferProducer = IGraphicBufferProducer::asInterface(
+ reply.readStrongBinder());
+ *bufferSource = IGraphicBufferSource::asInterface(
+ reply.readStrongBinder());
+
+ return err;
+ }
+};
+
+class BpOMXNode : public BpInterface<IOMXNode> {
+public:
+ explicit BpOMXNode(const sp<IBinder> &impl)
+ : BpInterface<IOMXNode>(impl) {
+ }
+
+ virtual status_t freeNode() {
+ Parcel data, reply;
+ data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
remote()->transact(FREE_NODE, data, &reply);
return reply.readInt32();
}
virtual status_t sendCommand(
- node_id node, OMX_COMMANDTYPE cmd, OMX_S32 param) {
+ OMX_COMMANDTYPE cmd, OMX_S32 param) {
Parcel data, reply;
- data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- data.writeInt32((int32_t)node);
+ data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
data.writeInt32(cmd);
data.writeInt32(param);
remote()->transact(SEND_COMMAND, data, &reply);
@@ -147,11 +161,10 @@
}
virtual status_t getParameter(
- node_id node, OMX_INDEXTYPE index,
+ OMX_INDEXTYPE index,
void *params, size_t size) {
Parcel data, reply;
- data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- data.writeInt32((int32_t)node);
+ data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
data.writeInt32(index);
data.writeInt64(size);
data.write(params, size);
@@ -168,11 +181,10 @@
}
virtual status_t setParameter(
- node_id node, OMX_INDEXTYPE index,
+ OMX_INDEXTYPE index,
const void *params, size_t size) {
Parcel data, reply;
- data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- data.writeInt32((int32_t)node);
+ data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
data.writeInt32(index);
data.writeInt64(size);
data.write(params, size);
@@ -182,11 +194,10 @@
}
virtual status_t getConfig(
- node_id node, OMX_INDEXTYPE index,
+ OMX_INDEXTYPE index,
void *params, size_t size) {
Parcel data, reply;
- data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- data.writeInt32((int32_t)node);
+ data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
data.writeInt32(index);
data.writeInt64(size);
data.write(params, size);
@@ -203,11 +214,10 @@
}
virtual status_t setConfig(
- node_id node, OMX_INDEXTYPE index,
+ OMX_INDEXTYPE index,
const void *params, size_t size) {
Parcel data, reply;
- data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- data.writeInt32((int32_t)node);
+ data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
data.writeInt32(index);
data.writeInt64(size);
data.write(params, size);
@@ -216,36 +226,21 @@
return reply.readInt32();
}
- virtual status_t getState(
- node_id node, OMX_STATETYPE* state) {
+ virtual status_t setPortMode(
+ OMX_U32 port_index, IOMX::PortMode mode) {
Parcel data, reply;
- data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- data.writeInt32((int32_t)node);
- remote()->transact(GET_STATE, data, &reply);
+ data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
+ data.writeInt32(port_index);
+ data.writeInt32(mode);
+ remote()->transact(SET_PORT_MODE, data, &reply);
- *state = static_cast<OMX_STATETYPE>(reply.readInt32());
return reply.readInt32();
}
- virtual status_t enableNativeBuffers(
- node_id node, OMX_U32 port_index, OMX_BOOL graphic, OMX_BOOL enable) {
- Parcel data, reply;
- data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- data.writeInt32((int32_t)node);
- data.writeInt32(port_index);
- data.writeInt32((uint32_t)graphic);
- data.writeInt32((uint32_t)enable);
- remote()->transact(ENABLE_NATIVE_BUFFERS, data, &reply);
-
- status_t err = reply.readInt32();
- return err;
- }
-
virtual status_t getGraphicBufferUsage(
- node_id node, OMX_U32 port_index, OMX_U32* usage) {
+ OMX_U32 port_index, OMX_U32* usage) {
Parcel data, reply;
- data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- data.writeInt32((int32_t)node);
+ data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
data.writeInt32(port_index);
remote()->transact(GET_GRAPHIC_BUFFER_USAGE, data, &reply);
@@ -255,17 +250,19 @@
}
virtual status_t useBuffer(
- node_id node, OMX_U32 port_index, const sp<IMemory> ¶ms,
- buffer_id *buffer, OMX_U32 allottedSize) {
+ OMX_U32 port_index, const OMXBuffer &omxBuf, buffer_id *buffer) {
Parcel data, reply;
- data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- data.writeInt32((int32_t)node);
+ data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
data.writeInt32(port_index);
- data.writeStrongBinder(IInterface::asBinder(params));
- data.writeInt32(allottedSize);
+
+ status_t err = omxBuf.writeToParcel(&data);
+ if (err != OK) {
+ return err;
+ }
+
remote()->transact(USE_BUFFER, data, &reply);
- status_t err = reply.readInt32();
+ err = reply.readInt32();
if (err != OK) {
*buffer = 0;
@@ -277,185 +274,30 @@
return err;
}
-
- virtual status_t useGraphicBuffer(
- node_id node, OMX_U32 port_index,
- const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer) {
- Parcel data, reply;
- data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- data.writeInt32((int32_t)node);
- data.writeInt32(port_index);
- data.write(*graphicBuffer);
- remote()->transact(USE_GRAPHIC_BUFFER, data, &reply);
-
- status_t err = reply.readInt32();
- if (err != OK) {
- *buffer = 0;
-
- return err;
- }
-
- *buffer = (buffer_id)reply.readInt32();
-
- return err;
- }
-
- virtual status_t updateGraphicBufferInMeta(
- node_id node, OMX_U32 port_index,
- const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer) {
- Parcel data, reply;
- data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- data.writeInt32((int32_t)node);
- data.writeInt32(port_index);
- data.write(*graphicBuffer);
- data.writeInt32((int32_t)buffer);
- remote()->transact(UPDATE_GRAPHIC_BUFFER_IN_META, data, &reply);
-
- status_t err = reply.readInt32();
- return err;
- }
-
- virtual status_t updateNativeHandleInMeta(
- node_id node, OMX_U32 port_index,
- const sp<NativeHandle> &nativeHandle, buffer_id buffer) {
- Parcel data, reply;
- data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- data.writeInt32((int32_t)node);
- data.writeInt32(port_index);
- data.writeInt32(nativeHandle != NULL);
- if (nativeHandle != NULL) {
- data.writeNativeHandle(nativeHandle->handle());
- }
- data.writeInt32((int32_t)buffer);
- remote()->transact(UPDATE_NATIVE_HANDLE_IN_META, data, &reply);
-
- status_t err = reply.readInt32();
- return err;
- }
-
- virtual status_t createInputSurface(
- node_id node, OMX_U32 port_index, android_dataspace dataSpace,
- sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
- Parcel data, reply;
- status_t err;
- data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- data.writeInt32((int32_t)node);
- data.writeInt32(port_index);
- data.writeInt32(dataSpace);
- err = remote()->transact(CREATE_INPUT_SURFACE, data, &reply);
- if (err != OK) {
- ALOGW("binder transaction failed: %d", err);
- return err;
- }
-
- // read type even if createInputSurface failed
- int negotiatedType = reply.readInt32();
- if (type != NULL) {
- *type = (MetadataBufferType)negotiatedType;
- }
-
- err = reply.readInt32();
- if (err != OK) {
- return err;
- }
-
- *bufferProducer = IGraphicBufferProducer::asInterface(
- reply.readStrongBinder());
-
- return err;
- }
-
- virtual status_t createPersistentInputSurface(
- sp<IGraphicBufferProducer> *bufferProducer,
- sp<IGraphicBufferConsumer> *bufferConsumer) {
- Parcel data, reply;
- status_t err;
- data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- err = remote()->transact(CREATE_PERSISTENT_INPUT_SURFACE, data, &reply);
- if (err != OK) {
- ALOGW("binder transaction failed: %d", err);
- return err;
- }
-
- err = reply.readInt32();
- if (err != OK) {
- return err;
- }
-
- *bufferProducer = IGraphicBufferProducer::asInterface(
- reply.readStrongBinder());
- *bufferConsumer = IGraphicBufferConsumer::asInterface(
- reply.readStrongBinder());
-
- return err;
- }
-
virtual status_t setInputSurface(
- node_id node, OMX_U32 port_index,
- const sp<IGraphicBufferConsumer> &bufferConsumer, MetadataBufferType *type) {
+ const sp<IOMXBufferSource> &bufferSource) {
Parcel data, reply;
- data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- status_t err;
- data.writeInt32((int32_t)node);
- data.writeInt32(port_index);
- data.writeStrongBinder(IInterface::asBinder(bufferConsumer));
+ data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
- err = remote()->transact(SET_INPUT_SURFACE, data, &reply);
+ data.writeStrongBinder(IInterface::asBinder(bufferSource));
+
+ status_t err = remote()->transact(SET_INPUT_SURFACE, data, &reply);
if (err != OK) {
ALOGW("binder transaction failed: %d", err);
return err;
}
- // read type even if setInputSurface failed
- int negotiatedType = reply.readInt32();
- if (type != NULL) {
- *type = (MetadataBufferType)negotiatedType;
- }
+ err = reply.readInt32();
- return reply.readInt32();
- }
-
- virtual status_t signalEndOfInputStream(node_id node) {
- Parcel data, reply;
- status_t err;
- data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- data.writeInt32((int32_t)node);
- err = remote()->transact(SIGNAL_END_OF_INPUT_STREAM, data, &reply);
- if (err != OK) {
- ALOGW("binder transaction failed: %d", err);
- return err;
- }
-
- return reply.readInt32();
- }
-
- virtual status_t storeMetaDataInBuffers(
- node_id node, OMX_U32 port_index, OMX_BOOL enable, MetadataBufferType *type) {
- Parcel data, reply;
- data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- data.writeInt32((int32_t)node);
- data.writeInt32(port_index);
- data.writeInt32((int32_t)enable);
- data.writeInt32(type == NULL ? kMetadataBufferTypeANWBuffer : *type);
-
- remote()->transact(STORE_META_DATA_IN_BUFFERS, data, &reply);
-
- // read type even storeMetaDataInBuffers failed
- int negotiatedType = reply.readInt32();
- if (type != NULL) {
- *type = (MetadataBufferType)negotiatedType;
- }
-
- return reply.readInt32();
+ return err;
}
virtual status_t prepareForAdaptivePlayback(
- node_id node, OMX_U32 port_index, OMX_BOOL enable,
+ OMX_U32 port_index, OMX_BOOL enable,
OMX_U32 max_width, OMX_U32 max_height) {
Parcel data, reply;
- data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- data.writeInt32((int32_t)node);
+ data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
data.writeInt32(port_index);
data.writeInt32((int32_t)enable);
data.writeInt32(max_width);
@@ -467,11 +309,10 @@
}
virtual status_t configureVideoTunnelMode(
- node_id node, OMX_U32 portIndex, OMX_BOOL tunneled,
+ OMX_U32 portIndex, OMX_BOOL tunneled,
OMX_U32 audioHwSync, native_handle_t **sidebandHandle ) {
Parcel data, reply;
- data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- data.writeInt32((int32_t)node);
+ data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
data.writeInt32(portIndex);
data.writeInt32((int32_t)tunneled);
data.writeInt32(audioHwSync);
@@ -486,11 +327,10 @@
virtual status_t allocateSecureBuffer(
- node_id node, OMX_U32 port_index, size_t size,
+ OMX_U32 port_index, size_t size,
buffer_id *buffer, void **buffer_data, sp<NativeHandle> *native_handle) {
Parcel data, reply;
- data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- data.writeInt32((int32_t)node);
+ data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
data.writeInt32(port_index);
data.writeInt64(size);
remote()->transact(ALLOC_SECURE_BUFFER, data, &reply);
@@ -514,34 +354,10 @@
return err;
}
- virtual status_t allocateBufferWithBackup(
- node_id node, OMX_U32 port_index, const sp<IMemory> ¶ms,
- buffer_id *buffer, OMX_U32 allottedSize) {
- Parcel data, reply;
- data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- data.writeInt32((int32_t)node);
- data.writeInt32(port_index);
- data.writeStrongBinder(IInterface::asBinder(params));
- data.writeInt32(allottedSize);
- remote()->transact(ALLOC_BUFFER_WITH_BACKUP, data, &reply);
-
- status_t err = reply.readInt32();
- if (err != OK) {
- *buffer = 0;
-
- return err;
- }
-
- *buffer = (buffer_id)reply.readInt32();
-
- return err;
- }
-
virtual status_t freeBuffer(
- node_id node, OMX_U32 port_index, buffer_id buffer) {
+ OMX_U32 port_index, buffer_id buffer) {
Parcel data, reply;
- data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- data.writeInt32((int32_t)node);
+ data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
data.writeInt32(port_index);
data.writeInt32((int32_t)buffer);
remote()->transact(FREE_BUFFER, data, &reply);
@@ -549,11 +365,15 @@
return reply.readInt32();
}
- virtual status_t fillBuffer(node_id node, buffer_id buffer, int fenceFd) {
+ virtual status_t fillBuffer(
+ buffer_id buffer, const OMXBuffer &omxBuf, int fenceFd) {
Parcel data, reply;
- data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- data.writeInt32((int32_t)node);
+ data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
data.writeInt32((int32_t)buffer);
+ status_t err = omxBuf.writeToParcel(&data);
+ if (err != OK) {
+ return err;
+ }
data.writeInt32(fenceFd >= 0);
if (fenceFd >= 0) {
data.writeFileDescriptor(fenceFd, true /* takeOwnership */);
@@ -564,16 +384,15 @@
}
virtual status_t emptyBuffer(
- node_id node,
- buffer_id buffer,
- OMX_U32 range_offset, OMX_U32 range_length,
+ buffer_id buffer, const OMXBuffer &omxBuf,
OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
Parcel data, reply;
- data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- data.writeInt32((int32_t)node);
+ data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
data.writeInt32((int32_t)buffer);
- data.writeInt32(range_offset);
- data.writeInt32(range_length);
+ status_t err = omxBuf.writeToParcel(&data);
+ if (err != OK) {
+ return err;
+ }
data.writeInt32(flags);
data.writeInt64(timestamp);
data.writeInt32(fenceFd >= 0);
@@ -586,12 +405,10 @@
}
virtual status_t getExtensionIndex(
- node_id node,
const char *parameter_name,
OMX_INDEXTYPE *index) {
Parcel data, reply;
- data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- data.writeInt32((int32_t)node);
+ data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
data.writeCString(parameter_name);
remote()->transact(GET_EXTENSION_INDEX, data, &reply);
@@ -606,26 +423,136 @@
return err;
}
- virtual status_t setInternalOption(
- node_id node,
- OMX_U32 port_index,
- InternalOptionType type,
- const void *optionData,
- size_t size) {
+ virtual status_t dispatchMessage(const omx_message &msg) {
Parcel data, reply;
- data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
- data.writeInt32((int32_t)node);
- data.writeInt32(port_index);
- data.writeInt64(size);
- data.write(optionData, size);
- data.writeInt32(type);
- remote()->transact(SET_INTERNAL_OPTION, data, &reply);
+ data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
+ data.writeInt32(msg.fenceFd >= 0);
+ if (msg.fenceFd >= 0) {
+ data.writeFileDescriptor(msg.fenceFd, true /* takeOwnership */);
+ }
+ data.writeInt32(msg.type);
+ data.write(&msg.u, sizeof(msg.u));
+
+ remote()->transact(DISPATCH_MESSAGE, data, &reply);
+
+ return reply.readInt32();
+ }
+
+ virtual status_t setQuirks(OMX_U32 quirks) {
+ Parcel data, reply;
+ data.writeInterfaceToken(IOMXNode::getInterfaceDescriptor());
+ data.writeInt32(quirks);
+
+ remote()->transact(SET_QUIRKS, data, &reply);
return reply.readInt32();
}
};
+using ::android::hardware::media::omx::V1_0::utils::LWOmxNode;
+class HpOMXNode : public HpInterface<BpOMXNode, LWOmxNode> {
+public:
+ HpOMXNode(const sp<IBinder>& base) : PBase(base) {}
+
+ virtual status_t freeNode() {
+ return mBase->freeNode();
+ }
+
+ virtual status_t sendCommand(
+ OMX_COMMANDTYPE cmd, OMX_S32 param) {
+ return mBase->sendCommand(cmd, param);
+ }
+
+ virtual status_t getParameter(
+ OMX_INDEXTYPE index, void *params, size_t size) {
+ return mBase->getParameter(index, params, size);
+ }
+
+ virtual status_t setParameter(
+ OMX_INDEXTYPE index, const void *params, size_t size) {
+ return mBase->setParameter(index, params, size);
+ }
+
+ virtual status_t getConfig(
+ OMX_INDEXTYPE index, void *params, size_t size) {
+ return mBase->getConfig(index, params, size);
+ }
+
+ virtual status_t setConfig(
+ OMX_INDEXTYPE index, const void *params, size_t size) {
+ return mBase->setConfig(index, params, size);
+ }
+
+ virtual status_t setPortMode(
+ OMX_U32 port_index, IOMX::PortMode mode) {
+ return mBase->setPortMode(port_index, mode);
+ }
+
+ virtual status_t prepareForAdaptivePlayback(
+ OMX_U32 portIndex, OMX_BOOL enable,
+ OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight) {
+ return mBase->prepareForAdaptivePlayback(
+ portIndex, enable, maxFrameWidth, maxFrameHeight);
+ }
+
+ virtual status_t configureVideoTunnelMode(
+ OMX_U32 portIndex, OMX_BOOL tunneled,
+ OMX_U32 audioHwSync, native_handle_t **sidebandHandle) {
+ return mBase->configureVideoTunnelMode(
+ portIndex, tunneled, audioHwSync, sidebandHandle);
+ }
+
+ virtual status_t getGraphicBufferUsage(
+ OMX_U32 port_index, OMX_U32* usage) {
+ return mBase->getGraphicBufferUsage(port_index, usage);
+ }
+
+ virtual status_t setInputSurface(
+ const sp<IOMXBufferSource> &bufferSource) {
+ return mBase->setInputSurface(bufferSource);
+ }
+
+ virtual status_t allocateSecureBuffer(
+ OMX_U32 port_index, size_t size, buffer_id *buffer,
+ void **buffer_data, sp<NativeHandle> *native_handle) {
+ return mBase->allocateSecureBuffer(
+ port_index, size, buffer, buffer_data, native_handle);
+ }
+
+ virtual status_t useBuffer(
+ OMX_U32 port_index, const OMXBuffer &omxBuf, buffer_id *buffer) {
+ return mBase->useBuffer(port_index, omxBuf, buffer);
+ }
+
+ virtual status_t freeBuffer(
+ OMX_U32 port_index, buffer_id buffer) {
+ return mBase->freeBuffer(port_index, buffer);
+ }
+
+ virtual status_t fillBuffer(
+ buffer_id buffer, const OMXBuffer &omxBuf, int fenceFd = -1) {
+ return mBase->fillBuffer(buffer, omxBuf, fenceFd);
+ }
+
+ virtual status_t emptyBuffer(
+ buffer_id buffer, const OMXBuffer &omxBuf,
+ OMX_U32 flags, OMX_TICKS timestamp, int fenceFd = -1) {
+ return mBase->emptyBuffer(buffer, omxBuf, flags, timestamp, fenceFd);
+ }
+
+ virtual status_t getExtensionIndex(
+ const char *parameter_name,
+ OMX_INDEXTYPE *index) {
+ return mBase->getExtensionIndex(parameter_name, index);
+ }
+
+ virtual status_t dispatchMessage(const omx_message &msg) {
+ return mBase->dispatchMessage(msg);
+ }
+};
+
IMPLEMENT_META_INTERFACE(OMX, "android.hardware.IOMX");
+IMPLEMENT_HYBRID_META_INTERFACE(OMXNode, IOmxNode, "android.hardware.IOMXNode");
////////////////////////////////////////////////////////////////////////////////
@@ -638,16 +565,6 @@
status_t BnOMX::onTransact(
uint32_t code, const Parcel &data, Parcel *reply, uint32_t flags) {
switch (code) {
- case LIVES_LOCALLY:
- {
- CHECK_OMX_INTERFACE(IOMX, data, reply);
- node_id node = (node_id)data.readInt32();
- pid_t pid = (pid_t)data.readInt32();
- reply->writeInt32(livesLocally(node, pid));
-
- return OK;
- }
-
case LIST_NODES:
{
CHECK_OMX_INTERFACE(IOMX, data, reply);
@@ -686,40 +603,62 @@
return NO_ERROR;
}
- node_id node;
+ sp<IOMXNode> omxNode;
- status_t err = allocateNode(name, observer,
- NULL /* nodeBinder */, &node);
+ status_t err = allocateNode(name, observer, &omxNode);
+
reply->writeInt32(err);
if (err == OK) {
- reply->writeInt32((int32_t)node);
+ reply->writeStrongBinder(IInterface::asBinder(omxNode));
}
return NO_ERROR;
}
- case FREE_NODE:
+ case CREATE_INPUT_SURFACE:
{
CHECK_OMX_INTERFACE(IOMX, data, reply);
- node_id node = (node_id)data.readInt32();
+ sp<IGraphicBufferProducer> bufferProducer;
+ sp<IGraphicBufferSource> bufferSource;
+ status_t err = createInputSurface(&bufferProducer, &bufferSource);
- reply->writeInt32(freeNode(node));
+ reply->writeInt32(err);
+
+ if (err == OK) {
+ reply->writeStrongBinder(IInterface::asBinder(bufferProducer));
+ reply->writeStrongBinder(IInterface::asBinder(bufferSource));
+ }
+
+ return NO_ERROR;
+ }
+
+ default:
+ return BBinder::onTransact(code, data, reply, flags);
+ }
+}
+
+status_t BnOMXNode::onTransact(
+ uint32_t code, const Parcel &data, Parcel *reply, uint32_t flags) {
+ switch (code) {
+ case FREE_NODE:
+ {
+ CHECK_OMX_INTERFACE(IOMXNode, data, reply);
+
+ reply->writeInt32(freeNode());
return NO_ERROR;
}
case SEND_COMMAND:
{
- CHECK_OMX_INTERFACE(IOMX, data, reply);
-
- node_id node = (node_id)data.readInt32();
+ CHECK_OMX_INTERFACE(IOMXNode, data, reply);
OMX_COMMANDTYPE cmd =
static_cast<OMX_COMMANDTYPE>(data.readInt32());
OMX_S32 param = data.readInt32();
- reply->writeInt32(sendCommand(node, cmd, param));
+ reply->writeInt32(sendCommand(cmd, param));
return NO_ERROR;
}
@@ -728,11 +667,9 @@
case SET_PARAMETER:
case GET_CONFIG:
case SET_CONFIG:
- case SET_INTERNAL_OPTION:
{
- CHECK_OMX_INTERFACE(IOMX, data, reply);
+ CHECK_OMX_INTERFACE(IOMXNode, data, reply);
- node_id node = (node_id)data.readInt32();
OMX_INDEXTYPE index = static_cast<OMX_INDEXTYPE>(data.readInt32());
size_t size = data.readInt64();
@@ -742,8 +679,7 @@
size_t pageSize = 0;
size_t allocSize = 0;
bool isUsageBits = (index == (OMX_INDEXTYPE) OMX_IndexParamConsumerUsageBits);
- if ((isUsageBits && size < 4) ||
- (!isUsageBits && code != SET_INTERNAL_OPTION && size < 8)) {
+ if ((isUsageBits && size < 4) || (!isUsageBits && size < 8)) {
// we expect the structure to contain at least the size and
// version, 8 bytes total
ALOGE("b/27207275 (%zu) (%d/%d)", size, int(index), int(code));
@@ -765,8 +701,7 @@
} else {
err = NOT_ENOUGH_DATA;
OMX_U32 declaredSize = *(OMX_U32*)params;
- if (code != SET_INTERNAL_OPTION &&
- index != (OMX_INDEXTYPE) OMX_IndexParamConsumerUsageBits &&
+ if (index != (OMX_INDEXTYPE) OMX_IndexParamConsumerUsageBits &&
declaredSize > size) {
// the buffer says it's bigger than it actually is
ALOGE("b/27207275 (%u/%zu)", declaredSize, size);
@@ -781,26 +716,17 @@
} else {
switch (code) {
case GET_PARAMETER:
- err = getParameter(node, index, params, size);
+ err = getParameter(index, params, size);
break;
case SET_PARAMETER:
- err = setParameter(node, index, params, size);
+ err = setParameter(index, params, size);
break;
case GET_CONFIG:
- err = getConfig(node, index, params, size);
+ err = getConfig(index, params, size);
break;
case SET_CONFIG:
- err = setConfig(node, index, params, size);
+ err = setConfig(index, params, size);
break;
- case SET_INTERNAL_OPTION:
- {
- InternalOptionType type =
- (InternalOptionType)data.readInt32();
-
- err = setInternalOption(node, index, type, params, size);
- break;
- }
-
default:
TRESPASS();
}
@@ -826,44 +752,24 @@
return NO_ERROR;
}
- case GET_STATE:
+ case SET_PORT_MODE:
{
- CHECK_OMX_INTERFACE(IOMX, data, reply);
-
- node_id node = (node_id)data.readInt32();
- OMX_STATETYPE state = OMX_StateInvalid;
-
- status_t err = getState(node, &state);
- reply->writeInt32(state);
- reply->writeInt32(err);
-
- return NO_ERROR;
- }
-
- case ENABLE_NATIVE_BUFFERS:
- {
- CHECK_OMX_INTERFACE(IOMX, data, reply);
-
- node_id node = (node_id)data.readInt32();
+ CHECK_OMX_INTERFACE(IOMXNode, data, reply);
OMX_U32 port_index = data.readInt32();
- OMX_BOOL graphic = (OMX_BOOL)data.readInt32();
- OMX_BOOL enable = (OMX_BOOL)data.readInt32();
-
- status_t err = enableNativeBuffers(node, port_index, graphic, enable);
- reply->writeInt32(err);
+ IOMX::PortMode mode = (IOMX::PortMode) data.readInt32();
+ reply->writeInt32(setPortMode(port_index, mode));
return NO_ERROR;
}
case GET_GRAPHIC_BUFFER_USAGE:
{
- CHECK_OMX_INTERFACE(IOMX, data, reply);
+ CHECK_OMX_INTERFACE(IOMXNode, data, reply);
- node_id node = (node_id)data.readInt32();
OMX_U32 port_index = data.readInt32();
OMX_U32 usage = 0;
- status_t err = getGraphicBufferUsage(node, port_index, &usage);
+ status_t err = getGraphicBufferUsage(port_index, &usage);
reply->writeInt32(err);
reply->writeInt32(usage);
@@ -872,22 +778,18 @@
case USE_BUFFER:
{
- CHECK_OMX_INTERFACE(IOMX, data, reply);
+ CHECK_OMX_INTERFACE(IOMXNode, data, reply);
- node_id node = (node_id)data.readInt32();
OMX_U32 port_index = data.readInt32();
- sp<IMemory> params =
- interface_cast<IMemory>(data.readStrongBinder());
- OMX_U32 allottedSize = data.readInt32();
- if (params == NULL) {
- ALOGE("b/26392700");
- reply->writeInt32(INVALID_OPERATION);
- return NO_ERROR;
+ OMXBuffer omxBuf;
+ status_t err = omxBuf.readFromParcel(&data);
+ if (err != OK) {
+ return err;
}
buffer_id buffer;
- status_t err = useBuffer(node, port_index, params, &buffer, allottedSize);
+ err = useBuffer(port_index, omxBuf, &buffer);
reply->writeInt32(err);
if (err == OK) {
@@ -897,160 +799,14 @@
return NO_ERROR;
}
- case USE_GRAPHIC_BUFFER:
- {
- CHECK_OMX_INTERFACE(IOMX, data, reply);
-
- node_id node = (node_id)data.readInt32();
- OMX_U32 port_index = data.readInt32();
- sp<GraphicBuffer> graphicBuffer = new GraphicBuffer();
- data.read(*graphicBuffer);
-
- buffer_id buffer;
- status_t err = useGraphicBuffer(
- node, port_index, graphicBuffer, &buffer);
- reply->writeInt32(err);
-
- if (err == OK) {
- reply->writeInt32((int32_t)buffer);
- }
-
- return NO_ERROR;
- }
-
- case UPDATE_GRAPHIC_BUFFER_IN_META:
- {
- CHECK_OMX_INTERFACE(IOMX, data, reply);
-
- node_id node = (node_id)data.readInt32();
- OMX_U32 port_index = data.readInt32();
- sp<GraphicBuffer> graphicBuffer = new GraphicBuffer();
- data.read(*graphicBuffer);
- buffer_id buffer = (buffer_id)data.readInt32();
-
- status_t err = updateGraphicBufferInMeta(
- node, port_index, graphicBuffer, buffer);
- reply->writeInt32(err);
-
- return NO_ERROR;
- }
-
- case UPDATE_NATIVE_HANDLE_IN_META:
- {
- CHECK_OMX_INTERFACE(IOMX, data, reply);
-
- node_id node = (node_id)data.readInt32();
- OMX_U32 port_index = data.readInt32();
- native_handle *handle = NULL;
- if (data.readInt32()) {
- handle = data.readNativeHandle();
- }
- buffer_id buffer = (buffer_id)data.readInt32();
-
- status_t err = updateNativeHandleInMeta(
- node, port_index, NativeHandle::create(handle, true /* ownshandle */), buffer);
- reply->writeInt32(err);
-
- return NO_ERROR;
- }
-
- case CREATE_INPUT_SURFACE:
- {
- CHECK_OMX_INTERFACE(IOMX, data, reply);
-
- node_id node = (node_id)data.readInt32();
- OMX_U32 port_index = data.readInt32();
- android_dataspace dataSpace = (android_dataspace)data.readInt32();
-
- sp<IGraphicBufferProducer> bufferProducer;
- MetadataBufferType type = kMetadataBufferTypeInvalid;
- status_t err = createInputSurface(node, port_index, dataSpace, &bufferProducer, &type);
-
- if ((err != OK) && (type == kMetadataBufferTypeInvalid)) {
- android_errorWriteLog(0x534e4554, "26324358");
- }
-
- reply->writeInt32(type);
- reply->writeInt32(err);
-
- if (err == OK) {
- reply->writeStrongBinder(IInterface::asBinder(bufferProducer));
- }
-
- return NO_ERROR;
- }
-
- case CREATE_PERSISTENT_INPUT_SURFACE:
- {
- CHECK_OMX_INTERFACE(IOMX, data, reply);
-
- sp<IGraphicBufferProducer> bufferProducer;
- sp<IGraphicBufferConsumer> bufferConsumer;
- status_t err = createPersistentInputSurface(
- &bufferProducer, &bufferConsumer);
-
- reply->writeInt32(err);
-
- if (err == OK) {
- reply->writeStrongBinder(IInterface::asBinder(bufferProducer));
- reply->writeStrongBinder(IInterface::asBinder(bufferConsumer));
- }
-
- return NO_ERROR;
- }
-
case SET_INPUT_SURFACE:
{
- CHECK_OMX_INTERFACE(IOMX, data, reply);
+ CHECK_OMX_INTERFACE(IOMXNode, data, reply);
- node_id node = (node_id)data.readInt32();
- OMX_U32 port_index = data.readInt32();
+ sp<IOMXBufferSource> bufferSource =
+ interface_cast<IOMXBufferSource>(data.readStrongBinder());
- sp<IGraphicBufferConsumer> bufferConsumer =
- interface_cast<IGraphicBufferConsumer>(data.readStrongBinder());
-
- MetadataBufferType type = kMetadataBufferTypeInvalid;
-
- status_t err = INVALID_OPERATION;
- if (bufferConsumer == NULL) {
- ALOGE("b/26392700");
- } else {
- err = setInputSurface(node, port_index, bufferConsumer, &type);
-
- if ((err != OK) && (type == kMetadataBufferTypeInvalid)) {
- android_errorWriteLog(0x534e4554, "26324358");
- }
- }
-
- reply->writeInt32(type);
- reply->writeInt32(err);
- return NO_ERROR;
- }
-
- case SIGNAL_END_OF_INPUT_STREAM:
- {
- CHECK_OMX_INTERFACE(IOMX, data, reply);
-
- node_id node = (node_id)data.readInt32();
-
- status_t err = signalEndOfInputStream(node);
- reply->writeInt32(err);
-
- return NO_ERROR;
- }
-
- case STORE_META_DATA_IN_BUFFERS:
- {
- CHECK_OMX_INTERFACE(IOMX, data, reply);
-
- node_id node = (node_id)data.readInt32();
- OMX_U32 port_index = data.readInt32();
- OMX_BOOL enable = (OMX_BOOL)data.readInt32();
-
- MetadataBufferType type = (MetadataBufferType)data.readInt32();
- status_t err = storeMetaDataInBuffers(node, port_index, enable, &type);
-
- reply->writeInt32(type);
+ status_t err = setInputSurface(bufferSource);
reply->writeInt32(err);
return NO_ERROR;
@@ -1058,16 +814,15 @@
case PREPARE_FOR_ADAPTIVE_PLAYBACK:
{
- CHECK_OMX_INTERFACE(IOMX, data, reply);
+ CHECK_OMX_INTERFACE(IOMXNode, data, reply);
- node_id node = (node_id)data.readInt32();
OMX_U32 port_index = data.readInt32();
OMX_BOOL enable = (OMX_BOOL)data.readInt32();
OMX_U32 max_width = data.readInt32();
OMX_U32 max_height = data.readInt32();
status_t err = prepareForAdaptivePlayback(
- node, port_index, enable, max_width, max_height);
+ port_index, enable, max_width, max_height);
reply->writeInt32(err);
return NO_ERROR;
@@ -1075,16 +830,15 @@
case CONFIGURE_VIDEO_TUNNEL_MODE:
{
- CHECK_OMX_INTERFACE(IOMX, data, reply);
+ CHECK_OMX_INTERFACE(IOMXNode, data, reply);
- node_id node = (node_id)data.readInt32();
OMX_U32 port_index = data.readInt32();
OMX_BOOL tunneled = (OMX_BOOL)data.readInt32();
OMX_U32 audio_hw_sync = data.readInt32();
native_handle_t *sideband_handle = NULL;
status_t err = configureVideoTunnelMode(
- node, port_index, tunneled, audio_hw_sync, &sideband_handle);
+ port_index, tunneled, audio_hw_sync, &sideband_handle);
reply->writeInt32(err);
if(err == OK){
reply->writeNativeHandle(sideband_handle);
@@ -1095,11 +849,10 @@
case ALLOC_SECURE_BUFFER:
{
- CHECK_OMX_INTERFACE(IOMX, data, reply);
+ CHECK_OMX_INTERFACE(IOMXNode, data, reply);
- node_id node = (node_id)data.readInt32();
OMX_U32 port_index = data.readInt32();
- if (!isSecure(node) || port_index != 0 /* kPortIndexInput */) {
+ if (!isSecure() || port_index != 0 /* kPortIndexInput */) {
ALOGE("b/24310423");
reply->writeInt32(INVALID_OPERATION);
return NO_ERROR;
@@ -1111,7 +864,7 @@
void *buffer_data = NULL;
sp<NativeHandle> native_handle;
status_t err = allocateSecureBuffer(
- node, port_index, size, &buffer, &buffer_data, &native_handle);
+ port_index, size, &buffer, &buffer_data, &native_handle);
reply->writeInt32(err);
if (err == OK) {
@@ -1125,83 +878,61 @@
return NO_ERROR;
}
- case ALLOC_BUFFER_WITH_BACKUP:
- {
- CHECK_OMX_INTERFACE(IOMX, data, reply);
-
- node_id node = (node_id)data.readInt32();
- OMX_U32 port_index = data.readInt32();
- sp<IMemory> params =
- interface_cast<IMemory>(data.readStrongBinder());
- OMX_U32 allottedSize = data.readInt32();
-
- if (params == NULL) {
- ALOGE("b/26392700");
- reply->writeInt32(INVALID_OPERATION);
- return NO_ERROR;
- }
-
- buffer_id buffer;
- status_t err = allocateBufferWithBackup(
- node, port_index, params, &buffer, allottedSize);
-
- reply->writeInt32(err);
-
- if (err == OK) {
- reply->writeInt32((int32_t)buffer);
- }
-
- return NO_ERROR;
- }
-
case FREE_BUFFER:
{
- CHECK_OMX_INTERFACE(IOMX, data, reply);
+ CHECK_OMX_INTERFACE(IOMXNode, data, reply);
- node_id node = (node_id)data.readInt32();
OMX_U32 port_index = data.readInt32();
buffer_id buffer = (buffer_id)data.readInt32();
- reply->writeInt32(freeBuffer(node, port_index, buffer));
+ reply->writeInt32(freeBuffer(port_index, buffer));
return NO_ERROR;
}
case FILL_BUFFER:
{
- CHECK_OMX_INTERFACE(IOMX, data, reply);
+ CHECK_OMX_INTERFACE(IOMXNode, data, reply);
- node_id node = (node_id)data.readInt32();
buffer_id buffer = (buffer_id)data.readInt32();
+
+ OMXBuffer omxBuf;
+ status_t err = omxBuf.readFromParcel(&data);
+ if (err != OK) {
+ return err;
+ }
+
bool haveFence = data.readInt32();
int fenceFd = haveFence ? ::dup(data.readFileDescriptor()) : -1;
- reply->writeInt32(fillBuffer(node, buffer, fenceFd));
+
+ reply->writeInt32(fillBuffer(buffer, omxBuf, fenceFd));
return NO_ERROR;
}
case EMPTY_BUFFER:
{
- CHECK_OMX_INTERFACE(IOMX, data, reply);
+ CHECK_OMX_INTERFACE(IOMXNode, data, reply);
- node_id node = (node_id)data.readInt32();
buffer_id buffer = (buffer_id)data.readInt32();
- OMX_U32 range_offset = data.readInt32();
- OMX_U32 range_length = data.readInt32();
+ OMXBuffer omxBuf;
+ status_t err = omxBuf.readFromParcel(&data);
+ if (err != OK) {
+ return err;
+ }
OMX_U32 flags = data.readInt32();
OMX_TICKS timestamp = data.readInt64();
bool haveFence = data.readInt32();
int fenceFd = haveFence ? ::dup(data.readFileDescriptor()) : -1;
reply->writeInt32(emptyBuffer(
- node, buffer, range_offset, range_length, flags, timestamp, fenceFd));
+ buffer, omxBuf, flags, timestamp, fenceFd));
return NO_ERROR;
}
case GET_EXTENSION_INDEX:
{
- CHECK_OMX_INTERFACE(IOMX, data, reply);
+ CHECK_OMX_INTERFACE(IOMXNode, data, reply);
- node_id node = (node_id)data.readInt32();
const char *parameter_name = data.readCString();
if (parameter_name == NULL) {
@@ -1211,7 +942,7 @@
}
OMX_INDEXTYPE index;
- status_t err = getExtensionIndex(node, parameter_name, &index);
+ status_t err = getExtensionIndex(parameter_name, &index);
reply->writeInt32(err);
@@ -1222,6 +953,23 @@
return OK;
}
+ case DISPATCH_MESSAGE:
+ {
+ CHECK_OMX_INTERFACE(IOMXNode, data, reply);
+ omx_message msg;
+ int haveFence = data.readInt32();
+ msg.fenceFd = haveFence ? ::dup(data.readFileDescriptor()) : -1;
+ msg.type = (typeof(msg.type))data.readInt32();
+ status_t err = data.read(&msg.u, sizeof(msg.u));
+
+ if (err == OK) {
+ err = dispatchMessage(msg);
+ }
+ reply->writeInt32(err);
+
+ return NO_ERROR;
+ }
+
default:
return BBinder::onTransact(code, data, reply, flags);
}
@@ -1238,14 +986,12 @@
virtual void onMessages(const std::list<omx_message> &messages) {
Parcel data, reply;
std::list<omx_message>::const_iterator it = messages.cbegin();
- bool first = true;
+ if (messages.empty()) {
+ return;
+ }
+ data.writeInterfaceToken(IOMXObserver::getInterfaceDescriptor());
while (it != messages.cend()) {
const omx_message &msg = *it++;
- if (first) {
- data.writeInterfaceToken(IOMXObserver::getInterfaceDescriptor());
- data.writeInt32(msg.node);
- first = false;
- }
data.writeInt32(msg.fenceFd >= 0);
if (msg.fenceFd >= 0) {
data.writeFileDescriptor(msg.fenceFd, true /* takeOwnership */);
@@ -1254,10 +1000,8 @@
data.write(&msg.u, sizeof(msg.u));
ALOGV("onMessage writing message %d, size %zu", msg.type, sizeof(msg));
}
- if (!first) {
- data.writeInt32(-1); // mark end
- remote()->transact(OBSERVER_ON_MSG, data, &reply, IBinder::FLAG_ONEWAY);
- }
+ data.writeInt32(-1); // mark end
+ remote()->transact(OBSERVER_ON_MSG, data, &reply, IBinder::FLAG_ONEWAY);
}
};
@@ -1269,7 +1013,6 @@
case OBSERVER_ON_MSG:
{
CHECK_OMX_INTERFACE(IOMXObserver, data, reply);
- IOMX::node_id node = data.readInt32();
std::list<omx_message> messages;
status_t err = FAILED_TRANSACTION; // must receive at least one message
do {
@@ -1278,7 +1021,6 @@
break;
}
omx_message msg;
- msg.node = node;
msg.fenceFd = haveFence ? ::dup(data.readFileDescriptor()) : -1;
msg.type = (typeof(msg.type))data.readInt32();
err = data.read(&msg.u, sizeof(msg.u));
diff --git a/media/libmedia/MediaCodecBuffer.cpp b/media/libmedia/MediaCodecBuffer.cpp
new file mode 100644
index 0000000..59d6164
--- /dev/null
+++ b/media/libmedia/MediaCodecBuffer.cpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaCodecBuffer"
+#include <utils/Log.h>
+
+#include <media/MediaCodecBuffer.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/MediaBufferBase.h>
+
+namespace android {
+
+MediaCodecBuffer::MediaCodecBuffer(const sp<AMessage> &format, const sp<ABuffer> &buffer)
+ : mMeta(new AMessage),
+ mFormat(format),
+ mBuffer(buffer),
+ mMediaBufferBase(nullptr) {
+}
+
+// ABuffer-like interface
+uint8_t *MediaCodecBuffer::base() {
+ return mBuffer->base();
+}
+
+uint8_t *MediaCodecBuffer::data() {
+ return mBuffer->data();
+}
+
+size_t MediaCodecBuffer::capacity() const {
+ return mBuffer->capacity();
+}
+
+size_t MediaCodecBuffer::size() const {
+ return mBuffer->size();
+}
+
+size_t MediaCodecBuffer::offset() const {
+ return mBuffer->offset();
+}
+
+status_t MediaCodecBuffer::setRange(size_t offset, size_t size) {
+ mBuffer->setRange(offset, size);
+ return OK;
+}
+
+MediaBufferBase *MediaCodecBuffer::getMediaBufferBase() {
+ if (mMediaBufferBase != NULL) {
+ mMediaBufferBase->add_ref();
+ }
+ return mMediaBufferBase;
+}
+
+void MediaCodecBuffer::setMediaBufferBase(MediaBufferBase *mediaBuffer) {
+ if (mMediaBufferBase != NULL) {
+ mMediaBufferBase->release();
+ }
+ mMediaBufferBase = mediaBuffer;
+}
+
+sp<AMessage> MediaCodecBuffer::meta() {
+ return mMeta;
+}
+
+sp<AMessage> MediaCodecBuffer::format() {
+ return mFormat;
+}
+
+void MediaCodecBuffer::setFormat(const sp<AMessage> &format) {
+ mMeta->clear();
+ mFormat = format;
+}
+
+} // namespace android
diff --git a/media/libmedia/MediaCodecInfo.cpp b/media/libmedia/MediaCodecInfo.cpp
index 2bc5984..1f188f3 100644
--- a/media/libmedia/MediaCodecInfo.cpp
+++ b/media/libmedia/MediaCodecInfo.cpp
@@ -121,9 +121,11 @@
}
bool MediaCodecInfo::hasQuirk(const char *name) const {
- for (size_t ix = 0; ix < mQuirks.size(); ix++) {
- if (mQuirks.itemAt(ix).equalsIgnoreCase(name)) {
- return true;
+ if (name) {
+ for (size_t ix = 0; ix < mQuirks.size(); ix++) {
+ if (mQuirks.itemAt(ix).equalsIgnoreCase(name)) {
+ return true;
+ }
}
}
return false;
@@ -190,9 +192,11 @@
}
ssize_t MediaCodecInfo::getCapabilityIndex(const char *mime) const {
- for (size_t ix = 0; ix < mCaps.size(); ix++) {
- if (mCaps.keyAt(ix).equalsIgnoreCase(mime)) {
- return ix;
+ if (mime) {
+ for (size_t ix = 0; ix < mCaps.size(); ix++) {
+ if (mCaps.keyAt(ix).equalsIgnoreCase(mime)) {
+ return ix;
+ }
}
}
return -1;
diff --git a/media/libmedia/MediaDefs.cpp b/media/libmedia/MediaDefs.cpp
index a2110c9..544a6ae 100644
--- a/media/libmedia/MediaDefs.cpp
+++ b/media/libmedia/MediaDefs.cpp
@@ -29,6 +29,7 @@
const char *MEDIA_MIMETYPE_VIDEO_MPEG2 = "video/mpeg2";
const char *MEDIA_MIMETYPE_VIDEO_RAW = "video/raw";
const char *MEDIA_MIMETYPE_VIDEO_DOLBY_VISION = "video/dolby-vision";
+const char *MEDIA_MIMETYPE_VIDEO_SCRAMBLED = "video/scrambled";
const char *MEDIA_MIMETYPE_AUDIO_AMR_NB = "audio/3gpp";
const char *MEDIA_MIMETYPE_AUDIO_AMR_WB = "audio/amr-wb";
@@ -48,6 +49,7 @@
const char *MEDIA_MIMETYPE_AUDIO_MSGSM = "audio/gsm";
const char *MEDIA_MIMETYPE_AUDIO_AC3 = "audio/ac3";
const char *MEDIA_MIMETYPE_AUDIO_EAC3 = "audio/eac3";
+const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED = "audio/scrambled";
const char *MEDIA_MIMETYPE_CONTAINER_MPEG4 = "video/mp4";
const char *MEDIA_MIMETYPE_CONTAINER_WAV = "audio/x-wav";
@@ -57,8 +59,6 @@
const char *MEDIA_MIMETYPE_CONTAINER_AVI = "video/avi";
const char *MEDIA_MIMETYPE_CONTAINER_MPEG2PS = "video/mp2p";
-const char *MEDIA_MIMETYPE_CONTAINER_WVM = "video/wvm";
-
const char *MEDIA_MIMETYPE_TEXT_3GPP = "text/3gpp-tt";
const char *MEDIA_MIMETYPE_TEXT_SUBRIP = "application/x-subrip";
const char *MEDIA_MIMETYPE_TEXT_VTT = "text/vtt";
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index ff0e52e..aade69a 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -27,9 +27,11 @@
#include <media/MediaProfiles.h>
#include <media/stagefright/foundation/ADebug.h>
#include <OMX_Video.h>
+#include <sys/stat.h>
namespace android {
+constexpr char const * const MediaProfiles::xmlFiles[];
Mutex MediaProfiles::sLock;
bool MediaProfiles::sIsInitialized = false;
MediaProfiles *MediaProfiles::sInstance = NULL;
@@ -593,14 +595,19 @@
if (!sIsInitialized) {
char value[PROPERTY_VALUE_MAX];
if (property_get("media.settings.xml", value, NULL) <= 0) {
- const char *defaultXmlFile = "/etc/media_profiles.xml";
- FILE *fp = fopen(defaultXmlFile, "r");
- if (fp == NULL) {
- ALOGW("could not find media config xml file");
+ const char* xmlFile = nullptr;
+ for (auto const& f : xmlFiles) {
+ if (checkXmlFile(f)) {
+ xmlFile = f;
+ break;
+ }
+ }
+ if (xmlFile == nullptr) {
+ ALOGW("Could not find a validated xml file. "
+ "Using the default instance instead.");
sInstance = createDefaultInstance();
} else {
- fclose(fp); // close the file first.
- sInstance = createInstanceFromXmlFile(defaultXmlFile);
+ sInstance = createInstanceFromXmlFile(xmlFile);
}
} else {
sInstance = createInstanceFromXmlFile(value);
@@ -838,6 +845,12 @@
return profiles;
}
+bool MediaProfiles::checkXmlFile(const char* xmlFile) {
+ struct stat fStat;
+ return stat(xmlFile, &fStat) == 0 && S_ISREG(fStat.st_mode);
+ // TODO: Add validation
+}
+
/*static*/ MediaProfiles*
MediaProfiles::createInstanceFromXmlFile(const char *xml)
{
diff --git a/media/libmedia/MidiDeviceInfo.cpp b/media/libmedia/MidiDeviceInfo.cpp
new file mode 100644
index 0000000..02efc5f
--- /dev/null
+++ b/media/libmedia/MidiDeviceInfo.cpp
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "MidiDeviceInfo"
+
+#include "MidiDeviceInfo.h"
+
+#include <binder/Parcel.h>
+#include <log/log.h>
+#include <utils/Errors.h>
+#include <utils/String16.h>
+
+namespace android {
+namespace media {
+namespace midi {
+
+// The constant values need to be kept in sync with MidiDeviceInfo.java.
+// static
+const char* const MidiDeviceInfo::PROPERTY_NAME = "name";
+const char* const MidiDeviceInfo::PROPERTY_MANUFACTURER = "manufacturer";
+const char* const MidiDeviceInfo::PROPERTY_PRODUCT = "product";
+const char* const MidiDeviceInfo::PROPERTY_VERSION = "version";
+const char* const MidiDeviceInfo::PROPERTY_SERIAL_NUMBER = "serial_number";
+const char* const MidiDeviceInfo::PROPERTY_ALSA_CARD = "alsa_card";
+const char* const MidiDeviceInfo::PROPERTY_ALSA_DEVICE = "alsa_device";
+
+String16 MidiDeviceInfo::getProperty(const char* propertyName) {
+ String16 value;
+ if (mProperties.getString(String16(propertyName), &value)) {
+ return value;
+ } else {
+ return String16();
+ }
+}
+
+#define RETURN_IF_FAILED(calledOnce) \
+ { \
+ status_t returnStatus = calledOnce; \
+ if (returnStatus) { \
+ ALOGE("Failed at %s:%d (%s)", __FILE__, __LINE__, __func__); \
+ return returnStatus; \
+ } \
+ }
+
+status_t MidiDeviceInfo::writeToParcel(Parcel* parcel) const {
+ // Needs to be kept in sync with code in MidiDeviceInfo.java
+ RETURN_IF_FAILED(parcel->writeInt32(mType));
+ RETURN_IF_FAILED(parcel->writeInt32(mId));
+ RETURN_IF_FAILED(parcel->writeInt32((int32_t)mInputPortNames.size()));
+ RETURN_IF_FAILED(parcel->writeInt32((int32_t)mOutputPortNames.size()));
+ RETURN_IF_FAILED(writeStringVector(parcel, mInputPortNames));
+ RETURN_IF_FAILED(writeStringVector(parcel, mOutputPortNames));
+ RETURN_IF_FAILED(parcel->writeInt32(mIsPrivate ? 1 : 0));
+ RETURN_IF_FAILED(mProperties.writeToParcel(parcel));
+ // This corresponds to "extra" properties written by Java code
+ RETURN_IF_FAILED(mProperties.writeToParcel(parcel));
+ return OK;
+}
+
+status_t MidiDeviceInfo::readFromParcel(const Parcel* parcel) {
+ // Needs to be kept in sync with code in MidiDeviceInfo.java
+ RETURN_IF_FAILED(parcel->readInt32(&mType));
+ RETURN_IF_FAILED(parcel->readInt32(&mId));
+ int32_t inputPortCount;
+ RETURN_IF_FAILED(parcel->readInt32(&inputPortCount));
+ int32_t outputPortCount;
+ RETURN_IF_FAILED(parcel->readInt32(&outputPortCount));
+ RETURN_IF_FAILED(readStringVector(parcel, &mInputPortNames, inputPortCount));
+ RETURN_IF_FAILED(readStringVector(parcel, &mOutputPortNames, outputPortCount));
+ int32_t isPrivate;
+ RETURN_IF_FAILED(parcel->readInt32(&isPrivate));
+ mIsPrivate = isPrivate == 1;
+ RETURN_IF_FAILED(mProperties.readFromParcel(parcel));
+ // Ignore "extra" properties as they may contain Java Parcelables
+ return OK;
+}
+
+status_t MidiDeviceInfo::readStringVector(
+ const Parcel* parcel, Vector<String16> *vectorPtr, size_t defaultLength) {
+ std::unique_ptr<std::vector<std::unique_ptr<String16>>> v;
+ status_t result = parcel->readString16Vector(&v);
+ if (result != OK) return result;
+ vectorPtr->clear();
+ if (v.get() != nullptr) {
+ for (const auto& iter : *v) {
+ if (iter.get() != nullptr) {
+ vectorPtr->push_back(*iter);
+ } else {
+ vectorPtr->push_back(String16());
+ }
+ }
+ } else {
+ vectorPtr->resize(defaultLength);
+ }
+ return OK;
+}
+
+status_t MidiDeviceInfo::writeStringVector(Parcel* parcel, const Vector<String16>& vector) const {
+ std::vector<String16> v;
+ for (size_t i = 0; i < vector.size(); ++i) {
+ v.push_back(vector[i]);
+ }
+ return parcel->writeString16Vector(v);
+}
+
+// Vector does not define operator==
+static inline bool areVectorsEqual(const Vector<String16>& lhs, const Vector<String16>& rhs) {
+ if (lhs.size() != rhs.size()) return false;
+ for (size_t i = 0; i < lhs.size(); ++i) {
+ if (lhs[i] != rhs[i]) return false;
+ }
+ return true;
+}
+
+bool operator==(const MidiDeviceInfo& lhs, const MidiDeviceInfo& rhs) {
+ return (lhs.mType == rhs.mType && lhs.mId == rhs.mId &&
+ areVectorsEqual(lhs.mInputPortNames, rhs.mInputPortNames) &&
+ areVectorsEqual(lhs.mOutputPortNames, rhs.mOutputPortNames) &&
+ lhs.mProperties == rhs.mProperties &&
+ lhs.mIsPrivate == rhs.mIsPrivate);
+}
+
+} // namespace midi
+} // namespace media
+} // namespace android
diff --git a/media/libmedia/OMXBuffer.cpp b/media/libmedia/OMXBuffer.cpp
new file mode 100644
index 0000000..6d54a13
--- /dev/null
+++ b/media/libmedia/OMXBuffer.cpp
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "OMXBuffer"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/MediaCodecBuffer.h>
+#include <media/OMXBuffer.h>
+#include <binder/IMemory.h>
+#include <binder/Parcel.h>
+#include <ui/GraphicBuffer.h>
+#include <utils/NativeHandle.h>
+
+namespace android {
+
+//static
+OMXBuffer OMXBuffer::sPreset(static_cast<sp<MediaCodecBuffer> >(NULL));
+
+OMXBuffer::OMXBuffer()
+ : mBufferType(kBufferTypeInvalid) {
+}
+
+OMXBuffer::OMXBuffer(const sp<MediaCodecBuffer>& codecBuffer)
+ : mBufferType(kBufferTypePreset),
+ mRangeOffset(codecBuffer != NULL ? codecBuffer->offset() : 0),
+ mRangeLength(codecBuffer != NULL ? codecBuffer->size() : 0) {
+}
+
+OMXBuffer::OMXBuffer(OMX_U32 rangeOffset, OMX_U32 rangeLength)
+ : mBufferType(kBufferTypePreset),
+ mRangeOffset(rangeOffset),
+ mRangeLength(rangeLength) {
+}
+
+OMXBuffer::OMXBuffer(const sp<IMemory> &mem)
+ : mBufferType(kBufferTypeSharedMem),
+ mMem(mem) {
+}
+
+OMXBuffer::OMXBuffer(const sp<GraphicBuffer> &gbuf)
+ : mBufferType(kBufferTypeANWBuffer),
+ mGraphicBuffer(gbuf) {
+}
+
+OMXBuffer::OMXBuffer(const sp<NativeHandle> &handle)
+ : mBufferType(kBufferTypeNativeHandle),
+ mNativeHandle(handle) {
+}
+
+OMXBuffer::OMXBuffer(const hidl_memory &hidlMemory)
+ : mBufferType(kBufferTypeHidlMemory),
+ mHidlMemory(hidlMemory) {
+}
+
+OMXBuffer::~OMXBuffer() {
+}
+
+status_t OMXBuffer::writeToParcel(Parcel *parcel) const {
+ CHECK(mBufferType != kBufferTypeHidlMemory);
+ parcel->writeInt32(mBufferType);
+
+ switch(mBufferType) {
+ case kBufferTypePreset:
+ {
+ status_t err = parcel->writeUint32(mRangeOffset);
+ if (err != OK) {
+ return err;
+ }
+ return parcel->writeUint32(mRangeLength);
+ }
+
+ case kBufferTypeSharedMem:
+ {
+ return parcel->writeStrongBinder(IInterface::asBinder(mMem));
+ }
+
+ case kBufferTypeANWBuffer:
+ {
+ if (mGraphicBuffer == NULL) {
+ return parcel->writeBool(false);
+ }
+ status_t err = parcel->writeBool(true);
+ if (err != OK) {
+ return err;
+ }
+ return parcel->write(*mGraphicBuffer);
+ }
+
+ case kBufferTypeNativeHandle:
+ {
+ return parcel->writeNativeHandle(mNativeHandle->handle());
+ }
+
+ default:
+ return BAD_VALUE;
+ }
+ return BAD_VALUE;
+}
+
+status_t OMXBuffer::readFromParcel(const Parcel *parcel) {
+    BufferType bufferType = (BufferType) parcel->readInt32();
+    CHECK(bufferType != kBufferTypeHidlMemory);
+
+    switch(bufferType) {
+        case kBufferTypePreset:
+        {
+            status_t err = parcel->readUint32(&mRangeOffset);
+            if (err != OK) {
+                return err;
+            }
+            err = parcel->readUint32(&mRangeLength);
+            if (err != OK) {
+                return err;
+            }
+            break;
+        }
+
+        case kBufferTypeSharedMem:
+        {
+            mMem = interface_cast<IMemory>(parcel->readStrongBinder());
+            break;
+        }
+
+        case kBufferTypeANWBuffer:
+        {
+            bool notNull;
+            status_t err = parcel->readBool(&notNull);
+            if (err != OK) {
+                return err;
+            }
+            if (notNull) {
+                sp<GraphicBuffer> buffer = new GraphicBuffer();
+                err = parcel->read(*buffer); // reuse outer status; a fresh declaration shadowed it
+                if (err != OK) {
+                    return err;
+                }
+                mGraphicBuffer = buffer;
+            } else {
+                mGraphicBuffer = nullptr;
+            }
+            break;
+        }
+
+        case kBufferTypeNativeHandle:
+        {
+            sp<NativeHandle> handle = NativeHandle::create(
+                    parcel->readNativeHandle(), true /* ownsHandle */);
+
+            mNativeHandle = handle;
+            break;
+        }
+
+        default:
+            return BAD_VALUE;
+    }
+
+    mBufferType = bufferType;
+    return OK;
+}
+
+OMXBuffer& OMXBuffer::operator=(OMXBuffer&& source) {
+ mBufferType = std::move(source.mBufferType);
+ mRangeOffset = std::move(source.mRangeOffset);
+ mRangeLength = std::move(source.mRangeLength);
+ mMem = std::move(source.mMem);
+ mGraphicBuffer = std::move(source.mGraphicBuffer);
+ mNativeHandle = std::move(source.mNativeHandle);
+ mHidlMemory = std::move(source.mHidlMemory);
+ return *this;
+}
+
+} // namespace android
+
+
+
+
diff --git a/media/libmedia/TypeConverter.cpp b/media/libmedia/TypeConverter.cpp
new file mode 100644
index 0000000..a6eba86
--- /dev/null
+++ b/media/libmedia/TypeConverter.cpp
@@ -0,0 +1,413 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/TypeConverter.h>
+
+namespace android {
+
+#define MAKE_STRING_FROM_ENUM(string) { #string, string }
+#define TERMINATOR { .literal = nullptr }
+
+template <>
+const OutputDeviceConverter::Table OutputDeviceConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_NONE),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_EARPIECE),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_SPEAKER),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_SPEAKER_SAFE),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADSET),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADPHONE),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ALL_SCO),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ALL_A2DP),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_AUX_DIGITAL),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_HDMI),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_USB_ACCESSORY),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_USB_DEVICE),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ALL_USB),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_REMOTE_SUBMIX),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_TELEPHONY_TX),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_LINE),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_HDMI_ARC),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_SPDIF),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_FM),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_AUX_LINE),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_IP),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BUS),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_PROXY),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_USB_HEADSET),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_DEFAULT),
+ // STUB must be after DEFAULT, so the latter is picked up by toString first.
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_STUB),
+ TERMINATOR
+};
+
+template <>
+const InputDeviceConverter::Table InputDeviceConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_NONE),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_COMMUNICATION),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_AMBIENT),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BUILTIN_MIC),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_ALL_SCO),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_WIRED_HEADSET),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_AUX_DIGITAL),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_HDMI),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_TELEPHONY_RX),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_VOICE_CALL),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BACK_MIC),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_REMOTE_SUBMIX),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_USB_ACCESSORY),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_USB_DEVICE),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_ALL_USB),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_FM_TUNER),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_TV_TUNER),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_LINE),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_SPDIF),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_A2DP),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_LOOPBACK),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_IP),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BUS),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_PROXY),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_USB_HEADSET),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_DEFAULT),
+ // STUB must be after DEFAULT, so the latter is picked up by toString first.
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_STUB),
+ TERMINATOR
+};
+
+
+template <>
+const OutputFlagConverter::Table OutputFlagConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_NONE),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_DIRECT),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_PRIMARY),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_FAST),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_DEEP_BUFFER),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_NON_BLOCKING),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_HW_AV_SYNC),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_TTS),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_RAW),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_SYNC),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_DIRECT_PCM),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_MMAP_NOIRQ),
+ MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_VOIP_RX),
+ TERMINATOR
+};
+
+
+template <>
+const InputFlagConverter::Table InputFlagConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_NONE),
+ MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_FAST),
+ MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_HW_HOTWORD),
+ MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_RAW),
+ MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_SYNC),
+ MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_MMAP_NOIRQ),
+ MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_VOIP_TX),
+ TERMINATOR
+};
+
+
+template <>
+const FormatConverter::Table FormatConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_16_BIT),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_8_BIT),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_32_BIT),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_8_24_BIT),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_FLOAT),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_24_BIT_PACKED),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_MP3),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AMR_NB),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AMR_WB),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_MAIN),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LC),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_SSR),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LTP),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_HE_V1),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_SCALABLE),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ERLC),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LD),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_HE_V2),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ELD),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS_MAIN),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS_LC),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS_SSR),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS_LTP),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS_HE_V1),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS_SCALABLE),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS_ERLC),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS_LD),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS_HE_V2),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS_ELD),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_VORBIS),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_HE_AAC_V1),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_HE_AAC_V2),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_OPUS),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AC3),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_E_AC3),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_DTS),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_DTS_HD),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_IEC61937),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_DOLBY_TRUEHD),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_EVRC),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_EVRCB),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_EVRCWB),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_EVRCNW),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADIF),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_WMA),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_WMA_PRO),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AMR_WB_PLUS),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_MP2),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_QCELP),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_DSD),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_FLAC),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_ALAC),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_APE),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ADTS),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_SBC),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_APTX),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_APTX_HD),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AC4),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_LDAC),
+ TERMINATOR
+};
+
+
+template <>
+const OutputChannelConverter::Table OutputChannelConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_MONO),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_STEREO),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_2POINT1),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_QUAD),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_SURROUND),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_PENTA),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_5POINT1),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_6POINT1),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_7POINT1),
+ TERMINATOR
+};
+
+
+template <>
+const InputChannelConverter::Table InputChannelConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_MONO),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_STEREO),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_FRONT_BACK),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_6),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_VOICE_UPLINK_MONO),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_VOICE_DNLINK_MONO),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_VOICE_CALL_MONO),
+ TERMINATOR
+};
+
+template <>
+const ChannelIndexConverter::Table ChannelIndexConverter::mTable[] = {
+ {"AUDIO_CHANNEL_INDEX_MASK_1", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_1)},
+ {"AUDIO_CHANNEL_INDEX_MASK_2", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_2)},
+ {"AUDIO_CHANNEL_INDEX_MASK_3", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_3)},
+ {"AUDIO_CHANNEL_INDEX_MASK_4", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_4)},
+ {"AUDIO_CHANNEL_INDEX_MASK_5", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_5)},
+ {"AUDIO_CHANNEL_INDEX_MASK_6", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_6)},
+ {"AUDIO_CHANNEL_INDEX_MASK_7", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_7)},
+ {"AUDIO_CHANNEL_INDEX_MASK_8", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_8)},
+ TERMINATOR
+};
+
+
+template <>
+const GainModeConverter::Table GainModeConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(AUDIO_GAIN_MODE_JOINT),
+ MAKE_STRING_FROM_ENUM(AUDIO_GAIN_MODE_CHANNELS),
+ MAKE_STRING_FROM_ENUM(AUDIO_GAIN_MODE_RAMP),
+ TERMINATOR
+};
+
+
+template <>
+const StreamTypeConverter::Table StreamTypeConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(AUDIO_STREAM_VOICE_CALL),
+ MAKE_STRING_FROM_ENUM(AUDIO_STREAM_SYSTEM),
+ MAKE_STRING_FROM_ENUM(AUDIO_STREAM_RING),
+ MAKE_STRING_FROM_ENUM(AUDIO_STREAM_MUSIC),
+ MAKE_STRING_FROM_ENUM(AUDIO_STREAM_ALARM),
+ MAKE_STRING_FROM_ENUM(AUDIO_STREAM_NOTIFICATION),
+ MAKE_STRING_FROM_ENUM(AUDIO_STREAM_BLUETOOTH_SCO ),
+ MAKE_STRING_FROM_ENUM(AUDIO_STREAM_ENFORCED_AUDIBLE),
+ MAKE_STRING_FROM_ENUM(AUDIO_STREAM_DTMF),
+ MAKE_STRING_FROM_ENUM(AUDIO_STREAM_TTS),
+ MAKE_STRING_FROM_ENUM(AUDIO_STREAM_ACCESSIBILITY),
+ MAKE_STRING_FROM_ENUM(AUDIO_STREAM_REROUTING),
+ MAKE_STRING_FROM_ENUM(AUDIO_STREAM_PATCH),
+ TERMINATOR
+};
+
+template<>
+const AudioModeConverter::Table AudioModeConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(AUDIO_MODE_INVALID),
+ MAKE_STRING_FROM_ENUM(AUDIO_MODE_CURRENT),
+ MAKE_STRING_FROM_ENUM(AUDIO_MODE_NORMAL),
+ MAKE_STRING_FROM_ENUM(AUDIO_MODE_RINGTONE),
+ MAKE_STRING_FROM_ENUM(AUDIO_MODE_IN_CALL),
+ MAKE_STRING_FROM_ENUM(AUDIO_MODE_IN_COMMUNICATION),
+ TERMINATOR
+};
+
+template <>
+const UsageTypeConverter::Table UsageTypeConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(AUDIO_USAGE_UNKNOWN),
+ MAKE_STRING_FROM_ENUM(AUDIO_USAGE_MEDIA),
+ MAKE_STRING_FROM_ENUM(AUDIO_USAGE_VOICE_COMMUNICATION),
+ MAKE_STRING_FROM_ENUM(AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING),
+ MAKE_STRING_FROM_ENUM(AUDIO_USAGE_ALARM),
+ MAKE_STRING_FROM_ENUM(AUDIO_USAGE_NOTIFICATION),
+ MAKE_STRING_FROM_ENUM(AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE),
+ MAKE_STRING_FROM_ENUM(AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST),
+ MAKE_STRING_FROM_ENUM(AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT),
+ MAKE_STRING_FROM_ENUM(AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED),
+ MAKE_STRING_FROM_ENUM(AUDIO_USAGE_NOTIFICATION_EVENT),
+ MAKE_STRING_FROM_ENUM(AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY),
+ MAKE_STRING_FROM_ENUM(AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE),
+ MAKE_STRING_FROM_ENUM(AUDIO_USAGE_ASSISTANCE_SONIFICATION),
+ MAKE_STRING_FROM_ENUM(AUDIO_USAGE_GAME),
+ MAKE_STRING_FROM_ENUM(AUDIO_USAGE_VIRTUAL_SOURCE),
+ MAKE_STRING_FROM_ENUM(AUDIO_USAGE_CNT),
+ MAKE_STRING_FROM_ENUM(AUDIO_USAGE_MAX),
+ TERMINATOR
+};
+
+template <>
+const SourceTypeConverter::Table SourceTypeConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_DEFAULT),
+ MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_MIC),
+ MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_VOICE_UPLINK),
+ MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_VOICE_DOWNLINK),
+ MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_VOICE_CALL),
+ MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_CAMCORDER),
+ MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_VOICE_RECOGNITION),
+ MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_VOICE_COMMUNICATION),
+ MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_REMOTE_SUBMIX),
+ MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_UNPROCESSED),
+ MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_CNT),
+ MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_MAX),
+ MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_FM_TUNER),
+ MAKE_STRING_FROM_ENUM(AUDIO_SOURCE_HOTWORD),
+ TERMINATOR
+};
+
+template class TypeConverter<OutputDeviceTraits>;
+template class TypeConverter<InputDeviceTraits>;
+template class TypeConverter<OutputFlagTraits>;
+template class TypeConverter<InputFlagTraits>;
+template class TypeConverter<FormatTraits>;
+template class TypeConverter<OutputChannelTraits>;
+template class TypeConverter<InputChannelTraits>;
+template class TypeConverter<ChannelIndexTraits>;
+template class TypeConverter<GainModeTraits>;
+template class TypeConverter<StreamTraits>;
+template class TypeConverter<AudioModeTraits>;
+template class TypeConverter<UsageTraits>;
+template class TypeConverter<SourceTraits>;
+
+bool deviceFromString(const std::string& literalDevice, audio_devices_t& device) {
+ return InputDeviceConverter::fromString(literalDevice, device) ||
+ OutputDeviceConverter::fromString(literalDevice, device);
+}
+
+bool deviceToString(audio_devices_t device, std::string& literalDevice) {
+ if (device & AUDIO_DEVICE_BIT_IN) {
+ return InputDeviceConverter::toString(device, literalDevice);
+ } else {
+ return OutputDeviceConverter::toString(device, literalDevice);
+ }
+}
+
+SampleRateTraits::Collection samplingRatesFromString(
+ const std::string &samplingRates, const char *del)
+{
+ SampleRateTraits::Collection samplingRateCollection;
+ collectionFromString<SampleRateTraits>(samplingRates, samplingRateCollection, del);
+ return samplingRateCollection;
+}
+
+FormatTraits::Collection formatsFromString(
+ const std::string &formats, const char *del)
+{
+ FormatTraits::Collection formatCollection;
+ FormatConverter::collectionFromString(formats, formatCollection, del);
+ return formatCollection;
+}
+
+audio_format_t formatFromString(const std::string &literalFormat, audio_format_t defaultFormat)
+{
+    audio_format_t format = defaultFormat; // fromString only writes on success; avoids returning an uninitialized value
+    if (literalFormat.empty()) {
+        return defaultFormat;
+    }
+    FormatConverter::fromString(literalFormat, format);
+    return format;
+}
+
+audio_channel_mask_t channelMaskFromString(const std::string &literalChannels)
+{
+    audio_channel_mask_t channels; // set by whichever converter recognizes the literal
+    if (!OutputChannelConverter::fromString(literalChannels, channels) &&
+        !InputChannelConverter::fromString(literalChannels, channels)) {
+        return AUDIO_CHANNEL_INVALID;
+    }
+    return channels;
+}
+
+ChannelTraits::Collection channelMasksFromString(
+ const std::string &channels, const char *del)
+{
+ ChannelTraits::Collection channelMaskCollection;
+ OutputChannelConverter::collectionFromString(channels, channelMaskCollection, del);
+ InputChannelConverter::collectionFromString(channels, channelMaskCollection, del);
+ ChannelIndexConverter::collectionFromString(channels, channelMaskCollection, del);
+ return channelMaskCollection;
+}
+
+InputChannelTraits::Collection inputChannelMasksFromString(
+ const std::string &inChannels, const char *del)
+{
+ InputChannelTraits::Collection inputChannelMaskCollection;
+ InputChannelConverter::collectionFromString(inChannels, inputChannelMaskCollection, del);
+ ChannelIndexConverter::collectionFromString(inChannels, inputChannelMaskCollection, del);
+ return inputChannelMaskCollection;
+}
+
+OutputChannelTraits::Collection outputChannelMasksFromString(
+ const std::string &outChannels, const char *del)
+{
+ OutputChannelTraits::Collection outputChannelMaskCollection;
+ OutputChannelConverter::collectionFromString(outChannels, outputChannelMaskCollection, del);
+ ChannelIndexConverter::collectionFromString(outChannels, outputChannelMaskCollection, del);
+ return outputChannelMaskCollection;
+}
+
+}; // namespace android
diff --git a/media/libmedia/aidl/android/IGraphicBufferSource.aidl b/media/libmedia/aidl/android/IGraphicBufferSource.aidl
new file mode 100644
index 0000000..f3c7abc
--- /dev/null
+++ b/media/libmedia/aidl/android/IGraphicBufferSource.aidl
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android;
+
+import android.IOMXNode;
+
+/**
+ * Binder interface for controlling a graphic buffer source.
+ *
+ * @hide
+ */
+interface IGraphicBufferSource {
+ void configure(IOMXNode omxNode, int dataSpace);
+ void setSuspend(boolean suspend, long suspendTimeUs);
+ void setRepeatPreviousFrameDelayUs(long repeatAfterUs);
+ void setMaxFps(float maxFps);
+ void setTimeLapseConfig(double fps, double captureFps);
+ void setStartTimeUs(long startTimeUs);
+ void setStopTimeUs(long stopTimeUs);
+ void setColorAspects(int aspects);
+ void setTimeOffsetUs(long timeOffsetsUs);
+ void signalEndOfInputStream();
+}
diff --git a/media/libmedia/aidl/android/IOMXBufferSource.aidl b/media/libmedia/aidl/android/IOMXBufferSource.aidl
new file mode 100644
index 0000000..a5bf448
--- /dev/null
+++ b/media/libmedia/aidl/android/IOMXBufferSource.aidl
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android;
+
+import android.OMXFenceParcelable;
+
+/**
+ * Binder interface for a buffer source to be used together with an OMX encoder
+ *
+ * @hide
+ */
+interface IOMXBufferSource {
+ /**
+ * This is called when OMX transitions to OMX_StateExecuting, which means
+ * we can start handing it buffers. If we already have buffers of data
+ * sitting in the BufferQueue, this will send them to the codec.
+ */
+ void onOmxExecuting();
+
+ /**
+ * This is called when OMX transitions to OMX_StateIdle, indicating that
+ * the codec is meant to return all buffers back to the client for them
+ * to be freed. Do NOT submit any more buffers to the component.
+ */
+ void onOmxIdle();
+
+ /**
+ * This is called when OMX transitions to OMX_StateLoaded, indicating that
+ * we are shutting down.
+ */
+ void onOmxLoaded();
+
+ /**
+ * A "codec buffer", i.e. a buffer that can be used to pass data into
+ * the encoder, has been allocated.
+ */
+ void onInputBufferAdded(int bufferID);
+
+ /**
+ * Called from OnEmptyBufferDone. If we have a BQ buffer available,
+ * fill it with a new frame of data; otherwise, just mark it as available.
+ *
+ * fenceParcel contains the fence's fd that the callee should wait on before
+ * using the buffer (or pass on to the user of the buffer, if the user supports
+ * fences). Callee takes ownership of the fence fd even if it fails.
+ */
+ void onInputBufferEmptied(int bufferID, in OMXFenceParcelable fenceParcel);
+}
\ No newline at end of file
diff --git a/media/libmedia/aidl/android/IOMXNode.aidl b/media/libmedia/aidl/android/IOMXNode.aidl
new file mode 100644
index 0000000..ec87fd2
--- /dev/null
+++ b/media/libmedia/aidl/android/IOMXNode.aidl
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android;
+
+/** @hide */
+interface IOMXNode {
+ // Stub for manual implementation
+}
diff --git a/media/libmedia/aidl/android/IOMXNode.h b/media/libmedia/aidl/android/IOMXNode.h
new file mode 100644
index 0000000..7b17614
--- /dev/null
+++ b/media/libmedia/aidl/android/IOMXNode.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/IOMX.h>
diff --git a/media/libmedia/aidl/android/OMXFenceParcelable.aidl b/media/libmedia/aidl/android/OMXFenceParcelable.aidl
new file mode 100644
index 0000000..6d517e8
--- /dev/null
+++ b/media/libmedia/aidl/android/OMXFenceParcelable.aidl
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android;
+
+/** @hide */
+parcelable OMXFenceParcelable cpp_header "media/OMXFenceParcelable.h";
diff --git a/include/media/AVSyncSettings.h b/media/libmedia/include/media/AVSyncSettings.h
similarity index 100%
rename from include/media/AVSyncSettings.h
rename to media/libmedia/include/media/AVSyncSettings.h
diff --git a/media/libmedia/include/media/BufferProviders.h b/media/libmedia/include/media/BufferProviders.h
new file mode 100644
index 0000000..9d026f6
--- /dev/null
+++ b/media/libmedia/include/media/BufferProviders.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_BUFFER_PROVIDERS_H
+#define ANDROID_BUFFER_PROVIDERS_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <media/AudioBufferProvider.h>
+#include <media/AudioResamplerPublic.h>
+#include <system/audio.h>
+#include <system/audio_effect.h>
+#include <utils/StrongPointer.h>
+
+// external forward declaration from external/sonic/sonic.h
+struct sonicStreamStruct;
+typedef struct sonicStreamStruct *sonicStream;
+
+namespace android {
+
+class EffectBufferHalInterface;
+class EffectHalInterface;
+class EffectsFactoryHalInterface;
+
+// ----------------------------------------------------------------------------
+
+class PassthruBufferProvider : public AudioBufferProvider {
+public:
+ PassthruBufferProvider() : mTrackBufferProvider(NULL) { }
+
+ virtual ~PassthruBufferProvider() { }
+
+ // call this to release the buffer to the upstream provider.
+ // treat it as an audio discontinuity for future samples.
+ virtual void reset() { }
+
+ // set the upstream buffer provider. Consider calling "reset" before this function.
+ virtual void setBufferProvider(AudioBufferProvider *p) {
+ mTrackBufferProvider = p;
+ }
+
+protected:
+ AudioBufferProvider *mTrackBufferProvider;
+};
+
+// Base AudioBufferProvider class used for DownMixerBufferProvider, RemixBufferProvider,
+// and ReformatBufferProvider.
+// It handles a private buffer for use in converting format or channel masks from the
+// input data to a form acceptable by the mixer.
+// TODO: Make a ResamplerBufferProvider when integers are entirely removed from the
+// processing pipeline.
+class CopyBufferProvider : public PassthruBufferProvider {
+public:
+ // Use a private buffer of bufferFrameCount frames (each frame is outputFrameSize bytes).
+ // If bufferFrameCount is 0, no private buffer is created and in-place modification of
+ // the upstream buffer provider's buffers is performed by copyFrames().
+ CopyBufferProvider(size_t inputFrameSize, size_t outputFrameSize,
+ size_t bufferFrameCount);
+ virtual ~CopyBufferProvider();
+
+ // Overrides AudioBufferProvider methods
+ virtual status_t getNextBuffer(Buffer *buffer);
+ virtual void releaseBuffer(Buffer *buffer);
+
+ // Overrides PassthruBufferProvider
+ virtual void reset();
+
+ // this function should be supplied by the derived class. It converts
+ // #frames in the *src pointer to the *dst pointer. It is public because
+ // some providers will allow this to work on arbitrary buffers outside
+ // of the internal buffers.
+ virtual void copyFrames(void *dst, const void *src, size_t frames) = 0;
+
+protected:
+ const size_t mInputFrameSize;
+ const size_t mOutputFrameSize;
+private:
+ AudioBufferProvider::Buffer mBuffer;
+ const size_t mLocalBufferFrameCount;
+ void *mLocalBufferData;
+ size_t mConsumed;
+};
+
+// DownmixerBufferProvider derives from CopyBufferProvider to provide
+// position dependent downmixing by an Audio Effect.
+class DownmixerBufferProvider : public CopyBufferProvider {
+public:
+ DownmixerBufferProvider(audio_channel_mask_t inputChannelMask,
+ audio_channel_mask_t outputChannelMask, audio_format_t format,
+ uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount);
+ virtual ~DownmixerBufferProvider();
+ //Overrides
+ virtual void copyFrames(void *dst, const void *src, size_t frames);
+
+ bool isValid() const { return mDownmixInterface.get() != NULL; }
+ static status_t init();
+ static bool isMultichannelCapable() { return sIsMultichannelCapable; }
+
+protected:
+ sp<EffectsFactoryHalInterface> mEffectsFactory;
+ sp<EffectHalInterface> mDownmixInterface;
+ size_t mInFrameSize;
+ size_t mOutFrameSize;
+ sp<EffectBufferHalInterface> mInBuffer;
+ sp<EffectBufferHalInterface> mOutBuffer;
+ effect_config_t mDownmixConfig;
+
+ // effect descriptor for the downmixer used by the mixer
+ static effect_descriptor_t sDwnmFxDesc;
+ // indicates whether a downmix effect has been found and is usable by this mixer
+ static bool sIsMultichannelCapable;
+ // FIXME: should we allow effects outside of the framework?
+ // We need to here. A special ioId that must be <= -2 so it does not map to a session.
+ static const int32_t SESSION_ID_INVALID_AND_IGNORED = -2;
+};
+
+// RemixBufferProvider derives from CopyBufferProvider to perform an
+// upmix or downmix to the proper channel count and mask.
+class RemixBufferProvider : public CopyBufferProvider {
+public:
+ RemixBufferProvider(audio_channel_mask_t inputChannelMask,
+ audio_channel_mask_t outputChannelMask, audio_format_t format,
+ size_t bufferFrameCount);
+ //Overrides
+ virtual void copyFrames(void *dst, const void *src, size_t frames);
+
+protected:
+ const audio_format_t mFormat;
+ const size_t mSampleSize;
+ const size_t mInputChannels;
+ const size_t mOutputChannels;
+ int8_t mIdxAry[sizeof(uint32_t) * 8]; // 32 bits => channel indices
+};
+
+// ReformatBufferProvider derives from CopyBufferProvider to convert the input data
+// to an acceptable mixer input format type.
+class ReformatBufferProvider : public CopyBufferProvider {
+public:
+ ReformatBufferProvider(int32_t channelCount,
+ audio_format_t inputFormat, audio_format_t outputFormat,
+ size_t bufferFrameCount);
+ virtual void copyFrames(void *dst, const void *src, size_t frames);
+
+protected:
+ const uint32_t mChannelCount;
+ const audio_format_t mInputFormat;
+ const audio_format_t mOutputFormat;
+};
+
+// TimestretchBufferProvider derives from PassthruBufferProvider for time stretching
+class TimestretchBufferProvider : public PassthruBufferProvider {
+public:
+ TimestretchBufferProvider(int32_t channelCount,
+ audio_format_t format, uint32_t sampleRate,
+ const AudioPlaybackRate &playbackRate);
+ virtual ~TimestretchBufferProvider();
+
+ // Overrides AudioBufferProvider methods
+ virtual status_t getNextBuffer(Buffer* buffer);
+ virtual void releaseBuffer(Buffer* buffer);
+
+ // Overrides PassthruBufferProvider
+ virtual void reset();
+
+ virtual status_t setPlaybackRate(const AudioPlaybackRate &playbackRate);
+
+ // processes frames
+ // dstBuffer is where to place the data
+ // dstFrames [in/out] is the desired frames (return with actual placed in buffer)
+ // srcBuffer is the source data
+ // srcFrames [in/out] is the available source frames (return with consumed)
+ virtual void processFrames(void *dstBuffer, size_t *dstFrames,
+ const void *srcBuffer, size_t *srcFrames);
+
+protected:
+ const uint32_t mChannelCount;
+ const audio_format_t mFormat;
+ const uint32_t mSampleRate; // const for now (TODO change this)
+ const size_t mFrameSize;
+ AudioPlaybackRate mPlaybackRate;
+
+private:
+ AudioBufferProvider::Buffer mBuffer; // for upstream request
+ size_t mLocalBufferFrameCount; // size of local buffer
+ void *mLocalBufferData; // internally allocated buffer for data returned
+ // to caller
+ size_t mRemaining; // remaining data in local buffer
+ sonicStream mSonicStream; // handle to sonic timestretch object
+ //FIXME: this dependency should be abstracted out
+ bool mFallbackFailErrorShown; // log fallback error only once
+ bool mAudioPlaybackRateValid; // flag for current parameters validity
+};
+
+// ----------------------------------------------------------------------------
+} // namespace android
+
+#endif // ANDROID_BUFFER_PROVIDERS_H
diff --git a/media/libmedia/include/media/BufferingSettings.h b/media/libmedia/include/media/BufferingSettings.h
new file mode 100644
index 0000000..e812d2a
--- /dev/null
+++ b/media/libmedia/include/media/BufferingSettings.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_BUFFERING_SETTINGS_H
+#define ANDROID_BUFFERING_SETTINGS_H
+
+#include <binder/Parcelable.h>
+
+namespace android {
+
+enum BufferingMode : int {
+ // Do not support buffering.
+ BUFFERING_MODE_NONE = 0,
+ // Support only time based buffering.
+ BUFFERING_MODE_TIME_ONLY = 1,
+ // Support only size based buffering.
+ BUFFERING_MODE_SIZE_ONLY = 2,
+ // Support both time and size based buffering, time based calculation precedes size based.
+ // Size based calculation will be used only when time information is not available for
+ // the stream.
+ BUFFERING_MODE_TIME_THEN_SIZE = 3,
+ // Number of modes.
+ BUFFERING_MODE_COUNT = 4,
+};
+
+struct BufferingSettings : public Parcelable {
+ static const int kNoWatermark = -1;
+
+ static bool IsValidBufferingMode(int mode);
+ static bool IsTimeBasedBufferingMode(int mode);
+ static bool IsSizeBasedBufferingMode(int mode);
+
+ BufferingMode mInitialBufferingMode; // for prepare
+ BufferingMode mRebufferingMode; // for playback
+
+ int mInitialWatermarkMs; // time based
+ int mInitialWatermarkKB; // size based
+
+ // When cached data is below this mark, playback will be paused for buffering
+ // till data reach |mRebufferingWatermarkHighMs| or end of stream.
+ int mRebufferingWatermarkLowMs;
+ // When cached data is above this mark, buffering will be paused.
+ int mRebufferingWatermarkHighMs;
+
+ // When cached data is below this mark, playback will be paused for buffering
+ // till data reach |mRebufferingWatermarkHighKB| or end of stream.
+ int mRebufferingWatermarkLowKB;
+ // When cached data is above this mark, buffering will be paused.
+ int mRebufferingWatermarkHighKB;
+
+ BufferingSettings();
+
+ status_t writeToParcel(Parcel* parcel) const override;
+ status_t readFromParcel(const Parcel* parcel) override;
+
+ String8 toString() const;
+};
+
+} // namespace android
+
+// ---------------------------------------------------------------------------
+
+#endif // ANDROID_BUFFERING_SETTINGS_H
diff --git a/include/media/CharacterEncodingDetector.h b/media/libmedia/include/media/CharacterEncodingDetector.h
similarity index 100%
rename from include/media/CharacterEncodingDetector.h
rename to media/libmedia/include/media/CharacterEncodingDetector.h
diff --git a/media/libmedia/include/media/Crypto.h b/media/libmedia/include/media/Crypto.h
new file mode 100644
index 0000000..b68413d
--- /dev/null
+++ b/media/libmedia/include/media/Crypto.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CRYPTO_H_
+
+#define CRYPTO_H_
+
+#include <media/ICrypto.h>
+#include <utils/threads.h>
+#include <utils/KeyedVector.h>
+
+#include "SharedLibrary.h"
+
+namespace android {
+
+struct CryptoFactory;
+struct CryptoPlugin;
+
+struct Crypto : public BnCrypto {
+ Crypto();
+ virtual ~Crypto();
+
+ virtual status_t initCheck() const;
+
+ virtual bool isCryptoSchemeSupported(const uint8_t uuid[16]);
+
+ virtual status_t createPlugin(
+ const uint8_t uuid[16], const void *data, size_t size);
+
+ virtual status_t destroyPlugin();
+
+ virtual bool requiresSecureDecoderComponent(
+ const char *mime) const;
+
+ virtual void notifyResolution(uint32_t width, uint32_t height);
+
+ virtual status_t setMediaDrmSession(const Vector<uint8_t> &sessionId);
+
+ virtual ssize_t decrypt(const uint8_t key[16], const uint8_t iv[16],
+ CryptoPlugin::Mode mode, const CryptoPlugin::Pattern &pattern,
+ const sp<IMemory> &source, size_t offset,
+ const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
+ const DestinationBuffer &destination, AString *errorDetailMsg);
+
+ virtual void setHeap(const sp<IMemoryHeap>&) {}
+ virtual void unsetHeap(const sp<IMemoryHeap>&) {}
+
+private:
+ mutable Mutex mLock;
+
+ status_t mInitCheck;
+ sp<SharedLibrary> mLibrary;
+ CryptoFactory *mFactory;
+ CryptoPlugin *mPlugin;
+
+ static KeyedVector<Vector<uint8_t>, String8> mUUIDToLibraryPathMap;
+ static KeyedVector<String8, wp<SharedLibrary> > mLibraryPathToOpenLibraryMap;
+ static Mutex mMapLock;
+
+ void findFactoryForScheme(const uint8_t uuid[16]);
+ bool loadLibraryForScheme(const String8 &path, const uint8_t uuid[16]);
+ void closeFactory();
+
+ DISALLOW_EVIL_CONSTRUCTORS(Crypto);
+};
+
+} // namespace android
+
+#endif // CRYPTO_H_
diff --git a/media/libmedia/include/media/CryptoHal.h b/media/libmedia/include/media/CryptoHal.h
new file mode 100644
index 0000000..a5d8b43
--- /dev/null
+++ b/media/libmedia/include/media/CryptoHal.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CRYPTO_HAL_H_
+
+#define CRYPTO_HAL_H_
+
+#include <android/hardware/drm/1.0/ICryptoFactory.h>
+#include <android/hardware/drm/1.0/ICryptoPlugin.h>
+
+#include <media/ICrypto.h>
+#include <utils/KeyedVector.h>
+#include <utils/threads.h>
+
+using ::android::hardware::drm::V1_0::ICryptoFactory;
+using ::android::hardware::drm::V1_0::ICryptoPlugin;
+using ::android::hardware::drm::V1_0::SharedBuffer;
+
+class IMemoryHeap;
+
+namespace android {
+
+struct CryptoHal : public BnCrypto {
+ CryptoHal();
+ virtual ~CryptoHal();
+
+ virtual status_t initCheck() const;
+
+ virtual bool isCryptoSchemeSupported(const uint8_t uuid[16]);
+
+ virtual status_t createPlugin(
+ const uint8_t uuid[16], const void *data, size_t size);
+
+ virtual status_t destroyPlugin();
+
+ virtual bool requiresSecureDecoderComponent(
+ const char *mime) const;
+
+ virtual void notifyResolution(uint32_t width, uint32_t height);
+
+ virtual status_t setMediaDrmSession(const Vector<uint8_t> &sessionId);
+
+ virtual ssize_t decrypt(const uint8_t key[16], const uint8_t iv[16],
+ CryptoPlugin::Mode mode, const CryptoPlugin::Pattern &pattern,
+ const ICrypto::SourceBuffer &source, size_t offset,
+ const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
+ const ICrypto::DestinationBuffer &destination,
+ AString *errorDetailMsg);
+
+ virtual int32_t setHeap(const sp<IMemoryHeap>& heap) {
+ return setHeapBase(heap);
+ }
+ virtual void unsetHeap(int32_t seqNum) { clearHeapBase(seqNum); }
+
+private:
+ mutable Mutex mLock;
+
+ const Vector<sp<ICryptoFactory>> mFactories;
+ sp<ICryptoPlugin> mPlugin;
+
+ /**
+ * mInitCheck is:
+ * NO_INIT if a plugin hasn't been created yet
+ * ERROR_UNSUPPORTED if a plugin can't be created for the uuid
+ * OK after a plugin has been created and mPlugin is valid
+ */
+ status_t mInitCheck;
+
+ KeyedVector<int32_t, uint32_t> mHeapBases;
+ uint32_t mNextBufferId;
+ int32_t mHeapSeqNum;
+
+ Vector<sp<ICryptoFactory>> makeCryptoFactories();
+ sp<ICryptoPlugin> makeCryptoPlugin(const sp<ICryptoFactory>& factory,
+ const uint8_t uuid[16], const void *initData, size_t size);
+
+ int32_t setHeapBase(const sp<IMemoryHeap>& heap);
+ void clearHeapBase(int32_t seqNum);
+
+ status_t toSharedBuffer(const sp<IMemory>& memory, int32_t seqNum, ::SharedBuffer* buffer);
+
+ DISALLOW_EVIL_CONSTRUCTORS(CryptoHal);
+};
+
+} // namespace android
+
+#endif // CRYPTO_HAL_H_
diff --git a/media/libmedia/include/media/Drm.h b/media/libmedia/include/media/Drm.h
new file mode 100644
index 0000000..fc869cc
--- /dev/null
+++ b/media/libmedia/include/media/Drm.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DRM_H_
+
+#define DRM_H_
+
+#include "SharedLibrary.h"
+
+#include <media/IDrm.h>
+#include <media/IDrmClient.h>
+#include <utils/threads.h>
+
+namespace android {
+
+class DrmFactory;
+class DrmPlugin;
+struct DrmSessionClientInterface;
+
+struct Drm : public BnDrm,
+ public IBinder::DeathRecipient,
+ public DrmPluginListener {
+ Drm();
+ virtual ~Drm();
+
+ virtual status_t initCheck() const;
+
+ virtual bool isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType);
+
+ virtual status_t createPlugin(const uint8_t uuid[16], const String8 &appPackageName);
+
+ virtual status_t destroyPlugin();
+
+ virtual status_t openSession(Vector<uint8_t> &sessionId);
+
+ virtual status_t closeSession(Vector<uint8_t> const &sessionId);
+
+ virtual status_t
+ getKeyRequest(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &initData,
+ String8 const &mimeType, DrmPlugin::KeyType keyType,
+ KeyedVector<String8, String8> const &optionalParameters,
+ Vector<uint8_t> &request, String8 &defaultUrl,
+ DrmPlugin::KeyRequestType *keyRequestType);
+
+ virtual status_t provideKeyResponse(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &response,
+ Vector<uint8_t> &keySetId);
+
+ virtual status_t removeKeys(Vector<uint8_t> const &keySetId);
+
+ virtual status_t restoreKeys(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keySetId);
+
+ virtual status_t queryKeyStatus(Vector<uint8_t> const &sessionId,
+ KeyedVector<String8, String8> &infoMap) const;
+
+ virtual status_t getProvisionRequest(String8 const &certType,
+ String8 const &certAuthority,
+ Vector<uint8_t> &request,
+ String8 &defaulUrl);
+
+ virtual status_t provideProvisionResponse(Vector<uint8_t> const &response,
+ Vector<uint8_t> &certificate,
+ Vector<uint8_t> &wrappedKey);
+
+ virtual status_t getSecureStops(List<Vector<uint8_t> > &secureStops);
+ virtual status_t getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop);
+
+ virtual status_t releaseSecureStops(Vector<uint8_t> const &ssRelease);
+ virtual status_t releaseAllSecureStops();
+
+ virtual status_t getPropertyString(String8 const &name, String8 &value ) const;
+ virtual status_t getPropertyByteArray(String8 const &name,
+ Vector<uint8_t> &value ) const;
+ virtual status_t setPropertyString(String8 const &name, String8 const &value ) const;
+ virtual status_t setPropertyByteArray(String8 const &name,
+ Vector<uint8_t> const &value ) const;
+
+ virtual status_t setCipherAlgorithm(Vector<uint8_t> const &sessionId,
+ String8 const &algorithm);
+
+ virtual status_t setMacAlgorithm(Vector<uint8_t> const &sessionId,
+ String8 const &algorithm);
+
+ virtual status_t encrypt(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keyId,
+ Vector<uint8_t> const &input,
+ Vector<uint8_t> const &iv,
+ Vector<uint8_t> &output);
+
+ virtual status_t decrypt(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keyId,
+ Vector<uint8_t> const &input,
+ Vector<uint8_t> const &iv,
+ Vector<uint8_t> &output);
+
+ virtual status_t sign(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keyId,
+ Vector<uint8_t> const &message,
+ Vector<uint8_t> &signature);
+
+ virtual status_t verify(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keyId,
+ Vector<uint8_t> const &message,
+ Vector<uint8_t> const &signature,
+ bool &match);
+
+ virtual status_t signRSA(Vector<uint8_t> const &sessionId,
+ String8 const &algorithm,
+ Vector<uint8_t> const &message,
+ Vector<uint8_t> const &wrappedKey,
+ Vector<uint8_t> &signature);
+
+ virtual status_t setListener(const sp<IDrmClient>& listener);
+
+ virtual void sendEvent(DrmPlugin::EventType eventType, int extra,
+ Vector<uint8_t> const *sessionId,
+ Vector<uint8_t> const *data);
+
+ virtual void sendExpirationUpdate(Vector<uint8_t> const *sessionId,
+ int64_t expiryTimeInMS);
+
+ virtual void sendKeysChange(Vector<uint8_t> const *sessionId,
+ Vector<DrmPlugin::KeyStatus> const *keyStatusList,
+ bool hasNewUsableKey);
+
+ virtual void binderDied(const wp<IBinder> &the_late_who);
+
+private:
+ static Mutex mLock;
+
+ status_t mInitCheck;
+
+ sp<DrmSessionClientInterface> mDrmSessionClient;
+
+ sp<IDrmClient> mListener;
+ mutable Mutex mEventLock;
+ mutable Mutex mNotifyLock;
+
+ sp<SharedLibrary> mLibrary;
+ DrmFactory *mFactory;
+ DrmPlugin *mPlugin;
+
+ static KeyedVector<Vector<uint8_t>, String8> mUUIDToLibraryPathMap;
+ static KeyedVector<String8, wp<SharedLibrary> > mLibraryPathToOpenLibraryMap;
+ static Mutex mMapLock;
+
+ void findFactoryForScheme(const uint8_t uuid[16]);
+ bool loadLibraryForScheme(const String8 &path, const uint8_t uuid[16]);
+ void closeFactory();
+ void writeByteArray(Parcel &obj, Vector<uint8_t> const *array);
+
+ DISALLOW_EVIL_CONSTRUCTORS(Drm);
+};
+
+} // namespace android
+
+#endif // DRM_H_
diff --git a/media/libmedia/include/media/DrmHal.h b/media/libmedia/include/media/DrmHal.h
new file mode 100644
index 0000000..e031765
--- /dev/null
+++ b/media/libmedia/include/media/DrmHal.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DRM_HAL_H_
+
+#define DRM_HAL_H_
+
+#include <android/hardware/drm/1.0/IDrmPlugin.h>
+#include <android/hardware/drm/1.0/IDrmPluginListener.h>
+#include <android/hardware/drm/1.0/IDrmFactory.h>
+
+#include <media/IDrm.h>
+#include <media/IDrmClient.h>
+#include <utils/threads.h>
+
+using ::android::hardware::drm::V1_0::EventType;
+using ::android::hardware::drm::V1_0::IDrmFactory;
+using ::android::hardware::drm::V1_0::IDrmPlugin;
+using ::android::hardware::drm::V1_0::IDrmPluginListener;
+using ::android::hardware::drm::V1_0::KeyStatus;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+
+namespace android {
+
+struct DrmSessionClientInterface;
+
+struct DrmHal : public BnDrm,
+ public IBinder::DeathRecipient,
+ public IDrmPluginListener {
+ DrmHal();
+ virtual ~DrmHal();
+
+ virtual status_t initCheck() const;
+
+ virtual bool isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType);
+
+ virtual status_t createPlugin(const uint8_t uuid[16],
+ const String8 &appPackageName);
+
+ virtual status_t destroyPlugin();
+
+ virtual status_t openSession(Vector<uint8_t> &sessionId);
+
+ virtual status_t closeSession(Vector<uint8_t> const &sessionId);
+
+ virtual status_t
+ getKeyRequest(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &initData,
+ String8 const &mimeType, DrmPlugin::KeyType keyType,
+ KeyedVector<String8, String8> const &optionalParameters,
+ Vector<uint8_t> &request, String8 &defaultUrl,
+ DrmPlugin::KeyRequestType *keyRequestType);
+
+ virtual status_t provideKeyResponse(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &response,
+ Vector<uint8_t> &keySetId);
+
+ virtual status_t removeKeys(Vector<uint8_t> const &keySetId);
+
+ virtual status_t restoreKeys(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keySetId);
+
+ virtual status_t queryKeyStatus(Vector<uint8_t> const &sessionId,
+ KeyedVector<String8, String8> &infoMap) const;
+
+ virtual status_t getProvisionRequest(String8 const &certType,
+ String8 const &certAuthority,
+ Vector<uint8_t> &request,
+ String8 &defaulUrl);
+
+ virtual status_t provideProvisionResponse(Vector<uint8_t> const &response,
+ Vector<uint8_t> &certificate,
+ Vector<uint8_t> &wrappedKey);
+
+ virtual status_t getSecureStops(List<Vector<uint8_t>> &secureStops);
+ virtual status_t getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop);
+
+ virtual status_t releaseSecureStops(Vector<uint8_t> const &ssRelease);
+ virtual status_t releaseAllSecureStops();
+
+ virtual status_t getPropertyString(String8 const &name, String8 &value ) const;
+ virtual status_t getPropertyByteArray(String8 const &name,
+ Vector<uint8_t> &value ) const;
+ virtual status_t setPropertyString(String8 const &name, String8 const &value ) const;
+ virtual status_t setPropertyByteArray(String8 const &name,
+ Vector<uint8_t> const &value ) const;
+
+ virtual status_t setCipherAlgorithm(Vector<uint8_t> const &sessionId,
+ String8 const &algorithm);
+
+ virtual status_t setMacAlgorithm(Vector<uint8_t> const &sessionId,
+ String8 const &algorithm);
+
+ virtual status_t encrypt(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keyId,
+ Vector<uint8_t> const &input,
+ Vector<uint8_t> const &iv,
+ Vector<uint8_t> &output);
+
+ virtual status_t decrypt(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keyId,
+ Vector<uint8_t> const &input,
+ Vector<uint8_t> const &iv,
+ Vector<uint8_t> &output);
+
+ virtual status_t sign(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keyId,
+ Vector<uint8_t> const &message,
+ Vector<uint8_t> &signature);
+
+ virtual status_t verify(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keyId,
+ Vector<uint8_t> const &message,
+ Vector<uint8_t> const &signature,
+ bool &match);
+
+ virtual status_t signRSA(Vector<uint8_t> const &sessionId,
+ String8 const &algorithm,
+ Vector<uint8_t> const &message,
+ Vector<uint8_t> const &wrappedKey,
+ Vector<uint8_t> &signature);
+
+ virtual status_t setListener(const sp<IDrmClient>& listener);
+
+ // Methods of IDrmPluginListener
+ Return<void> sendEvent(EventType eventType,
+ const hidl_vec<uint8_t>& sessionId, const hidl_vec<uint8_t>& data);
+
+ Return<void> sendExpirationUpdate(const hidl_vec<uint8_t>& sessionId,
+ int64_t expiryTimeInMS);
+
+ Return<void> sendKeysChange(const hidl_vec<uint8_t>& sessionId,
+ const hidl_vec<KeyStatus>& keyStatusList, bool hasNewUsableKey);
+
+ virtual void binderDied(const wp<IBinder> &the_late_who);
+
+private:
+ static Mutex mLock;
+
+ sp<DrmSessionClientInterface> mDrmSessionClient;
+
+ sp<IDrmClient> mListener;
+ mutable Mutex mEventLock;
+ mutable Mutex mNotifyLock;
+
+ const Vector<sp<IDrmFactory>> mFactories;
+ sp<IDrmPlugin> mPlugin;
+
+ /**
+ * mInitCheck is:
+ * NO_INIT if a plugin hasn't been created yet
+ * ERROR_UNSUPPORTED if a plugin can't be created for the uuid
+ * OK after a plugin has been created and mPlugin is valid
+ */
+ status_t mInitCheck;
+
+ Vector<sp<IDrmFactory>> makeDrmFactories();
+ sp<IDrmPlugin> makeDrmPlugin(const sp<IDrmFactory>& factory,
+ const uint8_t uuid[16], const String8& appPackageName);
+
+ void writeByteArray(Parcel &obj, const hidl_vec<uint8_t>& array);
+
+ DISALLOW_EVIL_CONSTRUCTORS(DrmHal);
+};
+
+} // namespace android
+
+#endif // DRM_HAL_H_
diff --git a/media/libmedia/include/media/DrmPluginPath.h b/media/libmedia/include/media/DrmPluginPath.h
new file mode 100644
index 0000000..51ba26e
--- /dev/null
+++ b/media/libmedia/include/media/DrmPluginPath.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DRM_PLUGIN_PATH_H_
+
+#define DRM_PLUGIN_PATH_H_
+
+namespace android {
+
+const char* getDrmPluginPath();
+
+} // namespace android
+
+#endif // DRM_PLUGIN_PATH_H_
diff --git a/include/media/DrmSessionClientInterface.h b/media/libmedia/include/media/DrmSessionClientInterface.h
similarity index 100%
rename from include/media/DrmSessionClientInterface.h
rename to media/libmedia/include/media/DrmSessionClientInterface.h
diff --git a/include/media/DrmSessionManager.h b/media/libmedia/include/media/DrmSessionManager.h
similarity index 100%
rename from include/media/DrmSessionManager.h
rename to media/libmedia/include/media/DrmSessionManager.h
diff --git a/include/media/ExtendedAudioBufferProvider.h b/media/libmedia/include/media/ExtendedAudioBufferProvider.h
similarity index 100%
rename from include/media/ExtendedAudioBufferProvider.h
rename to media/libmedia/include/media/ExtendedAudioBufferProvider.h
diff --git a/media/libmedia/include/media/ICrypto.h b/media/libmedia/include/media/ICrypto.h
new file mode 100644
index 0000000..6d896b8
--- /dev/null
+++ b/media/libmedia/include/media/ICrypto.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <binder/IInterface.h>
+#include <cutils/native_handle.h>
+#include <media/hardware/CryptoAPI.h>
+#include <media/stagefright/foundation/ABase.h>
+
+#ifndef ANDROID_ICRYPTO_H_
+
+#define ANDROID_ICRYPTO_H_
+
+namespace android {
+
+struct AString;
+class IMemory;
+class IMemoryHeap;
+
+struct ICrypto : public IInterface {
+ DECLARE_META_INTERFACE(Crypto);
+
+ virtual status_t initCheck() const = 0;
+
+ virtual bool isCryptoSchemeSupported(const uint8_t uuid[16]) = 0;
+
+ virtual status_t createPlugin(
+ const uint8_t uuid[16], const void *data, size_t size) = 0;
+
+ virtual status_t destroyPlugin() = 0;
+
+ virtual bool requiresSecureDecoderComponent(
+ const char *mime) const = 0;
+
+ virtual void notifyResolution(uint32_t width, uint32_t height) = 0;
+
+ virtual status_t setMediaDrmSession(const Vector<uint8_t> &sessionId) = 0;
+
+ struct SourceBuffer {
+ sp<IMemory> mSharedMemory;
+ int32_t mHeapSeqNum;
+ };
+
+ enum DestinationType {
+ kDestinationTypeSharedMemory, // non-secure
+ kDestinationTypeNativeHandle // secure
+ };
+
+ struct DestinationBuffer {
+ DestinationType mType;
+ native_handle_t *mHandle;
+ sp<IMemory> mSharedMemory;
+ };
+
+ virtual ssize_t decrypt(const uint8_t key[16], const uint8_t iv[16],
+ CryptoPlugin::Mode mode, const CryptoPlugin::Pattern &pattern,
+ const SourceBuffer &source, size_t offset,
+ const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
+ const DestinationBuffer &destination, AString *errorDetailMsg) = 0;
+
+ /**
+ * Declare the heap that the shared memory source buffers passed
+ * to decrypt will be allocated from. Returns a sequence number
+ * that subsequent decrypt calls can use to refer to the heap,
+ * with -1 indicating failure.
+ */
+ virtual int32_t setHeap(const sp<IMemoryHeap>& heap) = 0;
+ virtual void unsetHeap(int32_t seqNum) = 0;
+
+private:
+ DISALLOW_EVIL_CONSTRUCTORS(ICrypto);
+};
+
+struct BnCrypto : public BnInterface<ICrypto> {
+ virtual status_t onTransact(
+ uint32_t code, const Parcel &data, Parcel *reply,
+ uint32_t flags = 0);
+private:
+ void readVector(const Parcel &data, Vector<uint8_t> &vector) const;
+ void writeVector(Parcel *reply, Vector<uint8_t> const &vector) const;
+};
+
+} // namespace android
+
+#endif // ANDROID_ICRYPTO_H_
diff --git a/include/media/IDataSource.h b/media/libmedia/include/media/IDataSource.h
similarity index 100%
rename from include/media/IDataSource.h
rename to media/libmedia/include/media/IDataSource.h
diff --git a/media/libmedia/include/media/IDrm.h b/media/libmedia/include/media/IDrm.h
new file mode 100644
index 0000000..a57e372
--- /dev/null
+++ b/media/libmedia/include/media/IDrm.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <binder/IInterface.h>
+#include <media/stagefright/foundation/ABase.h>
+#include <media/drm/DrmAPI.h>
+#include <media/IDrmClient.h>
+
+#ifndef ANDROID_IDRM_H_
+
+#define ANDROID_IDRM_H_
+
+namespace android {
+
+struct AString;
+
+struct IDrm : public IInterface {
+ DECLARE_META_INTERFACE(Drm);
+
+ virtual status_t initCheck() const = 0;
+
+ virtual bool isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType) = 0;
+
+ virtual status_t createPlugin(const uint8_t uuid[16],
+ const String8 &appPackageName) = 0;
+
+ virtual status_t destroyPlugin() = 0;
+
+ virtual status_t openSession(Vector<uint8_t> &sessionId) = 0;
+
+ virtual status_t closeSession(Vector<uint8_t> const &sessionId) = 0;
+
+ virtual status_t
+ getKeyRequest(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &initData,
+ String8 const &mimeType, DrmPlugin::KeyType keyType,
+ KeyedVector<String8, String8> const &optionalParameters,
+ Vector<uint8_t> &request, String8 &defaultUrl,
+ DrmPlugin::KeyRequestType *keyRequestType) = 0;
+
+ virtual status_t provideKeyResponse(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &response,
+ Vector<uint8_t> &keySetId) = 0;
+
+ virtual status_t removeKeys(Vector<uint8_t> const &keySetId) = 0;
+
+ virtual status_t restoreKeys(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keySetId) = 0;
+
+ virtual status_t queryKeyStatus(Vector<uint8_t> const &sessionId,
+ KeyedVector<String8, String8> &infoMap) const = 0;
+
+ virtual status_t getProvisionRequest(String8 const &certType,
+ String8 const &certAuthority,
+ Vector<uint8_t> &request,
+ String8 &defaulUrl) = 0;
+
+ virtual status_t provideProvisionResponse(Vector<uint8_t> const &response,
+ Vector<uint8_t> &certificate,
+ Vector<uint8_t> &wrappedKey) = 0;
+
+ virtual status_t getSecureStops(List<Vector<uint8_t> > &secureStops) = 0;
+ virtual status_t getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop) = 0;
+
+ virtual status_t releaseSecureStops(Vector<uint8_t> const &ssRelease) = 0;
+ virtual status_t releaseAllSecureStops() = 0;
+
+ virtual status_t getPropertyString(String8 const &name, String8 &value) const = 0;
+ virtual status_t getPropertyByteArray(String8 const &name,
+ Vector<uint8_t> &value) const = 0;
+ virtual status_t setPropertyString(String8 const &name,
+ String8 const &value ) const = 0;
+ virtual status_t setPropertyByteArray(String8 const &name,
+ Vector<uint8_t> const &value) const = 0;
+
+ virtual status_t setCipherAlgorithm(Vector<uint8_t> const &sessionId,
+ String8 const &algorithm) = 0;
+
+ virtual status_t setMacAlgorithm(Vector<uint8_t> const &sessionId,
+ String8 const &algorithm) = 0;
+
+ virtual status_t encrypt(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keyId,
+ Vector<uint8_t> const &input,
+ Vector<uint8_t> const &iv,
+ Vector<uint8_t> &output) = 0;
+
+ virtual status_t decrypt(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keyId,
+ Vector<uint8_t> const &input,
+ Vector<uint8_t> const &iv,
+ Vector<uint8_t> &output) = 0;
+
+ virtual status_t sign(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keyId,
+ Vector<uint8_t> const &message,
+ Vector<uint8_t> &signature) = 0;
+
+ virtual status_t verify(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keyId,
+ Vector<uint8_t> const &message,
+ Vector<uint8_t> const &signature,
+ bool &match) = 0;
+
+ virtual status_t signRSA(Vector<uint8_t> const &sessionId,
+ String8 const &algorithm,
+ Vector<uint8_t> const &message,
+ Vector<uint8_t> const &wrappedKey,
+ Vector<uint8_t> &signature) = 0;
+
+ virtual status_t setListener(const sp<IDrmClient>& listener) = 0;
+
+private:
+ DISALLOW_EVIL_CONSTRUCTORS(IDrm);
+};
+
+struct BnDrm : public BnInterface<IDrm> {
+ virtual status_t onTransact(
+ uint32_t code, const Parcel &data, Parcel *reply,
+ uint32_t flags = 0);
+private:
+ void readVector(const Parcel &data, Vector<uint8_t> &vector) const;
+ void writeVector(Parcel *reply, Vector<uint8_t> const &vector) const;
+};
+
+} // namespace android
+
+#endif // ANDROID_IDRM_H_
diff --git a/include/media/IDrmClient.h b/media/libmedia/include/media/IDrmClient.h
similarity index 100%
rename from include/media/IDrmClient.h
rename to media/libmedia/include/media/IDrmClient.h
diff --git a/include/media/IHDCP.h b/media/libmedia/include/media/IHDCP.h
similarity index 100%
rename from include/media/IHDCP.h
rename to media/libmedia/include/media/IHDCP.h
diff --git a/include/media/IMediaCodecList.h b/media/libmedia/include/media/IMediaCodecList.h
similarity index 100%
rename from include/media/IMediaCodecList.h
rename to media/libmedia/include/media/IMediaCodecList.h
diff --git a/media/libmedia/include/media/IMediaCodecService.h b/media/libmedia/include/media/IMediaCodecService.h
new file mode 100644
index 0000000..da3c5a03
--- /dev/null
+++ b/media/libmedia/include/media/IMediaCodecService.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_IMEDIACODECSERVICE_H
+#define ANDROID_IMEDIACODECSERVICE_H
+
+#include <binder/IInterface.h>
+#include <binder/IMemory.h>
+#include <binder/Parcel.h>
+#include <media/IDataSource.h>
+#include <media/IOMX.h>
+
+namespace android {
+
+class IMediaCodecService: public IInterface
+{
+public:
+ DECLARE_META_INTERFACE(MediaCodecService);
+
+ virtual sp<IOMX> getOMX() = 0;
+};
+
+class BnMediaCodecService: public BnInterface<IMediaCodecService>
+{
+public:
+ virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply,
+ uint32_t flags = 0);
+};
+
+} // namespace android
+
+#endif // ANDROID_IMEDIACODECSERVICE_H
diff --git a/include/media/IMediaDeathNotifier.h b/media/libmedia/include/media/IMediaDeathNotifier.h
similarity index 100%
rename from include/media/IMediaDeathNotifier.h
rename to media/libmedia/include/media/IMediaDeathNotifier.h
diff --git a/include/media/IMediaDrmService.h b/media/libmedia/include/media/IMediaDrmService.h
similarity index 100%
rename from include/media/IMediaDrmService.h
rename to media/libmedia/include/media/IMediaDrmService.h
diff --git a/media/libmedia/include/media/IMediaExtractor.h b/media/libmedia/include/media/IMediaExtractor.h
new file mode 100644
index 0000000..ab40f53
--- /dev/null
+++ b/media/libmedia/include/media/IMediaExtractor.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef IMEDIA_EXTRACTOR_BASE_H_
+
+#define IMEDIA_EXTRACTOR_BASE_H_
+
+#include <media/IMediaSource.h>
+#include <media/stagefright/DataSource.h>
+
+namespace android {
+
+class MetaData;
+namespace media {
+class ICas;
+};
+using namespace media;
+
+class IMediaExtractor : public IInterface {
+public:
+ DECLARE_META_INTERFACE(MediaExtractor);
+
+ virtual size_t countTracks() = 0;
+ // This function could return NULL IMediaSource even when index is within the
+ // track count returned by countTracks, since it's possible the track is malformed
+ // and it's not detected during countTracks call.
+ virtual sp<IMediaSource> getTrack(size_t index) = 0;
+
+ enum GetTrackMetaDataFlags {
+ kIncludeExtensiveMetaData = 1
+ };
+ virtual sp<MetaData> getTrackMetaData(
+ size_t index, uint32_t flags = 0) = 0;
+
+ // Return container specific meta-data. The default implementation
+ // returns an empty metadata object.
+ virtual sp<MetaData> getMetaData() = 0;
+
+ virtual status_t getMetrics(Parcel *reply) = 0;
+
+ enum Flags {
+ CAN_SEEK_BACKWARD = 1, // the "seek 10secs back button"
+ CAN_SEEK_FORWARD = 2, // the "seek 10secs forward button"
+ CAN_PAUSE = 4,
+ CAN_SEEK = 8, // the "seek bar"
+ };
+
+ // If subclasses do _not_ override this, the default is
+ // CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK | CAN_PAUSE
+ virtual uint32_t flags() const = 0;
+
+ // for DRM
+ virtual char* getDrmTrackInfo(size_t trackID, int *len) = 0;
+
+ virtual status_t setMediaCas(const sp<ICas> &cas) = 0;
+
+ virtual void setUID(uid_t uid) = 0;
+
+ virtual const char * name() = 0;
+};
+
+
+class BnMediaExtractor: public BnInterface<IMediaExtractor>
+{
+public:
+ virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply,
+ uint32_t flags = 0);
+};
+
+void registerMediaExtractor(
+ const sp<IMediaExtractor> &extractor,
+ const sp<DataSource> &source,
+ const char *mime);
+
+void registerMediaSource(
+ const sp<IMediaExtractor> &extractor,
+ const sp<IMediaSource> &source);
+
+status_t dumpExtractors(int fd, const Vector<String16>& args);
+
+
+} // namespace android
+
+#endif // IMEDIA_EXTRACTOR_BASE_H_
diff --git a/media/libmedia/include/media/IMediaExtractorService.h b/media/libmedia/include/media/IMediaExtractorService.h
new file mode 100644
index 0000000..45e9620
--- /dev/null
+++ b/media/libmedia/include/media/IMediaExtractorService.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_IMEDIAEXTRACTORSERVICE_H
+#define ANDROID_IMEDIAEXTRACTORSERVICE_H
+
+#include <binder/IInterface.h>
+#include <binder/IMemory.h>
+#include <binder/Parcel.h>
+#include <media/IDataSource.h>
+#include <media/IMediaExtractor.h>
+
+namespace android {
+
+class IMediaExtractorService: public IInterface
+{
+public:
+ DECLARE_META_INTERFACE(MediaExtractorService);
+
+ virtual sp<IMediaExtractor> makeExtractor(const sp<IDataSource> &source, const char *mime) = 0;
+
+ virtual sp<IDataSource> makeIDataSource(int fd, int64_t offset, int64_t length) = 0;
+};
+
+class BnMediaExtractorService: public BnInterface<IMediaExtractorService>
+{
+public:
+ virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply,
+ uint32_t flags = 0);
+};
+
+} // namespace android
+
+#endif // ANDROID_IMEDIAEXTRACTORSERVICE_H
diff --git a/include/media/IMediaHTTPConnection.h b/media/libmedia/include/media/IMediaHTTPConnection.h
similarity index 100%
rename from include/media/IMediaHTTPConnection.h
rename to media/libmedia/include/media/IMediaHTTPConnection.h
diff --git a/include/media/IMediaHTTPService.h b/media/libmedia/include/media/IMediaHTTPService.h
similarity index 100%
rename from include/media/IMediaHTTPService.h
rename to media/libmedia/include/media/IMediaHTTPService.h
diff --git a/media/libmedia/include/media/IMediaLogService.h b/media/libmedia/include/media/IMediaLogService.h
new file mode 100644
index 0000000..1df1907
--- /dev/null
+++ b/media/libmedia/include/media/IMediaLogService.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_IMEDIALOGSERVICE_H
+#define ANDROID_IMEDIALOGSERVICE_H
+
+#include <binder/IInterface.h>
+#include <binder/IMemory.h>
+#include <binder/Parcel.h>
+
+namespace android {
+
+class IMediaLogService: public IInterface
+{
+public:
+ DECLARE_META_INTERFACE(MediaLogService);
+
+ virtual void registerWriter(const sp<IMemory>& shared, size_t size, const char *name) = 0;
+ virtual void unregisterWriter(const sp<IMemory>& shared) = 0;
+
+ virtual void requestMergeWakeup() = 0;
+};
+
+class BnMediaLogService: public BnInterface<IMediaLogService>
+{
+public:
+ virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply,
+ uint32_t flags = 0);
+};
+
+} // namespace android
+
+#endif // ANDROID_IMEDIALOGSERVICE_H
diff --git a/include/media/IMediaMetadataRetriever.h b/media/libmedia/include/media/IMediaMetadataRetriever.h
similarity index 100%
rename from include/media/IMediaMetadataRetriever.h
rename to media/libmedia/include/media/IMediaMetadataRetriever.h
diff --git a/media/libmedia/include/media/IMediaPlayer.h b/media/libmedia/include/media/IMediaPlayer.h
new file mode 100644
index 0000000..e5a98dd
--- /dev/null
+++ b/media/libmedia/include/media/IMediaPlayer.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_IMEDIAPLAYER_H
+#define ANDROID_IMEDIAPLAYER_H
+
+#include <utils/RefBase.h>
+#include <binder/IInterface.h>
+#include <binder/Parcel.h>
+#include <utils/KeyedVector.h>
+#include <system/audio.h>
+
+#include <media/IMediaSource.h>
+#include <media/VolumeShaper.h>
+
+// Fwd decl to make sure everyone agrees that the scope of struct sockaddr_in is
+// global, and not in android::
+struct sockaddr_in;
+
+namespace android {
+
+class Parcel;
+class Surface;
+class IDataSource;
+struct IStreamSource;
+class IGraphicBufferProducer;
+struct IMediaHTTPService;
+struct AudioPlaybackRate;
+struct AVSyncSettings;
+struct BufferingSettings;
+
+typedef IMediaSource::ReadOptions::SeekMode MediaPlayerSeekMode;
+
+class IMediaPlayer: public IInterface
+{
+public:
+ DECLARE_META_INTERFACE(MediaPlayer);
+
+ virtual void disconnect() = 0;
+
+ virtual status_t setDataSource(
+ const sp<IMediaHTTPService> &httpService,
+ const char *url,
+ const KeyedVector<String8, String8>* headers) = 0;
+
+ virtual status_t setDataSource(int fd, int64_t offset, int64_t length) = 0;
+ virtual status_t setDataSource(const sp<IStreamSource>& source) = 0;
+ virtual status_t setDataSource(const sp<IDataSource>& source) = 0;
+ virtual status_t setVideoSurfaceTexture(
+ const sp<IGraphicBufferProducer>& bufferProducer) = 0;
+ virtual status_t getDefaultBufferingSettings(
+ BufferingSettings* buffering /* nonnull */) = 0;
+ virtual status_t setBufferingSettings(const BufferingSettings& buffering) = 0;
+ virtual status_t prepareAsync() = 0;
+ virtual status_t start() = 0;
+ virtual status_t stop() = 0;
+ virtual status_t pause() = 0;
+ virtual status_t isPlaying(bool* state) = 0;
+ virtual status_t setPlaybackSettings(const AudioPlaybackRate& rate) = 0;
+ virtual status_t getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */) = 0;
+ virtual status_t setSyncSettings(const AVSyncSettings& sync, float videoFpsHint) = 0;
+ virtual status_t getSyncSettings(AVSyncSettings* sync /* nonnull */,
+ float* videoFps /* nonnull */) = 0;
+ virtual status_t seekTo(
+ int msec,
+ MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC) = 0;
+ virtual status_t getCurrentPosition(int* msec) = 0;
+ virtual status_t getDuration(int* msec) = 0;
+ virtual status_t reset() = 0;
+ virtual status_t setAudioStreamType(audio_stream_type_t type) = 0;
+ virtual status_t setLooping(int loop) = 0;
+ virtual status_t setVolume(float leftVolume, float rightVolume) = 0;
+ virtual status_t setAuxEffectSendLevel(float level) = 0;
+ virtual status_t attachAuxEffect(int effectId) = 0;
+ virtual status_t setParameter(int key, const Parcel& request) = 0;
+ virtual status_t getParameter(int key, Parcel* reply) = 0;
+ virtual status_t setRetransmitEndpoint(const struct sockaddr_in* endpoint) = 0;
+ virtual status_t getRetransmitEndpoint(struct sockaddr_in* endpoint) = 0;
+ virtual status_t setNextPlayer(const sp<IMediaPlayer>& next) = 0;
+
+ virtual VolumeShaper::Status applyVolumeShaper(
+ const sp<VolumeShaper::Configuration>& configuration,
+ const sp<VolumeShaper::Operation>& operation) = 0;
+ virtual sp<VolumeShaper::State> getVolumeShaperState(int id) = 0;
+
+ // Modular DRM
+ virtual status_t prepareDrm(const uint8_t uuid[16],
+ const Vector<uint8_t>& drmSessionId) = 0;
+ virtual status_t releaseDrm() = 0;
+
+ // Invoke a generic method on the player by using opaque parcels
+ // for the request and reply.
+ // @param request Parcel that must start with the media player
+ // interface token.
+ // @param[out] reply Parcel to hold the reply data. Cannot be null.
+ // @return OK if the invocation was made successfully.
+ virtual status_t invoke(const Parcel& request, Parcel *reply) = 0;
+
+ // Set a new metadata filter.
+ // @param filter A set of allow and drop rules serialized in a Parcel.
+ // @return OK if the invocation was made successfully.
+ virtual status_t setMetadataFilter(const Parcel& filter) = 0;
+
+ // Retrieve a set of metadata.
+ // @param update_only Include only the metadata that have changed
+ // since the last invocation of getMetadata.
+ // The set is built using the unfiltered
+ // notifications the native player sent to the
+ // MediaPlayerService during that period of
+ // time. If false, all the metadatas are considered.
+ // @param apply_filter If true, once the metadata set has been built based
+ // on the value update_only, the current filter is
+ // applied.
+ // @param[out] metadata On exit contains a set (possibly empty) of metadata.
+ // Valid only if the call returned OK.
+ // @return OK if the invocation was made successfully.
+ virtual status_t getMetadata(bool update_only,
+ bool apply_filter,
+ Parcel *metadata) = 0;
+};
+
+// ----------------------------------------------------------------------------
+
+class BnMediaPlayer: public BnInterface<IMediaPlayer>
+{
+public:
+ virtual status_t onTransact( uint32_t code,
+ const Parcel& data,
+ Parcel* reply,
+ uint32_t flags = 0);
+};
+
+}; // namespace android
+
+#endif // ANDROID_IMEDIAPLAYER_H
diff --git a/include/media/IMediaPlayerClient.h b/media/libmedia/include/media/IMediaPlayerClient.h
similarity index 100%
rename from include/media/IMediaPlayerClient.h
rename to media/libmedia/include/media/IMediaPlayerClient.h
diff --git a/include/media/IMediaPlayerService.h b/media/libmedia/include/media/IMediaPlayerService.h
similarity index 100%
rename from include/media/IMediaPlayerService.h
rename to media/libmedia/include/media/IMediaPlayerService.h
diff --git a/media/libmedia/include/media/IMediaRecorder.h b/media/libmedia/include/media/IMediaRecorder.h
new file mode 100644
index 0000000..9d0341a
--- /dev/null
+++ b/media/libmedia/include/media/IMediaRecorder.h
@@ -0,0 +1,82 @@
+/*
+ **
+ ** Copyright 2008, The Android Open Source Project
+ **
+ ** Licensed under the Apache License, Version 2.0 (the "License");
+ ** you may not use this file except in compliance with the License.
+ ** You may obtain a copy of the License at
+ **
+ ** http://www.apache.org/licenses/LICENSE-2.0
+ **
+ ** Unless required by applicable law or agreed to in writing, software
+ ** distributed under the License is distributed on an "AS IS" BASIS,
+ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ** See the License for the specific language governing permissions and
+ ** limitations under the License.
+ */
+
+#ifndef ANDROID_IMEDIARECORDER_H
+#define ANDROID_IMEDIARECORDER_H
+
+#include <binder/IInterface.h>
+
+namespace android {
+
+class Surface;
+namespace hardware {
+class ICamera;
+}
+class ICameraRecordingProxy;
+class IMediaRecorderClient;
+class IGraphicBufferProducer;
+struct PersistentSurface;
+
+class IMediaRecorder: public IInterface
+{
+public:
+ DECLARE_META_INTERFACE(MediaRecorder);
+
+ virtual status_t setCamera(const sp<hardware::ICamera>& camera,
+ const sp<ICameraRecordingProxy>& proxy) = 0;
+ virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface) = 0;
+ virtual status_t setVideoSource(int vs) = 0;
+ virtual status_t setAudioSource(int as) = 0;
+ virtual status_t setOutputFormat(int of) = 0;
+ virtual status_t setVideoEncoder(int ve) = 0;
+ virtual status_t setAudioEncoder(int ae) = 0;
+ virtual status_t setOutputFile(int fd) = 0;
+ virtual status_t setNextOutputFile(int fd) = 0;
+ virtual status_t setVideoSize(int width, int height) = 0;
+ virtual status_t setVideoFrameRate(int frames_per_second) = 0;
+ virtual status_t setParameters(const String8& params) = 0;
+ virtual status_t setListener(const sp<IMediaRecorderClient>& listener) = 0;
+ virtual status_t setClientName(const String16& clientName) = 0;
+ virtual status_t prepare() = 0;
+ virtual status_t getMaxAmplitude(int* max) = 0;
+ virtual status_t getMetrics(Parcel *reply) = 0;
+ virtual status_t start() = 0;
+ virtual status_t stop() = 0;
+ virtual status_t reset() = 0;
+ virtual status_t pause() = 0;
+ virtual status_t resume() = 0;
+ virtual status_t init() = 0;
+ virtual status_t close() = 0;
+ virtual status_t release() = 0;
+ virtual status_t setInputSurface(const sp<PersistentSurface>& surface) = 0;
+ virtual sp<IGraphicBufferProducer> querySurfaceMediaSource() = 0;
+};
+
+// ----------------------------------------------------------------------------
+
+class BnMediaRecorder: public BnInterface<IMediaRecorder>
+{
+public:
+ virtual status_t onTransact( uint32_t code,
+ const Parcel& data,
+ Parcel* reply,
+ uint32_t flags = 0);
+};
+
+}; // namespace android
+
+#endif // ANDROID_IMEDIARECORDER_H
diff --git a/include/media/IMediaRecorderClient.h b/media/libmedia/include/media/IMediaRecorderClient.h
similarity index 100%
rename from include/media/IMediaRecorderClient.h
rename to media/libmedia/include/media/IMediaRecorderClient.h
diff --git a/include/media/IMediaSource.h b/media/libmedia/include/media/IMediaSource.h
similarity index 100%
rename from include/media/IMediaSource.h
rename to media/libmedia/include/media/IMediaSource.h
diff --git a/media/libmedia/include/media/IOMX.h b/media/libmedia/include/media/IOMX.h
new file mode 100644
index 0000000..9a0ada1
--- /dev/null
+++ b/media/libmedia/include/media/IOMX.h
@@ -0,0 +1,256 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_IOMX_H_
+
+#define ANDROID_IOMX_H_
+
+#include <binder/IInterface.h>
+#include <utils/List.h>
+#include <utils/String8.h>
+#include <cutils/native_handle.h>
+
+#include <list>
+
+#include <hidl/HybridInterface.h>
+#include <media/hardware/MetadataBufferType.h>
+#include <android/hardware/media/omx/1.0/IOmxNode.h>
+
+#include <OMX_Core.h>
+#include <OMX_Video.h>
+
+namespace android {
+
+class IGraphicBufferProducer;
+class IGraphicBufferSource;
+class IMemory;
+class IOMXBufferSource;
+class IOMXNode;
+class IOMXObserver;
+class NativeHandle;
+class OMXBuffer;
+struct omx_message;
+
+using hardware::media::omx::V1_0::IOmxNode;
+
+class IOMX : public IInterface {
+public:
+ DECLARE_META_INTERFACE(OMX);
+
+ typedef uint32_t buffer_id;
+
+ enum {
+ kFenceTimeoutMs = 1000
+ };
+
+ enum PortMode {
+ kPortModePresetStart = 0,
+ kPortModePresetByteBuffer,
+ kPortModePresetANWBuffer,
+ kPortModePresetSecureBuffer,
+ kPortModePresetEnd,
+
+ kPortModeDynamicStart = 100,
+ kPortModeDynamicANWBuffer, // uses metadata mode kMetadataBufferTypeANWBuffer
+ // or kMetadataBufferTypeGrallocSource
+ kPortModeDynamicNativeHandle, // uses metadata mode kMetadataBufferTypeNativeHandleSource
+ kPortModeDynamicEnd,
+ };
+
+ struct ComponentInfo {
+ String8 mName;
+ List<String8> mRoles;
+ };
+ virtual status_t listNodes(List<ComponentInfo> *list) = 0;
+
+ virtual status_t allocateNode(
+ const char *name, const sp<IOMXObserver> &observer,
+ sp<IOMXNode> *omxNode) = 0;
+
+ virtual status_t createInputSurface(
+ sp<IGraphicBufferProducer> *bufferProducer,
+ sp<IGraphicBufferSource> *bufferSource) = 0;
+};
+
+class IOMXNode : public IInterface {
+public:
+ DECLARE_HYBRID_META_INTERFACE(OMXNode, IOmxNode);
+
+ typedef IOMX::buffer_id buffer_id;
+
+ virtual status_t freeNode() = 0;
+
+ virtual status_t sendCommand(
+ OMX_COMMANDTYPE cmd, OMX_S32 param) = 0;
+
+ virtual status_t getParameter(
+ OMX_INDEXTYPE index, void *params, size_t size) = 0;
+
+ virtual status_t setParameter(
+ OMX_INDEXTYPE index, const void *params, size_t size) = 0;
+
+ virtual status_t getConfig(
+ OMX_INDEXTYPE index, void *params, size_t size) = 0;
+
+ virtual status_t setConfig(
+ OMX_INDEXTYPE index, const void *params, size_t size) = 0;
+
+ virtual status_t setPortMode(
+ OMX_U32 port_index, IOMX::PortMode mode) = 0;
+
+ virtual status_t prepareForAdaptivePlayback(
+ OMX_U32 portIndex, OMX_BOOL enable,
+ OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight) = 0;
+
+ virtual status_t configureVideoTunnelMode(
+ OMX_U32 portIndex, OMX_BOOL tunneled,
+ OMX_U32 audioHwSync, native_handle_t **sidebandHandle) = 0;
+
+ virtual status_t getGraphicBufferUsage(
+ OMX_U32 port_index, OMX_U32* usage) = 0;
+
+ virtual status_t setInputSurface(
+ const sp<IOMXBufferSource> &bufferSource) = 0;
+
+ // Allocate an opaque buffer as a native handle. If component supports returning native
+ // handles, those are returned in *native_handle. Otherwise, the allocated buffer is
+ // returned in *buffer_data. This clearly only makes sense if the caller lives in the
+ // same process as the callee, i.e. is the media_server, as the returned "buffer_data"
+ // pointer is just that, a pointer into local address space.
+ virtual status_t allocateSecureBuffer(
+ OMX_U32 port_index, size_t size, buffer_id *buffer,
+ void **buffer_data, sp<NativeHandle> *native_handle) = 0;
+
+ // Instructs the component to use the buffer passed in via |omxBuf| on the
+ // specified port. Returns in |*buffer| the buffer id that the component
+ // assigns to this buffer. |omxBuf| must be one of:
+ // 1) OMXBuffer::sPreset for meta-mode,
+ // 2) type kBufferTypeANWBuffer for non-meta-graphic buffer mode,
+ // 3) type kBufferTypeSharedMem for bytebuffer mode.
+ virtual status_t useBuffer(
+ OMX_U32 port_index, const OMXBuffer &omxBuf, buffer_id *buffer) = 0;
+
+ // Frees the buffer on the specified port with buffer id |buffer|.
+ virtual status_t freeBuffer(
+ OMX_U32 port_index, buffer_id buffer) = 0;
+
+ // Calls OMX_FillBuffer on buffer. Passes |fenceFd| to component if it
+ // supports fences. Otherwise, it waits on |fenceFd| before calling
+ // OMX_FillBuffer. Takes ownership of |fenceFd| even if this call fails.
+ // If the port is in metadata mode, the buffer will be updated to point
+ // to the new buffer passed in via |omxBuf| before OMX_FillBuffer is called.
+ // Otherwise info in the |omxBuf| is not used.
+ virtual status_t fillBuffer(
+ buffer_id buffer, const OMXBuffer &omxBuf, int fenceFd = -1) = 0;
+
+ // Calls OMX_EmptyBuffer on buffer. Passes |fenceFd| to component if it
+ // supports fences. Otherwise, it waits on |fenceFd| before calling
+ // OMX_EmptyBuffer. Takes ownership of |fenceFd| even if this call fails.
+ // If the port is in metadata mode, the buffer will be updated to point
+ // to the new buffer passed in via |omxBuf| before OMX_EmptyBuffer is called.
+ virtual status_t emptyBuffer(
+ buffer_id buffer, const OMXBuffer &omxBuf,
+ OMX_U32 flags, OMX_TICKS timestamp, int fenceFd = -1) = 0;
+
+ virtual status_t getExtensionIndex(
+ const char *parameter_name,
+ OMX_INDEXTYPE *index) = 0;
+
+ virtual status_t dispatchMessage(const omx_message &msg) = 0;
+};
+
+struct omx_message {
+ enum {
+ EVENT,
+ EMPTY_BUFFER_DONE,
+ FILL_BUFFER_DONE,
+ FRAME_RENDERED,
+ } type;
+
+ int fenceFd; // used for EMPTY_BUFFER_DONE and FILL_BUFFER_DONE; client must close this
+
+ union {
+ // if type == EVENT
+ struct {
+ OMX_EVENTTYPE event;
+ OMX_U32 data1;
+ OMX_U32 data2;
+ OMX_U32 data3;
+ OMX_U32 data4;
+ } event_data;
+
+ // if type == EMPTY_BUFFER_DONE
+ struct {
+ IOMX::buffer_id buffer;
+ } buffer_data;
+
+ // if type == FILL_BUFFER_DONE
+ struct {
+ IOMX::buffer_id buffer;
+ OMX_U32 range_offset;
+ OMX_U32 range_length;
+ OMX_U32 flags;
+ OMX_TICKS timestamp;
+ } extended_buffer_data;
+
+ // if type == FRAME_RENDERED
+ struct {
+ OMX_TICKS timestamp;
+ OMX_S64 nanoTime;
+ } render_data;
+ } u;
+};
+
+class IOMXObserver : public IInterface {
+public:
+ DECLARE_META_INTERFACE(OMXObserver);
+
+ // Handle (list of) messages.
+ virtual void onMessages(const std::list<omx_message> &messages) = 0;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+class BnOMX : public BnInterface<IOMX> {
+public:
+ virtual status_t onTransact(
+ uint32_t code, const Parcel &data, Parcel *reply,
+ uint32_t flags = 0);
+};
+
+class BnOMXNode : public BnInterface<IOMXNode> {
+public:
+ virtual status_t onTransact(
+ uint32_t code, const Parcel &data, Parcel *reply,
+ uint32_t flags = 0);
+
+protected:
+ // check if the codec is secure.
+ virtual bool isSecure() const {
+ return false;
+ }
+};
+
+class BnOMXObserver : public BnInterface<IOMXObserver> {
+public:
+ virtual status_t onTransact(
+ uint32_t code, const Parcel &data, Parcel *reply,
+ uint32_t flags = 0);
+};
+
+} // namespace android
+
+#endif // ANDROID_IOMX_H_
diff --git a/include/media/IRemoteDisplay.h b/media/libmedia/include/media/IRemoteDisplay.h
similarity index 100%
rename from include/media/IRemoteDisplay.h
rename to media/libmedia/include/media/IRemoteDisplay.h
diff --git a/include/media/IRemoteDisplayClient.h b/media/libmedia/include/media/IRemoteDisplayClient.h
similarity index 100%
rename from include/media/IRemoteDisplayClient.h
rename to media/libmedia/include/media/IRemoteDisplayClient.h
diff --git a/include/media/IResourceManagerClient.h b/media/libmedia/include/media/IResourceManagerClient.h
similarity index 100%
rename from include/media/IResourceManagerClient.h
rename to media/libmedia/include/media/IResourceManagerClient.h
diff --git a/include/media/IResourceManagerService.h b/media/libmedia/include/media/IResourceManagerService.h
similarity index 100%
rename from include/media/IResourceManagerService.h
rename to media/libmedia/include/media/IResourceManagerService.h
diff --git a/include/media/IStreamSource.h b/media/libmedia/include/media/IStreamSource.h
similarity index 100%
rename from include/media/IStreamSource.h
rename to media/libmedia/include/media/IStreamSource.h
diff --git a/include/media/JetPlayer.h b/media/libmedia/include/media/JetPlayer.h
similarity index 100%
rename from include/media/JetPlayer.h
rename to media/libmedia/include/media/JetPlayer.h
diff --git a/services/audioflinger/LinearMap.h b/media/libmedia/include/media/LinearMap.h
similarity index 100%
rename from services/audioflinger/LinearMap.h
rename to media/libmedia/include/media/LinearMap.h
diff --git a/media/libmedia/include/media/MediaCodecBuffer.h b/media/libmedia/include/media/MediaCodecBuffer.h
new file mode 100644
index 0000000..501c00b
--- /dev/null
+++ b/media/libmedia/include/media/MediaCodecBuffer.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_CODEC_BUFFER_H_
+
+#define MEDIA_CODEC_BUFFER_H_
+
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+#include <utils/StrongPointer.h>
+
+namespace android {
+
+struct ABuffer;
+struct AMessage;
+class MediaBufferBase;
+
+/**
+ * Buffers used by MediaCodec.
+ */
+class MediaCodecBuffer : public RefBase {
+public:
+ MediaCodecBuffer(const sp<AMessage> &format, const sp<ABuffer> &buffer);
+
+ /**
+ * MediaCodec will release all references to the buffer when it's done using
+ * it, so the destructor should return the buffer to the owner, such as OMX
+ * components, buffer allocators, surfaces, etc.
+ */
+ virtual ~MediaCodecBuffer() = default;
+
+ // ABuffer-like interface
+ uint8_t *base();
+ uint8_t *data();
+ size_t capacity() const;
+ size_t size() const;
+ size_t offset() const;
+ // Default implementation calls ABuffer::setRange() and returns OK.
+ virtual status_t setRange(size_t offset, size_t size);
+ // TODO: These can be removed if we finish replacing all MediaBuffer's.
+ MediaBufferBase *getMediaBufferBase();
+ void setMediaBufferBase(MediaBufferBase *mediaBuffer);
+
+ // TODO: Specify each field for meta/format.
+ sp<AMessage> meta();
+ sp<AMessage> format();
+
+ void setFormat(const sp<AMessage> &format);
+
+private:
+ MediaCodecBuffer() = delete;
+
+ const sp<AMessage> mMeta;
+ sp<AMessage> mFormat;
+ const sp<ABuffer> mBuffer;
+ MediaBufferBase *mMediaBufferBase;
+};
+
+} // namespace android
+
+#endif // MEDIA_CODEC_BUFFER_H_
diff --git a/include/media/MediaCodecInfo.h b/media/libmedia/include/media/MediaCodecInfo.h
similarity index 100%
rename from include/media/MediaCodecInfo.h
rename to media/libmedia/include/media/MediaCodecInfo.h
diff --git a/media/libmedia/include/media/MediaDefs.h b/media/libmedia/include/media/MediaDefs.h
new file mode 100644
index 0000000..7f17013
--- /dev/null
+++ b/media/libmedia/include/media/MediaDefs.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_DEFS_H_
+
+#define MEDIA_DEFS_H_
+
+namespace android {
+
+extern const char *MEDIA_MIMETYPE_IMAGE_JPEG;
+
+extern const char *MEDIA_MIMETYPE_VIDEO_VP8;
+extern const char *MEDIA_MIMETYPE_VIDEO_VP9;
+extern const char *MEDIA_MIMETYPE_VIDEO_AVC;
+extern const char *MEDIA_MIMETYPE_VIDEO_HEVC;
+extern const char *MEDIA_MIMETYPE_VIDEO_MPEG4;
+extern const char *MEDIA_MIMETYPE_VIDEO_H263;
+extern const char *MEDIA_MIMETYPE_VIDEO_MPEG2;
+extern const char *MEDIA_MIMETYPE_VIDEO_RAW;
+extern const char *MEDIA_MIMETYPE_VIDEO_DOLBY_VISION;
+extern const char *MEDIA_MIMETYPE_VIDEO_SCRAMBLED;
+
+extern const char *MEDIA_MIMETYPE_AUDIO_AMR_NB;
+extern const char *MEDIA_MIMETYPE_AUDIO_AMR_WB;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEG; // layer III
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II;
+extern const char *MEDIA_MIMETYPE_AUDIO_MIDI;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC;
+extern const char *MEDIA_MIMETYPE_AUDIO_QCELP;
+extern const char *MEDIA_MIMETYPE_AUDIO_VORBIS;
+extern const char *MEDIA_MIMETYPE_AUDIO_OPUS;
+extern const char *MEDIA_MIMETYPE_AUDIO_G711_ALAW;
+extern const char *MEDIA_MIMETYPE_AUDIO_G711_MLAW;
+extern const char *MEDIA_MIMETYPE_AUDIO_RAW;
+extern const char *MEDIA_MIMETYPE_AUDIO_FLAC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS;
+extern const char *MEDIA_MIMETYPE_AUDIO_MSGSM;
+extern const char *MEDIA_MIMETYPE_AUDIO_AC3;
+extern const char *MEDIA_MIMETYPE_AUDIO_EAC3;
+extern const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED;
+
+extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4;
+extern const char *MEDIA_MIMETYPE_CONTAINER_WAV;
+extern const char *MEDIA_MIMETYPE_CONTAINER_OGG;
+extern const char *MEDIA_MIMETYPE_CONTAINER_MATROSKA;
+extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2TS;
+extern const char *MEDIA_MIMETYPE_CONTAINER_AVI;
+extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2PS;
+
+extern const char *MEDIA_MIMETYPE_TEXT_3GPP;
+extern const char *MEDIA_MIMETYPE_TEXT_SUBRIP;
+extern const char *MEDIA_MIMETYPE_TEXT_VTT;
+extern const char *MEDIA_MIMETYPE_TEXT_CEA_608;
+extern const char *MEDIA_MIMETYPE_TEXT_CEA_708;
+extern const char *MEDIA_MIMETYPE_DATA_TIMED_ID3;
+
+// These are values exported to JAVA API that need to be in sync with
+// frameworks/base/media/java/android/media/AudioFormat.java. Unfortunately,
+// they are not defined in frameworks/av, so defining them here.
+enum AudioEncoding {
+ kAudioEncodingPcm16bit = 2,
+ kAudioEncodingPcm8bit = 3,
+ kAudioEncodingPcmFloat = 4,
+};
+
+} // namespace android
+
+#endif // MEDIA_DEFS_H_
diff --git a/include/media/MediaMetadataRetrieverInterface.h b/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
similarity index 100%
rename from include/media/MediaMetadataRetrieverInterface.h
rename to media/libmedia/include/media/MediaMetadataRetrieverInterface.h
diff --git a/media/libmedia/include/media/MediaProfiles.h b/media/libmedia/include/media/MediaProfiles.h
new file mode 100644
index 0000000..6975581
--- /dev/null
+++ b/media/libmedia/include/media/MediaProfiles.h
@@ -0,0 +1,481 @@
+/*
+ **
+ ** Copyright 2010, The Android Open Source Project.
+ **
+ ** Licensed under the Apache License, Version 2.0 (the "License");
+ ** you may not use this file except in compliance with the License.
+ ** You may obtain a copy of the License at
+ **
+ ** http://www.apache.org/licenses/LICENSE-2.0
+ **
+ ** Unless required by applicable law or agreed to in writing, software
+ ** distributed under the License is distributed on an "AS IS" BASIS,
+ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ** See the License for the specific language governing permissions and
+ ** limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIAPROFILES_H
+#define ANDROID_MEDIAPROFILES_H
+
+#include <utils/threads.h>
+#include <media/mediarecorder.h>
+
+namespace android {
+
+enum camcorder_quality {
+ CAMCORDER_QUALITY_LIST_START = 0,
+ CAMCORDER_QUALITY_LOW = 0,
+ CAMCORDER_QUALITY_HIGH = 1,
+ CAMCORDER_QUALITY_QCIF = 2,
+ CAMCORDER_QUALITY_CIF = 3,
+ CAMCORDER_QUALITY_480P = 4,
+ CAMCORDER_QUALITY_720P = 5,
+ CAMCORDER_QUALITY_1080P = 6,
+ CAMCORDER_QUALITY_QVGA = 7,
+ CAMCORDER_QUALITY_2160P = 8,
+ CAMCORDER_QUALITY_LIST_END = 8,
+
+ CAMCORDER_QUALITY_TIME_LAPSE_LIST_START = 1000,
+ CAMCORDER_QUALITY_TIME_LAPSE_LOW = 1000,
+ CAMCORDER_QUALITY_TIME_LAPSE_HIGH = 1001,
+ CAMCORDER_QUALITY_TIME_LAPSE_QCIF = 1002,
+ CAMCORDER_QUALITY_TIME_LAPSE_CIF = 1003,
+ CAMCORDER_QUALITY_TIME_LAPSE_480P = 1004,
+ CAMCORDER_QUALITY_TIME_LAPSE_720P = 1005,
+ CAMCORDER_QUALITY_TIME_LAPSE_1080P = 1006,
+ CAMCORDER_QUALITY_TIME_LAPSE_QVGA = 1007,
+ CAMCORDER_QUALITY_TIME_LAPSE_2160P = 1008,
+ CAMCORDER_QUALITY_TIME_LAPSE_LIST_END = 1008,
+
+ CAMCORDER_QUALITY_HIGH_SPEED_LIST_START = 2000,
+ CAMCORDER_QUALITY_HIGH_SPEED_LOW = 2000,
+ CAMCORDER_QUALITY_HIGH_SPEED_HIGH = 2001,
+ CAMCORDER_QUALITY_HIGH_SPEED_480P = 2002,
+ CAMCORDER_QUALITY_HIGH_SPEED_720P = 2003,
+ CAMCORDER_QUALITY_HIGH_SPEED_1080P = 2004,
+ CAMCORDER_QUALITY_HIGH_SPEED_2160P = 2005,
+ CAMCORDER_QUALITY_HIGH_SPEED_LIST_END = 2005,
+};
+
+enum video_decoder {
+ VIDEO_DECODER_WMV,
+};
+
+enum audio_decoder {
+ AUDIO_DECODER_WMA,
+};
+
+
+class MediaProfiles
+{
+public:
+
+ /*
+ * If property media.settings.xml is not set:
+ *
+ * getInstance() will search through paths listed in xmlFiles.
+ * The search goes through members of xmlFiles in the order that they are
+ * defined, so files at lower indices have higher priority than those at
+ * higher indices.
+ *
+ * TODO: Add runtime validation of xml files. A search should be considered
+ * successful only when validation is successful.
+ */
+ static constexpr char const * const xmlFiles[] = {
+ "vendor/etc/media_profiles_V1_0.xml",
+ "system/etc/media_profiles.xml"
+ };
+
+ /**
+ * Returns the singleton instance for subsequent queries or NULL if error.
+ *
+ * If property media.settings.xml is set, getInstance() will attempt to read
+ * from file path in media.settings.xml. Otherwise, getInstance() will
+ * search through the list xmlFiles as described above.
+ *
+ * If the search is unsuccessful, the default instance will be created
+ * instead.
+ *
+ * TODO: After validation is added, getInstance() should handle validation
+ * failure properly.
+ */
+ static MediaProfiles* getInstance();
+
+ /**
+ * Returns the value for the given param name for the given camera at
+ * the given quality level, or -1 if error.
+ *
+ * Supported param names are:
+ * duration - the recording duration.
+ * file.format - output file format. see mediarecorder.h for details
+ * vid.codec - video encoder. see mediarecorder.h for details.
+ * aud.codec - audio encoder. see mediarecorder.h for details.
+ * vid.width - video frame width
+ * vid.height - video frame height
+ * vid.fps - video frame rate
+ * vid.bps - video bit rate
+ * aud.bps - audio bit rate
+ * aud.hz - audio sample rate
+ * aud.ch - number of audio channels
+ */
+ int getCamcorderProfileParamByName(const char *name, int cameraId,
+ camcorder_quality quality) const;
+
+ /**
+ * Returns true if a profile for the given camera at the given quality exists,
+ * or false if not.
+ */
+ bool hasCamcorderProfile(int cameraId, camcorder_quality quality) const;
+
+ /**
+ * Returns the output file formats supported.
+ */
+ Vector<output_format> getOutputFileFormats() const;
+
+ /**
+ * Returns the video encoders supported.
+ */
+ Vector<video_encoder> getVideoEncoders() const;
+
+ /**
+ * Returns the value for the given param name for the given video encoder
+ * returned from getVideoEncoders() or -1 if error.
+ *
+ * Supported param names are:
+ * enc.vid.width.min - min video frame width
+ * enc.vid.width.max - max video frame width
+ * enc.vid.height.min - min video frame height
+ * enc.vid.height.max - max video frame height
+ * enc.vid.bps.min - min bit rate in bits per second
+ * enc.vid.bps.max - max bit rate in bits per second
+ * enc.vid.fps.min - min frame rate in frames per second
+ * enc.vid.fps.max - max frame rate in frames per second
+ */
+ int getVideoEncoderParamByName(const char *name, video_encoder codec) const;
+
+ /**
+ * Returns the audio encoders supported.
+ */
+ Vector<audio_encoder> getAudioEncoders() const;
+
+ /**
+ * Returns the value for the given param name for the given audio encoder
+ * returned from getAudioEncoders() or -1 if error.
+ *
+ * Supported param names are:
+ * enc.aud.ch.min - min number of channels
+ * enc.aud.ch.max - max number of channels
+ * enc.aud.bps.min - min bit rate in bits per second
+ * enc.aud.bps.max - max bit rate in bits per second
+ * enc.aud.hz.min - min sample rate in samples per second
+ * enc.aud.hz.max - max sample rate in samples per second
+ */
+ int getAudioEncoderParamByName(const char *name, audio_encoder codec) const;
+
+ /**
+ * Returns the video decoders supported.
+ */
+ Vector<video_decoder> getVideoDecoders() const;
+
+ /**
+ * Returns the audio decoders supported.
+ */
+ Vector<audio_decoder> getAudioDecoders() const;
+
+ /**
+ * Returns the image encoding quality levels supported.
+ */
+ Vector<int> getImageEncodingQualityLevels(int cameraId) const;
+
+ /**
+ * Returns the start time offset (in ms) for the given camera Id.
+ * If the given camera Id does not exist, -1 will be returned.
+ */
+ int getStartTimeOffsetMs(int cameraId) const;
+
+private:
+ enum {
+ // Camcorder profiles (high/low) and timelapse profiles (high/low)
+ kNumRequiredProfiles = 4,
+ };
+
+ MediaProfiles& operator=(const MediaProfiles&); // Don't call me
+ MediaProfiles(const MediaProfiles&); // Don't call me
+ MediaProfiles() {} // Dummy default constructor
+ ~MediaProfiles(); // Don't delete me
+
+ struct VideoCodec {
+ VideoCodec(video_encoder codec, int bitRate, int frameWidth, int frameHeight, int frameRate)
+ : mCodec(codec),
+ mBitRate(bitRate),
+ mFrameWidth(frameWidth),
+ mFrameHeight(frameHeight),
+ mFrameRate(frameRate) {}
+
+ VideoCodec(const VideoCodec& copy) {
+ mCodec = copy.mCodec;
+ mBitRate = copy.mBitRate;
+ mFrameWidth = copy.mFrameWidth;
+ mFrameHeight = copy.mFrameHeight;
+ mFrameRate = copy.mFrameRate;
+ }
+
+ ~VideoCodec() {}
+
+ video_encoder mCodec;
+ int mBitRate;
+ int mFrameWidth;
+ int mFrameHeight;
+ int mFrameRate;
+ };
+
+ struct AudioCodec {
+ AudioCodec(audio_encoder codec, int bitRate, int sampleRate, int channels)
+ : mCodec(codec),
+ mBitRate(bitRate),
+ mSampleRate(sampleRate),
+ mChannels(channels) {}
+
+ AudioCodec(const AudioCodec& copy) {
+ mCodec = copy.mCodec;
+ mBitRate = copy.mBitRate;
+ mSampleRate = copy.mSampleRate;
+ mChannels = copy.mChannels;
+ }
+
+ ~AudioCodec() {}
+
+ audio_encoder mCodec;
+ int mBitRate;
+ int mSampleRate;
+ int mChannels;
+ };
+
+ struct CamcorderProfile {
+ CamcorderProfile()
+ : mCameraId(0),
+ mFileFormat(OUTPUT_FORMAT_THREE_GPP),
+ mQuality(CAMCORDER_QUALITY_HIGH),
+ mDuration(0),
+ mVideoCodec(0),
+ mAudioCodec(0) {}
+
+ CamcorderProfile(const CamcorderProfile& copy) {
+ mCameraId = copy.mCameraId;
+ mFileFormat = copy.mFileFormat;
+ mQuality = copy.mQuality;
+ mDuration = copy.mDuration;
+ mVideoCodec = new VideoCodec(*copy.mVideoCodec);
+ mAudioCodec = new AudioCodec(*copy.mAudioCodec);
+ }
+
+ ~CamcorderProfile() {
+ delete mVideoCodec;
+ delete mAudioCodec;
+ }
+
+ int mCameraId;
+ output_format mFileFormat;
+ camcorder_quality mQuality;
+ int mDuration;
+ VideoCodec *mVideoCodec;
+ AudioCodec *mAudioCodec;
+ };
+
+ struct VideoEncoderCap {
+ // Ugly constructor
+ VideoEncoderCap(video_encoder codec,
+ int minBitRate, int maxBitRate,
+ int minFrameWidth, int maxFrameWidth,
+ int minFrameHeight, int maxFrameHeight,
+ int minFrameRate, int maxFrameRate)
+ : mCodec(codec),
+ mMinBitRate(minBitRate), mMaxBitRate(maxBitRate),
+ mMinFrameWidth(minFrameWidth), mMaxFrameWidth(maxFrameWidth),
+ mMinFrameHeight(minFrameHeight), mMaxFrameHeight(maxFrameHeight),
+ mMinFrameRate(minFrameRate), mMaxFrameRate(maxFrameRate) {}
+
+ ~VideoEncoderCap() {}
+
+ video_encoder mCodec;
+ int mMinBitRate, mMaxBitRate;
+ int mMinFrameWidth, mMaxFrameWidth;
+ int mMinFrameHeight, mMaxFrameHeight;
+ int mMinFrameRate, mMaxFrameRate;
+ };
+
+ struct AudioEncoderCap {
+ // Ugly constructor
+ AudioEncoderCap(audio_encoder codec,
+ int minBitRate, int maxBitRate,
+ int minSampleRate, int maxSampleRate,
+ int minChannels, int maxChannels)
+ : mCodec(codec),
+ mMinBitRate(minBitRate), mMaxBitRate(maxBitRate),
+ mMinSampleRate(minSampleRate), mMaxSampleRate(maxSampleRate),
+ mMinChannels(minChannels), mMaxChannels(maxChannels) {}
+
+ ~AudioEncoderCap() {}
+
+ audio_encoder mCodec;
+ int mMinBitRate, mMaxBitRate;
+ int mMinSampleRate, mMaxSampleRate;
+ int mMinChannels, mMaxChannels;
+ };
+
+ struct VideoDecoderCap {
+ VideoDecoderCap(video_decoder codec): mCodec(codec) {}
+ ~VideoDecoderCap() {}
+
+ video_decoder mCodec;
+ };
+
+ struct AudioDecoderCap {
+ AudioDecoderCap(audio_decoder codec): mCodec(codec) {}
+ ~AudioDecoderCap() {}
+
+ audio_decoder mCodec;
+ };
+
+ struct NameToTagMap {
+ const char* name;
+ int tag;
+ };
+
+ struct ImageEncodingQualityLevels {
+ int mCameraId;
+ Vector<int> mLevels;
+ };
+
+ int getCamcorderProfileIndex(int cameraId, camcorder_quality quality) const;
+ void initRequiredProfileRefs(const Vector<int>& cameraIds);
+ int getRequiredProfileRefIndex(int cameraId);
+
+ // Debug
+ static void logVideoCodec(const VideoCodec& codec);
+ static void logAudioCodec(const AudioCodec& codec);
+ static void logVideoEncoderCap(const VideoEncoderCap& cap);
+ static void logAudioEncoderCap(const AudioEncoderCap& cap);
+ static void logVideoDecoderCap(const VideoDecoderCap& cap);
+ static void logAudioDecoderCap(const AudioDecoderCap& cap);
+
+ // Returns true if xmlFile exists.
+ // TODO: Add runtime validation.
+ static bool checkXmlFile(const char* xmlFile);
+
+ // If the xml configuration file does exist, use the settings
+ // from the xml
+ static MediaProfiles* createInstanceFromXmlFile(const char *xml);
+ static output_format createEncoderOutputFileFormat(const char **atts);
+ static VideoCodec* createVideoCodec(const char **atts, MediaProfiles *profiles);
+ static AudioCodec* createAudioCodec(const char **atts, MediaProfiles *profiles);
+ static AudioDecoderCap* createAudioDecoderCap(const char **atts);
+ static VideoDecoderCap* createVideoDecoderCap(const char **atts);
+ static VideoEncoderCap* createVideoEncoderCap(const char **atts);
+ static AudioEncoderCap* createAudioEncoderCap(const char **atts);
+
+ static CamcorderProfile* createCamcorderProfile(
+ int cameraId, const char **atts, Vector<int>& cameraIds);
+
+ static int getCameraId(const char **atts);
+
+ void addStartTimeOffset(int cameraId, const char **atts);
+
+ ImageEncodingQualityLevels* findImageEncodingQualityLevels(int cameraId) const;
+ void addImageEncodingQualityLevel(int cameraId, const char** atts);
+
+ // Customized element tag handler for parsing the xml configuration file.
+ static void startElementHandler(void *userData, const char *name, const char **atts);
+
+ // If the xml configuration file does not exist, use hard-coded values
+ static MediaProfiles* createDefaultInstance();
+
+ static CamcorderProfile *createDefaultCamcorderQcifProfile(camcorder_quality quality);
+ static CamcorderProfile *createDefaultCamcorderCifProfile(camcorder_quality quality);
+ static void createDefaultCamcorderLowProfiles(
+ MediaProfiles::CamcorderProfile **lowProfile,
+ MediaProfiles::CamcorderProfile **lowSpecificProfile);
+ static void createDefaultCamcorderHighProfiles(
+ MediaProfiles::CamcorderProfile **highProfile,
+ MediaProfiles::CamcorderProfile **highSpecificProfile);
+
+ static CamcorderProfile *createDefaultCamcorderTimeLapseQcifProfile(camcorder_quality quality);
+ static CamcorderProfile *createDefaultCamcorderTimeLapse480pProfile(camcorder_quality quality);
+ static void createDefaultCamcorderTimeLapseLowProfiles(
+ MediaProfiles::CamcorderProfile **lowTimeLapseProfile,
+ MediaProfiles::CamcorderProfile **lowSpecificTimeLapseProfile);
+ static void createDefaultCamcorderTimeLapseHighProfiles(
+ MediaProfiles::CamcorderProfile **highTimeLapseProfile,
+ MediaProfiles::CamcorderProfile **highSpecificTimeLapseProfile);
+
+ static void createDefaultCamcorderProfiles(MediaProfiles *profiles);
+ static void createDefaultVideoEncoders(MediaProfiles *profiles);
+ static void createDefaultAudioEncoders(MediaProfiles *profiles);
+ static void createDefaultVideoDecoders(MediaProfiles *profiles);
+ static void createDefaultAudioDecoders(MediaProfiles *profiles);
+ static void createDefaultEncoderOutputFileFormats(MediaProfiles *profiles);
+ static void createDefaultImageEncodingQualityLevels(MediaProfiles *profiles);
+ static void createDefaultImageDecodingMaxMemory(MediaProfiles *profiles);
+
+ static VideoEncoderCap* createDefaultH263VideoEncoderCap();
+ static VideoEncoderCap* createDefaultM4vVideoEncoderCap();
+ static AudioEncoderCap* createDefaultAmrNBEncoderCap();
+
+ static int findTagForName(const NameToTagMap *map, size_t nMappings, const char *name);
+
+ /**
+ * Check on existing profiles with the following criteria:
+ * 1. Low quality profile must have the lowest video
+ * resolution product (width x height)
+ * 2. High quality profile must have the highest video
+ * resolution product (width x height)
+ *
+ * and add required low/high quality camcorder/timelapse
+ * profiles if they are not found. This allows to remove
+ * duplicate profile definitions in the media_profiles.xml
+ * file.
+ */
+ void checkAndAddRequiredProfilesIfNecessary();
+
+
+ // Mappings from name (for instance, codec name) to enum value
+ static const NameToTagMap sVideoEncoderNameMap[];
+ static const NameToTagMap sAudioEncoderNameMap[];
+ static const NameToTagMap sFileFormatMap[];
+ static const NameToTagMap sVideoDecoderNameMap[];
+ static const NameToTagMap sAudioDecoderNameMap[];
+ static const NameToTagMap sCamcorderQualityNameMap[];
+
+ static bool sIsInitialized;
+ static MediaProfiles *sInstance;
+ static Mutex sLock;
+ int mCurrentCameraId;
+
+ Vector<CamcorderProfile*> mCamcorderProfiles;
+ Vector<AudioEncoderCap*> mAudioEncoders;
+ Vector<VideoEncoderCap*> mVideoEncoders;
+ Vector<AudioDecoderCap*> mAudioDecoders;
+ Vector<VideoDecoderCap*> mVideoDecoders;
+ Vector<output_format> mEncoderOutputFileFormats;
+ Vector<ImageEncodingQualityLevels *> mImageEncodingQualityLevels;
+ KeyedVector<int, int> mStartTimeOffsets;
+
+ typedef struct {
+ bool mHasRefProfile; // Refers to an existing profile
+ int mRefProfileIndex; // Reference profile index
+ int mResolutionProduct; // width x height
+ } RequiredProfileRefInfo; // Required low and high profiles
+
+ typedef struct {
+ RequiredProfileRefInfo mRefs[kNumRequiredProfiles];
+ int mCameraId;
+ } RequiredProfiles;
+
+ RequiredProfiles *mRequiredProfileRefs;
+ Vector<int> mCameraIds;
+};
+
+}; // namespace android
+
+#endif // ANDROID_MEDIAPROFILES_H
diff --git a/media/libmedia/include/media/MediaRecorderBase.h b/media/libmedia/include/media/MediaRecorderBase.h
new file mode 100644
index 0000000..40dd9f9
--- /dev/null
+++ b/media/libmedia/include/media/MediaRecorderBase.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_RECORDER_BASE_H_
+
+#define MEDIA_RECORDER_BASE_H_
+
+#include <media/mediarecorder.h>
+
+#include <system/audio.h>
+
+namespace android {
+
+class ICameraRecordingProxy;
+class IGraphicBufferProducer;
+struct PersistentSurface;
+
+struct MediaRecorderBase {
+ MediaRecorderBase(const String16 &opPackageName)
+ : mOpPackageName(opPackageName) {}
+ virtual ~MediaRecorderBase() {}
+
+ virtual status_t init() = 0;
+ virtual status_t setAudioSource(audio_source_t as) = 0;
+ virtual status_t setVideoSource(video_source vs) = 0;
+ virtual status_t setOutputFormat(output_format of) = 0;
+ virtual status_t setAudioEncoder(audio_encoder ae) = 0;
+ virtual status_t setVideoEncoder(video_encoder ve) = 0;
+ virtual status_t setVideoSize(int width, int height) = 0;
+ virtual status_t setVideoFrameRate(int frames_per_second) = 0;
+ virtual status_t setCamera(const sp<hardware::ICamera>& camera,
+ const sp<ICameraRecordingProxy>& proxy) = 0;
+ virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface) = 0;
+ virtual status_t setOutputFile(int fd) = 0;
+ virtual status_t setNextOutputFile(int /*fd*/) {return INVALID_OPERATION;}
+ virtual status_t setOutputFileAuxiliary(int /*fd*/) {return INVALID_OPERATION;}
+ virtual status_t setParameters(const String8& params) = 0;
+ virtual status_t setListener(const sp<IMediaRecorderClient>& listener) = 0;
+ virtual status_t setClientName(const String16& clientName) = 0;
+ virtual status_t prepare() = 0;
+ virtual status_t start() = 0;
+ virtual status_t stop() = 0;
+ virtual status_t pause() = 0;
+ virtual status_t resume() = 0;
+ virtual status_t close() = 0;
+ virtual status_t reset() = 0;
+ virtual status_t getMaxAmplitude(int *max) = 0;
+ virtual status_t getMetrics(Parcel *reply) = 0;
+ virtual status_t dump(int fd, const Vector<String16>& args) const = 0;
+ virtual status_t setInputSurface(const sp<PersistentSurface>& surface) = 0;
+ virtual sp<IGraphicBufferProducer> querySurfaceMediaSource() const = 0;
+
+
+protected:
+ String16 mOpPackageName;
+
+private:
+ MediaRecorderBase(const MediaRecorderBase &);
+ MediaRecorderBase &operator=(const MediaRecorderBase &);
+};
+
+} // namespace android
+
+#endif // MEDIA_RECORDER_BASE_H_
diff --git a/include/media/MediaResource.h b/media/libmedia/include/media/MediaResource.h
similarity index 100%
rename from include/media/MediaResource.h
rename to media/libmedia/include/media/MediaResource.h
diff --git a/include/media/MediaResourcePolicy.h b/media/libmedia/include/media/MediaResourcePolicy.h
similarity index 100%
rename from include/media/MediaResourcePolicy.h
rename to media/libmedia/include/media/MediaResourcePolicy.h
diff --git a/include/media/MemoryLeakTrackUtil.h b/media/libmedia/include/media/MemoryLeakTrackUtil.h
similarity index 100%
rename from include/media/MemoryLeakTrackUtil.h
rename to media/libmedia/include/media/MemoryLeakTrackUtil.h
diff --git a/include/media/Metadata.h b/media/libmedia/include/media/Metadata.h
similarity index 100%
rename from include/media/Metadata.h
rename to media/libmedia/include/media/Metadata.h
diff --git a/media/libmedia/include/media/MidiDeviceInfo.h b/media/libmedia/include/media/MidiDeviceInfo.h
new file mode 100644
index 0000000..5b4a241
--- /dev/null
+++ b/media/libmedia/include/media/MidiDeviceInfo.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_MIDI_DEVICE_INFO_H
+#define ANDROID_MEDIA_MIDI_DEVICE_INFO_H
+
+#include <binder/Parcelable.h>
+#include <binder/PersistableBundle.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
+
+namespace android {
+namespace media {
+namespace midi {
+
+class MidiDeviceInfo : public Parcelable {
+public:
+ MidiDeviceInfo() = default;
+ virtual ~MidiDeviceInfo() = default;
+ MidiDeviceInfo(const MidiDeviceInfo& midiDeviceInfo) = default;
+
+ status_t writeToParcel(Parcel* parcel) const override;
+ status_t readFromParcel(const Parcel* parcel) override;
+
+ int getType() const { return mType; }
+ int getUid() const { return mId; }
+ bool isPrivate() const { return mIsPrivate; }
+ const Vector<String16>& getInputPortNames() const { return mInputPortNames; }
+ const Vector<String16>& getOutputPortNames() const { return mOutputPortNames; }
+ String16 getProperty(const char* propertyName);
+
+ // The constants need to be kept in sync with MidiDeviceInfo.java
+ enum {
+ TYPE_USB = 1,
+ TYPE_VIRTUAL = 2,
+ TYPE_BLUETOOTH = 3,
+ };
+ static const char* const PROPERTY_NAME;
+ static const char* const PROPERTY_MANUFACTURER;
+ static const char* const PROPERTY_PRODUCT;
+ static const char* const PROPERTY_VERSION;
+ static const char* const PROPERTY_SERIAL_NUMBER;
+ static const char* const PROPERTY_ALSA_CARD;
+ static const char* const PROPERTY_ALSA_DEVICE;
+
+ friend bool operator==(const MidiDeviceInfo& lhs, const MidiDeviceInfo& rhs);
+ friend bool operator!=(const MidiDeviceInfo& lhs, const MidiDeviceInfo& rhs) {
+ return !(lhs == rhs);
+ }
+
+private:
+ status_t readStringVector(
+ const Parcel* parcel, Vector<String16> *vectorPtr, size_t defaultLength);
+ status_t writeStringVector(Parcel* parcel, const Vector<String16>& vector) const;
+
+ int32_t mType;
+ int32_t mId;
+ Vector<String16> mInputPortNames;
+ Vector<String16> mOutputPortNames;
+ os::PersistableBundle mProperties;
+ bool mIsPrivate;
+};
+
+} // namespace midi
+} // namespace media
+} // namespace android
+
+#endif // ANDROID_MEDIA_MIDI_DEVICE_INFO_H
diff --git a/include/media/MidiIoWrapper.h b/media/libmedia/include/media/MidiIoWrapper.h
similarity index 100%
rename from include/media/MidiIoWrapper.h
rename to media/libmedia/include/media/MidiIoWrapper.h
diff --git a/include/media/Modulo.h b/media/libmedia/include/media/Modulo.h
similarity index 100%
rename from include/media/Modulo.h
rename to media/libmedia/include/media/Modulo.h
diff --git a/media/libmedia/include/media/OMXBuffer.h b/media/libmedia/include/media/OMXBuffer.h
new file mode 100644
index 0000000..6f79182
--- /dev/null
+++ b/media/libmedia/include/media/OMXBuffer.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _OMXBUFFER_H_
+#define _OMXBUFFER_H_
+
+#include <cutils/native_handle.h>
+#include <media/IOMX.h>
+#include <system/window.h>
+#include <utils/StrongPointer.h>
+#include <hidl/HidlSupport.h>
+
+namespace android {
+
+class OMXBuffer;
+
+// This is needed temporarily for the OMX HIDL transition.
+namespace hardware { namespace media { namespace omx {
+namespace V1_0 {
+ struct CodecBuffer;
+namespace implementation {
+ inline bool wrapAs(::android::hardware::media::omx::V1_0::CodecBuffer* t,
+ ::android::OMXBuffer const& l);
+ inline bool convertTo(::android::OMXBuffer* l,
+ ::android::hardware::media::omx::V1_0::CodecBuffer const& t);
+}
+namespace utils {
+ inline bool wrapAs(::android::hardware::media::omx::V1_0::CodecBuffer* t,
+ ::android::OMXBuffer const& l);
+ inline bool convertTo(::android::OMXBuffer* l,
+ ::android::hardware::media::omx::V1_0::CodecBuffer const& t);
+}
+}}}}
+
+class GraphicBuffer;
+class IMemory;
+class MediaCodecBuffer;
+class NativeHandle;
+struct OMXNodeInstance;
+using hardware::hidl_memory;
+
+// TODO: After complete HIDL transition, this class would be replaced by
+// CodecBuffer.
+class OMXBuffer {
+public:
+ // sPreset is used in places where we are referring to a pre-registered
+ // buffer on a port. It has type kBufferTypePreset and mRangeLength of 0.
+ static OMXBuffer sPreset;
+
+ // Default constructor, constructs a buffer of type kBufferTypeInvalid.
+ OMXBuffer();
+
+ // Constructs a buffer of type kBufferTypePreset with mRangeOffset set to
+ // |codecBuffer|'s offset and mRangeLength set to |codecBuffer|'s size (or 0
+ // if |codecBuffer| is NULL).
+ OMXBuffer(const sp<MediaCodecBuffer> &codecBuffer);
+
+ // Constructs a buffer of type kBufferTypePreset with specified mRangeOffset
+ // and mRangeLength.
+ OMXBuffer(OMX_U32 rangeOffset, OMX_U32 rangeLength);
+
+ // Constructs a buffer of type kBufferTypeSharedMem.
+ OMXBuffer(const sp<IMemory> &mem);
+
+ // Constructs a buffer of type kBufferTypeANWBuffer.
+ OMXBuffer(const sp<GraphicBuffer> &gbuf);
+
+ // Constructs a buffer of type kBufferTypeNativeHandle.
+ OMXBuffer(const sp<NativeHandle> &handle);
+
+ // Constructs a buffer of type kBufferTypeHidlMemory.
+ OMXBuffer(const hidl_memory &hidlMemory);
+
+ // Parcelling/Un-parcelling.
+ status_t writeToParcel(Parcel *parcel) const;
+ status_t readFromParcel(const Parcel *parcel);
+
+ ~OMXBuffer();
+
+private:
+ friend struct OMXNodeInstance;
+
+ // This is needed temporarily for OMX HIDL transition.
+ friend inline bool (::android::hardware::media::omx::V1_0::implementation::
+ wrapAs)(::android::hardware::media::omx::V1_0::CodecBuffer* t,
+ OMXBuffer const& l);
+ friend inline bool (::android::hardware::media::omx::V1_0::implementation::
+ convertTo)(OMXBuffer* l,
+ ::android::hardware::media::omx::V1_0::CodecBuffer const& t);
+ friend inline bool (::android::hardware::media::omx::V1_0::utils::
+ wrapAs)(::android::hardware::media::omx::V1_0::CodecBuffer* t,
+ OMXBuffer const& l);
+ friend inline bool (::android::hardware::media::omx::V1_0::utils::
+ convertTo)(OMXBuffer* l,
+ ::android::hardware::media::omx::V1_0::CodecBuffer const& t);
+
+ enum BufferType {
+ kBufferTypeInvalid = 0,
+ kBufferTypePreset,
+ kBufferTypeSharedMem,
+ kBufferTypeANWBuffer, // Use only for non-Treble
+ kBufferTypeNativeHandle,
+ kBufferTypeHidlMemory // Mapped to CodecBuffer::Type::SHARED_MEM.
+ };
+
+ BufferType mBufferType;
+
+ // kBufferTypePreset
+ // If the port is operating in byte buffer mode, mRangeLength is the valid
+ // range length. Otherwise the range info should also be ignored.
+ OMX_U32 mRangeOffset;
+ OMX_U32 mRangeLength;
+
+ // kBufferTypeSharedMem
+ sp<IMemory> mMem;
+
+ // kBufferTypeANWBuffer
+ sp<GraphicBuffer> mGraphicBuffer;
+
+ // kBufferTypeNativeHandle
+ sp<NativeHandle> mNativeHandle;
+
+ // kBufferTypeHidlMemory
+ hidl_memory mHidlMemory;
+
+ // Move assignment
+ OMXBuffer &operator=(OMXBuffer&&);
+
+ // Deleted copy constructor and assignment.
+ OMXBuffer(const OMXBuffer&) = delete;
+ OMXBuffer& operator=(const OMXBuffer&) = delete;
+};
+
+} // namespace android
+
+#endif // _OMXBUFFER_H_
diff --git a/media/libmedia/include/media/OMXFenceParcelable.h b/media/libmedia/include/media/OMXFenceParcelable.h
new file mode 100644
index 0000000..2a8da87
--- /dev/null
+++ b/media/libmedia/include/media/OMXFenceParcelable.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _OMX_FENCE_PARCELABLE_
+#define _OMX_FENCE_PARCELABLE_
+
+#include <binder/Parcel.h>
+
+namespace android {
+
+struct OMXFenceParcelable;
+
+// This is needed temporarily for the OMX HIDL transition.
+namespace hardware {
+ struct hidl_handle;
+namespace media { namespace omx { namespace V1_0 {
+namespace implementation {
+ void wrapAs(::android::OMXFenceParcelable* l,
+ ::android::hardware::hidl_handle const& t);
+ bool convertTo(::android::OMXFenceParcelable* l,
+ ::android::hardware::hidl_handle const& t);
+}
+namespace utils {
+ void wrapAs(::android::OMXFenceParcelable* l,
+ ::android::hardware::hidl_handle const& t);
+ bool convertTo(::android::OMXFenceParcelable* l,
+ ::android::hardware::hidl_handle const& t);
+}
+}}}}
+
+struct OMXFenceParcelable : public Parcelable {
+ OMXFenceParcelable() : mFenceFd(-1) {}
+ OMXFenceParcelable(int fenceFd) : mFenceFd(fenceFd) {}
+
+ int get() const { return mFenceFd; }
+
+ status_t readFromParcel(const Parcel* parcel) override;
+ status_t writeToParcel(Parcel* parcel) const override;
+
+private:
+ // Disable copy ctor and operator=
+ OMXFenceParcelable(const OMXFenceParcelable &);
+ OMXFenceParcelable &operator=(const OMXFenceParcelable &);
+
+ int mFenceFd;
+
+ // This is needed temporarily for OMX HIDL transition.
+ friend void (::android::hardware::media::omx::V1_0::implementation::
+ wrapAs)(OMXFenceParcelable* l,
+ ::android::hardware::hidl_handle const& t);
+ friend bool (::android::hardware::media::omx::V1_0::implementation::
+ convertTo)(OMXFenceParcelable* l,
+ ::android::hardware::hidl_handle const& t);
+ friend void (::android::hardware::media::omx::V1_0::utils::
+ wrapAs)(OMXFenceParcelable* l,
+ ::android::hardware::hidl_handle const& t);
+ friend bool (::android::hardware::media::omx::V1_0::utils::
+ convertTo)(OMXFenceParcelable* l,
+ ::android::hardware::hidl_handle const& t);
+};
+
+inline status_t OMXFenceParcelable::readFromParcel(const Parcel* parcel) {
+ int32_t haveFence;
+ status_t err = parcel->readInt32(&haveFence);
+ if (err == OK && haveFence) {
+ int fd = ::dup(parcel->readFileDescriptor());
+ if (fd < 0) {
+ return fd;
+ }
+ mFenceFd = fd;
+ }
+ return err;
+}
+
+inline status_t OMXFenceParcelable::writeToParcel(Parcel* parcel) const {
+ status_t err = parcel->writeInt32(mFenceFd >= 0);
+ if (err == OK && mFenceFd >= 0) {
+ err = parcel->writeFileDescriptor(mFenceFd, true /* takeOwnership */);
+ }
+ return err;
+}
+
+} // namespace android
+
+#endif // _OMX_FENCE_PARCELABLE_
diff --git a/media/libmedia/include/media/PluginLoader.h b/media/libmedia/include/media/PluginLoader.h
new file mode 100644
index 0000000..a626e16
--- /dev/null
+++ b/media/libmedia/include/media/PluginLoader.h
@@ -0,0 +1,99 @@
+/**
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef PLUGIN_LOADER_H_
+#define PLUGIN_LOADER_H_
+
+#include "SharedLibrary.h"
+#include <utils/Log.h>
+#include <utils/String8.h>
+#include <utils/Vector.h>
+
+namespace android {
+
+template <class T>
+class PluginLoader {
+
+ public:
+ PluginLoader(const char *dir, const char *entry) {
+ /**
+ * scan all plugins in the plugin directory and add them to the
+ * factories list.
+ */
+ String8 pluginDir(dir);
+
+ DIR* pDir = opendir(pluginDir.string());
+ if (pDir == NULL) {
+ ALOGE("Failed to find plugin directory %s", pluginDir.string());
+ } else {
+ struct dirent* pEntry;
+ while ((pEntry = readdir(pDir))) {
+ String8 file(pEntry->d_name);
+ if (file.getPathExtension() == ".so") {
+ String8 path = pluginDir + "/" + pEntry->d_name;
+ T *plugin = loadOne(path, entry);
+ if (plugin) {
+ factories.push(plugin);
+ }
+ }
+ }
+ closedir(pDir);
+ }
+ }
+
+ ~PluginLoader() {
+ for (size_t i = 0; i < factories.size(); i++) {
+ delete factories[i];
+ }
+ }
+
+ T *getFactory(size_t i) const {
+ return factories[i];
+ }
+
+ size_t factoryCount() const {return factories.size();}
+
+ private:
+ T* loadOne(const char *path, const char *entry) {
+ sp<SharedLibrary> library = new SharedLibrary(String8(path));
+ if (!library.get()) {
+ ALOGE("Failed to open plugin library %s: %s", path,
+ library->lastError());
+ } else {
+ typedef T *(*CreateFactoryFunc)();
+ CreateFactoryFunc createFactoryFunc =
+ (CreateFactoryFunc)library->lookup(entry);
+ if (createFactoryFunc) {
+ ALOGV("Found plugin factory entry %s in %s", entry, path);
+ libraries.push(library);
+ T* result = createFactoryFunc();
+ return result;
+ }
+ }
+ return NULL;
+ }
+
+ Vector<T *> factories;
+ Vector<sp<SharedLibrary> > libraries;
+
+ PluginLoader(const PluginLoader &) = delete;
+ void operator=(const PluginLoader &) = delete;
+};
+
+} // namespace android
+
+#endif // PLUGIN_LOADER_H_
+
diff --git a/media/libmedia/include/media/RecordBufferConverter.h b/media/libmedia/include/media/RecordBufferConverter.h
new file mode 100644
index 0000000..2abc45e
--- /dev/null
+++ b/media/libmedia/include/media/RecordBufferConverter.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_RECORD_BUFFER_CONVERTER_H
+#define ANDROID_RECORD_BUFFER_CONVERTER_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <media/AudioBufferProvider.h>
+#include <system/audio.h>
+
+class AudioResampler;
+class PassthruBufferProvider;
+
+namespace android {
+
+/* The RecordBufferConverter is used for format, channel, and sample rate
+ * conversion for a RecordTrack.
+ *
+ * RecordBufferConverter uses the convert() method rather than exposing a
+ * buffer provider interface; this is to save a memory copy.
+ *
+ * There are legacy conversion requirements for this converter, specifically
+ * due to mono handling, so be careful about modifying.
+ *
+ * Original source audioflinger/Threads.{h,cpp}
+ */
+class RecordBufferConverter
+{
+public:
+ RecordBufferConverter(
+ audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
+ uint32_t srcSampleRate,
+ audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
+ uint32_t dstSampleRate);
+
+ ~RecordBufferConverter();
+
+ /* Converts input data from an AudioBufferProvider by format, channelMask,
+ * and sampleRate to a destination buffer.
+ *
+ * Parameters
+ * dst: buffer to place the converted data.
+ * provider: buffer provider to obtain source data.
+ * frames: number of frames to convert
+ *
+ * Returns the number of frames converted.
+ */
+ size_t convert(void *dst, AudioBufferProvider *provider, size_t frames);
+
+ // returns NO_ERROR if constructor was successful
+ status_t initCheck() const {
+ // mSrcChannelMask set on successful updateParameters
+ return mSrcChannelMask != AUDIO_CHANNEL_INVALID ? NO_ERROR : NO_INIT;
+ }
+
+ // allows dynamic reconfigure of all parameters
+ status_t updateParameters(
+ audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
+ uint32_t srcSampleRate,
+ audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
+ uint32_t dstSampleRate);
+
+ // called to reset resampler buffers on record track discontinuity
+ void reset();
+
+private:
+ // format conversion when not using resampler
+ void convertNoResampler(void *dst, const void *src, size_t frames);
+
+ // format conversion when using resampler; modifies src in-place
+ void convertResampler(void *dst, /*not-a-const*/ void *src, size_t frames);
+
+ // user provided information
+ audio_channel_mask_t mSrcChannelMask;
+ audio_format_t mSrcFormat;
+ uint32_t mSrcSampleRate;
+ audio_channel_mask_t mDstChannelMask;
+ audio_format_t mDstFormat;
+ uint32_t mDstSampleRate;
+
+ // derived information
+ uint32_t mSrcChannelCount;
+ uint32_t mDstChannelCount;
+ size_t mDstFrameSize;
+
+ // format conversion buffer
+ void *mBuf;
+ size_t mBufFrames;
+ size_t mBufFrameSize;
+
+ // resampler info
+ AudioResampler *mResampler;
+
+ bool mIsLegacyDownmix; // legacy stereo to mono conversion needed
+ bool mIsLegacyUpmix; // legacy mono to stereo conversion needed
+ bool mRequiresFloat; // data processing requires float (e.g. resampler)
+ PassthruBufferProvider *mInputConverterProvider; // converts input to float
+ int8_t mIdxAry[sizeof(uint32_t) * 8]; // used for channel mask conversion
+};
+
+// ----------------------------------------------------------------------------
+} // namespace android
+
+#endif // ANDROID_RECORD_BUFFER_CONVERTER_H
diff --git a/include/media/RingBuffer.h b/media/libmedia/include/media/RingBuffer.h
similarity index 100%
rename from include/media/RingBuffer.h
rename to media/libmedia/include/media/RingBuffer.h
diff --git a/include/media/SharedLibrary.h b/media/libmedia/include/media/SharedLibrary.h
similarity index 100%
rename from include/media/SharedLibrary.h
rename to media/libmedia/include/media/SharedLibrary.h
diff --git a/include/media/SingleStateQueue.h b/media/libmedia/include/media/SingleStateQueue.h
similarity index 100%
rename from include/media/SingleStateQueue.h
rename to media/libmedia/include/media/SingleStateQueue.h
diff --git a/include/media/StringArray.h b/media/libmedia/include/media/StringArray.h
similarity index 100%
rename from include/media/StringArray.h
rename to media/libmedia/include/media/StringArray.h
diff --git a/media/libmedia/include/media/TypeConverter.h b/media/libmedia/include/media/TypeConverter.h
new file mode 100644
index 0000000..84e22b1
--- /dev/null
+++ b/media/libmedia/include/media/TypeConverter.h
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_TYPE_CONVERTER_H_
+#define ANDROID_TYPE_CONVERTER_H_
+
+#include <string>
+#include <string.h>
+
+#include <system/audio.h>
+#include <utils/Log.h>
+#include <utils/Vector.h>
+#include <utils/SortedVector.h>
+
+#include <media/AudioParameter.h>
+#include "convert.h"
+
+namespace android {
+
+struct SampleRateTraits
+{
+ typedef uint32_t Type;
+ typedef SortedVector<Type> Collection;
+};
+struct DeviceTraits
+{
+ typedef audio_devices_t Type;
+ typedef Vector<Type> Collection;
+};
+struct OutputDeviceTraits : public DeviceTraits {};
+struct InputDeviceTraits : public DeviceTraits {};
+struct OutputFlagTraits
+{
+ typedef audio_output_flags_t Type;
+ typedef Vector<Type> Collection;
+};
+struct InputFlagTraits
+{
+ typedef audio_input_flags_t Type;
+ typedef Vector<Type> Collection;
+};
+struct FormatTraits
+{
+ typedef audio_format_t Type;
+ typedef Vector<Type> Collection;
+};
+struct ChannelTraits
+{
+ typedef audio_channel_mask_t Type;
+ typedef SortedVector<Type> Collection;
+};
+struct OutputChannelTraits : public ChannelTraits {};
+struct InputChannelTraits : public ChannelTraits {};
+struct ChannelIndexTraits : public ChannelTraits {};
+struct GainModeTraits
+{
+ typedef audio_gain_mode_t Type;
+ typedef Vector<Type> Collection;
+};
+struct StreamTraits
+{
+ typedef audio_stream_type_t Type;
+ typedef Vector<Type> Collection;
+};
+struct AudioModeTraits
+{
+ typedef audio_mode_t Type;
+ typedef Vector<Type> Collection;
+};
+struct UsageTraits
+{
+ typedef audio_usage_t Type;
+ typedef Vector<Type> Collection;
+};
+struct SourceTraits
+{
+ typedef audio_source_t Type;
+ typedef Vector<Type> Collection;
+};
+template <typename T>
+struct DefaultTraits
+{
+ typedef T Type;
+ typedef Vector<Type> Collection;
+};
+
+template <class Traits>
+static void collectionFromString(const std::string &str, typename Traits::Collection &collection,
+ const char *del = AudioParameter::valueListSeparator)
+{
+ char *literal = strdup(str.c_str());
+ for (const char *cstr = strtok(literal, del); cstr != NULL; cstr = strtok(NULL, del)) {
+ typename Traits::Type value;
+ if (utilities::convertTo<std::string, typename Traits::Type >(cstr, value)) {
+ collection.add(value);
+ }
+ }
+ free(literal);
+}
+
+template <class Traits>
+class TypeConverter
+{
+public:
+ static bool toString(const typename Traits::Type &value, std::string &str);
+
+ static bool fromString(const std::string &str, typename Traits::Type &result);
+
+ static void collectionFromString(const std::string &str,
+ typename Traits::Collection &collection,
+ const char *del = AudioParameter::valueListSeparator);
+
+ static uint32_t maskFromString(
+ const std::string &str, const char *del = AudioParameter::valueListSeparator);
+
+ static void maskToString(
+ uint32_t mask, std::string &str, const char *del = AudioParameter::valueListSeparator);
+
+protected:
+ struct Table {
+ const char *literal;
+ typename Traits::Type value;
+ };
+
+ static const Table mTable[];
+};
+
+template <class Traits>
+inline bool TypeConverter<Traits>::toString(const typename Traits::Type &value, std::string &str)
+{
+ for (size_t i = 0; mTable[i].literal; i++) {
+ if (mTable[i].value == value) {
+ str = mTable[i].literal;
+ return true;
+ }
+ }
+ char result[64];
+ snprintf(result, sizeof(result), "Unknown enum value %d", value);
+ str = result;
+ return false;
+}
+
+template <class Traits>
+inline bool TypeConverter<Traits>::fromString(const std::string &str, typename Traits::Type &result)
+{
+ for (size_t i = 0; mTable[i].literal; i++) {
+ if (strcmp(mTable[i].literal, str.c_str()) == 0) {
+ ALOGV("stringToEnum() found %s", mTable[i].literal);
+ result = mTable[i].value;
+ return true;
+ }
+ }
+ return false;
+}
+
+template <class Traits>
+inline void TypeConverter<Traits>::collectionFromString(const std::string &str,
+ typename Traits::Collection &collection,
+ const char *del)
+{
+ char *literal = strdup(str.c_str());
+
+ for (const char *cstr = strtok(literal, del); cstr != NULL; cstr = strtok(NULL, del)) {
+ typename Traits::Type value;
+ if (fromString(cstr, value)) {
+ collection.add(value);
+ }
+ }
+ free(literal);
+}
+
+template <class Traits>
+inline uint32_t TypeConverter<Traits>::maskFromString(const std::string &str, const char *del)
+{
+ char *literal = strdup(str.c_str());
+ uint32_t value = 0;
+ for (const char *cstr = strtok(literal, del); cstr != NULL; cstr = strtok(NULL, del)) {
+ typename Traits::Type type;
+ if (fromString(cstr, type)) {
+ value |= static_cast<uint32_t>(type);
+ }
+ }
+ free(literal);
+ return value;
+}
+
+template <class Traits>
+inline void TypeConverter<Traits>::maskToString(uint32_t mask, std::string &str, const char *del)
+{
+ if (mask != 0) {
+ bool first_flag = true;
+ for (size_t i = 0; mTable[i].literal; i++) {
+ uint32_t value = static_cast<uint32_t>(mTable[i].value);
+ if (mTable[i].value != 0 && ((mask & value) == value)) {
+ if (!first_flag) str += del;
+ first_flag = false;
+ str += mTable[i].literal;
+ }
+ }
+ } else {
+ toString(static_cast<typename Traits::Type>(0), str);
+ }
+}
+
+typedef TypeConverter<OutputDeviceTraits> OutputDeviceConverter;
+typedef TypeConverter<InputDeviceTraits> InputDeviceConverter;
+typedef TypeConverter<OutputFlagTraits> OutputFlagConverter;
+typedef TypeConverter<InputFlagTraits> InputFlagConverter;
+typedef TypeConverter<FormatTraits> FormatConverter;
+typedef TypeConverter<OutputChannelTraits> OutputChannelConverter;
+typedef TypeConverter<InputChannelTraits> InputChannelConverter;
+typedef TypeConverter<ChannelIndexTraits> ChannelIndexConverter;
+typedef TypeConverter<GainModeTraits> GainModeConverter;
+typedef TypeConverter<StreamTraits> StreamTypeConverter;
+typedef TypeConverter<AudioModeTraits> AudioModeConverter;
+typedef TypeConverter<UsageTraits> UsageTypeConverter;
+typedef TypeConverter<SourceTraits> SourceTypeConverter;
+
+template<> const OutputDeviceConverter::Table OutputDeviceConverter::mTable[];
+template<> const InputDeviceConverter::Table InputDeviceConverter::mTable[];
+template<> const OutputFlagConverter::Table OutputFlagConverter::mTable[];
+template<> const InputFlagConverter::Table InputFlagConverter::mTable[];
+template<> const FormatConverter::Table FormatConverter::mTable[];
+template<> const OutputChannelConverter::Table OutputChannelConverter::mTable[];
+template<> const InputChannelConverter::Table InputChannelConverter::mTable[];
+template<> const ChannelIndexConverter::Table ChannelIndexConverter::mTable[];
+template<> const GainModeConverter::Table GainModeConverter::mTable[];
+template<> const StreamTypeConverter::Table StreamTypeConverter::mTable[];
+template<> const AudioModeConverter::Table AudioModeConverter::mTable[];
+template<> const UsageTypeConverter::Table UsageTypeConverter::mTable[];
+template<> const SourceTypeConverter::Table SourceTypeConverter::mTable[];
+
+bool deviceFromString(const std::string& literalDevice, audio_devices_t& device);
+
+bool deviceToString(audio_devices_t device, std::string& literalDevice);
+
+SampleRateTraits::Collection samplingRatesFromString(
+ const std::string &samplingRates, const char *del = AudioParameter::valueListSeparator);
+
+FormatTraits::Collection formatsFromString(
+ const std::string &formats, const char *del = AudioParameter::valueListSeparator);
+
+audio_format_t formatFromString(
+ const std::string &literalFormat, audio_format_t defaultFormat = AUDIO_FORMAT_DEFAULT);
+
+audio_channel_mask_t channelMaskFromString(const std::string &literalChannels);
+
+ChannelTraits::Collection channelMasksFromString(
+ const std::string &channels, const char *del = AudioParameter::valueListSeparator);
+
+InputChannelTraits::Collection inputChannelMasksFromString(
+ const std::string &inChannels, const char *del = AudioParameter::valueListSeparator);
+
+OutputChannelTraits::Collection outputChannelMasksFromString(
+ const std::string &outChannels, const char *del = AudioParameter::valueListSeparator);
+
+}; // namespace android
+
+#endif /*ANDROID_TYPE_CONVERTER_H_*/
diff --git a/media/libmedia/include/media/Visualizer.h b/media/libmedia/include/media/Visualizer.h
new file mode 100644
index 0000000..f8f4f50
--- /dev/null
+++ b/media/libmedia/include/media/Visualizer.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_VISUALIZER_H
+#define ANDROID_MEDIA_VISUALIZER_H
+
+#include <media/AudioEffect.h>
+#include <system/audio_effects/effect_visualizer.h>
+#include <utils/Thread.h>
+
+/**
+ * The Visualizer class enables application to retrieve part of the currently playing audio for
+ * visualization purpose. It is not an audio recording interface and only returns partial and low
+ * quality audio content. However, to protect privacy of certain audio data (e.g voice mail) the use
+ * of the visualizer requires the permission android.permission.RECORD_AUDIO.
+ * The audio session ID passed to the constructor indicates which audio content should be
+ * visualized:
+ * - If the session is 0, the audio output mix is visualized
+ * - If the session is not 0, the audio from a particular MediaPlayer or AudioTrack
+ * using this audio session is visualized
+ * Two types of representation of audio content can be captured:
+ * - Waveform data: consecutive 8-bit (unsigned) mono samples by using the getWaveForm() method
+ * - Frequency data: 8-bit magnitude FFT by using the getFft() method
+ *
+ * The length of the capture can be retrieved or specified by calling respectively
+ * getCaptureSize() and setCaptureSize() methods. Note that the size of the FFT
+ * is half of the specified capture size but both sides of the spectrum are returned yielding in a
+ * number of bytes equal to the capture size. The capture size must be a power of 2 in the range
+ * returned by getMinCaptureSize() and getMaxCaptureSize().
+ * In addition to the polling capture mode, a callback mode is also available by installing a
+ * callback function by use of the setCaptureCallBack() method. The rate at which the callback
+ * is called as well as the type of data returned is specified.
+ * Before capturing data, the Visualizer must be enabled by calling the setEnabled() method.
+ * When data capture is not needed any more, the Visualizer should be disabled.
+ */
+
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+class Visualizer: public AudioEffect {
+public:
+
+ enum callback_flags {
+ CAPTURE_WAVEFORM = 0x00000001, // capture callback returns a PCM wave form
+        CAPTURE_FFT = 0x00000002, // capture callback returns a frequency representation
+ CAPTURE_CALL_JAVA = 0x00000004 // the callback thread can call java
+ };
+
+
+ /* Constructor.
+ * See AudioEffect constructor for details on parameters.
+ */
+ Visualizer(const String16& opPackageName,
+ int32_t priority = 0,
+ effect_callback_t cbf = NULL,
+ void* user = NULL,
+ audio_session_t sessionId = AUDIO_SESSION_OUTPUT_MIX);
+
+ ~Visualizer();
+
+ virtual status_t setEnabled(bool enabled);
+
+ // maximum capture size in samples
+ static uint32_t getMaxCaptureSize() { return VISUALIZER_CAPTURE_SIZE_MAX; }
+ // minimum capture size in samples
+ static uint32_t getMinCaptureSize() { return VISUALIZER_CAPTURE_SIZE_MIN; }
+ // maximum capture rate in millihertz
+ static uint32_t getMaxCaptureRate() { return CAPTURE_RATE_MAX; }
+
+ // callback used to return periodic PCM or FFT captures to the application. Either one or both
+ // types of data are returned (PCM and FFT) according to flags indicated when installing the
+ // callback. When a type of data is not present, the corresponding size (waveformSize or
+ // fftSize) is 0.
+ typedef void (*capture_cbk_t)(void* user,
+ uint32_t waveformSize,
+ uint8_t *waveform,
+ uint32_t fftSize,
+ uint8_t *fft,
+ uint32_t samplingrate);
+
+ // install a callback to receive periodic captures. The capture rate is specified in milliHertz
+ // and the capture format is according to flags (see callback_flags).
+ status_t setCaptureCallBack(capture_cbk_t cbk, void* user, uint32_t flags, uint32_t rate);
+
+    // set the capture size; the capture size must be a power of two in the range
+    // [VISUALIZER_CAPTURE_SIZE_MIN, VISUALIZER_CAPTURE_SIZE_MAX]
+ // must be called when the visualizer is not enabled
+ status_t setCaptureSize(uint32_t size);
+ uint32_t getCaptureSize() { return mCaptureSize; }
+
+ // returns the capture rate indicated when installing the callback
+ uint32_t getCaptureRate() { return mCaptureRate; }
+
+ // returns the sampling rate of the audio being captured
+ uint32_t getSamplingRate() { return mSampleRate; }
+
+ // set the way volume affects the captured data
+    // mode must be one of VISUALIZER_SCALING_MODE_NORMALIZED,
+ // VISUALIZER_SCALING_MODE_AS_PLAYED
+ status_t setScalingMode(uint32_t mode);
+ uint32_t getScalingMode() { return mScalingMode; }
+
+ // set which measurements are done on the audio buffers processed by the effect.
+ // valid measurements (mask): MEASUREMENT_MODE_PEAK_RMS
+ status_t setMeasurementMode(uint32_t mode);
+ uint32_t getMeasurementMode() { return mMeasurementMode; }
+
+ // return a set of int32_t measurements
+ status_t getIntMeasurements(uint32_t type, uint32_t number, int32_t *measurements);
+
+ // return a capture in PCM 8 bit unsigned format. The size of the capture is equal to
+ // getCaptureSize()
+ status_t getWaveForm(uint8_t *waveform);
+
+ // return a capture in FFT 8 bit signed format. The size of the capture is equal to
+ // getCaptureSize() but the length of the FFT is half of the size (both parts of the spectrum
+    // are returned)
+ status_t getFft(uint8_t *fft);
+
+protected:
+ // from IEffectClient
+ virtual void controlStatusChanged(bool controlGranted);
+
+private:
+
+ static const uint32_t CAPTURE_RATE_MAX = 20000;
+ static const uint32_t CAPTURE_RATE_DEF = 10000;
+ static const uint32_t CAPTURE_SIZE_DEF = VISUALIZER_CAPTURE_SIZE_MAX;
+
+ /* internal class to handle the callback */
+ class CaptureThread : public Thread
+ {
+ public:
+ CaptureThread(Visualizer& receiver, uint32_t captureRate, bool bCanCallJava = false);
+
+ private:
+ friend class Visualizer;
+ virtual bool threadLoop();
+ Visualizer& mReceiver;
+ Mutex mLock;
+ uint32_t mSleepTimeUs;
+ };
+
+ status_t doFft(uint8_t *fft, uint8_t *waveform);
+ void periodicCapture();
+ uint32_t initCaptureSize();
+
+ Mutex mCaptureLock;
+ uint32_t mCaptureRate;
+ uint32_t mCaptureSize;
+ uint32_t mSampleRate;
+ uint32_t mScalingMode;
+ uint32_t mMeasurementMode;
+ capture_cbk_t mCaptureCallBack;
+ void *mCaptureCbkUser;
+ sp<CaptureThread> mCaptureThread;
+ uint32_t mCaptureFlags;
+};
+
+
+}; // namespace android
+
+#endif // ANDROID_MEDIA_VISUALIZER_H
diff --git a/services/audiopolicy/utilities/convert/convert.h b/media/libmedia/include/media/convert.h
similarity index 100%
rename from services/audiopolicy/utilities/convert/convert.h
rename to media/libmedia/include/media/convert.h
diff --git a/include/media/mediametadataretriever.h b/media/libmedia/include/media/mediametadataretriever.h
similarity index 100%
rename from include/media/mediametadataretriever.h
rename to media/libmedia/include/media/mediametadataretriever.h
diff --git a/media/libmedia/include/media/mediaplayer.h b/media/libmedia/include/media/mediaplayer.h
new file mode 100644
index 0000000..623c374
--- /dev/null
+++ b/media/libmedia/include/media/mediaplayer.h
@@ -0,0 +1,317 @@
+/*
+ * Copyright (C) 2007 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIAPLAYER_H
+#define ANDROID_MEDIAPLAYER_H
+
+#include <arpa/inet.h>
+
+#include <binder/IMemory.h>
+
+#include <media/AudioResamplerPublic.h>
+#include <media/BufferingSettings.h>
+#include <media/IMediaPlayerClient.h>
+#include <media/IMediaPlayer.h>
+#include <media/IMediaDeathNotifier.h>
+#include <media/IStreamSource.h>
+
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+
+struct ANativeWindow;
+
+namespace android {
+
+struct AVSyncSettings;
+class IGraphicBufferProducer;
+class Surface;
+
+enum media_event_type {
+ MEDIA_NOP = 0, // interface test message
+ MEDIA_PREPARED = 1,
+ MEDIA_PLAYBACK_COMPLETE = 2,
+ MEDIA_BUFFERING_UPDATE = 3,
+ MEDIA_SEEK_COMPLETE = 4,
+ MEDIA_SET_VIDEO_SIZE = 5,
+ MEDIA_STARTED = 6,
+ MEDIA_PAUSED = 7,
+ MEDIA_STOPPED = 8,
+ MEDIA_SKIPPED = 9,
+ MEDIA_TIMED_TEXT = 99,
+ MEDIA_ERROR = 100,
+ MEDIA_INFO = 200,
+ MEDIA_SUBTITLE_DATA = 201,
+ MEDIA_META_DATA = 202,
+ MEDIA_DRM_INFO = 210,
+};
+
+// Generic error codes for the media player framework. Errors are fatal, the
+// playback must abort.
+//
+// Errors are communicated back to the client using the
+// MediaPlayerListener::notify method defined below.
+// In this situation, 'notify' is invoked with the following:
+// 'msg' is set to MEDIA_ERROR.
+// 'ext1' should be a value from the enum media_error_type.
+// 'ext2' contains an implementation dependent error code to provide
+// more details. Should default to 0 when not used.
+//
+// The codes are distributed as follow:
+// 0xx: Reserved
+// 1xx: Android Player errors. Something went wrong inside the MediaPlayer.
+// 2xx: Media errors (e.g Codec not supported). There is a problem with the
+// media itself.
+// 3xx: Runtime errors. Some extraordinary condition arose making the playback
+// impossible.
+//
+enum media_error_type {
+ // 0xx
+ MEDIA_ERROR_UNKNOWN = 1,
+ // 1xx
+ MEDIA_ERROR_SERVER_DIED = 100,
+ // 2xx
+ MEDIA_ERROR_NOT_VALID_FOR_PROGRESSIVE_PLAYBACK = 200,
+ // 3xx
+};
+
+
+// Info and warning codes for the media player framework. These are non fatal,
+// the playback is going on but there might be some user visible issues.
+//
+// Info and warning messages are communicated back to the client using the
+// MediaPlayerListener::notify method defined below. In this situation,
+// 'notify' is invoked with the following:
+// 'msg' is set to MEDIA_INFO.
+// 'ext1' should be a value from the enum media_info_type.
+// 'ext2' contains an implementation dependent info code to provide
+// more details. Should default to 0 when not used.
+//
+// The codes are distributed as follow:
+// 0xx: Reserved
+// 7xx: Android Player info/warning (e.g player lagging behind.)
+// 8xx: Media info/warning (e.g media badly interleaved.)
+//
+enum media_info_type {
+ // 0xx
+ MEDIA_INFO_UNKNOWN = 1,
+ // The player was started because it was used as the next player for another
+ // player, which just completed playback
+ MEDIA_INFO_STARTED_AS_NEXT = 2,
+ // The player just pushed the very first video frame for rendering
+ MEDIA_INFO_RENDERING_START = 3,
+ // 7xx
+ // The video is too complex for the decoder: it can't decode frames fast
+ // enough. Possibly only the audio plays fine at this stage.
+ MEDIA_INFO_VIDEO_TRACK_LAGGING = 700,
+ // MediaPlayer is temporarily pausing playback internally in order to
+ // buffer more data.
+ MEDIA_INFO_BUFFERING_START = 701,
+ // MediaPlayer is resuming playback after filling buffers.
+ MEDIA_INFO_BUFFERING_END = 702,
+ // Bandwidth in recent past
+ MEDIA_INFO_NETWORK_BANDWIDTH = 703,
+
+ // 8xx
+ // Bad interleaving means that a media has been improperly interleaved or not
+ // interleaved at all, e.g has all the video samples first then all the audio
+ // ones. Video is playing but a lot of disk seek may be happening.
+ MEDIA_INFO_BAD_INTERLEAVING = 800,
+ // The media is not seekable (e.g live stream).
+ MEDIA_INFO_NOT_SEEKABLE = 801,
+ // New media metadata is available.
+ MEDIA_INFO_METADATA_UPDATE = 802,
+ // Audio can not be played.
+ MEDIA_INFO_PLAY_AUDIO_ERROR = 804,
+ // Video can not be played.
+ MEDIA_INFO_PLAY_VIDEO_ERROR = 805,
+
+ //9xx
+ MEDIA_INFO_TIMED_TEXT_ERROR = 900,
+};
+
+
+
+enum media_player_states {
+ MEDIA_PLAYER_STATE_ERROR = 0,
+ MEDIA_PLAYER_IDLE = 1 << 0,
+ MEDIA_PLAYER_INITIALIZED = 1 << 1,
+ MEDIA_PLAYER_PREPARING = 1 << 2,
+ MEDIA_PLAYER_PREPARED = 1 << 3,
+ MEDIA_PLAYER_STARTED = 1 << 4,
+ MEDIA_PLAYER_PAUSED = 1 << 5,
+ MEDIA_PLAYER_STOPPED = 1 << 6,
+ MEDIA_PLAYER_PLAYBACK_COMPLETE = 1 << 7
+};
+
+// Keep KEY_PARAMETER_* in sync with MediaPlayer.java.
+// The same enum space is used for both set and get, in case there are future keys that
+// can be both set and get. But as of now, all parameters are either set only or get only.
+enum media_parameter_keys {
+ // Streaming/buffering parameters
+ KEY_PARAMETER_CACHE_STAT_COLLECT_FREQ_MS = 1100, // set only
+
+ // Return a Parcel containing a single int, which is the channel count of the
+ // audio track, or zero for error (e.g. no audio track) or unknown.
+ KEY_PARAMETER_AUDIO_CHANNEL_COUNT = 1200, // get only
+
+ // Playback rate expressed in permille (1000 is normal speed), saved as int32_t, with negative
+ // values used for rewinding or reverse playback.
+ KEY_PARAMETER_PLAYBACK_RATE_PERMILLE = 1300, // set only
+
+ // Set a Parcel containing the value of a parcelled Java AudioAttribute instance
+ KEY_PARAMETER_AUDIO_ATTRIBUTES = 1400 // set only
+};
+
+// Keep INVOKE_ID_* in sync with MediaPlayer.java.
+enum media_player_invoke_ids {
+ INVOKE_ID_GET_TRACK_INFO = 1,
+ INVOKE_ID_ADD_EXTERNAL_SOURCE = 2,
+ INVOKE_ID_ADD_EXTERNAL_SOURCE_FD = 3,
+ INVOKE_ID_SELECT_TRACK = 4,
+ INVOKE_ID_UNSELECT_TRACK = 5,
+ INVOKE_ID_SET_VIDEO_SCALING_MODE = 6,
+ INVOKE_ID_GET_SELECTED_TRACK = 7
+};
+
+// Keep MEDIA_TRACK_TYPE_* in sync with MediaPlayer.java.
+enum media_track_type {
+ MEDIA_TRACK_TYPE_UNKNOWN = 0,
+ MEDIA_TRACK_TYPE_VIDEO = 1,
+ MEDIA_TRACK_TYPE_AUDIO = 2,
+ MEDIA_TRACK_TYPE_TIMEDTEXT = 3,
+ MEDIA_TRACK_TYPE_SUBTITLE = 4,
+ MEDIA_TRACK_TYPE_METADATA = 5,
+};
+
+// ----------------------------------------------------------------------------
+// ref-counted object for callbacks
+class MediaPlayerListener: virtual public RefBase
+{
+public:
+ virtual void notify(int msg, int ext1, int ext2, const Parcel *obj) = 0;
+};
+
+struct IMediaHTTPService;
+
+class MediaPlayer : public BnMediaPlayerClient,
+ public virtual IMediaDeathNotifier
+{
+public:
+ MediaPlayer();
+ ~MediaPlayer();
+ void died();
+ void disconnect();
+
+ status_t setDataSource(
+ const sp<IMediaHTTPService> &httpService,
+ const char *url,
+ const KeyedVector<String8, String8> *headers);
+
+ status_t setDataSource(int fd, int64_t offset, int64_t length);
+ status_t setDataSource(const sp<IDataSource> &source);
+ status_t setVideoSurfaceTexture(
+ const sp<IGraphicBufferProducer>& bufferProducer);
+ status_t setListener(const sp<MediaPlayerListener>& listener);
+ status_t getDefaultBufferingSettings(BufferingSettings* buffering /* nonnull */);
+ status_t getBufferingSettings(BufferingSettings* buffering /* nonnull */);
+ status_t setBufferingSettings(const BufferingSettings& buffering);
+ status_t prepare();
+ status_t prepareAsync();
+ status_t start();
+ status_t stop();
+ status_t pause();
+ bool isPlaying();
+ status_t setPlaybackSettings(const AudioPlaybackRate& rate);
+ status_t getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */);
+ status_t setSyncSettings(const AVSyncSettings& sync, float videoFpsHint);
+ status_t getSyncSettings(
+ AVSyncSettings* sync /* nonnull */,
+ float* videoFps /* nonnull */);
+ status_t getVideoWidth(int *w);
+ status_t getVideoHeight(int *h);
+ status_t seekTo(
+ int msec,
+ MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC);
+ status_t getCurrentPosition(int *msec);
+ status_t getDuration(int *msec);
+ status_t reset();
+ status_t setAudioStreamType(audio_stream_type_t type);
+ status_t getAudioStreamType(audio_stream_type_t *type);
+ status_t setLooping(int loop);
+ bool isLooping();
+ status_t setVolume(float leftVolume, float rightVolume);
+ void notify(int msg, int ext1, int ext2, const Parcel *obj = NULL);
+ status_t invoke(const Parcel& request, Parcel *reply);
+ status_t setMetadataFilter(const Parcel& filter);
+ status_t getMetadata(bool update_only, bool apply_filter, Parcel *metadata);
+ status_t setAudioSessionId(audio_session_t sessionId);
+ audio_session_t getAudioSessionId();
+ status_t setAuxEffectSendLevel(float level);
+ status_t attachAuxEffect(int effectId);
+ status_t setParameter(int key, const Parcel& request);
+ status_t getParameter(int key, Parcel* reply);
+ status_t setRetransmitEndpoint(const char* addrString, uint16_t port);
+ status_t setNextMediaPlayer(const sp<MediaPlayer>& player);
+
+ VolumeShaper::Status applyVolumeShaper(
+ const sp<VolumeShaper::Configuration>& configuration,
+ const sp<VolumeShaper::Operation>& operation);
+ sp<VolumeShaper::State> getVolumeShaperState(int id);
+ // Modular DRM
+ status_t prepareDrm(const uint8_t uuid[16], const Vector<uint8_t>& drmSessionId);
+ status_t releaseDrm();
+
+private:
+ void clear_l();
+ status_t seekTo_l(int msec, MediaPlayerSeekMode mode);
+ status_t prepareAsync_l();
+ status_t getDuration_l(int *msec);
+ status_t attachNewPlayer(const sp<IMediaPlayer>& player);
+ status_t reset_l();
+ status_t doSetRetransmitEndpoint(const sp<IMediaPlayer>& player);
+ status_t checkStateForKeySet_l(int key);
+
+ sp<IMediaPlayer> mPlayer;
+ thread_id_t mLockThreadId;
+ Mutex mLock;
+ Mutex mNotifyLock;
+ Condition mSignal;
+ sp<MediaPlayerListener> mListener;
+ void* mCookie;
+ media_player_states mCurrentState;
+ int mCurrentPosition;
+ MediaPlayerSeekMode mCurrentSeekMode;
+ int mSeekPosition;
+ MediaPlayerSeekMode mSeekMode;
+ bool mPrepareSync;
+ status_t mPrepareStatus;
+ audio_stream_type_t mStreamType;
+ Parcel* mAudioAttributesParcel;
+ bool mLoop;
+ float mLeftVolume;
+ float mRightVolume;
+ int mVideoWidth;
+ int mVideoHeight;
+ audio_session_t mAudioSessionId;
+ float mSendLevel;
+ struct sockaddr_in mRetransmitEndpoint;
+ bool mRetransmitEndpointValid;
+ BufferingSettings mCurrentBufferingSettings;
+};
+
+}; // namespace android
+
+#endif // ANDROID_MEDIAPLAYER_H
diff --git a/media/libmedia/include/media/mediarecorder.h b/media/libmedia/include/media/mediarecorder.h
new file mode 100644
index 0000000..071e7a1
--- /dev/null
+++ b/media/libmedia/include/media/mediarecorder.h
@@ -0,0 +1,278 @@
+/*
+ ** Copyright (C) 2008 The Android Open Source Project
+ **
+ ** Licensed under the Apache License, Version 2.0 (the "License");
+ ** you may not use this file except in compliance with the License.
+ ** You may obtain a copy of the License at
+ **
+ ** http://www.apache.org/licenses/LICENSE-2.0
+ **
+ ** Unless required by applicable law or agreed to in writing, software
+ ** distributed under the License is distributed on an "AS IS" BASIS,
+ ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ** See the License for the specific language governing permissions and
+ **
+ ** limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIARECORDER_H
+#define ANDROID_MEDIARECORDER_H
+
+#include <utils/Log.h>
+#include <utils/threads.h>
+#include <utils/List.h>
+#include <utils/Errors.h>
+#include <media/IMediaRecorderClient.h>
+#include <media/IMediaDeathNotifier.h>
+
+namespace android {
+
+class Surface;
+class IMediaRecorder;
+class ICameraRecordingProxy;
+class IGraphicBufferProducer;
+struct PersistentSurface;
+class Surface;
+
+namespace hardware {
+class ICamera;
+}
+
+typedef void (*media_completion_f)(status_t status, void *cookie);
+
+enum video_source {
+ VIDEO_SOURCE_DEFAULT = 0,
+ VIDEO_SOURCE_CAMERA = 1,
+ VIDEO_SOURCE_SURFACE = 2,
+
+ VIDEO_SOURCE_LIST_END // must be last - used to validate audio source type
+};
+
+//Please update media/java/android/media/MediaRecorder.java if the following is updated.
+enum output_format {
+ OUTPUT_FORMAT_DEFAULT = 0,
+ OUTPUT_FORMAT_THREE_GPP = 1,
+ OUTPUT_FORMAT_MPEG_4 = 2,
+
+
+ OUTPUT_FORMAT_AUDIO_ONLY_START = 3, // Used in validating the output format. Should be the
+ // at the start of the audio only output formats.
+
+ /* These are audio only file formats */
+ OUTPUT_FORMAT_RAW_AMR = 3, //to be backward compatible
+ OUTPUT_FORMAT_AMR_NB = 3,
+ OUTPUT_FORMAT_AMR_WB = 4,
+ OUTPUT_FORMAT_AAC_ADIF = 5,
+ OUTPUT_FORMAT_AAC_ADTS = 6,
+
+ OUTPUT_FORMAT_AUDIO_ONLY_END = 7, // Used in validating the output format. Should be the
+ // at the end of the audio only output formats.
+
+ /* Stream over a socket, limited to a single stream */
+ OUTPUT_FORMAT_RTP_AVP = 7,
+
+ /* H.264/AAC data encapsulated in MPEG2/TS */
+ OUTPUT_FORMAT_MPEG2TS = 8,
+
+ /* VP8/VORBIS data in a WEBM container */
+ OUTPUT_FORMAT_WEBM = 9,
+
+ OUTPUT_FORMAT_LIST_END // must be last - used to validate format type
+};
+
+enum audio_encoder {
+ AUDIO_ENCODER_DEFAULT = 0,
+ AUDIO_ENCODER_AMR_NB = 1,
+ AUDIO_ENCODER_AMR_WB = 2,
+ AUDIO_ENCODER_AAC = 3,
+ AUDIO_ENCODER_HE_AAC = 4,
+ AUDIO_ENCODER_AAC_ELD = 5,
+ AUDIO_ENCODER_VORBIS = 6,
+
+ AUDIO_ENCODER_LIST_END // must be the last - used to validate the audio encoder type
+};
+
+enum video_encoder {
+ VIDEO_ENCODER_DEFAULT = 0,
+ VIDEO_ENCODER_H263 = 1,
+ VIDEO_ENCODER_H264 = 2,
+ VIDEO_ENCODER_MPEG_4_SP = 3,
+ VIDEO_ENCODER_VP8 = 4,
+ VIDEO_ENCODER_HEVC = 5,
+
+ VIDEO_ENCODER_LIST_END // must be the last - used to validate the video encoder type
+};
+
+/*
+ * The state machine of the media_recorder.
+ */
+enum media_recorder_states {
+ // Error state.
+ MEDIA_RECORDER_ERROR = 0,
+
+ // Recorder was just created.
+ MEDIA_RECORDER_IDLE = 1 << 0,
+
+ // Recorder has been initialized.
+ MEDIA_RECORDER_INITIALIZED = 1 << 1,
+
+ // Configuration of the recorder has been completed.
+ MEDIA_RECORDER_DATASOURCE_CONFIGURED = 1 << 2,
+
+ // Recorder is ready to start.
+ MEDIA_RECORDER_PREPARED = 1 << 3,
+
+ // Recording is in progress.
+ MEDIA_RECORDER_RECORDING = 1 << 4,
+};
+
+// The "msg" code passed to the listener in notify.
+enum media_recorder_event_type {
+ MEDIA_RECORDER_EVENT_LIST_START = 1,
+ MEDIA_RECORDER_EVENT_ERROR = 1,
+ MEDIA_RECORDER_EVENT_INFO = 2,
+ MEDIA_RECORDER_EVENT_LIST_END = 99,
+
+ // Track related event types
+ MEDIA_RECORDER_TRACK_EVENT_LIST_START = 100,
+ MEDIA_RECORDER_TRACK_EVENT_ERROR = 100,
+ MEDIA_RECORDER_TRACK_EVENT_INFO = 101,
+ MEDIA_RECORDER_TRACK_EVENT_LIST_END = 1000,
+};
+
+/*
+ * The (part of) "what" code passed to the listener in notify.
+ * When the error or info type is track specific, the what has
+ * the following layout:
+ * the left-most 16-bit is meant for error or info type.
+ * the right-most 4-bit is meant for track id.
+ * the rest is reserved.
+ *
+ * | track id | reserved | error or info type |
+ * 31 28 16 0
+ *
+ */
+enum media_recorder_error_type {
+ MEDIA_RECORDER_ERROR_UNKNOWN = 1,
+
+ // Track related error type
+ MEDIA_RECORDER_TRACK_ERROR_LIST_START = 100,
+ MEDIA_RECORDER_TRACK_ERROR_GENERAL = 100,
+ MEDIA_RECORDER_ERROR_VIDEO_NO_SYNC_FRAME = 200,
+ MEDIA_RECORDER_TRACK_ERROR_LIST_END = 1000,
+};
+
+// The codes are distributed as follow:
+// 0xx: Reserved
+// 8xx: General info/warning
+//
+enum media_recorder_info_type {
+ MEDIA_RECORDER_INFO_UNKNOWN = 1,
+
+ MEDIA_RECORDER_INFO_MAX_DURATION_REACHED = 800,
+ MEDIA_RECORDER_INFO_MAX_FILESIZE_REACHED = 801,
+ MEDIA_RECORDER_INFO_MAX_FILESIZE_APPROACHING = 802,
+ MEDIA_RECORDER_INFO_NEXT_OUTPUT_FILE_STARTED = 803,
+
+    // All track related informational events start here
+ MEDIA_RECORDER_TRACK_INFO_LIST_START = 1000,
+ MEDIA_RECORDER_TRACK_INFO_COMPLETION_STATUS = 1000,
+ MEDIA_RECORDER_TRACK_INFO_PROGRESS_IN_TIME = 1001,
+ MEDIA_RECORDER_TRACK_INFO_TYPE = 1002,
+ MEDIA_RECORDER_TRACK_INFO_DURATION_MS = 1003,
+
+ // The time to measure the max chunk duration
+ MEDIA_RECORDER_TRACK_INFO_MAX_CHUNK_DUR_MS = 1004,
+
+ MEDIA_RECORDER_TRACK_INFO_ENCODED_FRAMES = 1005,
+
+ // The time to measure how well the audio and video
+ // track data is interleaved.
+ MEDIA_RECORDER_TRACK_INTER_CHUNK_TIME_MS = 1006,
+
+ // The time to measure system response. Note that
+ // the delay does not include the intentional delay
+ // we use to eliminate the recording sound.
+ MEDIA_RECORDER_TRACK_INFO_INITIAL_DELAY_MS = 1007,
+
+ // The time used to compensate for initial A/V sync.
+ MEDIA_RECORDER_TRACK_INFO_START_OFFSET_MS = 1008,
+
+ // Total number of bytes of the media data.
+ MEDIA_RECORDER_TRACK_INFO_DATA_KBYTES = 1009,
+
+ MEDIA_RECORDER_TRACK_INFO_LIST_END = 2000,
+};
+
+// ----------------------------------------------------------------------------
+// ref-counted object for callbacks
+class MediaRecorderListener: virtual public RefBase
+{
+public:
+ virtual void notify(int msg, int ext1, int ext2) = 0;
+};
+
+class MediaRecorder : public BnMediaRecorderClient,
+ public virtual IMediaDeathNotifier
+{
+public:
+ MediaRecorder(const String16& opPackageName);
+ ~MediaRecorder();
+
+ void died();
+ status_t initCheck();
+ status_t setCamera(const sp<hardware::ICamera>& camera,
+ const sp<ICameraRecordingProxy>& proxy);
+ status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface);
+ status_t setVideoSource(int vs);
+ status_t setAudioSource(int as);
+ status_t setOutputFormat(int of);
+ status_t setVideoEncoder(int ve);
+ status_t setAudioEncoder(int ae);
+ status_t setOutputFile(int fd);
+ status_t setNextOutputFile(int fd);
+ status_t setVideoSize(int width, int height);
+ status_t setVideoFrameRate(int frames_per_second);
+ status_t setParameters(const String8& params);
+ status_t setListener(const sp<MediaRecorderListener>& listener);
+ status_t setClientName(const String16& clientName);
+ status_t prepare();
+ status_t getMaxAmplitude(int* max);
+ status_t start();
+ status_t stop();
+ status_t reset();
+ status_t pause();
+ status_t resume();
+ status_t init();
+ status_t close();
+ status_t release();
+ void notify(int msg, int ext1, int ext2);
+ status_t setInputSurface(const sp<PersistentSurface>& surface);
+ sp<IGraphicBufferProducer> querySurfaceMediaSourceFromMediaServer();
+ status_t getMetrics(Parcel *reply);
+
+private:
+ void doCleanUp();
+ status_t doReset();
+
+ sp<IMediaRecorder> mMediaRecorder;
+ sp<MediaRecorderListener> mListener;
+
+ // Reference to IGraphicBufferProducer
+ // for encoding GL Frames. That is useful only when the
+ // video source is set to VIDEO_SOURCE_GRALLOC_BUFFER
+ sp<IGraphicBufferProducer> mSurfaceMediaSource;
+
+ media_recorder_states mCurrentState;
+ bool mIsAudioSourceSet;
+ bool mIsVideoSourceSet;
+ bool mIsAudioEncoderSet;
+ bool mIsVideoEncoderSet;
+ bool mIsOutputFileSet;
+ Mutex mLock;
+ Mutex mNotifyLock;
+};
+
+}; // namespace android
+
+#endif // ANDROID_MEDIARECORDER_H
diff --git a/include/media/mediascanner.h b/media/libmedia/include/media/mediascanner.h
similarity index 100%
rename from include/media/mediascanner.h
rename to media/libmedia/include/media/mediascanner.h
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index fbe749c..b976721 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -16,7 +16,7 @@
*/
//#define LOG_NDEBUG 0
-#define LOG_TAG "MediaPlayer"
+#define LOG_TAG "MediaPlayerNative"
#include <fcntl.h>
#include <inttypes.h>
@@ -36,6 +36,7 @@
#include <media/AudioSystem.h>
#include <media/AVSyncSettings.h>
#include <media/IDataSource.h>
+#include <media/MediaAnalyticsItem.h>
#include <binder/MemoryBase.h>
@@ -55,7 +56,9 @@
mStreamType = AUDIO_STREAM_MUSIC;
mAudioAttributesParcel = NULL;
mCurrentPosition = -1;
+ mCurrentSeekMode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC;
mSeekPosition = -1;
+ mSeekMode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC;
mCurrentState = MEDIA_PLAYER_IDLE;
mPrepareSync = false;
mPrepareStatus = NO_ERROR;
@@ -100,7 +103,9 @@
void MediaPlayer::clear_l()
{
mCurrentPosition = -1;
+ mCurrentSeekMode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC;
mSeekPosition = -1;
+ mSeekMode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC;
mVideoWidth = mVideoHeight = 0;
mRetransmitEndpointValid = false;
}
@@ -132,8 +137,10 @@
mPlayer = player;
if (player != 0) {
mCurrentState = MEDIA_PLAYER_INITIALIZED;
+ player->getDefaultBufferingSettings(&mCurrentBufferingSettings);
err = NO_ERROR;
} else {
+ mCurrentBufferingSettings = BufferingSettings();
ALOGE("Unable to create media player");
}
}
@@ -240,6 +247,44 @@
return mPlayer->setVideoSurfaceTexture(bufferProducer);
}
+status_t MediaPlayer::getDefaultBufferingSettings(BufferingSettings* buffering /* nonnull */)
+{
+ ALOGV("getDefaultBufferingSettings");
+
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == 0) {
+ return NO_INIT;
+ }
+ return mPlayer->getDefaultBufferingSettings(buffering);
+}
+
+status_t MediaPlayer::getBufferingSettings(BufferingSettings* buffering /* nonnull */)
+{
+ ALOGV("getBufferingSettings");
+
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == 0) {
+ return NO_INIT;
+ }
+ *buffering = mCurrentBufferingSettings;
+ return NO_ERROR;
+}
+
+status_t MediaPlayer::setBufferingSettings(const BufferingSettings& buffering)
+{
+ ALOGV("setBufferingSettings");
+
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == 0) {
+ return NO_INIT;
+ }
+ status_t err = mPlayer->setBufferingSettings(buffering);
+ if (err == NO_ERROR) {
+ mCurrentBufferingSettings = buffering;
+ }
+ return err;
+}
+
// must call with lock held
status_t MediaPlayer::prepareAsync_l()
{
@@ -508,9 +553,9 @@
return getDuration_l(msec);
}
-status_t MediaPlayer::seekTo_l(int msec)
+status_t MediaPlayer::seekTo_l(int msec, MediaPlayerSeekMode mode)
{
- ALOGV("seekTo %d", msec);
+ ALOGV("seekTo (%d, %d)", msec, mode);
if ((mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER_STARTED | MEDIA_PLAYER_PREPARED |
MEDIA_PLAYER_PAUSED | MEDIA_PLAYER_PLAYBACK_COMPLETE) ) ) {
if ( msec < 0 ) {
@@ -537,12 +582,14 @@
// cache duration
mCurrentPosition = msec;
+ mCurrentSeekMode = mode;
if (mSeekPosition < 0) {
mSeekPosition = msec;
- return mPlayer->seekTo(msec);
+ mSeekMode = mode;
+ return mPlayer->seekTo(msec, mode);
}
else {
- ALOGV("Seek in progress - queue up seekTo[%d]", msec);
+ ALOGV("Seek in progress - queue up seekTo[%d, %d]", msec, mode);
return NO_ERROR;
}
}
@@ -551,11 +598,11 @@
return INVALID_OPERATION;
}
-status_t MediaPlayer::seekTo(int msec)
+status_t MediaPlayer::seekTo(int msec, MediaPlayerSeekMode mode)
{
mLockThreadId = getThreadId();
Mutex::Autolock _l(mLock);
- status_t result = seekTo_l(msec);
+ status_t result = seekTo_l(msec, mode);
mLockThreadId = 0;
return result;
@@ -578,6 +625,7 @@
// setDataSource has to be called again to create a
// new mediaplayer.
mPlayer = 0;
+ mCurrentBufferingSettings = BufferingSettings();
return ret;
}
clear_l();
@@ -763,7 +811,11 @@
ALOGV("MediaPlayer::getParameter(%d)", key);
Mutex::Autolock _l(mLock);
if (mPlayer != NULL) {
- return mPlayer->getParameter(key, reply);
+ status_t status = mPlayer->getParameter(key, reply);
+ if (status != OK) {
+ ALOGD("getParameter returns %d", status);
+ }
+ return status;
}
ALOGV("getParameter: no active player");
return INVALID_OPERATION;
@@ -827,7 +879,7 @@
case MEDIA_NOP: // interface test message
break;
case MEDIA_PREPARED:
- ALOGV("prepared");
+ ALOGV("MediaPlayer::notify() prepared");
mCurrentState = MEDIA_PLAYER_PREPARED;
if (mPrepareSync) {
ALOGV("signal application thread");
@@ -836,6 +888,9 @@
mSignal.signal();
}
break;
+ case MEDIA_DRM_INFO:
+ ALOGV("MediaPlayer::notify() MEDIA_DRM_INFO(%d, %d, %d, %p)", msg, ext1, ext2, obj);
+ break;
case MEDIA_PLAYBACK_COMPLETE:
ALOGV("playback complete");
if (mCurrentState == MEDIA_PLAYER_IDLE) {
@@ -869,14 +924,16 @@
break;
case MEDIA_SEEK_COMPLETE:
ALOGV("Received seek complete");
- if (mSeekPosition != mCurrentPosition) {
- ALOGV("Executing queued seekTo(%d)", mSeekPosition);
+ if (mSeekPosition != mCurrentPosition || (mSeekMode != mCurrentSeekMode)) {
+ ALOGV("Executing queued seekTo(%d, %d)", mCurrentPosition, mCurrentSeekMode);
mSeekPosition = -1;
- seekTo_l(mCurrentPosition);
+ mSeekMode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC;
+ seekTo_l(mCurrentPosition, mCurrentSeekMode);
}
else {
ALOGV("All seeks complete - return to regularly scheduled program");
mCurrentPosition = mSeekPosition = -1;
+ mCurrentSeekMode = mSeekMode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC;
}
break;
case MEDIA_BUFFERING_UPDATE:
@@ -934,4 +991,89 @@
return mPlayer->setNextPlayer(next == NULL ? NULL : next->mPlayer);
}
+VolumeShaper::Status MediaPlayer::applyVolumeShaper(
+ const sp<VolumeShaper::Configuration>& configuration,
+ const sp<VolumeShaper::Operation>& operation)
+{
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == nullptr) {
+ return VolumeShaper::Status(NO_INIT);
+ }
+ VolumeShaper::Status status = mPlayer->applyVolumeShaper(configuration, operation);
+ return status;
+}
+
+sp<VolumeShaper::State> MediaPlayer::getVolumeShaperState(int id)
+{
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == nullptr) {
+ return nullptr;
+ }
+ return mPlayer->getVolumeShaperState(id);
+}
+
+// Modular DRM
+status_t MediaPlayer::prepareDrm(const uint8_t uuid[16], const Vector<uint8_t>& drmSessionId)
+{
+ // TODO change to ALOGV
+ ALOGD("prepareDrm: uuid: %p drmSessionId: %p(%zu)", uuid,
+ drmSessionId.array(), drmSessionId.size());
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == NULL) {
+ return NO_INIT;
+ }
+
+ // Only allow it in player's preparing/prepared state.
+ // We get here only if MEDIA_DRM_INFO has already arrived (e.g., prepare is half-way through or
+ // completed) so the state change to "prepared" might not have happened yet (e.g., buffering).
+ // Still, we can allow prepareDrm for the use case of being called in OnDrmInfoListener.
+ if (!(mCurrentState & (MEDIA_PLAYER_PREPARING | MEDIA_PLAYER_PREPARED))) {
+ ALOGE("prepareDrm is called in the wrong state (%d).", mCurrentState);
+ return INVALID_OPERATION;
+ }
+
+ if (drmSessionId.isEmpty()) {
+ ALOGE("prepareDrm: Unexpected. Can't proceed with crypto. Empty drmSessionId.");
+ return INVALID_OPERATION;
+ }
+
+ // Passing down to mediaserver mainly for creating the crypto
+ status_t status = mPlayer->prepareDrm(uuid, drmSessionId);
+ ALOGE_IF(status != OK, "prepareDrm: Failed at mediaserver with ret: %d", status);
+
+ // TODO change to ALOGV
+ ALOGD("prepareDrm: mediaserver::prepareDrm ret=%d", status);
+
+ return status;
+}
+
+status_t MediaPlayer::releaseDrm()
+{
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == NULL) {
+ return NO_INIT;
+ }
+
+ // Not allowing releaseDrm in an active/resumable state
+ if (mCurrentState & (MEDIA_PLAYER_STARTED |
+ MEDIA_PLAYER_PAUSED |
+ MEDIA_PLAYER_PLAYBACK_COMPLETE |
+ MEDIA_PLAYER_STATE_ERROR)) {
+ ALOGE("releaseDrm Unexpected state %d. Can only be called in stopped/idle.", mCurrentState);
+ return INVALID_OPERATION;
+ }
+
+ status_t status = mPlayer->releaseDrm();
+ // TODO change to ALOGV
+ ALOGD("releaseDrm: mediaserver::releaseDrm ret: %d", status);
+ if (status != OK) {
+ ALOGE("releaseDrm: Failed at mediaserver with ret: %d", status);
+ // Overriding to OK so the client proceeds with its own cleanup
+ // Client can't do more cleanup. mediaserver release its crypto at end of session anyway.
+ status = OK;
+ }
+
+ return status;
+}
+
} // namespace android
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index 59c077a..4405930 100644
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -256,6 +256,7 @@
return INVALID_OPERATION;
}
+
status_t ret = mMediaRecorder->setAudioEncoder(ae);
if (OK != ret) {
ALOGV("setAudioEncoder failed: %d", ret);
@@ -266,9 +267,9 @@
return ret;
}
-status_t MediaRecorder::setOutputFile(int fd, int64_t offset, int64_t length)
+status_t MediaRecorder::setOutputFile(int fd)
{
- ALOGV("setOutputFile(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length);
+ ALOGV("setOutputFile(%d)", fd);
if (mMediaRecorder == NULL) {
ALOGE("media recorder is not initialized yet");
return INVALID_OPERATION;
@@ -288,14 +289,19 @@
// the invalid file descritpor never gets invoked. This is to workaround
// this issue by checking the file descriptor first before passing
// it through binder call.
- if (fd < 0) {
- ALOGE("Invalid file descriptor: %d", fd);
+ int flags = fcntl(fd, F_GETFL);
+ if (flags == -1) {
+ ALOGE("Fail to get File Status Flags err: %s", strerror(errno));
+ }
+ // fd must be in read-write mode or write-only mode.
+ if ((flags & (O_RDWR | O_WRONLY)) == 0) {
+ ALOGE("File descriptor is not in read-write mode or write-only mode");
return BAD_VALUE;
}
- status_t ret = mMediaRecorder->setOutputFile(fd, offset, length);
+ status_t ret = mMediaRecorder->setOutputFile(fd);
if (OK != ret) {
- ALOGV("setOutputFile failed: %d", ret);
+ ALOGE("setOutputFile failed: %d", ret);
mCurrentState = MEDIA_RECORDER_ERROR;
return ret;
}
@@ -303,6 +309,37 @@
return ret;
}
+status_t MediaRecorder::setNextOutputFile(int fd)
+{
+ ALOGV("setNextOutputFile(%d)", fd);
+ if (mMediaRecorder == NULL) {
+ ALOGE("media recorder is not initialized yet");
+ return INVALID_OPERATION;
+ }
+
+ // It appears that if an invalid file descriptor is passed through
+ // binder calls, the server-side of the inter-process function call
+ // is skipped. As a result, the check at the server-side to catch
+ // the invalid file descriptor never gets invoked. This is to work around
+ // this issue by checking the file descriptor first before passing
+ // it through binder call.
+ int flags = fcntl(fd, F_GETFL);
+ if (flags == -1) {
+ ALOGE("Fail to get File Status Flags err: %s", strerror(errno));
+ }
+ // fd must be in read-write mode or write-only mode.
+ if ((flags & (O_RDWR | O_WRONLY)) == 0) {
+ ALOGE("File descriptor is not in read-write mode or write-only mode");
+ return BAD_VALUE;
+ }
+
+ status_t ret = mMediaRecorder->setNextOutputFile(fd);
+ if (OK != ret) {
+ ALOGE("setNextOutputFile failed: %d", ret);
+ }
+ return ret;
+}
+
status_t MediaRecorder::setVideoSize(int width, int height)
{
ALOGV("setVideoSize(%d, %d)", width, height);
@@ -361,7 +398,7 @@
return INVALID_OPERATION;
}
- return mMediaRecorder->setInputSurface(surface->getBufferConsumer());
+ return mMediaRecorder->setInputSurface(surface);
}
status_t MediaRecorder::setVideoFrameRate(int frames_per_second)
@@ -476,6 +513,17 @@
return ret;
}
+status_t MediaRecorder::getMetrics(Parcel *reply) {
+
+ ALOGV("getMetrics");
+
+ status_t ret = mMediaRecorder->getMetrics(reply);
+ if (OK != ret) {
+ ALOGE("getMetrics failed: %d", ret);
+ }
+ return ret;
+}
+
status_t MediaRecorder::start()
{
ALOGV("start");
diff --git a/media/libmedia/omx/1.0/WGraphicBufferSource.cpp b/media/libmedia/omx/1.0/WGraphicBufferSource.cpp
new file mode 100644
index 0000000..4c543fa
--- /dev/null
+++ b/media/libmedia/omx/1.0/WGraphicBufferSource.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/omx/1.0/WGraphicBufferSource.h>
+#include <media/omx/1.0/WOmxNode.h>
+#include <media/omx/1.0/Conversion.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace utils {
+
+// LWGraphicBufferSource
+LWGraphicBufferSource::LWGraphicBufferSource(
+ sp<TGraphicBufferSource> const& base) : mBase(base) {
+}
+
+BnStatus LWGraphicBufferSource::configure(
+ const sp<IOMXNode>& omxNode, int32_t dataSpace) {
+ sp<IOmxNode> hOmxNode = omxNode->getHalInterface();
+ return toBinderStatus(mBase->configure(
+ hOmxNode == nullptr ? new TWOmxNode(omxNode) : hOmxNode,
+ toHardwareDataspace(dataSpace)));
+}
+
+BnStatus LWGraphicBufferSource::setSuspend(
+ bool suspend, int64_t timeUs) {
+ return toBinderStatus(mBase->setSuspend(suspend, timeUs));
+}
+
+BnStatus LWGraphicBufferSource::setRepeatPreviousFrameDelayUs(
+ int64_t repeatAfterUs) {
+ return toBinderStatus(mBase->setRepeatPreviousFrameDelayUs(repeatAfterUs));
+}
+
+BnStatus LWGraphicBufferSource::setMaxFps(float maxFps) {
+ return toBinderStatus(mBase->setMaxFps(maxFps));
+}
+
+BnStatus LWGraphicBufferSource::setTimeLapseConfig(
+ double fps, double captureFps) {
+ return toBinderStatus(mBase->setTimeLapseConfig(fps, captureFps));
+}
+
+BnStatus LWGraphicBufferSource::setStartTimeUs(
+ int64_t startTimeUs) {
+ return toBinderStatus(mBase->setStartTimeUs(startTimeUs));
+}
+
+BnStatus LWGraphicBufferSource::setStopTimeUs(
+ int64_t stopTimeUs) {
+ return toBinderStatus(mBase->setStopTimeUs(stopTimeUs));
+}
+
+BnStatus LWGraphicBufferSource::setColorAspects(
+ int32_t aspects) {
+ return toBinderStatus(mBase->setColorAspects(
+ toHardwareColorAspects(aspects)));
+}
+
+BnStatus LWGraphicBufferSource::setTimeOffsetUs(
+ int64_t timeOffsetsUs) {
+ return toBinderStatus(mBase->setTimeOffsetUs(timeOffsetsUs));
+}
+
+BnStatus LWGraphicBufferSource::signalEndOfInputStream() {
+ return toBinderStatus(mBase->signalEndOfInputStream());
+}
+
+} // namespace utils
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
diff --git a/media/libmedia/omx/1.0/WOmx.cpp b/media/libmedia/omx/1.0/WOmx.cpp
new file mode 100644
index 0000000..ce624fa
--- /dev/null
+++ b/media/libmedia/omx/1.0/WOmx.cpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gui/bufferqueue/1.0/H2BGraphicBufferProducer.h>
+#include <media/omx/1.0/WOmx.h>
+#include <media/omx/1.0/WOmxNode.h>
+#include <media/omx/1.0/WOmxObserver.h>
+#include <media/omx/1.0/WGraphicBufferSource.h>
+#include <media/omx/1.0/Conversion.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace utils {
+
+using ::android::hardware::graphics::bufferqueue::V1_0::utils::
+ H2BGraphicBufferProducer;
+typedef ::android::hardware::graphics::bufferqueue::V1_0::IGraphicBufferProducer
+ HGraphicBufferProducer;
+
+// LWOmx
+LWOmx::LWOmx(sp<IOmx> const& base) : mBase(base) {
+}
+
+status_t LWOmx::listNodes(List<IOMX::ComponentInfo>* list) {
+ status_t fnStatus;
+ status_t transStatus = toStatusT(mBase->listNodes(
+ [&fnStatus, list](
+ Status status,
+ hidl_vec<IOmx::ComponentInfo> const& nodeList) {
+ fnStatus = toStatusT(status);
+ list->clear();
+ for (size_t i = 0; i < nodeList.size(); ++i) {
+ auto newInfo = list->insert(
+ list->end(), IOMX::ComponentInfo());
+ convertTo(&*newInfo, nodeList[i]);
+ }
+ }));
+ return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmx::allocateNode(
+ char const* name,
+ sp<IOMXObserver> const& observer,
+ sp<IOMXNode>* omxNode) {
+ status_t fnStatus;
+ status_t transStatus = toStatusT(mBase->allocateNode(
+ name, new TWOmxObserver(observer),
+ [&fnStatus, omxNode](Status status, sp<IOmxNode> const& node) {
+ fnStatus = toStatusT(status);
+ *omxNode = new LWOmxNode(node);
+ }));
+ return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmx::createInputSurface(
+ sp<::android::IGraphicBufferProducer>* bufferProducer,
+ sp<::android::IGraphicBufferSource>* bufferSource) {
+ status_t fnStatus;
+ status_t transStatus = toStatusT(mBase->createInputSurface(
+ [&fnStatus, bufferProducer, bufferSource] (
+ Status status,
+ sp<HGraphicBufferProducer> const& tProducer,
+ sp<IGraphicBufferSource> const& tSource) {
+ fnStatus = toStatusT(status);
+ *bufferProducer = new H2BGraphicBufferProducer(tProducer);
+ *bufferSource = new LWGraphicBufferSource(tSource);
+ }));
+ return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+} // namespace utils
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
diff --git a/media/libmedia/omx/1.0/WOmxBufferSource.cpp b/media/libmedia/omx/1.0/WOmxBufferSource.cpp
new file mode 100644
index 0000000..7cca5bd
--- /dev/null
+++ b/media/libmedia/omx/1.0/WOmxBufferSource.cpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <utils/String8.h>
+
+#include <media/omx/1.0/WOmxBufferSource.h>
+#include <media/omx/1.0/Conversion.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace utils {
+
+// LWOmxBufferSource
+LWOmxBufferSource::LWOmxBufferSource(sp<IOmxBufferSource> const& base) :
+ mBase(base) {
+}
+
+::android::binder::Status LWOmxBufferSource::onOmxExecuting() {
+ return toBinderStatus(mBase->onOmxExecuting());
+}
+
+::android::binder::Status LWOmxBufferSource::onOmxIdle() {
+ return toBinderStatus(mBase->onOmxIdle());
+}
+
+::android::binder::Status LWOmxBufferSource::onOmxLoaded() {
+ return toBinderStatus(mBase->onOmxLoaded());
+}
+
+::android::binder::Status LWOmxBufferSource::onInputBufferAdded(
+ int32_t bufferId) {
+ return toBinderStatus(mBase->onInputBufferAdded(
+ static_cast<uint32_t>(bufferId)));
+}
+
+::android::binder::Status LWOmxBufferSource::onInputBufferEmptied(
+ int32_t bufferId, OMXFenceParcelable const& fenceParcel) {
+ hidl_handle fence;
+ native_handle_t* fenceNh;
+ if (!wrapAs(&fence, &fenceNh, fenceParcel)) {
+ return ::android::binder::Status::fromExceptionCode(
+ ::android::binder::Status::EX_BAD_PARCELABLE,
+ "Invalid fence");
+ }
+ ::android::binder::Status status = toBinderStatus(
+ mBase->onInputBufferEmptied(
+ static_cast<uint32_t>(bufferId), fence));
+ native_handle_close(fenceNh);
+ native_handle_delete(fenceNh);
+ return status;
+}
+
+// TWOmxBufferSource
+TWOmxBufferSource::TWOmxBufferSource(sp<IOMXBufferSource> const& base) :
+ mBase(base) {
+}
+
+Return<void> TWOmxBufferSource::onOmxExecuting() {
+ mBase->onOmxExecuting();
+ return Void();
+}
+
+Return<void> TWOmxBufferSource::onOmxIdle() {
+ mBase->onOmxIdle();
+ return Void();
+}
+
+Return<void> TWOmxBufferSource::onOmxLoaded() {
+ mBase->onOmxLoaded();
+ return Void();
+}
+
+Return<void> TWOmxBufferSource::onInputBufferAdded(uint32_t buffer) {
+ mBase->onInputBufferAdded(int32_t(buffer));
+ return Void();
+}
+
+Return<void> TWOmxBufferSource::onInputBufferEmptied(
+ uint32_t buffer, hidl_handle const& fence) {
+ OMXFenceParcelable fenceParcelable;
+ if (!convertTo(&fenceParcelable, fence)) {
+ return Void();
+ }
+ mBase->onInputBufferEmptied(int32_t(buffer), fenceParcelable);
+ return Void();
+}
+
+} // namespace utils
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
diff --git a/media/libmedia/omx/1.0/WOmxNode.cpp b/media/libmedia/omx/1.0/WOmxNode.cpp
new file mode 100644
index 0000000..0b40e8d
--- /dev/null
+++ b/media/libmedia/omx/1.0/WOmxNode.cpp
@@ -0,0 +1,428 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+
+#include <media/omx/1.0/WOmxNode.h>
+#include <media/omx/1.0/WOmxBufferSource.h>
+#include <media/omx/1.0/Conversion.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace utils {
+
+using ::android::hardware::Void;
+
+// LWOmxNode
+status_t LWOmxNode::freeNode() {
+ return toStatusT(mBase->freeNode());
+}
+
+status_t LWOmxNode::sendCommand(
+ OMX_COMMANDTYPE cmd, OMX_S32 param) {
+ return toStatusT(mBase->sendCommand(
+ toRawCommandType(cmd), param));
+}
+
+status_t LWOmxNode::getParameter(
+ OMX_INDEXTYPE index, void *params, size_t size) {
+ hidl_vec<uint8_t> tParams = inHidlBytes(params, size);
+ status_t fnStatus;
+ status_t transStatus = toStatusT(mBase->getParameter(
+ toRawIndexType(index),
+ tParams,
+ [&fnStatus, params](
+ Status status, hidl_vec<uint8_t> const& outParams) {
+ fnStatus = toStatusT(status);
+ std::copy(
+ outParams.data(),
+ outParams.data() + outParams.size(),
+ static_cast<uint8_t*>(params));
+ }));
+ return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxNode::setParameter(
+ OMX_INDEXTYPE index, const void *params, size_t size) {
+ hidl_vec<uint8_t> tParams = inHidlBytes(params, size);
+ return toStatusT(mBase->setParameter(
+ toRawIndexType(index), tParams));
+}
+
+status_t LWOmxNode::getConfig(
+ OMX_INDEXTYPE index, void *params, size_t size) {
+ hidl_vec<uint8_t> tParams = inHidlBytes(params, size);
+ status_t fnStatus;
+ status_t transStatus = toStatusT(mBase->getConfig(
+ toRawIndexType(index),
+ tParams,
+ [&fnStatus, params, size](
+ Status status, hidl_vec<uint8_t> const& outParams) {
+ fnStatus = toStatusT(status);
+ std::copy(
+ outParams.data(),
+ outParams.data() + size,
+ static_cast<uint8_t*>(params));
+ }));
+ return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxNode::setConfig(
+ OMX_INDEXTYPE index, const void *params, size_t size) {
+ hidl_vec<uint8_t> tParams = inHidlBytes(params, size);
+ return toStatusT(mBase->setConfig(toRawIndexType(index), tParams));
+}
+
+status_t LWOmxNode::setPortMode(
+ OMX_U32 port_index, IOMX::PortMode mode) {
+ return toStatusT(mBase->setPortMode(port_index, toHardwarePortMode(mode)));
+}
+
+status_t LWOmxNode::prepareForAdaptivePlayback(
+ OMX_U32 portIndex, OMX_BOOL enable,
+ OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight) {
+ return toStatusT(mBase->prepareForAdaptivePlayback(
+ portIndex, toRawBool(enable), maxFrameWidth, maxFrameHeight));
+}
+
+status_t LWOmxNode::configureVideoTunnelMode(
+ OMX_U32 portIndex, OMX_BOOL tunneled,
+ OMX_U32 audioHwSync, native_handle_t **sidebandHandle) {
+ status_t fnStatus;
+ status_t transStatus = toStatusT(mBase->configureVideoTunnelMode(
+ portIndex,
+ toRawBool(tunneled),
+ audioHwSync,
+ [&fnStatus, sidebandHandle](
+ Status status, hidl_handle const& outSidebandHandle) {
+ fnStatus = toStatusT(status);
+ *sidebandHandle = outSidebandHandle == nullptr ?
+ nullptr : native_handle_clone(outSidebandHandle);
+ }));
+ return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxNode::getGraphicBufferUsage(
+ OMX_U32 portIndex, OMX_U32* usage) {
+ status_t fnStatus;
+ status_t transStatus = toStatusT(mBase->getGraphicBufferUsage(
+ portIndex,
+ [&fnStatus, usage](
+ Status status, uint32_t outUsage) {
+ fnStatus = toStatusT(status);
+ *usage = outUsage;
+ }));
+ return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxNode::setInputSurface(
+ const sp<IOMXBufferSource> &bufferSource) {
+ return toStatusT(mBase->setInputSurface(
+ new TWOmxBufferSource(bufferSource)));
+}
+
+status_t LWOmxNode::allocateSecureBuffer(
+ OMX_U32 portIndex, size_t size, buffer_id *buffer,
+ void **buffer_data, sp<NativeHandle> *native_handle) {
+ *buffer_data = nullptr;
+ status_t fnStatus;
+ status_t transStatus = toStatusT(mBase->allocateSecureBuffer(
+ portIndex,
+ static_cast<uint64_t>(size),
+ [&fnStatus, buffer, native_handle](
+ Status status,
+ uint32_t outBuffer,
+ hidl_handle const& outNativeHandle) {
+ fnStatus = toStatusT(status);
+ *buffer = outBuffer;
+ *native_handle = NativeHandle::create(
+ native_handle_clone(outNativeHandle), true);
+ }));
+ return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxNode::useBuffer(
+ OMX_U32 portIndex, const OMXBuffer &omxBuffer, buffer_id *buffer) {
+ CodecBuffer codecBuffer;
+ if (!wrapAs(&codecBuffer, omxBuffer)) {
+ return BAD_VALUE;
+ }
+ status_t fnStatus;
+ status_t transStatus = toStatusT(mBase->useBuffer(
+ portIndex,
+ codecBuffer,
+ [&fnStatus, buffer](Status status, uint32_t outBuffer) {
+ fnStatus = toStatusT(status);
+ *buffer = outBuffer;
+ }));
+ return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxNode::freeBuffer(
+ OMX_U32 portIndex, buffer_id buffer) {
+ return toStatusT(mBase->freeBuffer(portIndex, buffer));
+}
+
+status_t LWOmxNode::fillBuffer(
+ buffer_id buffer, const OMXBuffer &omxBuffer, int fenceFd) {
+ CodecBuffer codecBuffer;
+ if (!wrapAs(&codecBuffer, omxBuffer)) {
+ return BAD_VALUE;
+ }
+ native_handle_t* fenceNh = native_handle_create_from_fd(fenceFd);
+ if (!fenceNh) {
+ return NO_MEMORY;
+ }
+ status_t status = toStatusT(mBase->fillBuffer(
+ buffer, codecBuffer, fenceNh));
+ native_handle_close(fenceNh);
+ native_handle_delete(fenceNh);
+ return status;
+}
+
+status_t LWOmxNode::emptyBuffer(
+ buffer_id buffer, const OMXBuffer &omxBuffer,
+ OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
+ CodecBuffer codecBuffer;
+ if (!wrapAs(&codecBuffer, omxBuffer)) {
+ return BAD_VALUE;
+ }
+ native_handle_t* fenceNh = native_handle_create_from_fd(fenceFd);
+ if (!fenceNh) {
+ return NO_MEMORY;
+ }
+ status_t status = toStatusT(mBase->emptyBuffer(
+ buffer,
+ codecBuffer,
+ flags,
+ toRawTicks(timestamp),
+ fenceNh));
+ native_handle_close(fenceNh);
+ native_handle_delete(fenceNh);
+ return status;
+}
+status_t LWOmxNode::getExtensionIndex(
+ const char *parameter_name,
+ OMX_INDEXTYPE *index) {
+ status_t fnStatus;
+ status_t transStatus = toStatusT(mBase->getExtensionIndex(
+ hidl_string(parameter_name),
+ [&fnStatus, index](Status status, uint32_t outIndex) {
+ fnStatus = toStatusT(status);
+ *index = toEnumIndexType(outIndex);
+ }));
+ return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxNode::dispatchMessage(const omx_message &lMsg) {
+ Message tMsg;
+ native_handle_t* nh;
+ if (!wrapAs(&tMsg, &nh, lMsg)) {
+ return NO_MEMORY;
+ }
+ status_t status = toStatusT(mBase->dispatchMessage(tMsg));
+ native_handle_close(nh);
+ native_handle_delete(nh);
+ return status;
+}
+
+// TWOmxNode
+TWOmxNode::TWOmxNode(sp<IOMXNode> const& base) : mBase(base) {
+}
+
+Return<Status> TWOmxNode::freeNode() {
+ return toStatus(mBase->freeNode());
+}
+
+Return<Status> TWOmxNode::sendCommand(uint32_t cmd, int32_t param) {
+ return toStatus(mBase->sendCommand(toEnumCommandType(cmd), param));
+}
+
+Return<void> TWOmxNode::getParameter(
+ uint32_t index, hidl_vec<uint8_t> const& inParams,
+ getParameter_cb _hidl_cb) {
+ hidl_vec<uint8_t> params(inParams);
+ Status status = toStatus(mBase->getParameter(
+ toEnumIndexType(index),
+ static_cast<void*>(params.data()),
+ params.size()));
+ _hidl_cb(status, params);
+ return Void();
+}
+
+Return<Status> TWOmxNode::setParameter(
+ uint32_t index, hidl_vec<uint8_t> const& inParams) {
+ hidl_vec<uint8_t> params(inParams);
+ return toStatus(mBase->setParameter(
+ toEnumIndexType(index),
+ static_cast<void const*>(params.data()),
+ params.size()));
+}
+
+Return<void> TWOmxNode::getConfig(
+ uint32_t index, const hidl_vec<uint8_t>& inConfig,
+ getConfig_cb _hidl_cb) {
+ hidl_vec<uint8_t> config(inConfig);
+ Status status = toStatus(mBase->getConfig(
+ toEnumIndexType(index),
+ static_cast<void*>(config.data()),
+ config.size()));
+ _hidl_cb(status, config);
+ return Void();
+}
+
+Return<Status> TWOmxNode::setConfig(
+ uint32_t index, const hidl_vec<uint8_t>& inConfig) {
+ hidl_vec<uint8_t> config(inConfig);
+ return toStatus(mBase->setConfig(
+ toEnumIndexType(index),
+ static_cast<void const*>(config.data()),
+ config.size()));
+}
+
+Return<Status> TWOmxNode::setPortMode(uint32_t portIndex, PortMode mode) {
+ return toStatus(mBase->setPortMode(portIndex, toIOMXPortMode(mode)));
+}
+
+Return<Status> TWOmxNode::prepareForAdaptivePlayback(
+ uint32_t portIndex, bool enable,
+ uint32_t maxFrameWidth, uint32_t maxFrameHeight) {
+ return toStatus(mBase->prepareForAdaptivePlayback(
+ portIndex,
+ toEnumBool(enable),
+ maxFrameWidth,
+ maxFrameHeight));
+}
+
+Return<void> TWOmxNode::configureVideoTunnelMode(
+ uint32_t portIndex, bool tunneled, uint32_t audioHwSync,
+ configureVideoTunnelMode_cb _hidl_cb) {
+ native_handle_t* sidebandHandle = nullptr;
+ Status status = toStatus(mBase->configureVideoTunnelMode(
+ portIndex,
+ toEnumBool(tunneled),
+ audioHwSync,
+ &sidebandHandle));
+ _hidl_cb(status, hidl_handle(sidebandHandle));
+ return Void();
+}
+
+Return<void> TWOmxNode::getGraphicBufferUsage(
+ uint32_t portIndex, getGraphicBufferUsage_cb _hidl_cb) {
+ OMX_U32 usage;
+ Status status = toStatus(mBase->getGraphicBufferUsage(
+ portIndex, &usage));
+ _hidl_cb(status, usage);
+ return Void();
+}
+
+Return<Status> TWOmxNode::setInputSurface(
+ const sp<IOmxBufferSource>& bufferSource) {
+ return toStatus(mBase->setInputSurface(new LWOmxBufferSource(
+ bufferSource)));
+}
+
+Return<void> TWOmxNode::allocateSecureBuffer(
+ uint32_t portIndex, uint64_t size,
+ allocateSecureBuffer_cb _hidl_cb) {
+ IOMX::buffer_id buffer;
+ void* bufferData;
+ sp<NativeHandle> nativeHandle;
+ Status status = toStatus(mBase->allocateSecureBuffer(
+ portIndex,
+ static_cast<size_t>(size),
+ &buffer,
+ &bufferData,
+ &nativeHandle));
+ _hidl_cb(status, buffer, nativeHandle == nullptr ?
+ nullptr : nativeHandle->handle());
+ return Void();
+}
+
+Return<void> TWOmxNode::useBuffer(
+ uint32_t portIndex, const CodecBuffer& codecBuffer,
+ useBuffer_cb _hidl_cb) {
+ IOMX::buffer_id buffer;
+ OMXBuffer omxBuffer;
+ if (!convertTo(&omxBuffer, codecBuffer)) {
+ _hidl_cb(Status::BAD_VALUE, 0);
+ return Void();
+ }
+ Status status = toStatus(mBase->useBuffer(
+ portIndex, omxBuffer, &buffer));
+ _hidl_cb(status, buffer);
+ return Void();
+}
+
+Return<Status> TWOmxNode::freeBuffer(uint32_t portIndex, uint32_t buffer) {
+ return toStatus(mBase->freeBuffer(portIndex, buffer));
+}
+
+Return<Status> TWOmxNode::fillBuffer(
+ uint32_t buffer, const CodecBuffer& codecBuffer,
+ const hidl_handle& fence) {
+ OMXBuffer omxBuffer;
+ if (!convertTo(&omxBuffer, codecBuffer)) {
+ return Status::BAD_VALUE;
+ }
+ return toStatus(mBase->fillBuffer(
+ buffer,
+ omxBuffer,
+ dup(native_handle_read_fd(fence))));
+}
+
+Return<Status> TWOmxNode::emptyBuffer(
+ uint32_t buffer, const CodecBuffer& codecBuffer, uint32_t flags,
+ uint64_t timestampUs, const hidl_handle& fence) {
+ OMXBuffer omxBuffer;
+ if (!convertTo(&omxBuffer, codecBuffer)) {
+ return Status::BAD_VALUE;
+ }
+ return toStatus(mBase->emptyBuffer(
+ buffer,
+ omxBuffer,
+ flags,
+ toOMXTicks(timestampUs),
+ dup(native_handle_read_fd(fence))));
+}
+
+Return<void> TWOmxNode::getExtensionIndex(
+ const hidl_string& parameterName,
+ getExtensionIndex_cb _hidl_cb) {
+ OMX_INDEXTYPE index;
+ Status status = toStatus(mBase->getExtensionIndex(
+ parameterName.c_str(), &index));
+ _hidl_cb(status, toRawIndexType(index));
+ return Void();
+}
+
+Return<Status> TWOmxNode::dispatchMessage(const Message& tMsg) {
+ omx_message lMsg;
+ if (!convertTo(&lMsg, tMsg)) {
+ return Status::BAD_VALUE;
+ }
+ return toStatus(mBase->dispatchMessage(lMsg));
+}
+
+} // namespace utils
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
diff --git a/media/libmedia/omx/1.0/WOmxObserver.cpp b/media/libmedia/omx/1.0/WOmxObserver.cpp
new file mode 100644
index 0000000..fa0407f
--- /dev/null
+++ b/media/libmedia/omx/1.0/WOmxObserver.cpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "WOmxObserver-utils"
+
+#include <vector>
+
+#include <utils/Log.h>
+#include <cutils/native_handle.h>
+#include <binder/Binder.h>
+
+#include <media/omx/1.0/WOmxObserver.h>
+#include <media/omx/1.0/Conversion.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace utils {
+
+// LWOmxObserver
+LWOmxObserver::LWOmxObserver(sp<IOmxObserver> const& base) : mBase(base) {
+}
+
+void LWOmxObserver::onMessages(std::list<omx_message> const& lMessages) {
+ hidl_vec<Message> tMessages;
+ std::vector<native_handle_t*> handles(lMessages.size());
+ tMessages.resize(lMessages.size());
+ size_t i = 0;
+ for (auto const& message : lMessages) {
+ wrapAs(&tMessages[i], &handles[i], message);
+ ++i;
+ }
+ auto transResult = mBase->onMessages(tMessages);
+ if (!transResult.isOk()) {
+ ALOGE("LWOmxObserver::onMessages - Transaction failed");
+ }
+ for (auto& handle : handles) {
+ native_handle_close(handle);
+ native_handle_delete(handle);
+ }
+}
+
+// TWOmxObserver
+TWOmxObserver::TWOmxObserver(sp<IOMXObserver> const& base) : mBase(base) {
+}
+
+Return<void> TWOmxObserver::onMessages(const hidl_vec<Message>& tMessages) {
+ std::list<omx_message> lMessages;
+ for (size_t i = 0; i < tMessages.size(); ++i) {
+ lMessages.push_back(omx_message{});
+ convertTo(&lMessages.back(), tMessages[i]);
+ }
+ mBase->onMessages(lMessages);
+ return Return<void>();
+}
+
+} // namespace utils
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
diff --git a/media/libmediametrics/Android.bp b/media/libmediametrics/Android.bp
new file mode 100644
index 0000000..15dac59
--- /dev/null
+++ b/media/libmediametrics/Android.bp
@@ -0,0 +1,36 @@
+cc_library_shared {
+ name: "libmediametrics",
+
+ srcs: [
+ "IMediaAnalyticsService.cpp",
+ "MediaAnalyticsItem.cpp",
+ ],
+
+ shared_libs: [
+ "liblog",
+ "libcutils",
+ "libutils",
+ "libbinder",
+ "libstagefright_foundation",
+ "libbase",
+ ],
+
+ export_include_dirs: ["include"],
+
+ cflags: [
+ "-Werror",
+ "-Wno-error=deprecated-declarations",
+ "-Wall",
+ ],
+
+ sanitize: {
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
+ },
+}
diff --git a/media/libmediametrics/IMediaAnalyticsService.cpp b/media/libmediametrics/IMediaAnalyticsService.cpp
new file mode 100644
index 0000000..68bafe1
--- /dev/null
+++ b/media/libmediametrics/IMediaAnalyticsService.cpp
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "MediaAnalytics"
+
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+
+#include <binder/Parcel.h>
+#include <binder/IMemory.h>
+#include <binder/IPCThreadState.h>
+
+#include <utils/Errors.h> // for status_t
+#include <utils/List.h>
+#include <utils/Log.h>
+#include <utils/String8.h>
+
+#include <media/MediaAnalyticsItem.h>
+#include <media/IMediaAnalyticsService.h>
+
+#define DEBUGGING 0
+#define DEBUGGING_FLOW 0
+#define DEBUGGING_RETURNS 0
+
+namespace android {
+
+enum {
+ GENERATE_UNIQUE_SESSIONID = IBinder::FIRST_CALL_TRANSACTION,
+ SUBMIT_ITEM,
+};
+
+class BpMediaAnalyticsService: public BpInterface<IMediaAnalyticsService>
+{
+public:
+ explicit BpMediaAnalyticsService(const sp<IBinder>& impl)
+ : BpInterface<IMediaAnalyticsService>(impl)
+ {
+ }
+
+ virtual MediaAnalyticsItem::SessionID_t generateUniqueSessionID() {
+ Parcel data, reply;
+ status_t err;
+ MediaAnalyticsItem::SessionID_t sessionid =
+ MediaAnalyticsItem::SessionIDInvalid;
+
+ data.writeInterfaceToken(IMediaAnalyticsService::getInterfaceDescriptor());
+ err = remote()->transact(GENERATE_UNIQUE_SESSIONID, data, &reply);
+ if (err != NO_ERROR) {
+ ALOGW("bad response from service");
+ return MediaAnalyticsItem::SessionIDInvalid;
+ }
+ sessionid = reply.readInt64();
+ if (DEBUGGING_RETURNS) {
+ ALOGD("the caller gets a sessionid of %" PRId64 " back", sessionid);
+ }
+ return sessionid;
+ }
+
+ virtual MediaAnalyticsItem::SessionID_t submit(MediaAnalyticsItem *item, bool forcenew)
+ {
+ // have this record submit itself
+ // this will be a binder call with appropriate timing
+ // return value is the uuid that the system generated for it.
+ // the return value 0 and -1 are reserved.
+ // -1 to indicate that there was a problem recording...
+
+ Parcel data, reply;
+ status_t err;
+
+ if (item == NULL) {
+ return MediaAnalyticsItem::SessionIDInvalid;
+ }
+
+ data.writeInterfaceToken(IMediaAnalyticsService::getInterfaceDescriptor());
+ if(DEBUGGING_FLOW) {
+ ALOGD("client offers record: %s", item->toString().c_str());
+ }
+ data.writeBool(forcenew);
+ item->writeToParcel(&data);
+
+ err = remote()->transact(SUBMIT_ITEM, data, &reply);
+ if (err != NO_ERROR) {
+ return MediaAnalyticsItem::SessionIDInvalid;
+ }
+
+ // get an answer out of 'reply'
+ int64_t sessionid = reply.readInt64();
+ if (DEBUGGING_RETURNS) {
+ ALOGD("the caller gets sessionid=%" PRId64 "", sessionid);
+ }
+ return sessionid;
+ }
+
+};
+
+IMPLEMENT_META_INTERFACE(MediaAnalyticsService, "android.media.IMediaAnalyticsService");
+
+// ----------------------------------------------------------------------
+
+status_t BnMediaAnalyticsService::onTransact(
+ uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+
+
+ // get calling pid/tid
+ IPCThreadState *ipc = IPCThreadState::self();
+ int clientPid = ipc->getCallingPid();
+ // permission checking
+
+ if(DEBUGGING_FLOW) {
+ ALOGD("running in service, code %d, pid %d; called from pid %d",
+ code, getpid(), clientPid);
+ }
+
+ switch (code) {
+
+ case GENERATE_UNIQUE_SESSIONID: {
+ CHECK_INTERFACE(IMediaAnalyticsService, data, reply);
+
+ MediaAnalyticsItem::SessionID_t sessionid = generateUniqueSessionID();
+ reply->writeInt64(sessionid);
+
+ return NO_ERROR;
+ } break;
+
+ case SUBMIT_ITEM: {
+ CHECK_INTERFACE(IMediaAnalyticsService, data, reply);
+
+ bool forcenew;
+ MediaAnalyticsItem *item = new MediaAnalyticsItem;
+
+ data.readBool(&forcenew);
+ item->readFromParcel(data);
+
+ item->setPid(clientPid);
+
+ // submit() takes over ownership of 'item'
+ MediaAnalyticsItem::SessionID_t sessionid = submit(item, forcenew);
+ reply->writeInt64(sessionid);
+
+ return NO_ERROR;
+ } break;
+
+ default:
+ return BBinder::onTransact(code, data, reply, flags);
+ }
+}
+
+// ----------------------------------------------------------------------------
+
+} // namespace android
diff --git a/media/libmediametrics/MediaAnalyticsItem.cpp b/media/libmediametrics/MediaAnalyticsItem.cpp
new file mode 100644
index 0000000..43881b3
--- /dev/null
+++ b/media/libmediametrics/MediaAnalyticsItem.cpp
@@ -0,0 +1,886 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#undef LOG_TAG
+#define LOG_TAG "MediaAnalyticsItem"
+
+#include <inttypes.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+
+#include <binder/Parcel.h>
+#include <utils/Errors.h>
+#include <utils/Log.h>
+#include <utils/Mutex.h>
+#include <utils/SortedVector.h>
+#include <utils/threads.h>
+
+#include <media/stagefright/foundation/AString.h>
+
+#include <binder/IServiceManager.h>
+#include <media/IMediaAnalyticsService.h>
+#include <media/MediaAnalyticsItem.h>
+#include <private/android_filesystem_config.h>
+
+namespace android {
+
+#define DEBUG_SERVICEACCESS 0
+#define DEBUG_API 0
+#define DEBUG_ALLOCATIONS 0
+
+// after this many failed attempts, we stop trying [from this process] and just say that
+// the service is off.
+#define SVC_TRIES 2
+
+// the few universal keys we have
+const MediaAnalyticsItem::Key MediaAnalyticsItem::kKeyAny = "any";
+const MediaAnalyticsItem::Key MediaAnalyticsItem::kKeyNone = "none";
+
+const char * const MediaAnalyticsItem::EnabledProperty = "media.metrics.enabled";
+const char * const MediaAnalyticsItem::EnabledPropertyPersist = "persist.media.metrics.enabled";
+const int MediaAnalyticsItem::EnabledProperty_default = 1;
+
+
+// access functions for the class
+MediaAnalyticsItem::MediaAnalyticsItem()
+ : mPid(-1),
+ mUid(-1),
+ mSessionID(MediaAnalyticsItem::SessionIDNone),
+ mTimestamp(0),
+ mFinalized(0),
+ mPropCount(0), mPropSize(0), mProps(NULL)
+{
+ mKey = MediaAnalyticsItem::kKeyNone;
+}
+
+MediaAnalyticsItem::MediaAnalyticsItem(MediaAnalyticsItem::Key key)
+ : mPid(-1),
+ mUid(-1),
+ mSessionID(MediaAnalyticsItem::SessionIDNone),
+ mTimestamp(0),
+ mFinalized(0),
+ mPropCount(0), mPropSize(0), mProps(NULL)
+{
+ if (DEBUG_ALLOCATIONS) {
+ ALOGD("Allocate MediaAnalyticsItem @ %p", this);
+ }
+ mKey = key;
+}
+
+MediaAnalyticsItem::~MediaAnalyticsItem() {
+ if (DEBUG_ALLOCATIONS) {
+ ALOGD("Destroy MediaAnalyticsItem @ %p", this);
+ }
+ clear();
+}
+
+void MediaAnalyticsItem::clear() {
+
+ // clean allocated storage from key
+ mKey.clear();
+
+ // clean various major parameters
+ mSessionID = MediaAnalyticsItem::SessionIDNone;
+
+ // clean attributes
+ // contents of the attributes
+ for (size_t i = 0 ; i < mPropSize; i++ ) {
+ clearProp(&mProps[i]);
+ }
+ // the attribute records themselves
+ if (mProps != NULL) {
+ free(mProps);
+ mProps = NULL;
+ }
+ mPropSize = 0;
+ mPropCount = 0;
+
+ return;
+}
+
+// make a deep copy of myself
+MediaAnalyticsItem *MediaAnalyticsItem::dup() {
+ MediaAnalyticsItem *dst = new MediaAnalyticsItem(this->mKey);
+
+ if (dst != NULL) {
+ // key as part of constructor
+ dst->mPid = this->mPid;
+ dst->mUid = this->mUid;
+ dst->mSessionID = this->mSessionID;
+ dst->mTimestamp = this->mTimestamp;
+ dst->mFinalized = this->mFinalized;
+
+ // properties aka attributes
+ dst->growProps(this->mPropCount);
+ for(size_t i=0;i<mPropCount;i++) {
+ copyProp(&dst->mProps[i], &this->mProps[i]);
+ }
+ dst->mPropCount = this->mPropCount;
+ }
+
+ return dst;
+}
+
+// so clients can send intermediate values to be overlaid later
+MediaAnalyticsItem &MediaAnalyticsItem::setFinalized(bool value) {
+ mFinalized = value;
+ return *this;
+}
+
+bool MediaAnalyticsItem::getFinalized() const {
+ return mFinalized;
+}
+
+MediaAnalyticsItem &MediaAnalyticsItem::setSessionID(MediaAnalyticsItem::SessionID_t id) {
+ mSessionID = id;
+ return *this;
+}
+
+MediaAnalyticsItem::SessionID_t MediaAnalyticsItem::getSessionID() const {
+ return mSessionID;
+}
+
+MediaAnalyticsItem::SessionID_t MediaAnalyticsItem::generateSessionID() {
+
+ if (mSessionID == SessionIDNone) {
+ // get one from the server
+ MediaAnalyticsItem::SessionID_t newid = SessionIDNone;
+ sp<IMediaAnalyticsService> svc = getInstance();
+ if (svc != NULL) {
+ newid = svc->generateUniqueSessionID();
+ }
+ mSessionID = newid;
+ }
+
+ return mSessionID;
+}
+
+MediaAnalyticsItem &MediaAnalyticsItem::clearSessionID() {
+ mSessionID = MediaAnalyticsItem::SessionIDNone;
+ return *this;
+}
+
+MediaAnalyticsItem &MediaAnalyticsItem::setTimestamp(nsecs_t ts) {
+ mTimestamp = ts;
+ return *this;
+}
+
+nsecs_t MediaAnalyticsItem::getTimestamp() const {
+ return mTimestamp;
+}
+
+MediaAnalyticsItem &MediaAnalyticsItem::setPid(pid_t pid) {
+ mPid = pid;
+ return *this;
+}
+
+pid_t MediaAnalyticsItem::getPid() const {
+ return mPid;
+}
+
+MediaAnalyticsItem &MediaAnalyticsItem::setUid(uid_t uid) {
+ mUid = uid;
+ return *this;
+}
+
+uid_t MediaAnalyticsItem::getUid() const {
+ return mUid;
+}
+
+// this key is for the overall record -- "codec", "player", "drm", etc
+MediaAnalyticsItem &MediaAnalyticsItem::setKey(MediaAnalyticsItem::Key key) {
+ mKey = key;
+ return *this;
+}
+
+MediaAnalyticsItem::Key MediaAnalyticsItem::getKey() {
+ return mKey;
+}
+
+// number of attributes we have in this record
+int32_t MediaAnalyticsItem::count() const {
+ return mPropCount;
+}
+
+// find the proper entry in the list
+size_t MediaAnalyticsItem::findPropIndex(const char *name, size_t len)
+{
+ size_t i = 0;
+ for (; i < mPropCount; i++) {
+ Prop *prop = &mProps[i];
+ if (prop->mNameLen != len) {
+ continue;
+ }
+ if (memcmp(name, prop->mName, len) == 0) {
+ break;
+ }
+ }
+ return i;
+}
+
+MediaAnalyticsItem::Prop *MediaAnalyticsItem::findProp(const char *name) {
+ size_t len = strlen(name);
+ size_t i = findPropIndex(name, len);
+ if (i < mPropCount) {
+ return &mProps[i];
+ }
+ return NULL;
+}
+
+void MediaAnalyticsItem::Prop::setName(const char *name, size_t len) {
+ mNameLen = len;
+ mName = (const char *) malloc(len+1);
+ memcpy ((void *)mName, name, len+1);
+}
+
+// used only as part of a storing operation
+MediaAnalyticsItem::Prop *MediaAnalyticsItem::allocateProp(const char *name) {
+ size_t len = strlen(name);
+ size_t i = findPropIndex(name, len);
+ Prop *prop;
+
+ if (i < mPropCount) {
+ prop = &mProps[i];
+ } else {
+ if (i == mPropSize) {
+ growProps();
+ // XXX: verify success
+ }
+ i = mPropCount++;
+ prop = &mProps[i];
+ prop->setName(name, len);
+ }
+
+ return prop;
+}
+
+// set the values
+void MediaAnalyticsItem::setInt32(MediaAnalyticsItem::Attr name, int32_t value) {
+ Prop *prop = allocateProp(name);
+ prop->mType = kTypeInt32;
+ prop->u.int32Value = value;
+}
+
+void MediaAnalyticsItem::setInt64(MediaAnalyticsItem::Attr name, int64_t value) {
+ Prop *prop = allocateProp(name);
+ prop->mType = kTypeInt64;
+ prop->u.int64Value = value;
+}
+
+void MediaAnalyticsItem::setDouble(MediaAnalyticsItem::Attr name, double value) {
+ Prop *prop = allocateProp(name);
+ prop->mType = kTypeDouble;
+ prop->u.doubleValue = value;
+}
+
+void MediaAnalyticsItem::setCString(MediaAnalyticsItem::Attr name, const char *value) {
+
+ Prop *prop = allocateProp(name);
+ // any old value will be gone
+ prop->mType = kTypeCString;
+ prop->u.CStringValue = strdup(value);
+}
+
+void MediaAnalyticsItem::setRate(MediaAnalyticsItem::Attr name, int64_t count, int64_t duration) {
+ Prop *prop = allocateProp(name);
+ prop->mType = kTypeRate;
+ prop->u.rate.count = count;
+ prop->u.rate.duration = duration;
+}
+
+
+// find/add/set fused into a single operation
+void MediaAnalyticsItem::addInt32(MediaAnalyticsItem::Attr name, int32_t value) {
+ Prop *prop = allocateProp(name);
+ switch (prop->mType) {
+ case kTypeInt32:
+ prop->u.int32Value += value;
+ break;
+ default:
+ clearPropValue(prop);
+ prop->mType = kTypeInt32;
+ prop->u.int32Value = value;
+ break;
+ }
+}
+
+void MediaAnalyticsItem::addInt64(MediaAnalyticsItem::Attr name, int64_t value) {
+ Prop *prop = allocateProp(name);
+ switch (prop->mType) {
+ case kTypeInt64:
+ prop->u.int64Value += value;
+ break;
+ default:
+ clearPropValue(prop);
+ prop->mType = kTypeInt64;
+ prop->u.int64Value = value;
+ break;
+ }
+}
+
+void MediaAnalyticsItem::addRate(MediaAnalyticsItem::Attr name, int64_t count, int64_t duration) {
+ Prop *prop = allocateProp(name);
+ switch (prop->mType) {
+ case kTypeRate:
+ prop->u.rate.count += count;
+ prop->u.rate.duration += duration;
+ break;
+ default:
+ clearPropValue(prop);
+ prop->mType = kTypeRate;
+ prop->u.rate.count = count;
+ prop->u.rate.duration = duration;
+ break;
+ }
+}
+
+void MediaAnalyticsItem::addDouble(MediaAnalyticsItem::Attr name, double value) {
+ Prop *prop = allocateProp(name);
+ switch (prop->mType) {
+ case kTypeDouble:
+ prop->u.doubleValue += value;
+ break;
+ default:
+ clearPropValue(prop);
+ prop->mType = kTypeDouble;
+ prop->u.doubleValue = value;
+ break;
+ }
+}
+
+// find & extract values
+bool MediaAnalyticsItem::getInt32(MediaAnalyticsItem::Attr name, int32_t *value) {
+ Prop *prop = findProp(name);
+ if (prop == NULL || prop->mType != kTypeInt32) {
+ return false;
+ }
+ if (value != NULL) {
+ *value = prop->u.int32Value;
+ }
+ return true;
+}
+
+bool MediaAnalyticsItem::getInt64(MediaAnalyticsItem::Attr name, int64_t *value) {
+ Prop *prop = findProp(name);
+ if (prop == NULL || prop->mType != kTypeInt64) {
+ return false;
+ }
+ if (value != NULL) {
+ *value = prop->u.int64Value;
+ }
+ return true;
+}
+
+bool MediaAnalyticsItem::getRate(MediaAnalyticsItem::Attr name, int64_t *count, int64_t *duration, double *rate) {
+ Prop *prop = findProp(name);
+ if (prop == NULL || prop->mType != kTypeRate) {
+ return false;
+ }
+ if (count != NULL) {
+ *count = prop->u.rate.count;
+ }
+ if (duration != NULL) {
+ *duration = prop->u.rate.duration;
+ }
+ if (rate != NULL) {
+ double r = 0.0;
+ if (prop->u.rate.duration != 0) {
+ r = prop->u.rate.count / (double) prop->u.rate.duration;
+ }
+ *rate = r;
+ }
+ return true;
+}
+
+bool MediaAnalyticsItem::getDouble(MediaAnalyticsItem::Attr name, double *value) {
+ Prop *prop = findProp(name);
+ if (prop == NULL || prop->mType != kTypeDouble) {
+ return false;
+ }
+ if (value != NULL) {
+ *value = prop->u.doubleValue;
+ }
+ return true;
+}
+
+// caller responsible for the returned string
+bool MediaAnalyticsItem::getCString(MediaAnalyticsItem::Attr name, char **value) {
+ Prop *prop = findProp(name);
+    if (prop == NULL || prop->mType != kTypeCString) {
+ return false;
+ }
+ if (value != NULL) {
+ *value = strdup(prop->u.CStringValue);
+ }
+ return true;
+}
+
+// remove indicated keys and their values
+// return value is # keys removed
+int32_t MediaAnalyticsItem::filter(int n, MediaAnalyticsItem::Attr attrs[]) {
+ int zapped = 0;
+ if (attrs == NULL || n <= 0) {
+ return -1;
+ }
+ for (ssize_t i = 0 ; i < n ; i++) {
+ const char *name = attrs[i];
+ size_t len = strlen(name);
+ size_t j = findPropIndex(name, len);
+ if (j >= mPropCount) {
+ // not there
+ continue;
+ } else if (j+1 == mPropCount) {
+ // last one, shorten
+ zapped++;
+ clearProp(&mProps[j]);
+ mPropCount--;
+ } else {
+ // in the middle, bring last one down and shorten
+ zapped++;
+ clearProp(&mProps[j]);
+ mProps[j] = mProps[mPropCount-1];
+ mPropCount--;
+ }
+ }
+ return zapped;
+}
+
+// remove any keys NOT in the provided list
+// return value is # keys removed
+int32_t MediaAnalyticsItem::filterNot(int n, MediaAnalyticsItem::Attr attrs[]) {
+ int zapped = 0;
+ if (attrs == NULL || n <= 0) {
+ return -1;
+ }
+ for (ssize_t i = mPropCount-1 ; i >=0 ; i--) {
+ Prop *prop = &mProps[i];
+ for (ssize_t j = 0; j < n ; j++) {
+ if (strcmp(prop->mName, attrs[j]) == 0) {
+ clearProp(prop);
+ zapped++;
+ if (i != (ssize_t)(mPropCount-1)) {
+ *prop = mProps[mPropCount-1];
+ }
+ initProp(&mProps[mPropCount-1]);
+ mPropCount--;
+ break;
+ }
+ }
+ }
+ return zapped;
+}
+
+// remove a single key
+// return value is 0 (not found) or 1 (found and removed)
+int32_t MediaAnalyticsItem::filter(MediaAnalyticsItem::Attr name) {
+ return filter(1, &name);
+}
+
+// handle individual items/properties stored within the class
+//
+
+void MediaAnalyticsItem::initProp(Prop *prop) {
+ if (prop != NULL) {
+ prop->mName = NULL;
+ prop->mNameLen = 0;
+
+ prop->mType = kTypeNone;
+ }
+}
+
+void MediaAnalyticsItem::clearProp(Prop *prop)
+{
+ if (prop != NULL) {
+ if (prop->mName != NULL) {
+ free((void *)prop->mName);
+ prop->mName = NULL;
+ prop->mNameLen = 0;
+ }
+
+ clearPropValue(prop);
+ }
+}
+
+void MediaAnalyticsItem::clearPropValue(Prop *prop)
+{
+ if (prop != NULL) {
+ if (prop->mType == kTypeCString && prop->u.CStringValue != NULL) {
+ free(prop->u.CStringValue);
+ prop->u.CStringValue = NULL;
+ }
+ prop->mType = kTypeNone;
+ }
+}
+
+void MediaAnalyticsItem::copyProp(Prop *dst, const Prop *src)
+{
+ // get rid of any pointers in the dst
+ clearProp(dst);
+
+ *dst = *src;
+
+ // fix any pointers that we blindly copied, so we have our own copies
+ if (dst->mName) {
+ void *p = malloc(dst->mNameLen + 1);
+ memcpy (p, src->mName, dst->mNameLen + 1);
+ dst->mName = (const char *) p;
+ }
+ if (dst->mType == kTypeCString) {
+ dst->u.CStringValue = strdup(src->u.CStringValue);
+ }
+}
+
+void MediaAnalyticsItem::growProps(int increment)
+{
+ if (increment <= 0) {
+ increment = kGrowProps;
+ }
+ int nsize = mPropSize + increment;
+ Prop *ni = (Prop *)realloc(mProps, sizeof(Prop) * nsize);
+
+ if (ni != NULL) {
+ for (int i = mPropSize; i < nsize; i++) {
+ initProp(&ni[i]);
+ }
+ mProps = ni;
+ mPropSize = nsize;
+ }
+}
+
+// Parcel / serialize things for binder calls
+//
+
+int32_t MediaAnalyticsItem::readFromParcel(const Parcel& data) {
+ // into 'this' object
+ // .. we make a copy of the string to put away.
+ mKey = data.readCString();
+ mSessionID = data.readInt64();
+ mFinalized = data.readInt32();
+ mTimestamp = data.readInt64();
+
+ int count = data.readInt32();
+ for (int i = 0; i < count ; i++) {
+ MediaAnalyticsItem::Attr attr = data.readCString();
+ int32_t ztype = data.readInt32();
+ switch (ztype) {
+ case MediaAnalyticsItem::kTypeInt32:
+ setInt32(attr, data.readInt32());
+ break;
+ case MediaAnalyticsItem::kTypeInt64:
+ setInt64(attr, data.readInt64());
+ break;
+ case MediaAnalyticsItem::kTypeDouble:
+ setDouble(attr, data.readDouble());
+ break;
+ case MediaAnalyticsItem::kTypeCString:
+ setCString(attr, data.readCString());
+ break;
+ case MediaAnalyticsItem::kTypeRate:
+ {
+ int64_t count = data.readInt64();
+ int64_t duration = data.readInt64();
+ setRate(attr, count, duration);
+ }
+ break;
+ default:
+ ALOGE("reading bad item type: %d, idx %d",
+ ztype, i);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int32_t MediaAnalyticsItem::writeToParcel(Parcel *data) {
+ if (data == NULL) return -1;
+
+
+ data->writeCString(mKey.c_str());
+ data->writeInt64(mSessionID);
+ data->writeInt32(mFinalized);
+ data->writeInt64(mTimestamp);
+
+ // set of items
+ int count = mPropCount;
+ data->writeInt32(count);
+ for (int i = 0 ; i < count; i++ ) {
+ Prop *prop = &mProps[i];
+ data->writeCString(prop->mName);
+ data->writeInt32(prop->mType);
+ switch (prop->mType) {
+ case MediaAnalyticsItem::kTypeInt32:
+ data->writeInt32(prop->u.int32Value);
+ break;
+ case MediaAnalyticsItem::kTypeInt64:
+ data->writeInt64(prop->u.int64Value);
+ break;
+ case MediaAnalyticsItem::kTypeDouble:
+ data->writeDouble(prop->u.doubleValue);
+ break;
+ case MediaAnalyticsItem::kTypeRate:
+ data->writeInt64(prop->u.rate.count);
+ data->writeInt64(prop->u.rate.duration);
+ break;
+ case MediaAnalyticsItem::kTypeCString:
+ data->writeCString(prop->u.CStringValue);
+ break;
+ default:
+ ALOGE("found bad Prop type: %d, idx %d, name %s",
+ prop->mType, i, prop->mName);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+
+AString MediaAnalyticsItem::toString() {
+
+ AString result = "(";
+ char buffer[512];
+
+ // same order as we spill into the parcel, although not required
+ // key+session are our primary matching criteria
+ //RBE ALOGD("mKey.c_str");
+ result.append(mKey.c_str());
+ //RBE ALOGD("post-mKey.c_str");
+ result.append(":");
+ snprintf(buffer, sizeof(buffer), "%" PRId64 ":", mSessionID);
+ result.append(buffer);
+
+ // we need these internally, but don't want to upload them
+ snprintf(buffer, sizeof(buffer), "%d:%d", mUid, mPid);
+ result.append(buffer);
+
+ snprintf(buffer, sizeof(buffer), "%d:", mFinalized);
+ result.append(buffer);
+ snprintf(buffer, sizeof(buffer), "%" PRId64 ":", mTimestamp);
+ result.append(buffer);
+
+ // set of items
+ int count = mPropCount;
+ snprintf(buffer, sizeof(buffer), "%d:", count);
+ result.append(buffer);
+ for (int i = 0 ; i < count; i++ ) {
+ Prop *prop = &mProps[i];
+ switch (prop->mType) {
+ case MediaAnalyticsItem::kTypeInt32:
+ snprintf(buffer,sizeof(buffer),
+ "%s=%d:", prop->mName, prop->u.int32Value);
+ break;
+ case MediaAnalyticsItem::kTypeInt64:
+ snprintf(buffer,sizeof(buffer),
+ "%s=%" PRId64 ":", prop->mName, prop->u.int64Value);
+ break;
+ case MediaAnalyticsItem::kTypeDouble:
+ snprintf(buffer,sizeof(buffer),
+ "%s=%e:", prop->mName, prop->u.doubleValue);
+ break;
+ case MediaAnalyticsItem::kTypeRate:
+ snprintf(buffer,sizeof(buffer),
+ "%s=%" PRId64 "/%" PRId64 ":", prop->mName,
+ prop->u.rate.count, prop->u.rate.duration);
+ break;
+ case MediaAnalyticsItem::kTypeCString:
+ snprintf(buffer,sizeof(buffer), "%s=", prop->mName);
+ result.append(buffer);
+ // XXX: sanitize string for ':' '='
+ result.append(prop->u.CStringValue);
+ buffer[0] = ':';
+ buffer[1] = '\0';
+ break;
+ default:
+ ALOGE("to_String bad item type: %d for %s",
+ prop->mType, prop->mName);
+ break;
+ }
+ result.append(buffer);
+ }
+
+ result.append(")");
+
+ return result;
+}
+
+// for the lazy, we offer methods that finds the service and
+// calls the appropriate daemon
+bool MediaAnalyticsItem::selfrecord() {
+ return selfrecord(false);
+}
+
+bool MediaAnalyticsItem::selfrecord(bool forcenew) {
+
+ if (DEBUG_API) {
+ AString p = this->toString();
+ ALOGD("selfrecord of: %s [forcenew=%d]", p.c_str(), forcenew);
+ }
+
+ sp<IMediaAnalyticsService> svc = getInstance();
+
+ if (svc != NULL) {
+ svc->submit(this, forcenew);
+ return true;
+ } else {
+ AString p = this->toString();
+ ALOGD("Unable to record: %s [forcenew=%d]", p.c_str(), forcenew);
+ return false;
+ }
+}
+
+// get a connection we can reuse for most of our lifetime
+// static
+sp<IMediaAnalyticsService> MediaAnalyticsItem::sAnalyticsService;
+static Mutex sInitMutex;
+
+//static
+bool MediaAnalyticsItem::isEnabled() {
+ int enabled = property_get_int32(MediaAnalyticsItem::EnabledProperty, -1);
+
+ if (enabled == -1) {
+ enabled = property_get_int32(MediaAnalyticsItem::EnabledPropertyPersist, -1);
+ }
+ if (enabled == -1) {
+ enabled = MediaAnalyticsItem::EnabledProperty_default;
+ }
+ if (enabled <= 0) {
+ return false;
+ }
+ return true;
+}
+
+//static
+sp<IMediaAnalyticsService> MediaAnalyticsItem::getInstance() {
+ static const char *servicename = "media.metrics";
+ static int tries_remaining = SVC_TRIES;
+ int enabled = isEnabled();
+
+ if (enabled == false) {
+ if (DEBUG_SERVICEACCESS) {
+ ALOGD("disabled");
+ }
+ return NULL;
+ }
+
+ // completely skip logging from certain UIDs. We do this here
+ // to avoid the multi-second timeouts while we learn that
+ // sepolicy will not let us find the service.
+ // We do this only for a select set of UIDs
+ // The sepolicy protection is still in place, we just want a faster
+ // response from this specific, small set of uids.
+ {
+ uid_t uid = getuid();
+ switch (uid) {
+ case AID_RADIO: // telephony subsystem, RIL
+ return NULL;
+ break;
+ default:
+ // let sepolicy deny access if appropriate
+ break;
+ }
+ }
+
+ {
+ Mutex::Autolock _l(sInitMutex);
+ const char *badness = "";
+
+ // think of tries_remaining as telling us whether service==NULL because
+ // (1) we haven't tried to initialize it yet
+ // (2) we've tried to initialize it, but failed.
+ if (sAnalyticsService == NULL && tries_remaining > 0) {
+ sp<IServiceManager> sm = defaultServiceManager();
+ if (sm != NULL) {
+ sp<IBinder> binder = sm->getService(String16(servicename));
+ if (binder != NULL) {
+ sAnalyticsService = interface_cast<IMediaAnalyticsService>(binder);
+ } else {
+ badness = "did not find service";
+ }
+ } else {
+ badness = "No Service Manager access";
+ }
+
+ if (sAnalyticsService == NULL) {
+ if (tries_remaining > 0) {
+ tries_remaining--;
+ }
+ if (DEBUG_SERVICEACCESS) {
+ ALOGD("Unable to bind to service %s: %s", servicename, badness);
+ }
+ }
+ }
+
+ return sAnalyticsService;
+ }
+}
+
+
+// merge the info from 'incoming' into this record.
+// we finish with a union of this+incoming and special handling for collisions
+bool MediaAnalyticsItem::merge(MediaAnalyticsItem *incoming) {
+
+ // if I don't have key or session id, take them from incoming
+ // 'this' should never be missing both of them...
+ if (mKey.empty()) {
+ mKey = incoming->mKey;
+ } else if (mSessionID == 0) {
+ mSessionID = incoming->mSessionID;
+ }
+
+ // we always take the more recent 'finalized' value
+ setFinalized(incoming->getFinalized());
+
+ // for each attribute from 'incoming', resolve appropriately
+ int nattr = incoming->mPropCount;
+ for (int i = 0 ; i < nattr; i++ ) {
+ Prop *iprop = &incoming->mProps[i];
+ Prop *oprop = findProp(iprop->mName);
+ const char *p = iprop->mName;
+ size_t len = strlen(p);
+ char semantic = p[len-1];
+
+ if (oprop == NULL) {
+ // no oprop, so we insert the new one
+ oprop = allocateProp(p);
+ copyProp(oprop, iprop);
+ } else {
+ // merge iprop into oprop
+ switch (semantic) {
+ case '<': // first aka keep old)
+ /* nop */
+ break;
+
+ default: // default is 'last'
+ case '>': // last (aka keep new)
+ copyProp(oprop, iprop);
+ break;
+
+ case '+': /* sum */
+ // XXX validate numeric types, sum in place
+ break;
+
+ }
+ }
+ }
+
+ // not sure when we'd return false...
+ return true;
+}
+
+} // namespace android
+
diff --git a/media/libmediametrics/include/IMediaAnalyticsService.h b/media/libmediametrics/include/IMediaAnalyticsService.h
new file mode 100644
index 0000000..f635e94
--- /dev/null
+++ b/media/libmediametrics/include/IMediaAnalyticsService.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_IMEDIAANALYTICSSERVICE_H
+#define ANDROID_IMEDIAANALYTICSSERVICE_H
+
+#include <utils/String8.h>
+#include <binder/IInterface.h>
+#include <binder/Parcel.h>
+
+#include <sys/types.h>
+#include <utils/Errors.h>
+#include <utils/Log.h>
+#include <utils/RefBase.h>
+#include <utils/List.h>
+
+#include <binder/IServiceManager.h>
+
+#include <media/MediaAnalyticsItem.h>
+// nope...#include <media/MediaAnalytics.h>
+
+namespace android {
+
+class IMediaAnalyticsService: public IInterface
+{
+public:
+ DECLARE_META_INTERFACE(MediaAnalyticsService);
+
+ // generate a unique sessionID to use across multiple requests
+ // 'unique' is within this device, since last reboot
+ virtual MediaAnalyticsItem::SessionID_t generateUniqueSessionID() = 0;
+
+ // submit the indicated record to the mediaanalytics service, where
+ // it will be merged (if appropriate) with incomplete records that
+ // share the same key and sessionid.
+ // 'forcenew' marks any matching incomplete record as complete before
+ // inserting this new record.
+ // returns the sessionID associated with that item.
+ // caller continues to own the passed item
+ virtual MediaAnalyticsItem::SessionID_t submit(MediaAnalyticsItem *item, bool forcenew) = 0;
+
+};
+
+// ----------------------------------------------------------------------------
+
+class BnMediaAnalyticsService: public BnInterface<IMediaAnalyticsService>
+{
+public:
+ virtual status_t onTransact( uint32_t code,
+ const Parcel& data,
+ Parcel* reply,
+ uint32_t flags = 0);
+};
+
+}; // namespace android
+
+#endif // ANDROID_IMEDIAANALYTICSSERVICE_H
diff --git a/media/libmediametrics/include/MediaAnalyticsItem.h b/media/libmediametrics/include/MediaAnalyticsItem.h
new file mode 100644
index 0000000..dc501b2
--- /dev/null
+++ b/media/libmediametrics/include/MediaAnalyticsItem.h
@@ -0,0 +1,239 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_MEDIAANALYTICSITEM_H
+#define ANDROID_MEDIA_MEDIAANALYTICSITEM_H
+
+#include <cutils/properties.h>
+#include <sys/types.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/RefBase.h>
+#include <utils/StrongPointer.h>
+#include <utils/Timers.h>
+
+#include <media/stagefright/foundation/AString.h>
+
+namespace android {
+
+
+
+class IMediaAnalyticsService;
+
+// the class interface
+//
+
+class MediaAnalyticsItem {
+
+ friend class MediaAnalyticsService;
+ friend class IMediaAnalyticsService;
+ friend class MediaMetricsJNI;
+ friend class MetricsSummarizer;
+
+ public:
+
+ enum Type {
+ kTypeNone = 0,
+ kTypeInt32 = 1,
+ kTypeInt64 = 2,
+ kTypeDouble = 3,
+ kTypeCString = 4,
+ kTypeRate = 5,
+ };
+
+ // sessionid
+ // unique within device, within boot,
+ typedef int64_t SessionID_t;
+ static constexpr SessionID_t SessionIDInvalid = -1;
+ static constexpr SessionID_t SessionIDNone = 0;
+
+    // Key: the record discriminator
+ // values for the record discriminator
+ // values can be "component/component"
+ // basic values: "video", "audio", "drm"
+ // XXX: need to better define the format
+ typedef AString Key;
+ static const Key kKeyNone; // ""
+ static const Key kKeyAny; // "*"
+
+ // Attr: names for attributes within a record
+ // format "prop1" or "prop/subprop"
+ // XXX: need to better define the format
+ typedef const char *Attr;
+
+
+ public:
+
+ // access functions for the class
+ MediaAnalyticsItem();
+ MediaAnalyticsItem(Key);
+ ~MediaAnalyticsItem();
+
+ // so clients can send intermediate values to be overlaid later
+ MediaAnalyticsItem &setFinalized(bool);
+ bool getFinalized() const;
+
+ // SessionID ties multiple submissions for same key together
+ // so that if video "height" and "width" are known at one point
+    // and "framerate" is only known later, they can be brought
+ // together.
+ MediaAnalyticsItem &setSessionID(SessionID_t);
+ MediaAnalyticsItem &clearSessionID();
+ SessionID_t getSessionID() const;
+ // generates and stores a new ID iff mSessionID == SessionIDNone
+ SessionID_t generateSessionID();
+
+ // reset all contents, discarding any extra data
+ void clear();
+ MediaAnalyticsItem *dup();
+
+ // set the key discriminator for the record.
+ // most often initialized as part of the constructor
+ MediaAnalyticsItem &setKey(MediaAnalyticsItem::Key);
+ MediaAnalyticsItem::Key getKey();
+
+ // # of attributes in the record
+ int32_t count() const;
+
+ // set values appropriately
+ void setInt32(Attr, int32_t value);
+ void setInt64(Attr, int64_t value);
+ void setDouble(Attr, double value);
+ void setRate(Attr, int64_t count, int64_t duration);
+ void setCString(Attr, const char *value);
+
+ // fused get/add/set; if attr wasn't there, it's a simple set.
+ // type-mismatch counts as "wasn't there".
+ void addInt32(Attr, int32_t value);
+ void addInt64(Attr, int64_t value);
+ void addDouble(Attr, double value);
+ void addRate(Attr, int64_t count, int64_t duration);
+
+ // find & extract values
+ // return indicates whether attr exists (and thus value filled in)
+ // NULL parameter value suppresses storage of value.
+ bool getInt32(Attr, int32_t *value);
+ bool getInt64(Attr, int64_t *value);
+ bool getDouble(Attr, double *value);
+ bool getRate(Attr, int64_t *count, int64_t *duration, double *rate);
+ // Caller owns the returned string
+ bool getCString(Attr, char **value);
+
+ // parameter indicates whether to close any existing open
+ // record with same key before establishing a new record
+ // caller retains ownership of 'this'.
+ bool selfrecord(bool);
+ bool selfrecord();
+
+ // remove indicated attributes and their values
+ // filterNot() could also be called keepOnly()
+ // return value is # attributes removed
+ // XXX: perhaps 'remove' instead of 'filter'
+ // XXX: filterNot would become 'keep'
+ int32_t filter(int count, Attr attrs[]);
+ int32_t filterNot(int count, Attr attrs[]);
+ int32_t filter(Attr attr);
+
+ // below here are used on server side or to talk to server
+ // clients need not worry about these.
+
+ // timestamp, pid, and uid only used on server side
+ // timestamp is in 'nanoseconds, unix time'
+ MediaAnalyticsItem &setTimestamp(nsecs_t);
+ nsecs_t getTimestamp() const;
+
+ MediaAnalyticsItem &setPid(pid_t);
+ pid_t getPid() const;
+
+ MediaAnalyticsItem &setUid(uid_t);
+ uid_t getUid() const;
+
+ // our serialization code for binder calls
+ int32_t writeToParcel(Parcel *);
+ int32_t readFromParcel(const Parcel&);
+
+ AString toString();
+
+ // are we collecting analytics data
+ static bool isEnabled();
+
+ protected:
+
+ // merge fields from arg into this
+ // with rules for first/last/add, etc
+ // XXX: document semantics and how they are indicated
+ // caller continues to own 'incoming'
+ bool merge(MediaAnalyticsItem *incoming);
+
+ // enabled 1, disabled 0
+ static const char * const EnabledProperty;
+ static const char * const EnabledPropertyPersist;
+ static const int EnabledProperty_default;
+
+ private:
+
+ // to help validate that A doesn't mess with B's records
+ pid_t mPid;
+ uid_t mUid;
+
+ // let's reuse a binder connection
+ static sp<IMediaAnalyticsService> sAnalyticsService;
+ static sp<IMediaAnalyticsService> getInstance();
+
+ // tracking information
+ SessionID_t mSessionID; // grouping similar records
+ nsecs_t mTimestamp; // ns, system_time_monotonic
+
+ // will this record accept further updates
+ bool mFinalized;
+
+ Key mKey;
+
+ struct Prop {
+
+ Type mType;
+ const char *mName;
+ size_t mNameLen; // the strlen(), doesn't include the null
+ union {
+ int32_t int32Value;
+ int64_t int64Value;
+ double doubleValue;
+ char *CStringValue;
+ struct { int64_t count, duration; } rate;
+ } u;
+ void setName(const char *name, size_t len);
+ };
+
+ void initProp(Prop *item);
+ void clearProp(Prop *item);
+ void clearPropValue(Prop *item);
+ void copyProp(Prop *dst, const Prop *src);
+ enum {
+ kGrowProps = 10
+ };
+ void growProps(int increment = kGrowProps);
+ size_t findPropIndex(const char *name, size_t len);
+ Prop *findProp(const char *name);
+ Prop *allocateProp(const char *name);
+
+ size_t mPropCount;
+ size_t mPropSize;
+ Prop *mProps;
+};
+
+} // namespace android
+
+#endif
diff --git a/media/libmediaplayerservice/ActivityManager.cpp b/media/libmediaplayerservice/ActivityManager.cpp
index 60a209f..438d422 100644
--- a/media/libmediaplayerservice/ActivityManager.cpp
+++ b/media/libmediaplayerservice/ActivityManager.cpp
@@ -14,18 +14,14 @@
* limitations under the License.
*/
-#include <unistd.h>
+#include <binder/IActivityManager.h>
#include <binder/IBinder.h>
#include <binder/IServiceManager.h>
-#include <binder/Parcel.h>
-#include <utils/String8.h>
#include "ActivityManager.h"
namespace android {
-const uint32_t OPEN_CONTENT_URI_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION + 4;
-
// Perform ContentProvider.openFile() on the given URI, returning
// the resulting native file descriptor. Returns < 0 on error.
int openContentProviderFile(const String16& uri)
@@ -33,26 +29,10 @@
int fd = -1;
sp<IServiceManager> sm = defaultServiceManager();
- sp<IBinder> am = sm->getService(String16("activity"));
+ sp<IBinder> binder = sm->getService(String16("activity"));
+ sp<IActivityManager> am = interface_cast<IActivityManager>(binder);
if (am != NULL) {
- Parcel data, reply;
- data.writeInterfaceToken(String16("android.app.IActivityManager"));
- data.writeString16(uri);
- status_t ret = am->transact(OPEN_CONTENT_URI_TRANSACTION, data, &reply);
- if (ret == NO_ERROR) {
- int32_t exceptionCode = reply.readExceptionCode();
- if (!exceptionCode) {
- // Success is indicated here by a nonzero int followed by the fd;
- // failure by a zero int with no data following.
- if (reply.readInt32() != 0) {
- fd = dup(reply.readFileDescriptor());
- }
- } else {
- // An exception was thrown back; fall through to return failure
- ALOGD("openContentUri(%s) caught exception %d\n",
- String8(uri).string(), exceptionCode);
- }
- }
+ fd = am->openContentUri(uri);
}
return fd;
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index 2e6d0f1..1fc74a9 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -25,8 +25,10 @@
liblog \
libdl \
libgui \
- libmedia \
libaudioclient \
+ libmedia \
+ libmediametrics \
+ libmediadrm \
libmediautils \
libmemunreachable \
libstagefright \
@@ -35,6 +37,9 @@
libstagefright_omx \
libstagefright_wfd \
libutils \
+ libnativewindow \
+ libhidlbase \
+ android.hardware.media.omx@1.0 \
LOCAL_STATIC_LIBRARIES := \
libstagefright_nuplayer \
@@ -48,7 +53,7 @@
frameworks/av/media/libstagefright/rtsp \
frameworks/av/media/libstagefright/wifi-display \
frameworks/av/media/libstagefright/webm \
- frameworks/av/include/media \
+ $(LOCAL_PATH)/include/media \
frameworks/av/include/camera \
frameworks/native/include/media/openmax \
frameworks/native/include/media/hardware \
@@ -60,6 +65,9 @@
LOCAL_32_BIT_ONLY := true
+LOCAL_SANITIZE := cfi
+LOCAL_SANITIZE_DIAG := cfi
+
include $(BUILD_SHARED_LIBRARY)
include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/media/libmediaplayerservice/MediaPlayerFactory.cpp b/media/libmediaplayerservice/MediaPlayerFactory.cpp
index f7c691d..6da1ec1 100644
--- a/media/libmediaplayerservice/MediaPlayerFactory.cpp
+++ b/media/libmediaplayerservice/MediaPlayerFactory.cpp
@@ -26,7 +26,6 @@
#include <media/stagefright/foundation/ADebug.h>
#include <utils/Errors.h>
#include <utils/misc.h>
-#include <../libstagefright/include/WVMExtractor.h>
#include "MediaPlayerFactory.h"
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 95c91d1..18fd857 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -60,6 +60,7 @@
#include <media/stagefright/Utils.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooperRoster.h>
+#include <media/stagefright/SurfaceUtils.h>
#include <mediautils/BatteryNotifier.h>
#include <memunreachable/memunreachable.h>
@@ -274,20 +275,6 @@
ALOGV("MediaPlayerService created");
mNextConnId = 1;
- mBatteryAudio.refCount = 0;
- for (int i = 0; i < NUM_AUDIO_DEVICES; i++) {
- mBatteryAudio.deviceOn[i] = 0;
- mBatteryAudio.lastTime[i] = 0;
- mBatteryAudio.totalTime[i] = 0;
- }
- // speaker is on by default
- mBatteryAudio.deviceOn[SPEAKER] = 1;
-
- // reset battery stats
- // if the mediaserver has crashed, battery stats could be left
- // in bad state, reset the state upon service start.
- BatteryNotifier::getInstance().noteResetVideo();
-
MediaPlayerFactory::registerBuiltinFactories();
}
@@ -577,7 +564,7 @@
mLoop = false;
mStatus = NO_INIT;
mAudioSessionId = audioSessionId;
- mUID = uid;
+ mUid = uid;
mRetransmitEndpointValid = false;
mAudioAttributes = NULL;
@@ -597,6 +584,7 @@
if (mAudioAttributes != NULL) {
free(mAudioAttributes);
}
+ clearDeathNotifiers();
}
void MediaPlayerService::Client::disconnect()
@@ -643,7 +631,7 @@
}
if (p != NULL) {
- p->setUID(mUID);
+ p->setUID(mUid);
}
return p;
@@ -654,12 +642,22 @@
const sp<MediaPlayerBase>& listener,
int which) {
mService = service;
+ mOmx = nullptr;
+ mListener = listener;
+ mWhich = which;
+}
+
+MediaPlayerService::Client::ServiceDeathNotifier::ServiceDeathNotifier(
+ const sp<IOmx>& omx,
+ const sp<MediaPlayerBase>& listener,
+ int which) {
+ mService = nullptr;
+ mOmx = omx;
mListener = listener;
mWhich = which;
}
MediaPlayerService::Client::ServiceDeathNotifier::~ServiceDeathNotifier() {
- mService->unlinkToDeath(this);
}
void MediaPlayerService::Client::ServiceDeathNotifier::binderDied(const wp<IBinder>& /*who*/) {
@@ -671,10 +669,43 @@
}
}
+void MediaPlayerService::Client::ServiceDeathNotifier::serviceDied(
+ uint64_t /* cookie */,
+ const wp<::android::hidl::base::V1_0::IBase>& /* who */) {
+ sp<MediaPlayerBase> listener = mListener.promote();
+ if (listener != NULL) {
+ listener->sendEvent(MEDIA_ERROR, MEDIA_ERROR_SERVER_DIED, mWhich);
+ } else {
+ ALOGW("listener for process %d death is gone", mWhich);
+ }
+}
+
+void MediaPlayerService::Client::ServiceDeathNotifier::unlinkToDeath() {
+ if (mService != nullptr) {
+ mService->unlinkToDeath(this);
+ mService = nullptr;
+ } else if (mOmx != nullptr) {
+ mOmx->unlinkToDeath(this);
+ mOmx = nullptr;
+ }
+}
+
+void MediaPlayerService::Client::clearDeathNotifiers() {
+ if (mExtractorDeathListener != nullptr) {
+ mExtractorDeathListener->unlinkToDeath();
+ mExtractorDeathListener = nullptr;
+ }
+ if (mCodecDeathListener != nullptr) {
+ mCodecDeathListener->unlinkToDeath();
+ mCodecDeathListener = nullptr;
+ }
+}
+
sp<MediaPlayerBase> MediaPlayerService::Client::setDataSource_pre(
player_type playerType)
{
ALOGV("player type = %d", playerType);
+ clearDeathNotifiers();
// create the right type of player
sp<MediaPlayerBase> p = createPlayer(playerType);
@@ -684,12 +715,32 @@
sp<IServiceManager> sm = defaultServiceManager();
sp<IBinder> binder = sm->getService(String16("media.extractor"));
+ if (binder == NULL) {
+ ALOGE("extractor service not available");
+ return NULL;
+ }
mExtractorDeathListener = new ServiceDeathNotifier(binder, p, MEDIAEXTRACTOR_PROCESS_DEATH);
binder->linkToDeath(mExtractorDeathListener);
- binder = sm->getService(String16("media.codec"));
- mCodecDeathListener = new ServiceDeathNotifier(binder, p, MEDIACODEC_PROCESS_DEATH);
- binder->linkToDeath(mCodecDeathListener);
+ if (property_get_bool("persist.media.treble_omx", true)) {
+ // Treble IOmx
+ sp<IOmx> omx = IOmx::getService();
+ if (omx == nullptr) {
+ ALOGE("Treble IOmx not available");
+ return NULL;
+ }
+ mCodecDeathListener = new ServiceDeathNotifier(omx, p, MEDIACODEC_PROCESS_DEATH);
+ omx->linkToDeath(mCodecDeathListener, 0);
+ } else {
+ // Legacy IOMX
+ binder = sm->getService(String16("media.codec"));
+ if (binder == NULL) {
+ ALOGE("codec service not available");
+ return NULL;
+ }
+ mCodecDeathListener = new ServiceDeathNotifier(binder, p, MEDIACODEC_PROCESS_DEATH);
+ binder->linkToDeath(mCodecDeathListener);
+ }
if (!p->hardwareOutput()) {
Mutex::Autolock l(mLock);
@@ -837,11 +888,11 @@
void MediaPlayerService::Client::disconnectNativeWindow() {
if (mConnectedWindow != NULL) {
- status_t err = native_window_api_disconnect(mConnectedWindow.get(),
- NATIVE_WINDOW_API_MEDIA);
+ status_t err = nativeWindowDisconnect(
+ mConnectedWindow.get(), "disconnectNativeWindow");
if (err != OK) {
- ALOGW("native_window_api_disconnect returned an error: %s (%d)",
+ ALOGW("nativeWindowDisconnect returned an error: %s (%d)",
strerror(-err), err);
}
}
@@ -863,8 +914,7 @@
sp<ANativeWindow> anw;
if (bufferProducer != NULL) {
anw = new Surface(bufferProducer, true /* controlledByApp */);
- status_t err = native_window_api_connect(anw.get(),
- NATIVE_WINDOW_API_MEDIA);
+ status_t err = nativeWindowConnect(anw.get(), "setVideoSurfaceTexture");
if (err != OK) {
ALOGE("setVideoSurfaceTexture failed: %d", err);
@@ -964,6 +1014,32 @@
return OK;
}
+status_t MediaPlayerService::Client::setBufferingSettings(
+ const BufferingSettings& buffering)
+{
+ ALOGV("[%d] setBufferingSettings{%s}",
+ mConnId, buffering.toString().string());
+ sp<MediaPlayerBase> p = getPlayer();
+ if (p == 0) return UNKNOWN_ERROR;
+ return p->setBufferingSettings(buffering);
+}
+
+status_t MediaPlayerService::Client::getDefaultBufferingSettings(
+ BufferingSettings* buffering /* nonnull */)
+{
+ sp<MediaPlayerBase> p = getPlayer();
+ // TODO: create mPlayer on demand.
+ if (p == 0) return UNKNOWN_ERROR;
+ status_t ret = p->getDefaultBufferingSettings(buffering);
+ if (ret == NO_ERROR) {
+ ALOGV("[%d] getDefaultBufferingSettings{%s}",
+ mConnId, buffering->toString().string());
+ } else {
+ ALOGV("[%d] getDefaultBufferingSettings returned %d", mConnId, ret);
+ }
+ return ret;
+}
+
status_t MediaPlayerService::Client::prepareAsync()
{
ALOGV("[%d] prepareAsync", mConnId);
@@ -1113,12 +1189,48 @@
return OK;
}
-status_t MediaPlayerService::Client::seekTo(int msec)
+VolumeShaper::Status MediaPlayerService::Client::applyVolumeShaper(
+ const sp<VolumeShaper::Configuration>& configuration,
+ const sp<VolumeShaper::Operation>& operation) {
+ // for hardware output, call player instead
+ ALOGV("Client::applyVolumeShaper(%p)", this);
+ sp<MediaPlayerBase> p = getPlayer();
+ {
+ Mutex::Autolock l(mLock);
+ if (p != 0 && p->hardwareOutput()) {
+ // TODO: investigate internal implementation
+ return VolumeShaper::Status(INVALID_OPERATION);
+ }
+ if (mAudioOutput.get() != nullptr) {
+ return mAudioOutput->applyVolumeShaper(configuration, operation);
+ }
+ }
+ return VolumeShaper::Status(INVALID_OPERATION);
+}
+
+sp<VolumeShaper::State> MediaPlayerService::Client::getVolumeShaperState(int id) {
+ // for hardware output, call player instead
+ ALOGV("Client::getVolumeShaperState(%p)", this);
+ sp<MediaPlayerBase> p = getPlayer();
+ {
+ Mutex::Autolock l(mLock);
+ if (p != 0 && p->hardwareOutput()) {
+ // TODO: investigate internal implementation.
+ return nullptr;
+ }
+ if (mAudioOutput.get() != nullptr) {
+ return mAudioOutput->getVolumeShaperState(id);
+ }
+ }
+ return nullptr;
+}
+
+status_t MediaPlayerService::Client::seekTo(int msec, MediaPlayerSeekMode mode)
{
- ALOGV("[%d] seekTo(%d)", mConnId, msec);
+ ALOGV("[%d] seekTo(%d, %d)", mConnId, msec, mode);
sp<MediaPlayerBase> p = getPlayer();
if (p == 0) return UNKNOWN_ERROR;
- return p->seekTo(msec);
+ return p->seekTo(msec, mode);
}
status_t MediaPlayerService::Client::reset()
@@ -1284,16 +1396,33 @@
}
sp<IMediaPlayerClient> c;
+ sp<Client> nextClient;
+ status_t errStartNext = NO_ERROR;
{
Mutex::Autolock l(client->mLock);
c = client->mClient;
if (msg == MEDIA_PLAYBACK_COMPLETE && client->mNextClient != NULL) {
+ nextClient = client->mNextClient;
+
if (client->mAudioOutput != NULL)
client->mAudioOutput->switchToNextOutput();
- client->mNextClient->start();
- if (client->mNextClient->mClient != NULL) {
- client->mNextClient->mClient->notify(
- MEDIA_INFO, MEDIA_INFO_STARTED_AS_NEXT, 0, obj);
+
+ errStartNext = nextClient->start();
+ }
+ }
+
+ if (nextClient != NULL) {
+ sp<IMediaPlayerClient> nc;
+ {
+ Mutex::Autolock l(nextClient->mLock);
+ nc = nextClient->mClient;
+ }
+ if (nc != NULL) {
+ if (errStartNext == NO_ERROR) {
+ nc->notify(MEDIA_INFO, MEDIA_INFO_STARTED_AS_NEXT, 0, obj);
+ } else {
+ nc->notify(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN , 0, obj);
+ ALOGE("gapless:start playback for next track failed, err(%d)", errStartNext);
}
}
}
@@ -1341,6 +1470,32 @@
}
}
+// Modular DRM
+status_t MediaPlayerService::Client::prepareDrm(const uint8_t uuid[16],
+ const Vector<uint8_t>& drmSessionId)
+{
+ ALOGV("[%d] prepareDrm", mConnId);
+ sp<MediaPlayerBase> p = getPlayer();
+ if (p == 0) return UNKNOWN_ERROR;
+
+ status_t ret = p->prepareDrm(uuid, drmSessionId);
+ ALOGV("prepareDrm ret: %d", ret);
+
+ return ret;
+}
+
+status_t MediaPlayerService::Client::releaseDrm()
+{
+ ALOGV("[%d] releaseDrm", mConnId);
+ sp<MediaPlayerBase> p = getPlayer();
+ if (p == 0) return UNKNOWN_ERROR;
+
+ status_t ret = p->releaseDrm();
+ ALOGV("releaseDrm ret: %d", ret);
+
+ return ret;
+}
+
#if CALLBACK_ANTAGONIZER
const int Antagonizer::interval = 10000; // 10 msecs
@@ -1378,7 +1533,7 @@
#undef LOG_TAG
#define LOG_TAG "AudioSink"
-MediaPlayerService::AudioOutput::AudioOutput(audio_session_t sessionId, int uid, int pid,
+MediaPlayerService::AudioOutput::AudioOutput(audio_session_t sessionId, uid_t uid, int pid,
const audio_attributes_t* attr)
: mCallback(NULL),
mCallbackCookie(NULL),
@@ -1395,7 +1550,8 @@
mPid(pid),
mSendLevel(0.0),
mAuxEffectId(0),
- mFlags(AUDIO_OUTPUT_FLAG_NONE)
+ mFlags(AUDIO_OUTPUT_FLAG_NONE),
+ mVolumeHandler(new VolumeHandler())
{
ALOGV("AudioOutput(%d)", sessionId);
if (attr != NULL) {
@@ -1513,57 +1669,45 @@
}
uint32_t numFramesPlayed;
- int64_t numFramesPlayedAt;
+ int64_t numFramesPlayedAtUs;
AudioTimestamp ts;
- static const int64_t kStaleTimestamp100ms = 100000;
status_t res = mTrack->getTimestamp(ts);
if (res == OK) { // case 1: mixing audio tracks and offloaded tracks.
numFramesPlayed = ts.mPosition;
- numFramesPlayedAt = ts.mTime.tv_sec * 1000000LL + ts.mTime.tv_nsec / 1000;
- const int64_t timestampAge = nowUs - numFramesPlayedAt;
- if (timestampAge > kStaleTimestamp100ms) {
- // This is an audio FIXME.
- // getTimestamp returns a timestamp which may come from audio mixing threads.
- // After pausing, the MixerThread may go idle, thus the mTime estimate may
- // become stale. Assuming that the MixerThread runs 20ms, with FastMixer at 5ms,
- // the max latency should be about 25ms with an average around 12ms (to be verified).
- // For safety we use 100ms.
- ALOGV("getTimestamp: returned stale timestamp nowUs(%lld) numFramesPlayedAt(%lld)",
- (long long)nowUs, (long long)numFramesPlayedAt);
- numFramesPlayedAt = nowUs - kStaleTimestamp100ms;
- }
- //ALOGD("getTimestamp: OK %d %lld", numFramesPlayed, (long long)numFramesPlayedAt);
+ numFramesPlayedAtUs = ts.mTime.tv_sec * 1000000LL + ts.mTime.tv_nsec / 1000;
+ //ALOGD("getTimestamp: OK %d %lld", numFramesPlayed, (long long)numFramesPlayedAtUs);
} else if (res == WOULD_BLOCK) { // case 2: transitory state on start of a new track
numFramesPlayed = 0;
- numFramesPlayedAt = nowUs;
+ numFramesPlayedAtUs = nowUs;
//ALOGD("getTimestamp: WOULD_BLOCK %d %lld",
- // numFramesPlayed, (long long)numFramesPlayedAt);
+ // numFramesPlayed, (long long)numFramesPlayedAtUs);
} else { // case 3: transitory at new track or audio fast tracks.
res = mTrack->getPosition(&numFramesPlayed);
CHECK_EQ(res, (status_t)OK);
- numFramesPlayedAt = nowUs;
- numFramesPlayedAt += 1000LL * mTrack->latency() / 2; /* XXX */
- //ALOGD("getPosition: %u %lld", numFramesPlayed, (long long)numFramesPlayedAt);
+ numFramesPlayedAtUs = nowUs;
+ numFramesPlayedAtUs += 1000LL * mTrack->latency() / 2; /* XXX */
+ //ALOGD("getPosition: %u %lld", numFramesPlayed, (long long)numFramesPlayedAtUs);
}
// CHECK_EQ(numFramesPlayed & (1 << 31), 0); // can't be negative until 12.4 hrs, test
// TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
int64_t durationUs = (int64_t)((int32_t)numFramesPlayed * 1000000LL / mSampleRateHz)
- + nowUs - numFramesPlayedAt;
+ + nowUs - numFramesPlayedAtUs;
if (durationUs < 0) {
// Occurs when numFramesPlayed position is very small and the following:
// (1) In case 1, the time nowUs is computed before getTimestamp() is called and
- // numFramesPlayedAt is greater than nowUs by time more than numFramesPlayed.
+ // numFramesPlayedAtUs is greater than nowUs by time more than numFramesPlayed.
// (2) In case 3, using getPosition and adding mAudioSink->latency() to
- // numFramesPlayedAt, by a time amount greater than numFramesPlayed.
+ // numFramesPlayedAtUs, by a time amount greater than numFramesPlayed.
//
// Both of these are transitory conditions.
ALOGV("getPlayedOutDurationUs: negative duration %lld set to zero", (long long)durationUs);
durationUs = 0;
}
ALOGV("getPlayedOutDurationUs(%lld) nowUs(%lld) frames(%u) framesAt(%lld)",
- (long long)durationUs, (long long)nowUs, numFramesPlayed, (long long)numFramesPlayedAt);
+ (long long)durationUs, (long long)nowUs,
+ numFramesPlayed, (long long)numFramesPlayedAtUs);
return durationUs;
}
@@ -1871,6 +2015,24 @@
ALOGV("setVolume");
t->setVolume(mLeftVolume, mRightVolume);
+ // Restore VolumeShapers for the MediaPlayer in case the track was recreated
+ // due to an output sink error (e.g. offload to non-offload switch).
+ mVolumeHandler->forall([&t](const VolumeShaper &shaper) -> VolumeShaper::Status {
+ sp<VolumeShaper::Operation> operationToEnd =
+ new VolumeShaper::Operation(shaper.mOperation);
+ // TODO: Ideally we would restore to the exact xOffset position
+ // as returned by getVolumeShaperState(), but we don't have that
+ // information when restoring at the client unless we periodically poll
+ // the server or create shared memory state.
+ //
+ // For now, we simply advance to the end of the VolumeShaper effect
+ // if it has been started.
+ if (shaper.isStarted()) {
+ operationToEnd->setNormalizedTime(1.f);
+ }
+ return t->applyVolumeShaper(shaper.mConfiguration, operationToEnd);
+ });
+
mSampleRateHz = sampleRate;
mFlags = flags;
mMsecsPerFrame = 1E3f / (mPlaybackRate.mSpeed * sampleRate);
@@ -1910,7 +2072,11 @@
if (mTrack != 0) {
mTrack->setVolume(mLeftVolume, mRightVolume);
mTrack->setAuxEffectSendLevel(mSendLevel);
- return mTrack->start();
+ status_t status = mTrack->start();
+ if (status == NO_ERROR) {
+ mVolumeHandler->setStarted();
+ }
+ return status;
}
return NO_INIT;
}
@@ -2107,6 +2273,40 @@
return NO_ERROR;
}
+VolumeShaper::Status MediaPlayerService::AudioOutput::applyVolumeShaper(
+ const sp<VolumeShaper::Configuration>& configuration,
+ const sp<VolumeShaper::Operation>& operation)
+{
+ Mutex::Autolock lock(mLock);
+ ALOGV("AudioOutput::applyVolumeShaper");
+
+ mVolumeHandler->setIdIfNecessary(configuration);
+
+ VolumeShaper::Status status;
+ if (mTrack != 0) {
+ status = mTrack->applyVolumeShaper(configuration, operation);
+ if (status >= 0) {
+ (void)mVolumeHandler->applyVolumeShaper(configuration, operation);
+ if (mTrack->isPlaying()) { // match local AudioTrack to properly restore.
+ mVolumeHandler->setStarted();
+ }
+ }
+ } else {
+ status = mVolumeHandler->applyVolumeShaper(configuration, operation);
+ }
+ return status;
+}
+
+sp<VolumeShaper::State> MediaPlayerService::AudioOutput::getVolumeShaperState(int id)
+{
+ Mutex::Autolock lock(mLock);
+ if (mTrack != 0) {
+ return mTrack->getVolumeShaperState(id);
+ } else {
+ return mVolumeHandler->getVolumeShaperState(id);
+ }
+}
+
// static
void MediaPlayerService::AudioOutput::CallbackWrapper(
int event, void *cookie, void *info) {
@@ -2272,7 +2472,31 @@
////////////////////////////////////////////////////////////////////////////////
-void MediaPlayerService::addBatteryData(uint32_t params)
+void MediaPlayerService::addBatteryData(uint32_t params) {
+ mBatteryTracker.addBatteryData(params);
+}
+
+status_t MediaPlayerService::pullBatteryData(Parcel* reply) {
+ return mBatteryTracker.pullBatteryData(reply);
+}
+
+MediaPlayerService::BatteryTracker::BatteryTracker() {
+ mBatteryAudio.refCount = 0;
+ for (int i = 0; i < NUM_AUDIO_DEVICES; i++) {
+ mBatteryAudio.deviceOn[i] = 0;
+ mBatteryAudio.lastTime[i] = 0;
+ mBatteryAudio.totalTime[i] = 0;
+ }
+ // speaker is on by default
+ mBatteryAudio.deviceOn[SPEAKER] = 1;
+
+ // reset battery stats
+ // if the mediaserver has crashed, battery stats could be left
+ // in bad state, reset the state upon service start.
+ BatteryNotifier::getInstance().noteResetVideo();
+}
+
+void MediaPlayerService::BatteryTracker::addBatteryData(uint32_t params)
{
Mutex::Autolock lock(mLock);
@@ -2352,7 +2576,7 @@
return;
}
- int uid = IPCThreadState::self()->getCallingUid();
+ uid_t uid = IPCThreadState::self()->getCallingUid();
if (uid == AID_MEDIA) {
return;
}
@@ -2412,7 +2636,7 @@
}
}
-status_t MediaPlayerService::pullBatteryData(Parcel* reply) {
+status_t MediaPlayerService::BatteryTracker::pullBatteryData(Parcel* reply) {
Mutex::Autolock lock(mLock);
// audio output devices usage
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index 601b046..06b9cad 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -30,6 +30,8 @@
#include <media/Metadata.h>
#include <media/stagefright/foundation/ABase.h>
+#include <android/hardware/media/omx/1.0/IOmx.h>
+
#include <system/audio.h>
namespace android {
@@ -69,13 +71,14 @@
class MediaPlayerService : public BnMediaPlayerService
{
class Client;
+ typedef ::android::hardware::media::omx::V1_0::IOmx IOmx;
class AudioOutput : public MediaPlayerBase::AudioSink
{
class CallbackData;
public:
- AudioOutput(audio_session_t sessionId, int uid, int pid,
+ AudioOutput(audio_session_t sessionId, uid_t uid, int pid,
const audio_attributes_t * attr);
virtual ~AudioOutput();
@@ -129,6 +132,11 @@
virtual status_t setParameters(const String8& keyValuePairs);
virtual String8 getParameters(const String8& keys);
+ virtual VolumeShaper::Status applyVolumeShaper(
+ const sp<VolumeShaper::Configuration>& configuration,
+ const sp<VolumeShaper::Operation>& operation) override;
+ virtual sp<VolumeShaper::State> getVolumeShaperState(int id) override;
+
private:
static void setMinBufferCount();
static void CallbackWrapper(
@@ -152,11 +160,12 @@
float mMsecsPerFrame;
size_t mFrameSize;
audio_session_t mSessionId;
- int mUid;
+ uid_t mUid;
int mPid;
float mSendLevel;
int mAuxEffectId;
audio_output_flags_t mFlags;
+ sp<VolumeHandler> mVolumeHandler;
mutable Mutex mLock;
// static variables below not protected by mutex
@@ -237,57 +246,71 @@
CAMERA_PROCESS_DEATH = 4
};
- // For battery usage tracking purpose
- struct BatteryUsageInfo {
- // how many streams are being played by one UID
- int refCount;
- // a temp variable to store the duration(ms) of audio codecs
- // when we start a audio codec, we minus the system time from audioLastTime
- // when we pause it, we add the system time back to the audioLastTime
- // so after the pause, audioLastTime = pause time - start time
- // if multiple audio streams are played (or recorded), then audioLastTime
- // = the total playing time of all the streams
- int32_t audioLastTime;
- // when all the audio streams are being paused, we assign audioLastTime to
- // this variable, so this value could be provided to the battery app
- // in the next pullBatteryData call
- int32_t audioTotalTime;
-
- int32_t videoLastTime;
- int32_t videoTotalTime;
- };
- KeyedVector<int, BatteryUsageInfo> mBatteryData;
-
- enum {
- SPEAKER,
- OTHER_AUDIO_DEVICE,
- SPEAKER_AND_OTHER,
- NUM_AUDIO_DEVICES
- };
-
- struct BatteryAudioFlingerUsageInfo {
- int refCount; // how many audio streams are being played
- int deviceOn[NUM_AUDIO_DEVICES]; // whether the device is currently used
- int32_t lastTime[NUM_AUDIO_DEVICES]; // in ms
- // totalTime[]: total time of audio output devices usage
- int32_t totalTime[NUM_AUDIO_DEVICES]; // in ms
- };
-
- // This varialble is used to record the usage of audio output device
- // for battery app
- BatteryAudioFlingerUsageInfo mBatteryAudio;
-
// Collect info of the codec usage from media player and media recorder
virtual void addBatteryData(uint32_t params);
// API for the Battery app to pull the data of codecs usage
virtual status_t pullBatteryData(Parcel* reply);
private:
+ struct BatteryTracker {
+ BatteryTracker();
+ // Collect info of the codec usage from media player and media recorder
+ void addBatteryData(uint32_t params);
+ // API for the Battery app to pull the data of codecs usage
+ status_t pullBatteryData(Parcel* reply);
+
+ private:
+ // For battery usage tracking purpose
+ struct BatteryUsageInfo {
+ // how many streams are being played by one UID
+ int refCount;
+ // a temp variable to store the duration(ms) of audio codecs
+      // when we start an audio codec, we minus the system time from audioLastTime
+ // when we pause it, we add the system time back to the audioLastTime
+ // so after the pause, audioLastTime = pause time - start time
+ // if multiple audio streams are played (or recorded), then audioLastTime
+ // = the total playing time of all the streams
+ int32_t audioLastTime;
+ // when all the audio streams are being paused, we assign audioLastTime to
+ // this variable, so this value could be provided to the battery app
+ // in the next pullBatteryData call
+ int32_t audioTotalTime;
+
+ int32_t videoLastTime;
+ int32_t videoTotalTime;
+ };
+ KeyedVector<int, BatteryUsageInfo> mBatteryData;
+
+ enum {
+ SPEAKER,
+ OTHER_AUDIO_DEVICE,
+ SPEAKER_AND_OTHER,
+ NUM_AUDIO_DEVICES
+ };
+
+ struct BatteryAudioFlingerUsageInfo {
+ int refCount; // how many audio streams are being played
+ int deviceOn[NUM_AUDIO_DEVICES]; // whether the device is currently used
+ int32_t lastTime[NUM_AUDIO_DEVICES]; // in ms
+ // totalTime[]: total time of audio output devices usage
+ int32_t totalTime[NUM_AUDIO_DEVICES]; // in ms
+ };
+
+      // This variable is used to record the usage of audio output device
+ // for battery app
+ BatteryAudioFlingerUsageInfo mBatteryAudio;
+
+ mutable Mutex mLock;
+ };
+ BatteryTracker mBatteryTracker;
class Client : public BnMediaPlayer {
// IMediaPlayer interface
virtual void disconnect();
virtual status_t setVideoSurfaceTexture(
const sp<IGraphicBufferProducer>& bufferProducer);
+ virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
+ virtual status_t getDefaultBufferingSettings(
+ BufferingSettings* buffering /* nonnull */) override;
virtual status_t prepareAsync();
virtual status_t start();
virtual status_t stop();
@@ -298,7 +321,9 @@
virtual status_t setSyncSettings(const AVSyncSettings& rate, float videoFpsHint);
virtual status_t getSyncSettings(AVSyncSettings* rate /* nonnull */,
float* videoFps /* nonnull */);
- virtual status_t seekTo(int msec);
+ virtual status_t seekTo(
+ int msec,
+ MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC);
virtual status_t getCurrentPosition(int* msec);
virtual status_t getDuration(int* msec);
virtual status_t reset();
@@ -318,6 +343,11 @@
virtual status_t getRetransmitEndpoint(struct sockaddr_in* endpoint);
virtual status_t setNextPlayer(const sp<IMediaPlayer>& player);
+ virtual VolumeShaper::Status applyVolumeShaper(
+ const sp<VolumeShaper::Configuration>& configuration,
+ const sp<VolumeShaper::Operation>& operation) override;
+ virtual sp<VolumeShaper::State> getVolumeShaperState(int id) override;
+
sp<MediaPlayerBase> createPlayer(player_type playerType);
virtual status_t setDataSource(
@@ -342,24 +372,40 @@
virtual status_t dump(int fd, const Vector<String16>& args);
audio_session_t getAudioSessionId() { return mAudioSessionId; }
+ // Modular DRM
+ virtual status_t prepareDrm(const uint8_t uuid[16], const Vector<uint8_t>& drmSessionId);
+ virtual status_t releaseDrm();
private:
- class ServiceDeathNotifier: public IBinder::DeathRecipient
+ class ServiceDeathNotifier:
+ public IBinder::DeathRecipient,
+ public ::android::hardware::hidl_death_recipient
{
public:
ServiceDeathNotifier(
const sp<IBinder>& service,
const sp<MediaPlayerBase>& listener,
int which);
+ ServiceDeathNotifier(
+ const sp<IOmx>& omx,
+ const sp<MediaPlayerBase>& listener,
+ int which);
virtual ~ServiceDeathNotifier();
virtual void binderDied(const wp<IBinder>& who);
+ virtual void serviceDied(
+ uint64_t cookie,
+ const wp<::android::hidl::base::V1_0::IBase>& who);
+ void unlinkToDeath();
private:
int mWhich;
sp<IBinder> mService;
+ sp<IOmx> mOmx;
wp<MediaPlayerBase> mListener;
};
+ void clearDeathNotifiers();
+
friend class MediaPlayerService;
Client( const sp<MediaPlayerService>& service,
pid_t pid,
@@ -402,7 +448,7 @@
int32_t mConnId;
audio_session_t mAudioSessionId;
audio_attributes_t * mAudioAttributes;
- uid_t mUID;
+ uid_t mUid;
sp<ANativeWindow> mConnectedWindow;
sp<IBinder> mConnectedWindowBinder;
struct sockaddr_in mRetransmitEndpoint;
@@ -419,8 +465,8 @@
// getMetadata clears this set.
media::Metadata::Filter mMetadataUpdated; // protected by mLock
- sp<IBinder::DeathRecipient> mExtractorDeathListener;
- sp<IBinder::DeathRecipient> mCodecDeathListener;
+ sp<ServiceDeathNotifier> mExtractorDeathListener;
+ sp<ServiceDeathNotifier> mCodecDeathListener;
#if CALLBACK_ANTAGONIZER
Antagonizer* mAntagonizer;
#endif
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index 609b00d..6400481 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -52,7 +52,7 @@
return ok;
}
-status_t MediaRecorderClient::setInputSurface(const sp<IGraphicBufferConsumer>& surface)
+status_t MediaRecorderClient::setInputSurface(const sp<PersistentSurface>& surface)
{
ALOGV("setInputSurface");
Mutex::Autolock lock(mLock);
@@ -161,15 +161,26 @@
return mRecorder->setAudioEncoder((audio_encoder)ae);
}
-status_t MediaRecorderClient::setOutputFile(int fd, int64_t offset, int64_t length)
+status_t MediaRecorderClient::setOutputFile(int fd)
{
- ALOGV("setOutputFile(%d, %lld, %lld)", fd, (long long)offset, (long long)length);
+ ALOGV("setOutputFile(%d)", fd);
Mutex::Autolock lock(mLock);
if (mRecorder == NULL) {
ALOGE("recorder is not initialized");
return NO_INIT;
}
- return mRecorder->setOutputFile(fd, offset, length);
+ return mRecorder->setOutputFile(fd);
+}
+
+status_t MediaRecorderClient::setNextOutputFile(int fd)
+{
+ ALOGV("setNextOutputFile(%d)", fd);
+ Mutex::Autolock lock(mLock);
+ if (mRecorder == NULL) {
+ ALOGE("recorder is not initialized");
+ return NO_INIT;
+ }
+ return mRecorder->setNextOutputFile(fd);
}
status_t MediaRecorderClient::setVideoSize(int width, int height)
@@ -227,6 +238,17 @@
return mRecorder->getMaxAmplitude(max);
}
+status_t MediaRecorderClient::getMetrics(Parcel* reply)
+{
+ ALOGV("MediaRecorderClient::getMetrics");
+ Mutex::Autolock lock(mLock);
+ if (mRecorder == NULL) {
+ ALOGE("recorder is not initialized");
+ return NO_INIT;
+ }
+ return mRecorder->getMetrics(reply);
+}
+
status_t MediaRecorderClient::start()
{
ALOGV("start");
@@ -317,6 +339,7 @@
wp<MediaRecorderClient> client(this);
mMediaPlayerService->removeMediaRecorderClient(client);
}
+ clearDeathNotifiers();
return NO_ERROR;
}
@@ -340,15 +363,25 @@
const sp<IMediaRecorderClient>& listener,
int which) {
mService = service;
+ mOmx = nullptr;
+ mListener = listener;
+ mWhich = which;
+}
+
+MediaRecorderClient::ServiceDeathNotifier::ServiceDeathNotifier(
+ const sp<IOmx>& omx,
+ const sp<IMediaRecorderClient>& listener,
+ int which) {
+ mService = nullptr;
+ mOmx = omx;
mListener = listener;
mWhich = which;
}
MediaRecorderClient::ServiceDeathNotifier::~ServiceDeathNotifier() {
- mService->unlinkToDeath(this);
}
-void MediaRecorderClient::ServiceDeathNotifier::binderDied(const wp<IBinder>& /*who*/) {
+void MediaRecorderClient::ServiceDeathNotifier::binderDied(const wp<IBinder>& /*who*/) {
sp<IMediaRecorderClient> listener = mListener.promote();
if (listener != NULL) {
listener->notify(MEDIA_ERROR, MEDIA_ERROR_SERVER_DIED, mWhich);
@@ -357,9 +390,42 @@
}
}
+void MediaRecorderClient::ServiceDeathNotifier::serviceDied(
+ uint64_t /* cookie */,
+ const wp<::android::hidl::base::V1_0::IBase>& /* who */) {
+ sp<IMediaRecorderClient> listener = mListener.promote();
+ if (listener != NULL) {
+ listener->notify(MEDIA_ERROR, MEDIA_ERROR_SERVER_DIED, mWhich);
+ } else {
+ ALOGW("listener for process %d death is gone", mWhich);
+ }
+}
+
+void MediaRecorderClient::ServiceDeathNotifier::unlinkToDeath() {
+ if (mService != nullptr) {
+ mService->unlinkToDeath(this);
+ mService = nullptr;
+ } else if (mOmx != nullptr) {
+ mOmx->unlinkToDeath(this);
+ mOmx = nullptr;
+ }
+}
+
+void MediaRecorderClient::clearDeathNotifiers() {
+ if (mCameraDeathListener != nullptr) {
+ mCameraDeathListener->unlinkToDeath();
+ mCameraDeathListener = nullptr;
+ }
+ if (mCodecDeathListener != nullptr) {
+ mCodecDeathListener->unlinkToDeath();
+ mCodecDeathListener = nullptr;
+ }
+}
+
status_t MediaRecorderClient::setListener(const sp<IMediaRecorderClient>& listener)
{
ALOGV("setListener");
+ clearDeathNotifiers();
Mutex::Autolock lock(mLock);
if (mRecorder == NULL) {
ALOGE("recorder is not initialized");
@@ -384,10 +450,27 @@
}
sCameraChecked = true;
- binder = sm->getService(String16("media.codec"));
- mCodecDeathListener = new ServiceDeathNotifier(binder, listener,
- MediaPlayerService::MEDIACODEC_PROCESS_DEATH);
- binder->linkToDeath(mCodecDeathListener);
+ if (property_get_bool("persist.media.treble_omx", true)) {
+ // Treble IOmx
+ sp<IOmx> omx = IOmx::getService();
+ if (omx == nullptr) {
+ ALOGE("Treble IOmx not available");
+ return NO_INIT;
+ }
+ mCodecDeathListener = new ServiceDeathNotifier(omx, listener,
+ MediaPlayerService::MEDIACODEC_PROCESS_DEATH);
+ omx->linkToDeath(mCodecDeathListener, 0);
+ } else {
+ // Legacy IOMX
+ binder = sm->getService(String16("media.codec"));
+ if (binder == NULL) {
+ ALOGE("Unable to connect to media codec service");
+ return NO_INIT;
+ }
+ mCodecDeathListener = new ServiceDeathNotifier(binder, listener,
+ MediaPlayerService::MEDIACODEC_PROCESS_DEATH);
+ binder->linkToDeath(mCodecDeathListener);
+ }
return OK;
}
diff --git a/media/libmediaplayerservice/MediaRecorderClient.h b/media/libmediaplayerservice/MediaRecorderClient.h
index b2d0f0e..7868a91 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.h
+++ b/media/libmediaplayerservice/MediaRecorderClient.h
@@ -20,31 +20,46 @@
#include <media/IMediaRecorder.h>
+#include <android/hardware/media/omx/1.0/IOmx.h>
+
namespace android {
struct MediaRecorderBase;
class MediaPlayerService;
class ICameraRecordingProxy;
-class IGraphicBufferProducer;
class MediaRecorderClient : public BnMediaRecorder
{
- class ServiceDeathNotifier: public IBinder::DeathRecipient
+ typedef ::android::hardware::media::omx::V1_0::IOmx IOmx;
+
+ class ServiceDeathNotifier :
+ public IBinder::DeathRecipient,
+ public ::android::hardware::hidl_death_recipient
{
public:
ServiceDeathNotifier(
const sp<IBinder>& service,
const sp<IMediaRecorderClient>& listener,
int which);
+ ServiceDeathNotifier(
+ const sp<IOmx>& omx,
+ const sp<IMediaRecorderClient>& listener,
+ int which);
virtual ~ServiceDeathNotifier();
virtual void binderDied(const wp<IBinder>& who);
-
+ virtual void serviceDied(
+ uint64_t cookie,
+ const wp<::android::hidl::base::V1_0::IBase>& who);
+ void unlinkToDeath();
private:
int mWhich;
sp<IBinder> mService;
+ sp<IOmx> mOmx;
wp<IMediaRecorderClient> mListener;
};
+ void clearDeathNotifiers();
+
public:
virtual status_t setCamera(const sp<hardware::ICamera>& camera,
const sp<ICameraRecordingProxy>& proxy);
@@ -54,8 +69,8 @@
virtual status_t setOutputFormat(int of);
virtual status_t setVideoEncoder(int ve);
virtual status_t setAudioEncoder(int ae);
- virtual status_t setOutputFile(int fd, int64_t offset,
- int64_t length);
+ virtual status_t setOutputFile(int fd);
+ virtual status_t setNextOutputFile(int fd);
virtual status_t setVideoSize(int width, int height);
virtual status_t setVideoFrameRate(int frames_per_second);
virtual status_t setParameters(const String8& params);
@@ -64,6 +79,7 @@
virtual status_t setClientName(const String16& clientName);
virtual status_t prepare();
virtual status_t getMaxAmplitude(int* max);
+ virtual status_t getMetrics(Parcel* reply);
virtual status_t start();
virtual status_t stop();
virtual status_t reset();
@@ -73,7 +89,7 @@
virtual status_t close();
virtual status_t release();
virtual status_t dump(int fd, const Vector<String16>& args);
- virtual status_t setInputSurface(const sp<IGraphicBufferConsumer>& surface);
+ virtual status_t setInputSurface(const sp<PersistentSurface>& surface);
virtual sp<IGraphicBufferProducer> querySurfaceMediaSource();
private:
@@ -85,8 +101,8 @@
const String16& opPackageName);
virtual ~MediaRecorderClient();
- sp<IBinder::DeathRecipient> mCameraDeathListener;
- sp<IBinder::DeathRecipient> mCodecDeathListener;
+ sp<ServiceDeathNotifier> mCameraDeathListener;
+ sp<ServiceDeathNotifier> mCodecDeathListener;
pid_t mPid;
Mutex mLock;
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index cdb0a7b..e1d762f 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -30,6 +30,7 @@
#include <binder/IServiceManager.h>
#include <media/IMediaPlayerService.h>
+#include <media/MediaAnalyticsItem.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -45,6 +46,7 @@
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/MediaCodecSource.h>
+#include <media/stagefright/PersistentSurface.h>
#include <media/MediaProfiles.h>
#include <camera/CameraParameters.h>
@@ -64,6 +66,26 @@
static const float kMinTypicalDisplayRefreshingRate = kTypicalDisplayRefreshingRate / 2;
static const int kMaxNumVideoTemporalLayers = 8;
+// key for media statistics
+static const char *kKeyRecorder = "recorder";
+// attrs for media statistics
+static const char *kRecorderHeight = "android.media.mediarecorder.height";
+static const char *kRecorderWidth = "android.media.mediarecorder.width";
+static const char *kRecorderFrameRate = "android.media.mediarecorder.frame-rate";
+static const char *kRecorderVideoBitrate = "android.media.mediarecorder.video-bitrate";
+static const char *kRecorderAudioSampleRate = "android.media.mediarecorder.audio-samplerate";
+static const char *kRecorderAudioChannels = "android.media.mediarecorder.audio-channels";
+static const char *kRecorderAudioBitrate = "android.media.mediarecorder.audio-bitrate";
+static const char *kRecorderVideoIframeInterval = "android.media.mediarecorder.video-iframe-interval";
+static const char *kRecorderMovieTimescale = "android.media.mediarecorder.movie-timescale";
+static const char *kRecorderAudioTimescale = "android.media.mediarecorder.audio-timescale";
+static const char *kRecorderVideoTimescale = "android.media.mediarecorder.video-timescale";
+static const char *kRecorderVideoProfile = "android.media.mediarecorder.video-encoder-profile";
+static const char *kRecorderVideoLevel = "android.media.mediarecorder.video-encoder-level";
+static const char *kRecorderCaptureFpsEnable = "android.media.mediarecorder.capture-fpsenable";
+static const char *kRecorderCaptureFps = "android.media.mediarecorder.capture-fps";
+static const char *kRecorderRotation = "android.media.mediarecorder.rotation";
+
// To collect the encoder usage for the battery app
static void addBatteryData(uint32_t params) {
sp<IBinder> binder =
@@ -84,6 +106,8 @@
mStarted(false) {
ALOGV("Constructor");
+
+ mAnalyticsDirty = false;
reset();
}
@@ -94,6 +118,80 @@
if (mLooper != NULL) {
mLooper->stop();
}
+
+ // log the current record, provided it has some information worth recording
+ if (mAnalyticsDirty && mAnalyticsItem != NULL) {
+ updateMetrics();
+ if (mAnalyticsItem->count() > 0) {
+ mAnalyticsItem->setFinalized(true);
+ mAnalyticsItem->selfrecord();
+ }
+ delete mAnalyticsItem;
+ mAnalyticsItem = NULL;
+ }
+}
+
+void StagefrightRecorder::updateMetrics() {
+ ALOGV("updateMetrics");
+
+ // we'll populate the values from the raw fields.
+ // (NOT going to populate as we go through the various set* ops)
+
+ // TBD mOutputFormat = OUTPUT_FORMAT_THREE_GPP;
+ // TBD mAudioEncoder = AUDIO_ENCODER_AMR_NB;
+ // TBD mVideoEncoder = VIDEO_ENCODER_DEFAULT;
+ mAnalyticsItem->setInt32(kRecorderHeight, mVideoHeight);
+ mAnalyticsItem->setInt32(kRecorderWidth, mVideoWidth);
+ mAnalyticsItem->setInt32(kRecorderFrameRate, mFrameRate);
+ mAnalyticsItem->setInt32(kRecorderVideoBitrate, mVideoBitRate);
+ mAnalyticsItem->setInt32(kRecorderAudioSampleRate, mSampleRate);
+ mAnalyticsItem->setInt32(kRecorderAudioChannels, mAudioChannels);
+ mAnalyticsItem->setInt32(kRecorderAudioBitrate, mAudioBitRate);
+ // TBD mInterleaveDurationUs = 0;
+ mAnalyticsItem->setInt32(kRecorderVideoIframeInterval, mIFramesIntervalSec);
+ // TBD mAudioSourceNode = 0;
+ // TBD mUse64BitFileOffset = false;
+ mAnalyticsItem->setInt32(kRecorderMovieTimescale, mMovieTimeScale);
+ mAnalyticsItem->setInt32(kRecorderAudioTimescale, mAudioTimeScale);
+ mAnalyticsItem->setInt32(kRecorderVideoTimescale, mVideoTimeScale);
+ // TBD mCameraId = 0;
+ // TBD mStartTimeOffsetMs = -1;
+ mAnalyticsItem->setInt32(kRecorderVideoProfile, mVideoEncoderProfile);
+ mAnalyticsItem->setInt32(kRecorderVideoLevel, mVideoEncoderLevel);
+ // TBD mMaxFileDurationUs = 0;
+ // TBD mMaxFileSizeBytes = 0;
+ // TBD mTrackEveryTimeDurationUs = 0;
+ mAnalyticsItem->setInt32(kRecorderCaptureFpsEnable, mCaptureFpsEnable);
+ mAnalyticsItem->setDouble(kRecorderCaptureFps, mCaptureFps);
+ // TBD mCaptureFps = -1.0;
+ // TBD mCameraSourceTimeLapse = NULL;
+ // TBD mMetaDataStoredInVideoBuffers = kMetadataBufferTypeInvalid;
+ // TBD mEncoderProfiles = MediaProfiles::getInstance();
+ mAnalyticsItem->setInt32(kRecorderRotation, mRotationDegrees);
+ // PII mLatitudex10000 = -3600000;
+ // PII mLongitudex10000 = -3600000;
+ // TBD mTotalBitRate = 0;
+
+ // TBD: some duration information (capture, paused)
+ //
+
+}
+
+void StagefrightRecorder::resetMetrics() {
+ ALOGV("resetMetrics");
+ // flush anything we have, restart the record
+ if (mAnalyticsDirty && mAnalyticsItem != NULL) {
+ updateMetrics();
+ if (mAnalyticsItem->count() > 0) {
+ mAnalyticsItem->setFinalized(true);
+ mAnalyticsItem->selfrecord();
+ }
+ delete mAnalyticsItem;
+ mAnalyticsItem = NULL;
+ }
+ mAnalyticsItem = new MediaAnalyticsItem(kKeyRecorder);
+ (void) mAnalyticsItem->generateSessionID();
+ mAnalyticsDirty = false;
}
status_t StagefrightRecorder::init() {
@@ -248,17 +346,14 @@
}
status_t StagefrightRecorder::setInputSurface(
- const sp<IGraphicBufferConsumer>& surface) {
+ const sp<PersistentSurface>& surface) {
mPersistentSurface = surface;
return OK;
}
-status_t StagefrightRecorder::setOutputFile(int fd, int64_t offset, int64_t length) {
- ALOGV("setOutputFile: %d, %lld, %lld", fd, (long long)offset, (long long)length);
- // These don't make any sense, do they?
- CHECK_EQ(offset, 0ll);
- CHECK_EQ(length, 0ll);
+status_t StagefrightRecorder::setOutputFile(int fd) {
+ ALOGV("setOutputFile: %d", fd);
if (fd < 0) {
ALOGE("Invalid file descriptor: %d", fd);
@@ -276,15 +371,39 @@
return OK;
}
+status_t StagefrightRecorder::setNextOutputFile(int fd) {
+ Mutex::Autolock autolock(mLock);
+ // Only support MPEG4
+ if (mOutputFormat != OUTPUT_FORMAT_MPEG_4) {
+ ALOGE("Only MP4 file format supports setting next output file");
+ return INVALID_OPERATION;
+ }
+ ALOGV("setNextOutputFile: %d", fd);
+
+ if (fd < 0) {
+ ALOGE("Invalid file descriptor: %d", fd);
+ return -EBADF;
+ }
+
+ // start with a clean, empty file
+ ftruncate(fd, 0);
+ int nextFd = dup(fd);
+ if (mWriter == NULL) {
+ ALOGE("setNextOutputFile failed. Writer has been freed");
+ return INVALID_OPERATION;
+ }
+ return mWriter->setNextFd(nextFd);
+}
+
// Attempt to parse an float literal optionally surrounded by whitespace,
// returns true on success, false otherwise.
-static bool safe_strtof(const char *s, float *val) {
+static bool safe_strtod(const char *s, double *val) {
char *end;
// It is lame, but according to man page, we have to set errno to 0
- // before calling strtof().
+ // before calling strtod().
errno = 0;
- *val = strtof(s, &end);
+ *val = strtod(s, &end);
if (end == s || errno == ERANGE) {
return false;
@@ -365,6 +484,7 @@
// Additional check on the sample rate will be performed later.
mSampleRate = sampleRate;
+
return OK;
}
@@ -377,6 +497,7 @@
// Additional check on the number of channels will be performed later.
mAudioChannels = channels;
+
return OK;
}
@@ -585,19 +706,14 @@
return OK;
}
-status_t StagefrightRecorder::setParamCaptureFps(float fps) {
+status_t StagefrightRecorder::setParamCaptureFps(double fps) {
ALOGV("setParamCaptureFps: %.2f", fps);
- int64_t timeUs = (int64_t) (1000000.0 / fps + 0.5f);
-
- // Not allowing time more than a day
- if (timeUs <= 0 || timeUs > 86400*1E6) {
- ALOGE("Time between frame capture (%lld) is out of range [0, 1 Day]", (long long)timeUs);
+ if (!(fps >= 1.0 / 86400)) {
+ ALOGE("FPS is too small");
return BAD_VALUE;
}
-
mCaptureFps = fps;
- mTimeBetweenCaptureUs = timeUs;
return OK;
}
@@ -725,8 +841,8 @@
return setParamCaptureFpsEnable(captureFpsEnable);
}
} else if (key == "time-lapse-fps") {
- float fps;
- if (safe_strtof(value.string(), &fps)) {
+ double fps;
+ if (safe_strtod(value.string(), &fps)) {
return setParamCaptureFps(fps);
}
} else {
@@ -834,6 +950,8 @@
}
status_t StagefrightRecorder::prepare() {
+ ALOGV("prepare");
+ Mutex::Autolock autolock(mLock);
if (mVideoSource == VIDEO_SOURCE_SURFACE) {
return prepareInternal();
}
@@ -842,6 +960,7 @@
status_t StagefrightRecorder::start() {
ALOGV("start");
+ Mutex::Autolock autolock(mLock);
if (mOutputFd < 0) {
ALOGE("Output file descriptor is invalid");
return INVALID_OPERATION;
@@ -884,7 +1003,10 @@
case OUTPUT_FORMAT_RTP_AVP:
case OUTPUT_FORMAT_MPEG2TS:
{
- status = mWriter->start();
+ sp<MetaData> meta = new MetaData;
+ int64_t startTimeUs = systemTime() / 1000;
+ meta->setInt64(kKeyTime, startTimeUs);
+ status = mWriter->start(meta.get());
break;
}
@@ -902,6 +1024,7 @@
}
if ((status == OK) && (!mStarted)) {
+ mAnalyticsDirty = true;
mStarted = true;
uint32_t params = IMediaPlayerService::kBatteryDataCodecStarted;
@@ -1444,16 +1567,15 @@
videoSize.width = mVideoWidth;
videoSize.height = mVideoHeight;
if (mCaptureFpsEnable) {
- if (mTimeBetweenCaptureUs < 0) {
- ALOGE("Invalid mTimeBetweenTimeLapseFrameCaptureUs value: %lld",
- (long long)mTimeBetweenCaptureUs);
+ if (!(mCaptureFps > 0.)) {
+ ALOGE("Invalid mCaptureFps value: %lf", mCaptureFps);
return BAD_VALUE;
}
mCameraSourceTimeLapse = CameraSourceTimeLapse::CreateFromCamera(
mCamera, mCameraProxy, mCameraId, mClientName, mClientUid, mClientPid,
videoSize, mFrameRate, mPreviewSurface,
- mTimeBetweenCaptureUs);
+ std::llround(1e6 / mCaptureFps));
*cameraSource = mCameraSourceTimeLapse;
} else {
*cameraSource = CameraSource::CreateFromCamera(
@@ -1549,12 +1671,11 @@
// set up time lapse/slow motion for surface source
if (mCaptureFpsEnable) {
- if (mTimeBetweenCaptureUs <= 0) {
- ALOGE("Invalid mTimeBetweenCaptureUs value: %lld",
- (long long)mTimeBetweenCaptureUs);
+ if (!(mCaptureFps > 0.)) {
+ ALOGE("Invalid mCaptureFps value: %lf", mCaptureFps);
return BAD_VALUE;
}
- format->setInt64("time-lapse", mTimeBetweenCaptureUs);
+ format->setDouble("time-lapse-fps", mCaptureFps);
}
}
@@ -1784,15 +1905,17 @@
return OK;
}
+ mPauseStartTimeUs = systemTime() / 1000;
+ sp<MetaData> meta = new MetaData;
+ meta->setInt64(kKeyTime, mPauseStartTimeUs);
+
if (mAudioEncoderSource != NULL) {
mAudioEncoderSource->pause();
}
if (mVideoEncoderSource != NULL) {
- mVideoEncoderSource->pause();
+ mVideoEncoderSource->pause(meta.get());
}
- mPauseStartTimeUs = systemTime() / 1000;
-
return OK;
}
@@ -1807,6 +1930,8 @@
return OK;
}
+ int64_t resumeStartTimeUs = systemTime() / 1000;
+
int64_t bufferStartTimeUs = 0;
bool allSourcesStarted = true;
for (const auto &source : { mAudioEncoderSource, mVideoEncoderSource }) {
@@ -1827,18 +1952,20 @@
mPauseStartTimeUs = bufferStartTimeUs;
}
// 30 ms buffer to avoid timestamp overlap
- mTotalPausedDurationUs += (systemTime() / 1000) - mPauseStartTimeUs - 30000;
+ mTotalPausedDurationUs += resumeStartTimeUs - mPauseStartTimeUs - 30000;
}
double timeOffset = -mTotalPausedDurationUs;
if (mCaptureFpsEnable) {
timeOffset *= mCaptureFps / mFrameRate;
}
+ sp<MetaData> meta = new MetaData;
+ meta->setInt64(kKeyTime, resumeStartTimeUs);
for (const auto &source : { mAudioEncoderSource, mVideoEncoderSource }) {
if (source == nullptr) {
continue;
}
source->setInputBufferTimeOffset((int64_t)timeOffset);
- source->start();
+ source->start(meta.get());
}
mPauseStartTimeUs = 0;
@@ -1847,6 +1974,7 @@
status_t StagefrightRecorder::stop() {
ALOGV("stop");
+ Mutex::Autolock autolock(mLock);
status_t err = OK;
if (mCaptureFpsEnable && mCameraSourceTimeLapse != NULL) {
@@ -1854,10 +1982,19 @@
mCameraSourceTimeLapse = NULL;
}
+ if (mVideoEncoderSource != NULL) {
+ int64_t stopTimeUs = systemTime() / 1000;
+ sp<MetaData> meta = new MetaData;
+ err = mVideoEncoderSource->setStopStimeUs(stopTimeUs);
+ }
+
if (mWriter != NULL) {
err = mWriter->stop();
mWriter.clear();
}
+
+ resetMetrics();
+
mTotalPausedDurationUs = 0;
mPauseStartTimeUs = 0;
@@ -1929,8 +2066,7 @@
mMaxFileSizeBytes = 0;
mTrackEveryTimeDurationUs = 0;
mCaptureFpsEnable = false;
- mCaptureFps = 0.0f;
- mTimeBetweenCaptureUs = -1;
+ mCaptureFps = -1.0;
mCameraSourceTimeLapse = NULL;
mMetaDataStoredInVideoBuffers = kMetadataBufferTypeInvalid;
mEncoderProfiles = MediaProfiles::getInstance();
@@ -1961,9 +2097,27 @@
return OK;
}
+status_t StagefrightRecorder::getMetrics(Parcel *reply) {
+ ALOGD("StagefrightRecorder::getMetrics");
+
+ if (reply == NULL) {
+ ALOGE("Null pointer argument");
+ return BAD_VALUE;
+ }
+
+ if (mAnalyticsItem == NULL) {
+ return UNKNOWN_ERROR;
+ }
+
+ updateMetrics();
+ mAnalyticsItem->writeToParcel(reply);
+ return OK;
+}
+
status_t StagefrightRecorder::dump(
int fd, const Vector<String16>& args) const {
ALOGV("dump");
+ Mutex::Autolock autolock(mLock);
const size_t SIZE = 256;
char buffer[SIZE];
String8 result;
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 4dbd039..9a6c4da 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -18,6 +18,7 @@
#define STAGEFRIGHT_RECORDER_H_
+#include <media/MediaAnalyticsItem.h>
#include <media/MediaRecorderBase.h>
#include <camera/CameraParameters.h>
#include <utils/String8.h>
@@ -38,15 +39,11 @@
class MetaData;
struct AudioSource;
class MediaProfiles;
-class IGraphicBufferConsumer;
-class IGraphicBufferProducer;
-class SurfaceMediaSource;
struct ALooper;
struct StagefrightRecorder : public MediaRecorderBase {
explicit StagefrightRecorder(const String16 &opPackageName);
virtual ~StagefrightRecorder();
-
virtual status_t init();
virtual status_t setAudioSource(audio_source_t as);
virtual status_t setVideoSource(video_source vs);
@@ -57,8 +54,9 @@
virtual status_t setVideoFrameRate(int frames_per_second);
virtual status_t setCamera(const sp<hardware::ICamera>& camera, const sp<ICameraRecordingProxy>& proxy);
virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface);
- virtual status_t setInputSurface(const sp<IGraphicBufferConsumer>& surface);
- virtual status_t setOutputFile(int fd, int64_t offset, int64_t length);
+ virtual status_t setInputSurface(const sp<PersistentSurface>& surface);
+ virtual status_t setOutputFile(int fd);
+ virtual status_t setNextOutputFile(int fd);
virtual status_t setParameters(const String8 &params);
virtual status_t setListener(const sp<IMediaRecorderClient> &listener);
virtual status_t setClientName(const String16 &clientName);
@@ -70,15 +68,17 @@
virtual status_t close();
virtual status_t reset();
virtual status_t getMaxAmplitude(int *max);
+ virtual status_t getMetrics(Parcel *reply);
virtual status_t dump(int fd, const Vector<String16> &args) const;
// Querying a SurfaceMediaSourcer
virtual sp<IGraphicBufferProducer> querySurfaceMediaSource() const;
private:
+ mutable Mutex mLock;
sp<hardware::ICamera> mCamera;
sp<ICameraRecordingProxy> mCameraProxy;
sp<IGraphicBufferProducer> mPreviewSurface;
- sp<IGraphicBufferConsumer> mPersistentSurface;
+ sp<PersistentSurface> mPersistentSurface;
sp<IMediaRecorderClient> mListener;
String16 mClientName;
uid_t mClientUid;
@@ -87,6 +87,11 @@
int mOutputFd;
sp<AudioSource> mAudioSourceNode;
+ MediaAnalyticsItem *mAnalyticsItem;
+ bool mAnalyticsDirty;
+ void resetMetrics();
+ void updateMetrics();
+
audio_source_t mAudioSource;
video_source mVideoSource;
output_format mOutputFormat;
@@ -117,7 +122,7 @@
int32_t mTotalBitRate;
bool mCaptureFpsEnable;
- float mCaptureFps;
+ double mCaptureFps;
int64_t mTimeBetweenCaptureUs;
sp<CameraSourceTimeLapse> mCameraSourceTimeLapse;
@@ -167,7 +172,7 @@
status_t setParamAudioSamplingRate(int32_t sampleRate);
status_t setParamAudioTimeScale(int32_t timeScale);
status_t setParamCaptureFpsEnable(int32_t timeLapseEnable);
- status_t setParamCaptureFps(float fps);
+ status_t setParamCaptureFps(double fps);
status_t setParamVideoEncodingBitRate(int32_t bitRate);
status_t setParamVideoIFramesInterval(int32_t seconds);
status_t setParamVideoEncoderProfile(int32_t profile);
diff --git a/media/libmediaplayerservice/TestPlayerStub.h b/media/libmediaplayerservice/TestPlayerStub.h
index 55bf2c8..11fddf6 100644
--- a/media/libmediaplayerservice/TestPlayerStub.h
+++ b/media/libmediaplayerservice/TestPlayerStub.h
@@ -87,7 +87,11 @@
virtual status_t stop() {return mPlayer->stop();}
virtual status_t pause() {return mPlayer->pause();}
virtual bool isPlaying() {return mPlayer->isPlaying();}
- virtual status_t seekTo(int msec) {return mPlayer->seekTo(msec);}
+ virtual status_t seekTo(
+ int msec,
+ MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC) {
+ return mPlayer->seekTo(msec, mode);
+ }
virtual status_t getCurrentPosition(int *p) {
return mPlayer->getCurrentPosition(p);
}
diff --git a/media/libmediaplayerservice/include/MediaPlayerInterface.h b/media/libmediaplayerservice/include/MediaPlayerInterface.h
new file mode 100644
index 0000000..e8d59a7
--- /dev/null
+++ b/media/libmediaplayerservice/include/MediaPlayerInterface.h
@@ -0,0 +1,330 @@
+/*
+ * Copyright (C) 2007 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIAPLAYERINTERFACE_H
+#define ANDROID_MEDIAPLAYERINTERFACE_H
+
+#ifdef __cplusplus
+
+#include <sys/types.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/RefBase.h>
+
+#include <media/mediaplayer.h>
+#include <media/AudioResamplerPublic.h>
+#include <media/AudioSystem.h>
+#include <media/AudioTimestamp.h>
+#include <media/AVSyncSettings.h>
+#include <media/BufferingSettings.h>
+#include <media/Metadata.h>
+
+// Fwd decl to make sure everyone agrees that the scope of struct sockaddr_in is
+// global, and not in android::
+struct sockaddr_in;
+
+namespace android {
+
+class DataSource;
+class Parcel;
+class Surface;
+class IGraphicBufferProducer;
+
+template<typename T> class SortedVector;
+
+enum player_type {
+ STAGEFRIGHT_PLAYER = 3,
+ NU_PLAYER = 4,
+ // Test players are available only in the 'test' and 'eng' builds.
+ // The shared library with the test player is passed as an
+ // argument to the 'test:' url in the setDataSource call.
+ TEST_PLAYER = 5,
+};
+
+
+#define DEFAULT_AUDIOSINK_BUFFERCOUNT 4
+#define DEFAULT_AUDIOSINK_BUFFERSIZE 1200
+#define DEFAULT_AUDIOSINK_SAMPLERATE 44100
+
+// when the channel mask isn't known, use the channel count to derive a mask in AudioSink::open()
+#define CHANNEL_MASK_USE_CHANNEL_ORDER 0
+
+// duration below which we do not allow deep audio buffering
+#define AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US 5000000
+
+// callback mechanism for passing messages to MediaPlayer object
+typedef void (*notify_callback_f)(void* cookie,
+ int msg, int ext1, int ext2, const Parcel *obj);
+
+// abstract base class - use MediaPlayerInterface
+class MediaPlayerBase : public RefBase
+{
+public:
+ // AudioSink: abstraction layer for audio output
+ class AudioSink : public RefBase {
+ public:
+ enum cb_event_t {
+ CB_EVENT_FILL_BUFFER, // Request to write more data to buffer.
+ CB_EVENT_STREAM_END, // Sent after all the buffers queued in AF and HW are played
+ // back (after stop is called)
+ CB_EVENT_TEAR_DOWN // The AudioTrack was invalidated due to use case change:
+ // Need to re-evaluate offloading options
+ };
+
+ // Callback returns the number of bytes actually written to the buffer.
+ typedef size_t (*AudioCallback)(
+ AudioSink *audioSink, void *buffer, size_t size, void *cookie,
+ cb_event_t event);
+
+ virtual ~AudioSink() {}
+ virtual bool ready() const = 0; // audio output is open and ready
+ virtual ssize_t bufferSize() const = 0;
+ virtual ssize_t frameCount() const = 0;
+ virtual ssize_t channelCount() const = 0;
+ virtual ssize_t frameSize() const = 0;
+ virtual uint32_t latency() const = 0;
+ virtual float msecsPerFrame() const = 0;
+ virtual status_t getPosition(uint32_t *position) const = 0;
+ virtual status_t getTimestamp(AudioTimestamp &ts) const = 0;
+ virtual int64_t getPlayedOutDurationUs(int64_t nowUs) const = 0;
+ virtual status_t getFramesWritten(uint32_t *frameswritten) const = 0;
+ virtual audio_session_t getSessionId() const = 0;
+ virtual audio_stream_type_t getAudioStreamType() const = 0;
+ virtual uint32_t getSampleRate() const = 0;
+ virtual int64_t getBufferDurationInUs() const = 0;
+
+ // If no callback is specified, use the "write" API below to submit
+ // audio data.
+ virtual status_t open(
+ uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
+ audio_format_t format=AUDIO_FORMAT_PCM_16_BIT,
+ int bufferCount=DEFAULT_AUDIOSINK_BUFFERCOUNT,
+ AudioCallback cb = NULL,
+ void *cookie = NULL,
+ audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+ const audio_offload_info_t *offloadInfo = NULL,
+ bool doNotReconnect = false,
+ uint32_t suggestedFrameCount = 0) = 0;
+
+ virtual status_t start() = 0;
+
+ /* Input parameter |size| is in byte units stored in |buffer|.
+ * Data is copied over and actual number of bytes written (>= 0)
+ * is returned, or no data is copied and a negative status code
+ * is returned (even when |blocking| is true).
+ * When |blocking| is false, AudioSink will immediately return after
+ * part of or full |buffer| is copied over.
+ * When |blocking| is true, AudioSink will wait to copy the entire
+ * buffer, unless an error occurs or the copy operation is
+ * prematurely stopped.
+ */
+ virtual ssize_t write(const void* buffer, size_t size, bool blocking = true) = 0;
+
+ virtual void stop() = 0;
+ virtual void flush() = 0;
+ virtual void pause() = 0;
+ virtual void close() = 0;
+
+ virtual status_t setPlaybackRate(const AudioPlaybackRate& rate) = 0;
+ virtual status_t getPlaybackRate(AudioPlaybackRate* rate /* nonnull */) = 0;
+ virtual bool needsTrailingPadding() { return true; }
+
+ virtual status_t setParameters(const String8& /* keyValuePairs */) { return NO_ERROR; }
+ virtual String8 getParameters(const String8& /* keys */) { return String8::empty(); }
+
+ virtual VolumeShaper::Status applyVolumeShaper(
+ const sp<VolumeShaper::Configuration>& configuration,
+ const sp<VolumeShaper::Operation>& operation);
+ virtual sp<VolumeShaper::State> getVolumeShaperState(int id);
+ };
+
+ MediaPlayerBase() : mCookie(0), mNotify(0) {}
+ virtual ~MediaPlayerBase() {}
+ virtual status_t initCheck() = 0;
+ virtual bool hardwareOutput() = 0;
+
+ virtual status_t setUID(uid_t /* uid */) {
+ return INVALID_OPERATION;
+ }
+
+ virtual status_t setDataSource(
+ const sp<IMediaHTTPService> &httpService,
+ const char *url,
+ const KeyedVector<String8, String8> *headers = NULL) = 0;
+
+ virtual status_t setDataSource(int fd, int64_t offset, int64_t length) = 0;
+
+ virtual status_t setDataSource(const sp<IStreamSource>& /* source */) {
+ return INVALID_OPERATION;
+ }
+
+ virtual status_t setDataSource(const sp<DataSource>& /* source */) {
+ return INVALID_OPERATION;
+ }
+
+ // pass the buffered IGraphicBufferProducer to the media player service
+ virtual status_t setVideoSurfaceTexture(
+ const sp<IGraphicBufferProducer>& bufferProducer) = 0;
+
+ virtual status_t getDefaultBufferingSettings(
+ BufferingSettings* buffering /* nonnull */) {
+ *buffering = BufferingSettings();
+ return OK;
+ }
+ virtual status_t setBufferingSettings(const BufferingSettings& /* buffering */) {
+ return OK;
+ }
+
+ virtual status_t prepare() = 0;
+ virtual status_t prepareAsync() = 0;
+ virtual status_t start() = 0;
+ virtual status_t stop() = 0;
+ virtual status_t pause() = 0;
+ virtual bool isPlaying() = 0;
+ virtual status_t setPlaybackSettings(const AudioPlaybackRate& rate) {
+ // by default, players only support setting rate to the default
+ if (!isAudioPlaybackRateEqual(rate, AUDIO_PLAYBACK_RATE_DEFAULT)) {
+ return BAD_VALUE;
+ }
+ return OK;
+ }
+ virtual status_t getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */) {
+ *rate = AUDIO_PLAYBACK_RATE_DEFAULT;
+ return OK;
+ }
+ virtual status_t setSyncSettings(const AVSyncSettings& sync, float /* videoFps */) {
+ // By default, players only support setting sync source to default; all other sync
+ // settings are ignored. There is no requirement for getters to return set values.
+ if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
+ return BAD_VALUE;
+ }
+ return OK;
+ }
+ virtual status_t getSyncSettings(
+ AVSyncSettings* sync /* nonnull */, float* videoFps /* nonnull */) {
+ *sync = AVSyncSettings();
+ *videoFps = -1.f;
+ return OK;
+ }
+ virtual status_t seekTo(
+ int msec, MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC) = 0;
+ virtual status_t getCurrentPosition(int *msec) = 0;
+ virtual status_t getDuration(int *msec) = 0;
+ virtual status_t reset() = 0;
+ virtual status_t setLooping(int loop) = 0;
+ virtual player_type playerType() = 0;
+ virtual status_t setParameter(int key, const Parcel &request) = 0;
+ virtual status_t getParameter(int key, Parcel *reply) = 0;
+
+ // default no-op implementation of optional extensions
+ virtual status_t setRetransmitEndpoint(const struct sockaddr_in* /* endpoint */) {
+ return INVALID_OPERATION;
+ }
+ virtual status_t getRetransmitEndpoint(struct sockaddr_in* /* endpoint */) {
+ return INVALID_OPERATION;
+ }
+ virtual status_t setNextPlayer(const sp<MediaPlayerBase>& /* next */) {
+ return OK;
+ }
+
+ // Invoke a generic method on the player by using opaque parcels
+ // for the request and reply.
+ //
+ // @param request Parcel that is positioned at the start of the
+ // data sent by the java layer.
+ // @param[out] reply Parcel to hold the reply data. Cannot be null.
+ // @return OK if the call was successful.
+ virtual status_t invoke(const Parcel& request, Parcel *reply) = 0;
+
+ // The Client in the MetadataPlayerService calls this method on
+ // the native player to retrieve all or a subset of metadata.
+ //
+ // @param ids SortedList of metadata ID to be fetch. If empty, all
+ // the known metadata should be returned.
+ // @param[inout] records Parcel where the player appends its metadata.
+ // @return OK if the call was successful.
+ virtual status_t getMetadata(const media::Metadata::Filter& /* ids */,
+ Parcel* /* records */) {
+ return INVALID_OPERATION;
+ };
+
+ void setNotifyCallback(
+ void* cookie, notify_callback_f notifyFunc) {
+ Mutex::Autolock autoLock(mNotifyLock);
+ mCookie = cookie; mNotify = notifyFunc;
+ }
+
+ void sendEvent(int msg, int ext1=0, int ext2=0,
+ const Parcel *obj=NULL) {
+ notify_callback_f notifyCB;
+ void* cookie;
+ {
+ Mutex::Autolock autoLock(mNotifyLock);
+ notifyCB = mNotify;
+ cookie = mCookie;
+ }
+
+ if (notifyCB) notifyCB(cookie, msg, ext1, ext2, obj);
+ }
+
+ virtual status_t dump(int /* fd */, const Vector<String16>& /* args */) const {
+ return INVALID_OPERATION;
+ }
+
+ // Modular DRM
+ virtual status_t prepareDrm(const uint8_t /* uuid */[16], const Vector<uint8_t>& /* drmSessionId */) {
+ return INVALID_OPERATION;
+ }
+ virtual status_t releaseDrm() {
+ return INVALID_OPERATION;
+ }
+
+private:
+ friend class MediaPlayerService;
+
+ Mutex mNotifyLock;
+ void* mCookie;
+ notify_callback_f mNotify;
+};
+
+// Implement this class for media players that use the AudioFlinger software mixer
+class MediaPlayerInterface : public MediaPlayerBase
+{
+public:
+ virtual ~MediaPlayerInterface() { }
+ virtual bool hardwareOutput() { return false; }
+ virtual void setAudioSink(const sp<AudioSink>& audioSink) { mAudioSink = audioSink; }
+protected:
+ sp<AudioSink> mAudioSink;
+};
+
+// Implement this class for media players that output audio directly to hardware
+class MediaPlayerHWInterface : public MediaPlayerBase
+{
+public:
+ virtual ~MediaPlayerHWInterface() {}
+ virtual bool hardwareOutput() { return true; }
+ virtual status_t setVolume(float leftVolume, float rightVolume) = 0;
+ virtual status_t setAudioStreamType(audio_stream_type_t streamType) = 0;
+};
+
+}; // namespace android
+
+#endif // __cplusplus
+
+
+#endif // ANDROID_MEDIAPLAYERINTERFACE_H
diff --git a/media/libmediaplayerservice/nuplayer/Android.mk b/media/libmediaplayerservice/nuplayer/Android.mk
index 21487ca..c582631 100644
--- a/media/libmediaplayerservice/nuplayer/Android.mk
+++ b/media/libmediaplayerservice/nuplayer/Android.mk
@@ -10,6 +10,7 @@
NuPlayerDecoderBase.cpp \
NuPlayerDecoderPassThrough.cpp \
NuPlayerDriver.cpp \
+ NuPlayerDrm.cpp \
NuPlayerRenderer.cpp \
NuPlayerStreamListener.cpp \
RTSPSource.cpp \
@@ -32,11 +33,18 @@
LOCAL_CFLAGS += -DENABLE_STAGEFRIGHT_EXPERIMENTS
endif
-LOCAL_SHARED_LIBRARIES := libmedia
+LOCAL_SHARED_LIBRARIES := \
+ libbinder \
+ libui \
+ libgui \
+ libmedia \
+ libmediadrm \
LOCAL_MODULE:= libstagefright_nuplayer
LOCAL_MODULE_TAGS := eng
-include $(BUILD_STATIC_LIBRARY)
+LOCAL_SANITIZE := cfi
+LOCAL_SANITIZE_DIAG := cfi
+include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 57a7286..d83c406 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -18,9 +18,12 @@
#define LOG_TAG "GenericSource"
#include "GenericSource.h"
+#include "NuPlayerDrm.h"
#include "AnotherPacketSource.h"
-
+#include <binder/IServiceManager.h>
+#include <cutils/properties.h>
+#include <media/IMediaExtractorService.h>
#include <media/IMediaHTTPService.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -33,18 +36,17 @@
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
-#include "../../libstagefright/include/DRMExtractor.h"
#include "../../libstagefright/include/NuCachedSource2.h"
-#include "../../libstagefright/include/WVMExtractor.h"
#include "../../libstagefright/include/HTTPBase.h"
namespace android {
-static int64_t kLowWaterMarkUs = 2000000ll; // 2secs
-static int64_t kHighWaterMarkUs = 5000000ll; // 5secs
-static int64_t kHighWaterMarkRebufferUs = 15000000ll; // 15secs
-static const ssize_t kLowWaterMarkBytes = 40000;
-static const ssize_t kHighWaterMarkBytes = 200000;
+static const int kLowWaterMarkMs = 2000; // 2secs
+static const int kHighWaterMarkMs = 5000; // 5secs
+static const int kHighWaterMarkRebufferMs = 15000; // 15secs
+
+static const int kLowWaterMarkKB = 40;
+static const int kHighWaterMarkKB = 200;
NuPlayer::GenericSource::GenericSource(
const sp<AMessage> ¬ify,
@@ -59,21 +61,22 @@
mFetchTimedTextDataGeneration(0),
mDurationUs(-1ll),
mAudioIsVorbis(false),
- mIsWidevine(false),
mIsSecure(false),
mIsStreaming(false),
mUIDValid(uidValid),
mUID(uid),
mFd(-1),
- mDrmManagerClient(NULL),
mBitrate(-1ll),
mPendingReadBufferTypes(0) {
+ ALOGV("GenericSource");
+
mBufferingMonitor = new BufferingMonitor(notify);
resetDataSource();
- DataSource::RegisterDefaultSniffers();
}
void NuPlayer::GenericSource::resetDataSource() {
+ ALOGV("resetDataSource");
+
mHTTPService.clear();
mHttpSource.clear();
mUri.clear();
@@ -84,9 +87,6 @@
}
mOffset = 0;
mLength = 0;
- setDrmPlaybackStatusIfNeeded(Playback::STOP, 0);
- mDecryptHandle = NULL;
- mDrmManagerClient = NULL;
mStarted = false;
mStopRead = true;
@@ -96,12 +96,19 @@
mBufferingMonitorLooper = NULL;
}
mBufferingMonitor->stop();
+
+ mIsDrmProtected = false;
+ mIsDrmReleased = false;
+ mIsSecure = false;
+ mMimes.clear();
}
status_t NuPlayer::GenericSource::setDataSource(
const sp<IMediaHTTPService> &httpService,
const char *url,
const KeyedVector<String8, String8> *headers) {
+ ALOGV("setDataSource url: %s", url);
+
resetDataSource();
mHTTPService = httpService;
@@ -118,6 +125,8 @@
status_t NuPlayer::GenericSource::setDataSource(
int fd, int64_t offset, int64_t length) {
+ ALOGV("setDataSource %d/%lld/%lld", fd, (long long)offset, (long long)length);
+
resetDataSource();
mFd = dup(fd);
@@ -130,6 +139,8 @@
}
status_t NuPlayer::GenericSource::setDataSource(const sp<DataSource>& source) {
+ ALOGV("setDataSource (source: %p)", source.get());
+
resetDataSource();
mDataSource = source;
return OK;
@@ -141,79 +152,33 @@
status_t NuPlayer::GenericSource::initFromDataSource() {
sp<IMediaExtractor> extractor;
- String8 mimeType;
- float confidence;
- sp<AMessage> dummy;
- bool isWidevineStreaming = false;
-
CHECK(mDataSource != NULL);
- if (mIsWidevine) {
- isWidevineStreaming = SniffWVM(
- mDataSource, &mimeType, &confidence, &dummy);
- if (!isWidevineStreaming ||
- strcasecmp(
- mimeType.string(), MEDIA_MIMETYPE_CONTAINER_WVM)) {
- ALOGE("unsupported widevine mime: %s", mimeType.string());
- return UNKNOWN_ERROR;
- }
- } else if (mIsStreaming) {
- if (!mDataSource->sniff(&mimeType, &confidence, &dummy)) {
- return UNKNOWN_ERROR;
- }
- isWidevineStreaming = !strcasecmp(
- mimeType.string(), MEDIA_MIMETYPE_CONTAINER_WVM);
- }
-
- if (isWidevineStreaming) {
- // we don't want cached source for widevine streaming.
- mCachedSource.clear();
- mDataSource = mHttpSource;
- mWVMExtractor = new WVMExtractor(mDataSource);
- mWVMExtractor->setAdaptiveStreamingMode(true);
- if (mUIDValid) {
- mWVMExtractor->setUID(mUID);
- }
- extractor = mWVMExtractor;
- } else {
- extractor = MediaExtractor::Create(mDataSource,
- mimeType.isEmpty() ? NULL : mimeType.string());
- }
+ extractor = MediaExtractor::Create(mDataSource, NULL);
if (extractor == NULL) {
+ ALOGE("initFromDataSource, cannot create extractor!");
return UNKNOWN_ERROR;
}
- if (extractor->getDrmFlag()) {
- checkDrmStatus(mDataSource);
- }
-
mFileMeta = extractor->getMetaData();
if (mFileMeta != NULL) {
int64_t duration;
if (mFileMeta->findInt64(kKeyDuration, &duration)) {
mDurationUs = duration;
}
-
- if (!mIsWidevine) {
- // Check mime to see if we actually have a widevine source.
- // If the data source is not URL-type (eg. file source), we
- // won't be able to tell until now.
- const char *fileMime;
- if (mFileMeta->findCString(kKeyMIMEType, &fileMime)
- && !strncasecmp(fileMime, "video/wvm", 9)) {
- mIsWidevine = true;
- }
- }
}
int32_t totalBitrate = 0;
size_t numtracks = extractor->countTracks();
if (numtracks == 0) {
+ ALOGE("initFromDataSource, source has no track!");
return UNKNOWN_ERROR;
}
+ mMimes.clear();
+
for (size_t i = 0; i < numtracks; ++i) {
sp<IMediaSource> track = extractor->getTrack(i);
if (track == NULL) {
@@ -229,6 +194,8 @@
const char *mime;
CHECK(meta->findCString(kKeyMIMEType, &mime));
+ ALOGV("initFromDataSource track[%zu]: %s", i, mime);
+
// Do the string compare immediately with "mime",
// we can't assume "mime" would stay valid after another
// extractor operation, some extractors might modify meta
@@ -245,6 +212,8 @@
} else {
mAudioIsVorbis = false;
}
+
+ mMimes.add(String8(mime));
}
} else if (!strncasecmp(mime, "video/", 6)) {
if (mVideoTrack.mSource == NULL) {
@@ -253,15 +222,8 @@
mVideoTrack.mPackets =
new AnotherPacketSource(mVideoTrack.mSource->getFormat());
- // check if the source requires secure buffers
- int32_t secure;
- if (meta->findInt32(kKeyRequiresSecureBuffers, &secure)
- && secure) {
- mIsSecure = true;
- if (mUIDValid) {
- extractor->setUID(mUID);
- }
- }
+ // video always at the beginning
+ mMimes.insertAt(String8(mime), 0);
}
}
@@ -281,21 +243,40 @@
}
}
+ ALOGV("initFromDataSource mSources.size(): %zu mIsSecure: %d mime[0]: %s", mSources.size(),
+ mIsSecure, (mMimes.isEmpty() ? "NONE" : mMimes[0].string()));
+
if (mSources.size() == 0) {
ALOGE("b/23705695");
return UNKNOWN_ERROR;
}
+ // Modular DRM: The return value doesn't affect source initialization.
+ (void)checkDrmInfo();
+
mBitrate = totalBitrate;
return OK;
}
+status_t NuPlayer::GenericSource::getDefaultBufferingSettings(
+ BufferingSettings* buffering /* nonnull */) {
+ mBufferingMonitor->getDefaultBufferingSettings(buffering);
+ return OK;
+}
+
+status_t NuPlayer::GenericSource::setBufferingSettings(const BufferingSettings& buffering) {
+ return mBufferingMonitor->setBufferingSettings(buffering);
+}
+
status_t NuPlayer::GenericSource::startSources() {
// Start the selected A/V tracks now before we start buffering.
// Widevine sources might re-initialize crypto when starting, if we delay
// this to start(), all data buffered during prepare would be wasted.
// (We don't actually start reading until start().)
+ //
+ // TODO: this logic may no longer be relevant after the removal of widevine
+ // support
if (mAudioTrack.mSource != NULL && mAudioTrack.mSource->start() != OK) {
ALOGE("failed to start audio track!");
return UNKNOWN_ERROR;
@@ -309,18 +290,6 @@
return OK;
}
-void NuPlayer::GenericSource::checkDrmStatus(const sp<DataSource>& dataSource) {
- dataSource->getDrmInfo(mDecryptHandle, &mDrmManagerClient);
- if (mDecryptHandle != NULL) {
- CHECK(mDrmManagerClient);
- if (RightsStatus::RIGHTS_VALID != mDecryptHandle->status) {
- sp<AMessage> msg = dupNotify();
- msg->setInt32("what", kWhatDrmNoLicense);
- msg->post();
- }
- }
-}
-
int64_t NuPlayer::GenericSource::getLastReadPosition() {
if (mAudioTrack.mSource != NULL) {
return mAudioTimeUs;
@@ -348,6 +317,7 @@
}
NuPlayer::GenericSource::~GenericSource() {
+ ALOGV("~GenericSource");
if (mLooper != NULL) {
mLooper->unregisterHandler(id());
mLooper->stop();
@@ -356,6 +326,8 @@
}
void NuPlayer::GenericSource::prepareAsync() {
+ ALOGV("prepareAsync: (looper: %d)", (mLooper != NULL));
+
if (mLooper == NULL) {
mLooper = new ALooper;
mLooper->setName("generic");
@@ -369,6 +341,8 @@
}
void NuPlayer::GenericSource::onPrepareAsync() {
+ ALOGV("onPrepareAsync: mDataSource: %d", (mDataSource != NULL));
+
// delayed data source creation
if (mDataSource == NULL) {
// set to false first, if the extractor
@@ -378,11 +352,8 @@
if (!mUri.empty()) {
const char* uri = mUri.c_str();
String8 contentType;
- mIsWidevine = !strncasecmp(uri, "widevine://", 11);
- if (!strncasecmp("http://", uri, 7)
- || !strncasecmp("https://", uri, 8)
- || mIsWidevine) {
+ if (!strncasecmp("http://", uri, 7) || !strncasecmp("https://", uri, 8)) {
mHttpSource = DataSource::CreateMediaHTTP(mHTTPService);
if (mHttpSource == NULL) {
ALOGE("Failed to create http source!");
@@ -395,9 +366,39 @@
mHTTPService, uri, &mUriHeaders, &contentType,
static_cast<HTTPBase *>(mHttpSource.get()));
} else {
- mIsWidevine = false;
-
- mDataSource = new FileSource(mFd, mOffset, mLength);
+ if (property_get_bool("media.stagefright.extractremote", true) &&
+ !FileSource::requiresDrm(mFd, mOffset, mLength, nullptr /* mime */)) {
+ sp<IBinder> binder =
+ defaultServiceManager()->getService(String16("media.extractor"));
+ if (binder != nullptr) {
+ ALOGD("FileSource remote");
+ sp<IMediaExtractorService> mediaExService(
+ interface_cast<IMediaExtractorService>(binder));
+ sp<IDataSource> source =
+ mediaExService->makeIDataSource(mFd, mOffset, mLength);
+ ALOGV("IDataSource(FileSource): %p %d %lld %lld",
+ source.get(), mFd, (long long)mOffset, (long long)mLength);
+ if (source.get() != nullptr) {
+ mDataSource = DataSource::CreateFromIDataSource(source);
+ if (mDataSource != nullptr) {
+ // Close the local file descriptor as it is not needed anymore.
+ close(mFd);
+ mFd = -1;
+ }
+ } else {
+ ALOGW("extractor service cannot make data source");
+ }
+ } else {
+ ALOGW("extractor service not running");
+ }
+ }
+ if (mDataSource == nullptr) {
+ ALOGD("FileSource local");
+ mDataSource = new FileSource(mFd, mOffset, mLength);
+ }
+ // TODO: close should always be done on mFd, see the lines following
+ // DataSource::CreateFromIDataSource above,
+ // and the FileSource constructor should dup the mFd argument as needed.
mFd = -1;
}
@@ -412,13 +413,9 @@
mCachedSource = static_cast<NuCachedSource2 *>(mDataSource.get());
}
- // For widevine or other cached streaming cases, we need to wait for
- // enough buffering before reporting prepared.
- // Note that even when URL doesn't start with widevine://, mIsWidevine
- // could still be set to true later, if the streaming or file source
- // is sniffed to be widevine. We don't want to buffer for file source
- // in that case, so must check the flag now.
- mIsStreaming = (mIsWidevine || mCachedSource != NULL);
+ // For cached streaming cases, we need to wait for enough
+ // buffering before reporting prepared.
+ mIsStreaming = (mCachedSource != NULL);
// init extractor from data source
status_t err = initFromDataSource();
@@ -441,32 +438,21 @@
}
notifyFlagsChanged(
- (mIsSecure ? FLAG_SECURE : 0)
- | (mDecryptHandle != NULL ? FLAG_PROTECTED : 0)
- | FLAG_CAN_PAUSE
- | FLAG_CAN_SEEK_BACKWARD
- | FLAG_CAN_SEEK_FORWARD
- | FLAG_CAN_SEEK);
+ // FLAG_SECURE will be known if/when prepareDrm is called by the app
+ // FLAG_PROTECTED will be known if/when prepareDrm is called by the app
+ FLAG_CAN_PAUSE |
+ FLAG_CAN_SEEK_BACKWARD |
+ FLAG_CAN_SEEK_FORWARD |
+ FLAG_CAN_SEEK);
- if (mIsSecure) {
- // secure decoders must be instantiated before starting widevine source
- sp<AMessage> reply = new AMessage(kWhatSecureDecodersInstantiated, this);
- notifyInstantiateSecureDecoders(reply);
- } else {
- finishPrepareAsync();
- }
-}
-
-void NuPlayer::GenericSource::onSecureDecodersInstantiated(status_t err) {
- if (err != OK) {
- ALOGE("Failed to instantiate secure decoders!");
- notifyPreparedAndCleanup(err);
- return;
- }
finishPrepareAsync();
+
+ ALOGV("onPrepareAsync: Done");
}
void NuPlayer::GenericSource::finishPrepareAsync() {
+ ALOGV("finishPrepareAsync");
+
status_t err = startSources();
if (err != OK) {
ALOGE("Failed to init start data source!");
@@ -476,7 +462,7 @@
if (mIsStreaming) {
if (mBufferingMonitorLooper == NULL) {
- mBufferingMonitor->prepare(mCachedSource, mWVMExtractor, mDurationUs, mBitrate,
+ mBufferingMonitor->prepare(mCachedSource, mDurationUs, mBitrate,
mIsStreaming);
mBufferingMonitorLooper = new ALooper;
@@ -501,8 +487,6 @@
{
Mutex::Autolock _l(mDisconnectLock);
mDataSource.clear();
- mDecryptHandle = NULL;
- mDrmManagerClient = NULL;
mCachedSource.clear();
mHttpSource.clear();
}
@@ -526,33 +510,20 @@
postReadBuffer(MEDIA_TRACK_TYPE_VIDEO);
}
- setDrmPlaybackStatusIfNeeded(Playback::START, getLastReadPosition() / 1000);
mStarted = true;
(new AMessage(kWhatStart, this))->post();
}
void NuPlayer::GenericSource::stop() {
- // nothing to do, just account for DRM playback status
- setDrmPlaybackStatusIfNeeded(Playback::STOP, 0);
mStarted = false;
- if (mIsWidevine || mIsSecure) {
- // For widevine or secure sources we need to prevent any further reads.
- sp<AMessage> msg = new AMessage(kWhatStopWidevine, this);
- sp<AMessage> response;
- (void) msg->postAndAwaitResponse(&response);
- }
}
void NuPlayer::GenericSource::pause() {
- // nothing to do, just account for DRM playback status
- setDrmPlaybackStatusIfNeeded(Playback::PAUSE, 0);
mStarted = false;
}
void NuPlayer::GenericSource::resume() {
- // nothing to do, just account for DRM playback status
- setDrmPlaybackStatusIfNeeded(Playback::START, getLastReadPosition() / 1000);
mStarted = true;
(new AMessage(kWhatResume, this))->post();
@@ -576,14 +547,6 @@
}
}
-void NuPlayer::GenericSource::setDrmPlaybackStatusIfNeeded(int playbackStatus, int64_t position) {
- if (mDecryptHandle != NULL) {
- mDrmManagerClient->setPlaybackStatus(mDecryptHandle, playbackStatus, position);
- }
- mSubtitleTrack.mPackets = new AnotherPacketSource(NULL);
- mTimedTextTrack.mPackets = new AnotherPacketSource(NULL);
-}
-
status_t NuPlayer::GenericSource::feedMoreTSData() {
return OK;
}
@@ -665,8 +628,10 @@
} else {
timeUs = mVideoLastDequeueTimeUs;
}
- readBuffer(trackType, timeUs, &actualTimeUs, formatChange);
- readBuffer(counterpartType, -1, NULL, formatChange);
+ readBuffer(trackType, timeUs, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */,
+ &actualTimeUs, formatChange);
+ readBuffer(counterpartType, -1, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */,
+ NULL, !formatChange);
ALOGV("timeUs %lld actualTimeUs %lld", (long long)timeUs, (long long)actualTimeUs);
break;
@@ -715,28 +680,28 @@
break;
}
- case kWhatSecureDecodersInstantiated:
+ case kWhatPrepareDrm:
{
- int32_t err;
- CHECK(msg->findInt32("err", &err));
- onSecureDecodersInstantiated(err);
- break;
- }
-
- case kWhatStopWidevine:
- {
- // mStopRead is only used for Widevine to prevent the video source
- // from being read while the associated video decoder is shutting down.
- mStopRead = true;
- if (mVideoTrack.mSource != NULL) {
- mVideoTrack.mPackets->clear();
- }
+ status_t status = onPrepareDrm(msg);
sp<AMessage> response = new AMessage;
+ response->setInt32("status", status);
sp<AReplyToken> replyID;
CHECK(msg->senderAwaitsResponse(&replyID));
response->postReply(replyID);
break;
}
+
+ case kWhatReleaseDrm:
+ {
+ status_t status = onReleaseDrm();
+ sp<AMessage> response = new AMessage;
+ response->setInt32("status", status);
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ response->postReply(replyID);
+ break;
+ }
+
default:
Source::onMessageReceived(msg);
break;
@@ -765,7 +730,7 @@
CHECK(msg->findInt64("timeUs", &timeUs));
int64_t subTimeUs;
- readBuffer(type, timeUs, &subTimeUs);
+ readBuffer(type, timeUs, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */, &subTimeUs);
int64_t delayUs = subTimeUs - timeUs;
if (msg->what() == kWhatFetchSubtitleData) {
@@ -796,7 +761,7 @@
}
int64_t nextSubTimeUs;
- readBuffer(type, -1, &nextSubTimeUs);
+ readBuffer(type, -1, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */, &nextSubTimeUs);
sp<ABuffer> buffer;
status_t dequeueStatus = packets->dequeueAccessUnit(&buffer);
@@ -886,17 +851,19 @@
return -EWOULDBLOCK;
}
+ // If has gone through stop/releaseDrm sequence, we no longer send down any buffer b/c
+ // the codec's crypto object has gone away (b/37960096).
+ // Note: This will be unnecessary when stop() changes behavior and releases codec (b/35248283).
+ if (!mStarted && mIsDrmReleased) {
+ return -EWOULDBLOCK;
+ }
+
Track *track = audio ? &mAudioTrack : &mVideoTrack;
if (track->mSource == NULL) {
return -EWOULDBLOCK;
}
- if (mIsWidevine && !audio) {
- // try to read a buffer as we may not have been able to the last time
- postReadBuffer(MEDIA_TRACK_TYPE_VIDEO);
- }
-
status_t finalResult;
if (!track->mPackets->hasBufferAvailable(&finalResult)) {
if (finalResult == OK) {
@@ -1220,9 +1187,10 @@
return INVALID_OPERATION;
}
-status_t NuPlayer::GenericSource::seekTo(int64_t seekTimeUs) {
+status_t NuPlayer::GenericSource::seekTo(int64_t seekTimeUs, MediaPlayerSeekMode mode) {
sp<AMessage> msg = new AMessage(kWhatSeek, this);
msg->setInt64("seekTimeUs", seekTimeUs);
+ msg->setInt32("mode", mode);
sp<AMessage> response;
status_t err = msg->postAndAwaitResponse(&response);
@@ -1235,10 +1203,12 @@
void NuPlayer::GenericSource::onSeek(const sp<AMessage>& msg) {
int64_t seekTimeUs;
+ int32_t mode;
CHECK(msg->findInt64("seekTimeUs", &seekTimeUs));
+ CHECK(msg->findInt32("mode", &mode));
sp<AMessage> response = new AMessage;
- status_t err = doSeek(seekTimeUs);
+ status_t err = doSeek(seekTimeUs, (MediaPlayerSeekMode)mode);
response->setInt32("err", err);
sp<AReplyToken> replyID;
@@ -1246,20 +1216,25 @@
response->postReply(replyID);
}
-status_t NuPlayer::GenericSource::doSeek(int64_t seekTimeUs) {
+status_t NuPlayer::GenericSource::doSeek(int64_t seekTimeUs, MediaPlayerSeekMode mode) {
mBufferingMonitor->updateDequeuedBufferTime(-1ll);
// If the Widevine source is stopped, do not attempt to read any
// more buffers.
+ //
+ // TODO: revisit after widevine is removed. May be able to
+ // combine mStopRead with mStarted.
if (mStopRead) {
return INVALID_OPERATION;
}
if (mVideoTrack.mSource != NULL) {
int64_t actualTimeUs;
- readBuffer(MEDIA_TRACK_TYPE_VIDEO, seekTimeUs, &actualTimeUs);
+ readBuffer(MEDIA_TRACK_TYPE_VIDEO, seekTimeUs, mode, &actualTimeUs);
- seekTimeUs = actualTimeUs;
- mVideoLastDequeueTimeUs = seekTimeUs;
+ if (mode != MediaPlayerSeekMode::SEEK_CLOSEST) {
+ seekTimeUs = actualTimeUs;
+ }
+ mVideoLastDequeueTimeUs = actualTimeUs;
}
if (mAudioTrack.mSource != NULL) {
@@ -1267,11 +1242,6 @@
mAudioLastDequeueTimeUs = seekTimeUs;
}
- setDrmPlaybackStatusIfNeeded(Playback::START, seekTimeUs / 1000);
- if (!mStarted) {
- setDrmPlaybackStatusIfNeeded(Playback::PAUSE, 0);
- }
-
// If currently buffering, post kWhatBufferingEnd first, so that
// NuPlayer resumes. Otherwise, if cache hits high watermark
// before new polling happens, no one will resume the playback.
@@ -1283,9 +1253,7 @@
sp<ABuffer> NuPlayer::GenericSource::mediaBufferToABuffer(
MediaBuffer* mb,
- media_track_type trackType,
- int64_t /* seekTimeUs */,
- int64_t *actualTimeUs) {
+ media_track_type trackType) {
bool audio = trackType == MEDIA_TRACK_TYPE_AUDIO;
size_t outLength = mb->range_length();
@@ -1294,11 +1262,26 @@
}
sp<ABuffer> ab;
- if (mIsSecure && !audio) {
+
+ if (mIsDrmProtected) {
+ // Modular DRM
+ // Enabled for both video/audio so 1) media buffer is reused without extra copying
+ // 2) meta data can be retrieved in onInputBufferFetched for calling queueSecureInputBuffer.
+
// data is already provided in the buffer
ab = new ABuffer(NULL, mb->range_length());
mb->add_ref();
ab->setMediaBufferBase(mb);
+
+ // Modular DRM: Required b/c of the above add_ref.
+ // If ref>0, there must be an observer, or it'll crash at release().
+ // TODO: MediaBuffer might need to be revised to ease such need.
+ mb->setObserver(this);
+ // setMediaBufferBase() interestingly doesn't increment the ref count on its own.
+ // Extra increment (since we want to keep mb alive and attached to ab beyond this function
+ // call. This is to counter the effect of mb->release() towards the end.
+ mb->add_ref();
+
} else {
ab = new ABuffer(outLength);
memcpy(ab->data(),
@@ -1322,16 +1305,6 @@
CHECK(mb->meta_data()->findInt64(kKeyTime, &timeUs));
meta->setInt64("timeUs", timeUs);
-#if 0
- // Temporarily disable pre-roll till we have a full solution to handle
- // both single seek and continous seek gracefully.
- if (seekTimeUs > timeUs) {
- sp<AMessage> extra = new AMessage;
- extra->setInt64("resume-at-mediaTimeUs", seekTimeUs);
- meta->setMessage("extra", extra);
- }
-#endif
-
if (trackType == MEDIA_TRACK_TYPE_VIDEO) {
int32_t layerId;
if (mb->meta_data()->findInt32(kKeyTemporalLayerId, &layerId)) {
@@ -1371,10 +1344,6 @@
meta->setBuffer("mpegUserData", mpegUserData);
}
- if (actualTimeUs) {
- *actualTimeUs = timeUs;
- }
-
mb->release();
mb = NULL;
@@ -1406,8 +1375,12 @@
}
void NuPlayer::GenericSource::readBuffer(
- media_track_type trackType, int64_t seekTimeUs, int64_t *actualTimeUs, bool formatChange) {
+ media_track_type trackType, int64_t seekTimeUs, MediaPlayerSeekMode mode,
+ int64_t *actualTimeUs, bool formatChange) {
// Do not read data if Widevine source is stopped
+ //
+ // TODO: revisit after widevine is removed. May be able to
+ // combine mStopRead with mStarted.
if (mStopRead) {
return;
}
@@ -1416,19 +1389,11 @@
switch (trackType) {
case MEDIA_TRACK_TYPE_VIDEO:
track = &mVideoTrack;
- if (mIsWidevine) {
- maxBuffers = 2;
- } else {
- maxBuffers = 8; // too large of a number may influence seeks
- }
+ maxBuffers = 8; // too large of a number may influence seeks
break;
case MEDIA_TRACK_TYPE_AUDIO:
track = &mAudioTrack;
- if (mIsWidevine) {
- maxBuffers = 8;
- } else {
- maxBuffers = 64;
- }
+ maxBuffers = 64;
break;
case MEDIA_TRACK_TYPE_SUBTITLE:
track = &mSubtitleTrack;
@@ -1452,13 +1417,13 @@
bool seeking = false;
if (seekTimeUs >= 0) {
- options.setSeekTo(seekTimeUs, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
+ options.setSeekTo(seekTimeUs, mode);
seeking = true;
}
- const bool couldReadMultiple = (!mIsWidevine && track->mSource->supportReadMultiple());
+ const bool couldReadMultiple = (track->mSource->supportReadMultiple());
- if (mIsWidevine || couldReadMultiple) {
+ if (couldReadMultiple) {
options.setNonBlocking();
}
@@ -1499,9 +1464,20 @@
queueDiscontinuityIfNeeded(seeking, formatChange, trackType, track);
- sp<ABuffer> buffer = mediaBufferToABuffer(
- mbuf, trackType, seekTimeUs,
- numBuffers == 0 ? actualTimeUs : NULL);
+ sp<ABuffer> buffer = mediaBufferToABuffer(mbuf, trackType);
+ if (numBuffers == 0 && actualTimeUs != nullptr) {
+ *actualTimeUs = timeUs;
+ }
+ if (seeking && buffer != nullptr) {
+ sp<AMessage> meta = buffer->meta();
+ if (meta != nullptr && mode == MediaPlayerSeekMode::SEEK_CLOSEST
+ && seekTimeUs > timeUs) {
+ sp<AMessage> extra = new AMessage;
+ extra->setInt64("resume-at-mediaTimeUs", seekTimeUs);
+ meta->setMessage("extra", extra);
+ }
+ }
+
track->mPackets->queueAccessUnit(buffer);
formatChange = false;
seeking = false;
@@ -1562,24 +1538,66 @@
mFirstDequeuedBufferRealUs(-1ll),
mFirstDequeuedBufferMediaUs(-1ll),
mlastDequeuedBufferMediaUs(-1ll) {
+ getDefaultBufferingSettings(&mSettings);
}
NuPlayer::GenericSource::BufferingMonitor::~BufferingMonitor() {
}
+void NuPlayer::GenericSource::BufferingMonitor::getDefaultBufferingSettings(
+ BufferingSettings *buffering /* nonnull */) {
+ buffering->mInitialBufferingMode = BUFFERING_MODE_TIME_ONLY;
+ buffering->mRebufferingMode = BUFFERING_MODE_TIME_THEN_SIZE;
+ buffering->mInitialWatermarkMs = kHighWaterMarkMs;
+ buffering->mRebufferingWatermarkLowMs = kLowWaterMarkMs;
+ buffering->mRebufferingWatermarkHighMs = kHighWaterMarkRebufferMs;
+ buffering->mRebufferingWatermarkLowKB = kLowWaterMarkKB;
+ buffering->mRebufferingWatermarkHighKB = kHighWaterMarkKB;
+
+ ALOGV("BufferingMonitor::getDefaultBufferingSettings{%s}",
+ buffering->toString().string());
+}
+
+status_t NuPlayer::GenericSource::BufferingMonitor::setBufferingSettings(
+ const BufferingSettings &buffering) {
+ ALOGV("BufferingMonitor::setBufferingSettings{%s}",
+ buffering.toString().string());
+
+ Mutex::Autolock _l(mLock);
+ if (buffering.IsSizeBasedBufferingMode(buffering.mInitialBufferingMode)
+ || (buffering.IsTimeBasedBufferingMode(buffering.mRebufferingMode)
+ && buffering.mRebufferingWatermarkLowMs > buffering.mRebufferingWatermarkHighMs)
+ || (buffering.IsSizeBasedBufferingMode(buffering.mRebufferingMode)
+ && buffering.mRebufferingWatermarkLowKB > buffering.mRebufferingWatermarkHighKB)) {
+ return BAD_VALUE;
+ }
+ mSettings = buffering;
+ if (mSettings.mInitialBufferingMode == BUFFERING_MODE_NONE) {
+ mSettings.mInitialWatermarkMs = BufferingSettings::kNoWatermark;
+ }
+ if (!mSettings.IsTimeBasedBufferingMode(mSettings.mRebufferingMode)) {
+ mSettings.mRebufferingWatermarkLowMs = BufferingSettings::kNoWatermark;
+ mSettings.mRebufferingWatermarkHighMs = INT32_MAX;
+ }
+ if (!mSettings.IsSizeBasedBufferingMode(mSettings.mRebufferingMode)) {
+ mSettings.mRebufferingWatermarkLowKB = BufferingSettings::kNoWatermark;
+ mSettings.mRebufferingWatermarkHighKB = INT32_MAX;
+ }
+ return OK;
+}
+
void NuPlayer::GenericSource::BufferingMonitor::prepare(
const sp<NuCachedSource2> &cachedSource,
- const sp<WVMExtractor> &wvmExtractor,
int64_t durationUs,
int64_t bitrate,
bool isStreaming) {
Mutex::Autolock _l(mLock);
- prepare_l(cachedSource, wvmExtractor, durationUs, bitrate, isStreaming);
+ prepare_l(cachedSource, durationUs, bitrate, isStreaming);
}
void NuPlayer::GenericSource::BufferingMonitor::stop() {
Mutex::Autolock _l(mLock);
- prepare_l(NULL /* cachedSource */, NULL /* wvmExtractor */, -1 /* durationUs */,
+ prepare_l(NULL /* cachedSource */, -1 /* durationUs */,
-1 /* bitrate */, false /* isStreaming */);
}
@@ -1634,22 +1652,17 @@
void NuPlayer::GenericSource::BufferingMonitor::prepare_l(
const sp<NuCachedSource2> &cachedSource,
- const sp<WVMExtractor> &wvmExtractor,
int64_t durationUs,
int64_t bitrate,
bool isStreaming) {
- ALOGW_IF(wvmExtractor != NULL && cachedSource != NULL,
- "WVMExtractor and NuCachedSource are both present when "
- "BufferingMonitor::prepare_l is called, ignore NuCachedSource");
mCachedSource = cachedSource;
- mWVMExtractor = wvmExtractor;
mDurationUs = durationUs;
mBitrate = bitrate;
mIsStreaming = isStreaming;
mAudioTimeUs = 0;
mVideoTimeUs = 0;
- mPrepareBuffering = (cachedSource != NULL || wvmExtractor != NULL);
+ mPrepareBuffering = (cachedSource != NULL);
cancelPollBuffering_l();
mOffloadAudio = false;
mFirstDequeuedBufferRealUs = -1ll;
@@ -1733,9 +1746,7 @@
int32_t kbps = 0;
status_t err = UNKNOWN_ERROR;
- if (mWVMExtractor != NULL) {
- err = mWVMExtractor->getEstimatedBandwidthKbps(&kbps);
- } else if (mCachedSource != NULL) {
+ if (mCachedSource != NULL) {
err = mCachedSource->getEstimatedBandwidthKbps(&kbps);
}
@@ -1775,10 +1786,7 @@
int64_t cachedDurationUs = -1ll;
ssize_t cachedDataRemaining = -1;
- if (mWVMExtractor != NULL) {
- cachedDurationUs =
- mWVMExtractor->getCachedDurationUs(&finalStatus);
- } else if (mCachedSource != NULL) {
+ if (mCachedSource != NULL) {
cachedDataRemaining =
mCachedSource->approxDataRemaining(&finalStatus);
@@ -1806,7 +1814,9 @@
stopBufferingIfNecessary_l();
return;
- } else if (cachedDurationUs >= 0ll) {
+ }
+
+ if (cachedDurationUs >= 0ll) {
if (mDurationUs > 0ll) {
int64_t cachedPosUs = getLastReadPosition_l() + cachedDurationUs;
int percentage = 100.0 * cachedPosUs / mDurationUs;
@@ -1817,36 +1827,40 @@
notifyBufferingUpdate_l(percentage);
}
- ALOGV("onPollBuffering_l: cachedDurationUs %.1f sec",
- cachedDurationUs / 1000000.0f);
+ ALOGV("onPollBuffering_l: cachedDurationUs %.1f sec", cachedDurationUs / 1000000.0f);
- if (cachedDurationUs < kLowWaterMarkUs) {
- // Take into account the data cached in downstream components to try to avoid
- // unnecessary pause.
- if (mOffloadAudio && mFirstDequeuedBufferRealUs >= 0) {
- int64_t downStreamCacheUs = mlastDequeuedBufferMediaUs - mFirstDequeuedBufferMediaUs
- - (ALooper::GetNowUs() - mFirstDequeuedBufferRealUs);
- if (downStreamCacheUs > 0) {
- cachedDurationUs += downStreamCacheUs;
+ if (mPrepareBuffering) {
+ if (cachedDurationUs > mSettings.mInitialWatermarkMs * 1000) {
+ stopBufferingIfNecessary_l();
+ }
+ } else if (mSettings.IsTimeBasedBufferingMode(mSettings.mRebufferingMode)) {
+ if (cachedDurationUs < mSettings.mRebufferingWatermarkLowMs * 1000) {
+ // Take into account the data cached in downstream components to try to avoid
+ // unnecessary pause.
+ if (mOffloadAudio && mFirstDequeuedBufferRealUs >= 0) {
+ int64_t downStreamCacheUs =
+ mlastDequeuedBufferMediaUs - mFirstDequeuedBufferMediaUs
+ - (ALooper::GetNowUs() - mFirstDequeuedBufferRealUs);
+ if (downStreamCacheUs > 0) {
+ cachedDurationUs += downStreamCacheUs;
+ }
}
- }
- if (cachedDurationUs < kLowWaterMarkUs) {
- startBufferingIfNecessary_l();
- }
- } else {
- int64_t highWaterMark = mPrepareBuffering ? kHighWaterMarkUs : kHighWaterMarkRebufferUs;
- if (cachedDurationUs > highWaterMark) {
+ if (cachedDurationUs < mSettings.mRebufferingWatermarkLowMs * 1000) {
+ startBufferingIfNecessary_l();
+ }
+ } else if (cachedDurationUs > mSettings.mRebufferingWatermarkHighMs * 1000) {
stopBufferingIfNecessary_l();
}
}
- } else if (cachedDataRemaining >= 0) {
+ } else if (cachedDataRemaining >= 0
+ && mSettings.IsSizeBasedBufferingMode(mSettings.mRebufferingMode)) {
ALOGV("onPollBuffering_l: cachedDataRemaining %zd bytes",
cachedDataRemaining);
- if (cachedDataRemaining < kLowWaterMarkBytes) {
+ if (cachedDataRemaining < (mSettings.mRebufferingWatermarkLowKB << 10)) {
startBufferingIfNecessary_l();
- } else if (cachedDataRemaining > kHighWaterMarkBytes) {
+ } else if (cachedDataRemaining > (mSettings.mRebufferingWatermarkHighKB << 10)) {
stopBufferingIfNecessary_l();
}
}
@@ -1872,4 +1886,167 @@
}
}
+// Modular DRM
+status_t NuPlayer::GenericSource::prepareDrm(
+ const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId, sp<ICrypto> *crypto)
+{
+ ALOGV("prepareDrm");
+
+ sp<AMessage> msg = new AMessage(kWhatPrepareDrm, this);
+ // synchronous call so just passing the address but with local copies of "const" args
+ uint8_t UUID[16];
+ memcpy(UUID, uuid, sizeof(UUID));
+ Vector<uint8_t> sessionId = drmSessionId;
+ msg->setPointer("uuid", (void*)UUID);
+ msg->setPointer("drmSessionId", (void*)&sessionId);
+ msg->setPointer("crypto", (void*)crypto);
+
+ sp<AMessage> response;
+ status_t status = msg->postAndAwaitResponse(&response);
+
+ if (status == OK && response != NULL) {
+ CHECK(response->findInt32("status", &status));
+ ALOGV_IF(status == OK, "prepareDrm: mCrypto: %p (%d)", crypto->get(),
+ (*crypto != NULL ? (*crypto)->getStrongCount() : 0));
+ ALOGD("prepareDrm ret: %d ", status);
+ } else {
+ ALOGE("prepareDrm err: %d", status);
+ }
+
+ return status;
+}
+
+status_t NuPlayer::GenericSource::releaseDrm()
+{
+ ALOGV("releaseDrm");
+
+ sp<AMessage> msg = new AMessage(kWhatReleaseDrm, this);
+
+ // synchronous call to update the source states before the player proceedes with crypto cleanup
+ sp<AMessage> response;
+ status_t status = msg->postAndAwaitResponse(&response);
+
+ if (status == OK && response != NULL) {
+ ALOGD("releaseDrm ret: OK ");
+ } else {
+ ALOGE("releaseDrm err: %d", status);
+ }
+
+ return status;
+}
+
+status_t NuPlayer::GenericSource::onPrepareDrm(const sp<AMessage> &msg)
+{
+ ALOGV("onPrepareDrm ");
+
+ mIsDrmProtected = false;
+ mIsDrmReleased = false;
+ mIsSecure = false;
+
+ uint8_t *uuid;
+ Vector<uint8_t> *drmSessionId;
+ sp<ICrypto> *outCrypto;
+ CHECK(msg->findPointer("uuid", (void**)&uuid));
+ CHECK(msg->findPointer("drmSessionId", (void**)&drmSessionId));
+ CHECK(msg->findPointer("crypto", (void**)&outCrypto));
+
+ status_t status = OK;
+ sp<ICrypto> crypto = NuPlayerDrm::createCryptoAndPlugin(uuid, *drmSessionId, status);
+ if (crypto == NULL) {
+ ALOGE("onPrepareDrm: createCrypto failed. status: %d", status);
+ return status;
+ }
+ ALOGV("onPrepareDrm: createCryptoAndPlugin succeeded for uuid: %s",
+ DrmUUID::toHexString(uuid).string());
+
+ *outCrypto = crypto;
+ // as long a there is an active crypto
+ mIsDrmProtected = true;
+
+ if (mMimes.size() == 0) {
+ status = UNKNOWN_ERROR;
+ ALOGE("onPrepareDrm: Unexpected. Must have at least one track. status: %d", status);
+ return status;
+ }
+
+ // first mime in this list is either the video track, or the first audio track
+ const char *mime = mMimes[0].string();
+ mIsSecure = crypto->requiresSecureDecoderComponent(mime);
+ ALOGV("onPrepareDrm: requiresSecureDecoderComponent mime: %s isSecure: %d",
+ mime, mIsSecure);
+
+ // Checking the member flags while in the looper to send out the notification.
+ // The legacy mDecryptHandle!=NULL check (for FLAG_PROTECTED) is equivalent to mIsDrmProtected.
+ notifyFlagsChanged(
+ (mIsSecure ? FLAG_SECURE : 0) |
+ // Setting "protected screen" only for L1: b/38390836
+ (mIsSecure ? FLAG_PROTECTED : 0) |
+ FLAG_CAN_PAUSE |
+ FLAG_CAN_SEEK_BACKWARD |
+ FLAG_CAN_SEEK_FORWARD |
+ FLAG_CAN_SEEK);
+
+ return status;
+}
+
+status_t NuPlayer::GenericSource::onReleaseDrm()
+{
+ if (mIsDrmProtected) {
+ mIsDrmProtected = false;
+ // to prevent returning any more buffer after stop/releaseDrm (b/37960096)
+ mIsDrmReleased = true;
+ ALOGV("onReleaseDrm: mIsDrmProtected is reset.");
+ } else {
+ ALOGE("onReleaseDrm: mIsDrmProtected is already false.");
+ }
+
+ return OK;
+}
+
+status_t NuPlayer::GenericSource::checkDrmInfo()
+{
+ // clearing the flag at prepare in case the player is reused after stop/releaseDrm with the
+ // same source without being reset (called by prepareAsync/initFromDataSource)
+ mIsDrmReleased = false;
+
+ if (mFileMeta == NULL) {
+ ALOGI("checkDrmInfo: No metadata");
+ return OK; // letting the caller responds accordingly
+ }
+
+ uint32_t type;
+ const void *pssh;
+ size_t psshsize;
+
+ if (!mFileMeta->findData(kKeyPssh, &type, &pssh, &psshsize)) {
+ ALOGV("checkDrmInfo: No PSSH");
+ return OK; // source without DRM info
+ }
+
+ Parcel parcel;
+ NuPlayerDrm::retrieveDrmInfo(pssh, psshsize, &parcel);
+ ALOGV("checkDrmInfo: MEDIA_DRM_INFO PSSH size: %d Parcel size: %d objects#: %d",
+ (int)psshsize, (int)parcel.dataSize(), (int)parcel.objectsCount());
+
+ if (parcel.dataSize() == 0) {
+ ALOGE("checkDrmInfo: Unexpected parcel size: 0");
+ return UNKNOWN_ERROR;
+ }
+
+ // Can't pass parcel as a message to the player. Converting Parcel->ABuffer to pass it
+ // to the Player's onSourceNotify then back to Parcel for calling driver's notifyListener.
+ sp<ABuffer> drmInfoBuffer = ABuffer::CreateAsCopy(parcel.data(), parcel.dataSize());
+ notifyDrmInfo(drmInfoBuffer);
+
+ return OK;
+}
+
+void NuPlayer::GenericSource::signalBufferReturned(MediaBuffer *buffer)
+{
+ //ALOGV("signalBufferReturned %p refCount: %d", buffer, buffer->localRefcount());
+
+ buffer->setObserver(NULL);
+ buffer->release(); // this leads to delete since that there is no observor
+}
+
} // namespace android
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index e92a2ae..4064133 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -28,7 +28,6 @@
namespace android {
class DecryptHandle;
-class DrmManagerClient;
struct AnotherPacketSource;
struct ARTSPController;
class DataSource;
@@ -37,9 +36,10 @@
struct MediaSource;
class MediaBuffer;
struct NuCachedSource2;
-class WVMExtractor;
-struct NuPlayer::GenericSource : public NuPlayer::Source {
+struct NuPlayer::GenericSource : public NuPlayer::Source,
+ public MediaBufferObserver // Modular DRM
+{
GenericSource(const sp<AMessage> ¬ify, bool uidValid, uid_t uid);
status_t setDataSource(
@@ -51,6 +51,10 @@
status_t setDataSource(const sp<DataSource>& dataSource);
+ virtual status_t getDefaultBufferingSettings(
+ BufferingSettings* buffering /* nonnull */) override;
+ virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
+
virtual void prepareAsync();
virtual void start();
@@ -71,7 +75,9 @@
virtual sp<AMessage> getTrackInfo(size_t trackIndex) const;
virtual ssize_t getSelectedTrack(media_track_type type) const;
virtual status_t selectTrack(size_t trackIndex, bool select, int64_t timeUs);
- virtual status_t seekTo(int64_t seekTimeUs);
+ virtual status_t seekTo(
+ int64_t seekTimeUs,
+ MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC) override;
virtual status_t setBuffers(bool audio, Vector<MediaBuffer *> &buffers);
@@ -79,6 +85,15 @@
virtual void setOffloadAudio(bool offload);
+ // Modular DRM
+ virtual void signalBufferReturned(MediaBuffer *buffer);
+
+ virtual status_t prepareDrm(
+ const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId, sp<ICrypto> *crypto);
+
+ virtual status_t releaseDrm();
+
+
protected:
virtual ~GenericSource();
@@ -101,10 +116,12 @@
kWhatSelectTrack,
kWhatSeek,
kWhatReadBuffer,
- kWhatStopWidevine,
kWhatStart,
kWhatResume,
kWhatSecureDecodersInstantiated,
+ // Modular DRM
+ kWhatPrepareDrm,
+ kWhatReleaseDrm,
};
struct Track {
@@ -119,9 +136,11 @@
public:
explicit BufferingMonitor(const sp<AMessage> ¬ify);
+ void getDefaultBufferingSettings(BufferingSettings *buffering /* nonnull */);
+ status_t setBufferingSettings(const BufferingSettings &buffering);
+
// Set up state.
void prepare(const sp<NuCachedSource2> &cachedSource,
- const sp<WVMExtractor> &wvmExtractor,
int64_t durationUs,
int64_t bitrate,
bool isStreaming);
@@ -155,7 +174,6 @@
sp<AMessage> mNotify;
sp<NuCachedSource2> mCachedSource;
- sp<WVMExtractor> mWVMExtractor;
int64_t mDurationUs;
int64_t mBitrate;
bool mIsStreaming;
@@ -169,13 +187,13 @@
mutable Mutex mLock;
+ BufferingSettings mSettings;
bool mOffloadAudio;
int64_t mFirstDequeuedBufferRealUs;
int64_t mFirstDequeuedBufferMediaUs;
int64_t mlastDequeuedBufferMediaUs;
void prepare_l(const sp<NuCachedSource2> &cachedSource,
- const sp<WVMExtractor> &wvmExtractor,
int64_t durationUs,
int64_t bitrate,
bool isStreaming);
@@ -204,7 +222,7 @@
int32_t mFetchTimedTextDataGeneration;
int64_t mDurationUs;
bool mAudioIsVorbis;
- bool mIsWidevine;
+ // Secure codec is required.
bool mIsSecure;
bool mIsStreaming;
bool mUIDValid;
@@ -219,10 +237,7 @@
sp<DataSource> mDataSource;
sp<NuCachedSource2> mCachedSource;
sp<DataSource> mHttpSource;
- sp<WVMExtractor> mWVMExtractor;
sp<MetaData> mFileMeta;
- DrmManagerClient *mDrmManagerClient;
- sp<DecryptHandle> mDecryptHandle;
bool mStarted;
bool mStopRead;
int64_t mBitrate;
@@ -239,9 +254,7 @@
void resetDataSource();
status_t initFromDataSource();
- void checkDrmStatus(const sp<DataSource>& dataSource);
int64_t getLastReadPosition();
- void setDrmPlaybackStatusIfNeeded(int playbackStatus, int64_t position);
void notifyPreparedAndCleanup(status_t err);
void onSecureDecodersInstantiated(status_t err);
@@ -261,7 +274,7 @@
status_t doSelectTrack(size_t trackIndex, bool select, int64_t timeUs);
void onSeek(const sp<AMessage>& msg);
- status_t doSeek(int64_t seekTimeUs);
+ status_t doSeek(int64_t seekTimeUs, MediaPlayerSeekMode mode);
void onPrepareAsync();
@@ -279,19 +292,35 @@
sp<ABuffer> mediaBufferToABuffer(
MediaBuffer *mbuf,
- media_track_type trackType,
- int64_t seekTimeUs,
- int64_t *actualTimeUs = NULL);
+ media_track_type trackType);
void postReadBuffer(media_track_type trackType);
void onReadBuffer(const sp<AMessage>& msg);
+ // When |mode| is MediaPlayerSeekMode::SEEK_CLOSEST, the buffer read shall
+ // include an item indicating skipping rendering all buffers with timestamp
+ // earlier than |seekTimeUs|.
+ // For other modes, the buffer read will not include the item as above in order
+ // to facilitate fast seek operation.
void readBuffer(
media_track_type trackType,
- int64_t seekTimeUs = -1ll, int64_t *actualTimeUs = NULL, bool formatChange = false);
+ int64_t seekTimeUs = -1ll,
+ MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC,
+ int64_t *actualTimeUs = NULL, bool formatChange = false);
void queueDiscontinuityIfNeeded(
bool seeking, bool formatChange, media_track_type trackType, Track *track);
+ // Modular DRM
+ // The source is DRM protected and is prepared for DRM.
+ bool mIsDrmProtected;
+ // releaseDrm has been processed.
+ bool mIsDrmReleased;
+ Vector<String8> mMimes;
+
+ status_t checkDrmInfo();
+ status_t onPrepareDrm(const sp<AMessage> &msg);
+ status_t onReleaseDrm();
+
DISALLOW_EVIL_CONSTRUCTORS(GenericSource);
};
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
index 5027e01..ad4c223 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
@@ -32,6 +32,11 @@
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/Utils.h>
+// default buffer prepare/ready/underflow marks
+static const int kReadyMarkMs = 5000; // 5 seconds
+static const int kPrepareMarkMs = 1500; // 1.5 seconds
+static const int kUnderflowMarkMs = 1000; // 1 second
+
namespace android {
NuPlayer::HTTPLiveSource::HTTPLiveSource(
@@ -49,6 +54,7 @@
mFetchMetaDataGeneration(0),
mHasMetadata(false),
mMetadataSelected(false) {
+ getDefaultBufferingSettings(&mBufferingSettings);
if (headers) {
mExtraHeaders = *headers;
@@ -76,6 +82,42 @@
}
}
+status_t NuPlayer::HTTPLiveSource::getDefaultBufferingSettings(
+ BufferingSettings* buffering /* nonnull */) {
+ buffering->mInitialBufferingMode = BUFFERING_MODE_TIME_ONLY;
+ buffering->mRebufferingMode = BUFFERING_MODE_TIME_ONLY;
+ buffering->mInitialWatermarkMs = kPrepareMarkMs;
+ buffering->mRebufferingWatermarkLowMs = kUnderflowMarkMs;
+ buffering->mRebufferingWatermarkHighMs = kReadyMarkMs;
+
+ return OK;
+}
+
+status_t NuPlayer::HTTPLiveSource::setBufferingSettings(const BufferingSettings& buffering) {
+ if (buffering.IsSizeBasedBufferingMode(buffering.mInitialBufferingMode)
+ || buffering.IsSizeBasedBufferingMode(buffering.mRebufferingMode)
+ || (buffering.IsTimeBasedBufferingMode(buffering.mRebufferingMode)
+ && buffering.mRebufferingWatermarkLowMs > buffering.mRebufferingWatermarkHighMs)) {
+ return BAD_VALUE;
+ }
+
+ mBufferingSettings = buffering;
+
+ if (mBufferingSettings.mInitialBufferingMode == BUFFERING_MODE_NONE) {
+ mBufferingSettings.mInitialWatermarkMs = BufferingSettings::kNoWatermark;
+ }
+ if (mBufferingSettings.mRebufferingMode == BUFFERING_MODE_NONE) {
+ mBufferingSettings.mRebufferingWatermarkLowMs = BufferingSettings::kNoWatermark;
+ mBufferingSettings.mRebufferingWatermarkHighMs = INT32_MAX;
+ }
+
+ if (mLiveSession != NULL) {
+ mLiveSession->setBufferingSettings(mBufferingSettings);
+ }
+
+ return OK;
+}
+
void NuPlayer::HTTPLiveSource::prepareAsync() {
if (mLiveLooper == NULL) {
mLiveLooper = new ALooper;
@@ -94,6 +136,7 @@
mLiveLooper->registerHandler(mLiveSession);
+ mLiveSession->setBufferingSettings(mBufferingSettings);
mLiveSession->connectAsync(
mURL.c_str(), mExtraHeaders.isEmpty() ? NULL : &mExtraHeaders);
}
@@ -214,8 +257,12 @@
return (err == OK || err == BAD_VALUE) ? (status_t)OK : err;
}
-status_t NuPlayer::HTTPLiveSource::seekTo(int64_t seekTimeUs) {
- return mLiveSession->seekTo(seekTimeUs);
+status_t NuPlayer::HTTPLiveSource::seekTo(int64_t seekTimeUs, MediaPlayerSeekMode mode) {
+ if (mLiveSession->isSeekable()) {
+ return mLiveSession->seekTo(seekTimeUs, mode);
+ } else {
+ return INVALID_OPERATION;
+ }
}
void NuPlayer::HTTPLiveSource::pollForRawData(
@@ -317,8 +364,9 @@
notifyVideoSizeChanged();
}
- uint32_t flags = FLAG_CAN_PAUSE;
+ uint32_t flags = 0;
if (mLiveSession->isSeekable()) {
+ flags |= FLAG_CAN_PAUSE;
flags |= FLAG_CAN_SEEK;
flags |= FLAG_CAN_SEEK_BACKWARD;
flags |= FLAG_CAN_SEEK_FORWARD;
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
index 574937d..2866a6a 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
@@ -34,6 +34,10 @@
const char *url,
const KeyedVector<String8, String8> *headers);
+ virtual status_t getDefaultBufferingSettings(
+ BufferingSettings* buffering /* nonnull */) override;
+ virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
+
virtual void prepareAsync();
virtual void start();
@@ -47,7 +51,9 @@
virtual sp<AMessage> getTrackInfo(size_t trackIndex) const;
virtual ssize_t getSelectedTrack(media_track_type /* type */) const;
virtual status_t selectTrack(size_t trackIndex, bool select, int64_t timeUs);
- virtual status_t seekTo(int64_t seekTimeUs);
+ virtual status_t seekTo(
+ int64_t seekTimeUs,
+ MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC) override;
protected:
virtual ~HTTPLiveSource();
@@ -78,6 +84,7 @@
int32_t mFetchMetaDataGeneration;
bool mHasMetadata;
bool mMetadataSelected;
+ BufferingSettings mBufferingSettings;
void onSessionNotify(const sp<AMessage> &msg);
void pollForRawData(
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 6380ae8..6a09227 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -16,6 +16,9 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "NuPlayer"
+
+#include <inttypes.h>
+
#include <utils/Log.h>
#include "NuPlayer.h"
@@ -39,6 +42,7 @@
#include <media/AudioResamplerPublic.h>
#include <media/AVSyncSettings.h>
+#include <media/MediaCodecBuffer.h>
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/foundation/ABuffer.h>
@@ -69,16 +73,18 @@
};
struct NuPlayer::SeekAction : public Action {
- explicit SeekAction(int64_t seekTimeUs)
- : mSeekTimeUs(seekTimeUs) {
+ explicit SeekAction(int64_t seekTimeUs, MediaPlayerSeekMode mode)
+ : mSeekTimeUs(seekTimeUs),
+ mMode(mode) {
}
virtual void execute(NuPlayer *player) {
- player->performSeek(mSeekTimeUs);
+ player->performSeek(mSeekTimeUs, mMode);
}
private:
int64_t mSeekTimeUs;
+ MediaPlayerSeekMode mMode;
DISALLOW_EVIL_CONSTRUCTORS(SeekAction);
};
@@ -191,9 +197,13 @@
mPrepared(false),
mResetting(false),
mSourceStarted(false),
+ mAudioDecoderError(false),
+ mVideoDecoderError(false),
mPaused(false),
mPausedByClient(true),
- mPausedForBuffering(false) {
+ mPausedForBuffering(false),
+ mIsDrmProtected(false),
+ mDataSourceType(DATA_SOURCE_TYPE_NONE) {
clearFlushComplete();
}
@@ -216,6 +226,7 @@
msg->setObject("source", new StreamingSource(notify, source));
msg->post();
+ mDataSourceType = DATA_SOURCE_TYPE_STREAM;
}
static bool IsHTTPLiveURL(const char *url) {
@@ -248,21 +259,26 @@
sp<Source> source;
if (IsHTTPLiveURL(url)) {
source = new HTTPLiveSource(notify, httpService, url, headers);
+ ALOGV("setDataSourceAsync HTTPLiveSource %s", url);
+ mDataSourceType = DATA_SOURCE_TYPE_HTTP_LIVE;
} else if (!strncasecmp(url, "rtsp://", 7)) {
source = new RTSPSource(
notify, httpService, url, headers, mUIDValid, mUID);
+ ALOGV("setDataSourceAsync RTSPSource %s", url);
+ mDataSourceType = DATA_SOURCE_TYPE_RTSP;
} else if ((!strncasecmp(url, "http://", 7)
|| !strncasecmp(url, "https://", 8))
&& ((len >= 4 && !strcasecmp(".sdp", &url[len - 4]))
|| strstr(url, ".sdp?"))) {
source = new RTSPSource(
notify, httpService, url, headers, mUIDValid, mUID, true);
+ ALOGV("setDataSourceAsync RTSPSource http/https/.sdp %s", url);
+ mDataSourceType = DATA_SOURCE_TYPE_RTSP;
} else {
+ ALOGV("setDataSourceAsync GenericSource %s", url);
+
sp<GenericSource> genericSource =
new GenericSource(notify, mUIDValid, mUID);
- // Don't set FLAG_SECURE on mSourceFlags here for widevine.
- // The correct flags will be updated in Source::kWhatFlagsChanged
- // handler when GenericSource is prepared.
status_t err = genericSource->setDataSource(httpService, url, headers);
@@ -271,6 +287,9 @@
} else {
ALOGE("Failed to set data source!");
}
+
+ // regardless of success/failure
+ mDataSourceType = DATA_SOURCE_TYPE_GENERIC_URL;
}
msg->setObject("source", source);
msg->post();
@@ -284,6 +303,9 @@
sp<GenericSource> source =
new GenericSource(notify, mUIDValid, mUID);
+ ALOGV("setDataSourceAsync fd %d/%lld/%lld source: %p",
+ fd, (long long)offset, (long long)length, source.get());
+
status_t err = source->setDataSource(fd, offset, length);
if (err != OK) {
@@ -293,6 +315,7 @@
msg->setObject("source", source);
msg->post();
+ mDataSourceType = DATA_SOURCE_TYPE_GENERIC_FD;
}
void NuPlayer::setDataSourceAsync(const sp<DataSource> &dataSource) {
@@ -309,9 +332,37 @@
msg->setObject("source", source);
msg->post();
+ mDataSourceType = DATA_SOURCE_TYPE_MEDIA;
+}
+
+status_t NuPlayer::getDefaultBufferingSettings(
+ BufferingSettings *buffering /* nonnull */) {
+ sp<AMessage> msg = new AMessage(kWhatGetDefaultBufferingSettings, this);
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+ if (err == OK && response != NULL) {
+ CHECK(response->findInt32("err", &err));
+ if (err == OK) {
+ readFromAMessage(response, buffering);
+ }
+ }
+ return err;
+}
+
+status_t NuPlayer::setBufferingSettings(const BufferingSettings& buffering) {
+ sp<AMessage> msg = new AMessage(kWhatSetBufferingSettings, this);
+ writeToAMessage(msg, buffering);
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+ if (err == OK && response != NULL) {
+ CHECK(response->findInt32("err", &err));
+ }
+ return err;
}
void NuPlayer::prepareAsync() {
+ ALOGV("prepareAsync");
+
(new AMessage(kWhatPrepare, this))->post();
}
@@ -419,9 +470,10 @@
(new AMessage(kWhatReset, this))->post();
}
-void NuPlayer::seekToAsync(int64_t seekTimeUs, bool needNotify) {
+void NuPlayer::seekToAsync(int64_t seekTimeUs, MediaPlayerSeekMode mode, bool needNotify) {
sp<AMessage> msg = new AMessage(kWhatSeek, this);
msg->setInt64("seekTimeUs", seekTimeUs);
+ msg->setInt32("mode", mode);
msg->setInt32("needNotify", needNotify);
msg->post();
}
@@ -504,8 +556,52 @@
break;
}
+ case kWhatGetDefaultBufferingSettings:
+ {
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+
+ ALOGV("kWhatGetDefaultBufferingSettings");
+ BufferingSettings buffering;
+ status_t err = OK;
+ if (mSource != NULL) {
+ err = mSource->getDefaultBufferingSettings(&buffering);
+ } else {
+ err = INVALID_OPERATION;
+ }
+ sp<AMessage> response = new AMessage;
+ if (err == OK) {
+ writeToAMessage(response, buffering);
+ }
+ response->setInt32("err", err);
+ response->postReply(replyID);
+ break;
+ }
+
+ case kWhatSetBufferingSettings:
+ {
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+
+ ALOGV("kWhatSetBufferingSettings");
+ BufferingSettings buffering;
+ readFromAMessage(msg, &buffering);
+ status_t err = OK;
+ if (mSource != NULL) {
+ err = mSource->setBufferingSettings(buffering);
+ } else {
+ err = INVALID_OPERATION;
+ }
+ sp<AMessage> response = new AMessage;
+ response->setInt32("err", err);
+ response->postReply(replyID);
+ break;
+ }
+
case kWhatPrepare:
{
+ ALOGV("onMessageReceived kWhatPrepare");
+
mSource->prepareAsync();
break;
}
@@ -681,7 +777,8 @@
int64_t currentPositionUs = 0;
if (getCurrentPosition(¤tPositionUs) == OK) {
mDeferredActions.push_back(
- new SeekAction(currentPositionUs));
+ new SeekAction(currentPositionUs,
+ MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */));
}
}
@@ -1009,12 +1106,14 @@
ALOGV("%s shutdown completed", audio ? "audio" : "video");
if (audio) {
mAudioDecoder.clear();
+ mAudioDecoderError = false;
++mAudioDecoderGeneration;
CHECK_EQ((int)mFlushingAudio, (int)SHUTTING_DOWN_DECODER);
mFlushingAudio = SHUT_DOWN;
} else {
mVideoDecoder.clear();
+ mVideoDecoderError = false;
++mVideoDecoderGeneration;
CHECK_EQ((int)mFlushingVideo, (int)SHUTTING_DOWN_DECODER);
@@ -1062,11 +1161,6 @@
case SHUTTING_DOWN_DECODER:
break; // Wait for shutdown to complete.
case FLUSHED:
- // Widevine source reads must stop before releasing the video decoder.
- if (!audio && mSource != NULL && mSourceFlags & Source::FLAG_SECURE) {
- mSource->stop();
- mSourceStarted = false;
- }
getDecoder(audio)->initiateShutdown(); // In the middle of a seek.
*flushing = SHUTTING_DOWN_DECODER; // Shut down.
break;
@@ -1074,7 +1168,31 @@
finishFlushIfPossible(); // Should not occur.
break; // Finish anyways.
}
- notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
+ if (mSource != nullptr) {
+ if (audio) {
+ if (mVideoDecoderError || mSource->getFormat(false /* audio */) == NULL
+ || mSurface == NULL) {
+ // When both audio and video have error, or this stream has only audio
+ // which has error, notify client of error.
+ notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
+ } else {
+ // Only audio track has error. Video track could be still good to play.
+ notifyListener(MEDIA_INFO, MEDIA_INFO_PLAY_AUDIO_ERROR, err);
+ }
+ mAudioDecoderError = true;
+ } else {
+ if (mAudioDecoderError || mSource->getFormat(true /* audio */) == NULL
+ || mAudioSink == NULL) {
+ // When both audio and video have error, or this stream has only video
+ // which has error, notify client of error.
+ notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
+ } else {
+ // Only video track has error. Audio track could be still good to play.
+ notifyListener(MEDIA_INFO, MEDIA_INFO_PLAY_VIDEO_ERROR, err);
+ }
+ mVideoDecoderError = true;
+ }
+ }
} else {
ALOGV("Unhandled decoder notification %d '%c%c%c%c'.",
what,
@@ -1197,12 +1315,14 @@
case kWhatSeek:
{
int64_t seekTimeUs;
+ int32_t mode;
int32_t needNotify;
CHECK(msg->findInt64("seekTimeUs", &seekTimeUs));
+ CHECK(msg->findInt32("mode", &mode));
CHECK(msg->findInt32("needNotify", &needNotify));
- ALOGV("kWhatSeek seekTimeUs=%lld us, needNotify=%d",
- (long long)seekTimeUs, needNotify);
+ ALOGV("kWhatSeek seekTimeUs=%lld us, mode=%d, needNotify=%d",
+ (long long)seekTimeUs, mode, needNotify);
if (!mStarted) {
// Seek before the player is started. In order to preview video,
@@ -1210,7 +1330,7 @@
// only once if needed. After the player is started, any seek
// operation will go through normal path.
// Audio-only cases are handled separately.
- onStart(seekTimeUs);
+ onStart(seekTimeUs, (MediaPlayerSeekMode)mode);
if (mStarted) {
onPause();
mPausedByClient = true;
@@ -1226,7 +1346,7 @@
FLUSH_CMD_FLUSH /* video */));
mDeferredActions.push_back(
- new SeekAction(seekTimeUs));
+ new SeekAction(seekTimeUs, (MediaPlayerSeekMode)mode));
// After a flush without shutdown, decoder is paused.
// Don't resume it until source seek is done, otherwise it could
@@ -1257,6 +1377,30 @@
break;
}
+ case kWhatPrepareDrm:
+ {
+ status_t status = onPrepareDrm(msg);
+
+ sp<AMessage> response = new AMessage;
+ response->setInt32("status", status);
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ response->postReply(replyID);
+ break;
+ }
+
+ case kWhatReleaseDrm:
+ {
+ status_t status = onReleaseDrm();
+
+ sp<AMessage> response = new AMessage;
+ response->setInt32("status", status);
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ response->postReply(replyID);
+ break;
+ }
+
default:
TRESPASS();
break;
@@ -1284,6 +1428,8 @@
} else {
ALOGW("resume called when renderer is gone or not set");
}
+
+ mLastStartedPlayingTimeNs = systemTime();
}
status_t NuPlayer::onInstantiateSecureDecoders() {
@@ -1315,13 +1461,16 @@
return OK;
}
-void NuPlayer::onStart(int64_t startPositionUs) {
+void NuPlayer::onStart(int64_t startPositionUs, MediaPlayerSeekMode mode) {
+ ALOGV("onStart: mCrypto: %p (%d)", mCrypto.get(),
+ (mCrypto != NULL ? mCrypto->getStrongCount() : 0));
+
if (!mSourceStarted) {
mSourceStarted = true;
mSource->start();
}
if (startPositionUs > 0) {
- performSeek(startPositionUs);
+ performSeek(startPositionUs, mode);
if (mSource->getFormat(false /* audio */) == NULL) {
return;
}
@@ -1360,6 +1509,13 @@
mOffloadAudio =
canOffloadStream(audioMeta, hasVideo, mSource->isStreaming(), streamType)
&& (mPlaybackSettings.mSpeed == 1.f && mPlaybackSettings.mPitch == 1.f);
+
+ // Modular DRM: Disabling audio offload if the source is protected
+ if (mOffloadAudio && mIsDrmProtected) {
+ mOffloadAudio = false;
+ ALOGV("onStart: Disabling mOffloadAudio now that the source is protected.");
+ }
+
if (mOffloadAudio) {
flags |= Renderer::FLAG_OFFLOAD_AUDIO;
}
@@ -1393,6 +1549,8 @@
mAudioDecoder->setRenderer(mRenderer);
}
+ mLastStartedPlayingTimeNs = systemTime();
+
postScanSources();
}
@@ -1411,6 +1569,14 @@
} else {
ALOGW("pause called when renderer is gone or not set");
}
+
+ sp<NuPlayerDriver> driver = mDriver.promote();
+ if (driver != NULL) {
+ int64_t now = systemTime();
+ int64_t played = now - mLastStartedPlayingTimeNs;
+
+ driver->notifyMorePlayingTimeUs((played+500)/1000);
+ }
}
bool NuPlayer::audioDecoderStillNeeded() {
@@ -1440,13 +1606,6 @@
*state = SHUTTING_DOWN_DECODER;
ALOGV("initiating %s decoder shutdown", audio ? "audio" : "video");
- if (!audio) {
- // Widevine source reads must stop before releasing the video decoder.
- if (mSource != NULL && mSourceFlags & Source::FLAG_SECURE) {
- mSource->stop();
- mSourceStarted = false;
- }
- }
getDecoder(audio)->initiateShutdown();
break;
}
@@ -1497,7 +1656,8 @@
// is possible; otherwise the decoders call the renderer openAudioSink directly.
status_t err = mRenderer->openAudioSink(
- format, true /* offloadOnly */, hasVideo, AUDIO_OUTPUT_FLAG_NONE, &mOffloadAudio);
+ format, true /* offloadOnly */, hasVideo,
+ AUDIO_OUTPUT_FLAG_NONE, &mOffloadAudio, mSource->isStreaming());
if (err != OK) {
// Any failure we turn off mOffloadAudio.
mOffloadAudio = false;
@@ -1515,6 +1675,7 @@
if (mAudioDecoder != NULL) {
mAudioDecoder->pause();
mAudioDecoder.clear();
+ mAudioDecoderError = false;
++mAudioDecoderGeneration;
}
if (mFlushingAudio == FLUSHING_DECODER) {
@@ -1537,7 +1698,7 @@
mRenderer->flush(false /* audio */, false /* notifyComplete */);
}
- performSeek(currentPositionUs);
+ performSeek(currentPositionUs, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */);
if (forceNonOffload) {
mRenderer->signalDisableOffloadAudio();
@@ -1563,9 +1724,16 @@
sp<AMessage> videoFormat = mSource->getFormat(false /* audio */);
audio_stream_type_t streamType = mAudioSink->getAudioStreamType();
const bool hasVideo = (videoFormat != NULL);
- const bool canOffload = canOffloadStream(
+ bool canOffload = canOffloadStream(
audioMeta, hasVideo, mSource->isStreaming(), streamType)
&& (mPlaybackSettings.mSpeed == 1.f && mPlaybackSettings.mPitch == 1.f);
+
+ // Modular DRM: Disabling audio offload if the source is protected
+ if (canOffload && mIsDrmProtected) {
+ canOffload = false;
+ ALOGV("determineAudioModeChange: Disabling mOffloadAudio b/c the source is protected.");
+ }
+
if (canOffload) {
if (!mOffloadAudio) {
mRenderer->signalEnableOffloadAudio();
@@ -1638,56 +1806,44 @@
const bool hasVideo = (mSource->getFormat(false /*audio */) != NULL);
format->setInt32("has-video", hasVideo);
*decoder = new DecoderPassThrough(notify, mSource, mRenderer);
+ ALOGV("instantiateDecoder audio DecoderPassThrough hasVideo: %d", hasVideo);
} else {
mSource->setOffloadAudio(false /* offload */);
- *decoder = new Decoder(notify, mSource, mPID, mRenderer);
+ *decoder = new Decoder(notify, mSource, mPID, mUID, mRenderer);
+ ALOGV("instantiateDecoder audio Decoder");
}
+ mAudioDecoderError = false;
} else {
sp<AMessage> notify = new AMessage(kWhatVideoNotify, this);
++mVideoDecoderGeneration;
notify->setInt32("generation", mVideoDecoderGeneration);
*decoder = new Decoder(
- notify, mSource, mPID, mRenderer, mSurface, mCCDecoder);
+ notify, mSource, mPID, mUID, mRenderer, mSurface, mCCDecoder);
+ mVideoDecoderError = false;
// enable FRC if high-quality AV sync is requested, even if not
// directly queuing to display, as this will even improve textureview
// playback.
{
- char value[PROPERTY_VALUE_MAX];
- if (property_get("persist.sys.media.avsync", value, NULL) &&
- (!strcmp("1", value) || !strcasecmp("true", value))) {
+ if (property_get_bool("persist.sys.media.avsync", false)) {
format->setInt32("auto-frc", 1);
}
}
}
(*decoder)->init();
- (*decoder)->configure(format);
- // allocate buffers to decrypt widevine source buffers
- if (!audio && (mSourceFlags & Source::FLAG_SECURE)) {
- Vector<sp<ABuffer> > inputBufs;
- CHECK_EQ((*decoder)->getInputBuffers(&inputBufs), (status_t)OK);
-
- Vector<MediaBuffer *> mediaBufs;
- for (size_t i = 0; i < inputBufs.size(); i++) {
- const sp<ABuffer> &buffer = inputBufs[i];
- MediaBuffer *mbuf = new MediaBuffer(buffer->data(), buffer->size());
- mediaBufs.push(mbuf);
- }
-
- status_t err = mSource->setBuffers(audio, mediaBufs);
- if (err != OK) {
- for (size_t i = 0; i < mediaBufs.size(); ++i) {
- mediaBufs[i]->release();
- }
- mediaBufs.clear();
- ALOGE("Secure source didn't support secure mediaBufs.");
- return err;
- }
+ // Modular DRM
+ if (mIsDrmProtected) {
+ format->setPointer("crypto", mCrypto.get());
+ ALOGV("instantiateDecoder: mCrypto: %p (%d) isSecure: %d", mCrypto.get(),
+ (mCrypto != NULL ? mCrypto->getStrongCount() : 0),
+ (mSourceFlags & Source::FLAG_SECURE) != 0);
}
+ (*decoder)->configure(format);
+
if (!audio) {
sp<AMessage> params = new AMessage();
float rate = getFrameRate();
@@ -1760,12 +1916,27 @@
// Take into account sample aspect ratio if necessary:
int32_t sarWidth, sarHeight;
if (inputFormat->findInt32("sar-width", &sarWidth)
- && inputFormat->findInt32("sar-height", &sarHeight)) {
+ && inputFormat->findInt32("sar-height", &sarHeight)
+ && sarWidth > 0 && sarHeight > 0) {
ALOGV("Sample aspect ratio %d : %d", sarWidth, sarHeight);
displayWidth = (displayWidth * sarWidth) / sarHeight;
ALOGV("display dimensions %d x %d", displayWidth, displayHeight);
+ } else {
+ int32_t width, height;
+ if (inputFormat->findInt32("display-width", &width)
+ && inputFormat->findInt32("display-height", &height)
+ && width > 0 && height > 0
+ && displayWidth > 0 && displayHeight > 0) {
+ if (displayHeight * (int64_t)width / height > (int64_t)displayWidth) {
+ displayHeight = (int32_t)(displayWidth * (int64_t)height / width);
+ } else {
+ displayWidth = (int32_t)(displayHeight * (int64_t)width / height);
+ }
+ ALOGV("Video display width and height are overridden to %d x %d",
+ displayWidth, displayHeight);
+ }
}
int32_t rotationDegrees;
@@ -1990,10 +2161,9 @@
}
}
-void NuPlayer::performSeek(int64_t seekTimeUs) {
- ALOGV("performSeek seekTimeUs=%lld us (%.2f secs)",
- (long long)seekTimeUs,
- seekTimeUs / 1E6);
+void NuPlayer::performSeek(int64_t seekTimeUs, MediaPlayerSeekMode mode) {
+ ALOGV("performSeek seekTimeUs=%lld us (%.2f secs), mode=%d",
+ (long long)seekTimeUs, seekTimeUs / 1E6, mode);
if (mSource == NULL) {
// This happens when reset occurs right before the loop mode
@@ -2004,7 +2174,7 @@
return;
}
mPreviousSeekTimeUs = seekTimeUs;
- mSource->seekTo(seekTimeUs);
+ mSource->seekTo(seekTimeUs, mode);
++mTimedTextGeneration;
// everything's flushed, continue playback.
@@ -2066,6 +2236,16 @@
mPrepared = false;
mResetting = false;
mSourceStarted = false;
+
+ // Modular DRM
+ if (mCrypto != NULL) {
+ // decoders will be flushed before this so their mCrypto would go away on their own
+ // TODO change to ALOGV
+ ALOGD("performReset mCrypto: %p (%d)", mCrypto.get(),
+ (mCrypto != NULL ? mCrypto->getStrongCount() : 0));
+ mCrypto.clear();
+ }
+ mIsDrmProtected = false;
}
void NuPlayer::performScanSources() {
@@ -2160,6 +2340,7 @@
case Source::kWhatPrepared:
{
+ ALOGV("NuPlayer::onSourceNotify Source::kWhatPrepared source: %p", mSource.get());
if (mSource == NULL) {
// This is a stale notification from a source that was
// asynchronously preparing when the client called reset().
@@ -2194,6 +2375,22 @@
break;
}
+ // Modular DRM
+ case Source::kWhatDrmInfo:
+ {
+ Parcel parcel;
+ sp<ABuffer> drmInfo;
+ CHECK(msg->findBuffer("drmInfo", &drmInfo));
+ parcel.setData(drmInfo->data(), drmInfo->size());
+
+ ALOGV("onSourceNotify() kWhatDrmInfo MEDIA_DRM_INFO drmInfo: %p parcel size: %zu",
+ drmInfo.get(), parcel.dataSize());
+
+ notifyListener(MEDIA_DRM_INFO, 0 /* ext1 */, 0 /* ext2 */, &parcel);
+
+ break;
+ }
+
case Source::kWhatFlagsChanged:
{
uint32_t flags;
@@ -2201,6 +2398,19 @@
sp<NuPlayerDriver> driver = mDriver.promote();
if (driver != NULL) {
+
+ ALOGV("onSourceNotify() kWhatFlagsChanged FLAG_CAN_PAUSE: %d "
+ "FLAG_CAN_SEEK_BACKWARD: %d \n\t\t\t\t FLAG_CAN_SEEK_FORWARD: %d "
+ "FLAG_CAN_SEEK: %d FLAG_DYNAMIC_DURATION: %d \n"
+ "\t\t\t\t FLAG_SECURE: %d FLAG_PROTECTED: %d",
+ (flags & Source::FLAG_CAN_PAUSE) != 0,
+ (flags & Source::FLAG_CAN_SEEK_BACKWARD) != 0,
+ (flags & Source::FLAG_CAN_SEEK_FORWARD) != 0,
+ (flags & Source::FLAG_CAN_SEEK) != 0,
+ (flags & Source::FLAG_DYNAMIC_DURATION) != 0,
+ (flags & Source::FLAG_SECURE) != 0,
+ (flags & Source::FLAG_PROTECTED) != 0);
+
if ((flags & NuPlayer::Source::FLAG_CAN_SEEK) == 0) {
driver->notifyListener(
MEDIA_INFO, MEDIA_INFO_NOT_SEEKABLE, 0);
@@ -2451,6 +2661,167 @@
notifyListener(MEDIA_TIMED_TEXT, 0, 0);
}
}
+
+const char *NuPlayer::getDataSourceType() {
+ switch (mDataSourceType) {
+ case DATA_SOURCE_TYPE_HTTP_LIVE:
+ return "HTTPLive";
+
+ case DATA_SOURCE_TYPE_RTSP:
+ return "RTSP";
+
+ case DATA_SOURCE_TYPE_GENERIC_URL:
+ return "GenURL";
+
+ case DATA_SOURCE_TYPE_GENERIC_FD:
+ return "GenFD";
+
+ case DATA_SOURCE_TYPE_MEDIA:
+ return "Media";
+
+ case DATA_SOURCE_TYPE_STREAM:
+ return "Stream";
+
+ case DATA_SOURCE_TYPE_NONE:
+ default:
+ return "None";
+ }
+ }
+
+// Modular DRM begin
+status_t NuPlayer::prepareDrm(const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId)
+{
+ ALOGV("prepareDrm ");
+
+ // Passing to the looper anyway; called in a pre-config prepared state so no race on mCrypto
+ sp<AMessage> msg = new AMessage(kWhatPrepareDrm, this);
+ // synchronous call so just passing the address but with local copies of "const" args
+ uint8_t UUID[16];
+ memcpy(UUID, uuid, sizeof(UUID));
+ Vector<uint8_t> sessionId = drmSessionId;
+ msg->setPointer("uuid", (void*)UUID);
+ msg->setPointer("drmSessionId", (void*)&sessionId);
+
+ sp<AMessage> response;
+ status_t status = msg->postAndAwaitResponse(&response);
+
+ if (status == OK && response != NULL) {
+ CHECK(response->findInt32("status", &status));
+ ALOGV("prepareDrm ret: %d ", status);
+ } else {
+ ALOGE("prepareDrm err: %d", status);
+ }
+
+ return status;
+}
+
+status_t NuPlayer::releaseDrm()
+{
+ ALOGV("releaseDrm ");
+
+ sp<AMessage> msg = new AMessage(kWhatReleaseDrm, this);
+
+ sp<AMessage> response;
+ status_t status = msg->postAndAwaitResponse(&response);
+
+ if (status == OK && response != NULL) {
+ CHECK(response->findInt32("status", &status));
+ ALOGV("releaseDrm ret: %d ", status);
+ } else {
+ ALOGE("releaseDrm err: %d", status);
+ }
+
+ return status;
+}
+
+status_t NuPlayer::onPrepareDrm(const sp<AMessage> &msg)
+{
+ // TODO change to ALOGV
+ ALOGD("onPrepareDrm ");
+
+ status_t status = INVALID_OPERATION;
+ if (mSource == NULL) {
+ ALOGE("onPrepareDrm: No source. onPrepareDrm failed with %d.", status);
+ return status;
+ }
+
+ uint8_t *uuid;
+ Vector<uint8_t> *drmSessionId;
+ CHECK(msg->findPointer("uuid", (void**)&uuid));
+ CHECK(msg->findPointer("drmSessionId", (void**)&drmSessionId));
+
+ status = OK;
+ sp<ICrypto> crypto = NULL;
+
+ status = mSource->prepareDrm(uuid, *drmSessionId, &crypto);
+ if (crypto == NULL) {
+ ALOGE("onPrepareDrm: mSource->prepareDrm failed. status: %d", status);
+ return status;
+ }
+ ALOGV("onPrepareDrm: mSource->prepareDrm succeeded");
+
+ if (mCrypto != NULL) {
+ ALOGE("onPrepareDrm: Unexpected. Already having mCrypto: %p (%d)",
+ mCrypto.get(), mCrypto->getStrongCount());
+ mCrypto.clear();
+ }
+
+ mCrypto = crypto;
+ mIsDrmProtected = true;
+ // TODO change to ALOGV
+ ALOGD("onPrepareDrm: mCrypto: %p (%d)", mCrypto.get(),
+ (mCrypto != NULL ? mCrypto->getStrongCount() : 0));
+
+ return status;
+}
+
+status_t NuPlayer::onReleaseDrm()
+{
+ // TODO change to ALOGV
+ ALOGD("onReleaseDrm ");
+
+ if (!mIsDrmProtected) {
+ ALOGW("onReleaseDrm: Unexpected. mIsDrmProtected is already false.");
+ }
+
+ mIsDrmProtected = false;
+
+ status_t status;
+ if (mCrypto != NULL) {
+ // notifying the source first before removing crypto from codec
+ if (mSource != NULL) {
+ mSource->releaseDrm();
+ }
+
+ status=OK;
+ // first making sure the codecs have released their crypto reference
+ const sp<DecoderBase> &videoDecoder = getDecoder(false/*audio*/);
+ if (videoDecoder != NULL) {
+ status = videoDecoder->releaseCrypto();
+ ALOGV("onReleaseDrm: video decoder ret: %d", status);
+ }
+
+ const sp<DecoderBase> &audioDecoder = getDecoder(true/*audio*/);
+ if (audioDecoder != NULL) {
+ status_t status_audio = audioDecoder->releaseCrypto();
+ if (status == OK) { // otherwise, returning the first error
+ status = status_audio;
+ }
+ ALOGV("onReleaseDrm: audio decoder ret: %d", status_audio);
+ }
+
+ // TODO change to ALOGV
+ ALOGD("onReleaseDrm: mCrypto: %p (%d)", mCrypto.get(),
+ (mCrypto != NULL ? mCrypto->getStrongCount() : 0));
+ mCrypto.clear();
+ } else { // mCrypto == NULL
+ ALOGE("onReleaseDrm: Unexpected. There is no crypto.");
+ status = INVALID_OPERATION;
+ }
+
+ return status;
+}
+// Modular DRM end
////////////////////////////////////////////////////////////////////////////////
sp<AMessage> NuPlayer::Source::getFormat(bool audio) {
@@ -2483,12 +2854,24 @@
}
void NuPlayer::Source::notifyPrepared(status_t err) {
+ ALOGV("Source::notifyPrepared %d", err);
sp<AMessage> notify = dupNotify();
notify->setInt32("what", kWhatPrepared);
notify->setInt32("err", err);
notify->post();
}
+void NuPlayer::Source::notifyDrmInfo(const sp<ABuffer> &drmInfoBuffer)
+{
+ ALOGV("Source::notifyDrmInfo");
+
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", kWhatDrmInfo);
+ notify->setBuffer("drmInfo", drmInfoBuffer);
+
+ notify->post();
+}
+
void NuPlayer::Source::notifyInstantiateSecureDecoders(const sp<AMessage> &reply) {
sp<AMessage> notify = dupNotify();
notify->setInt32("what", kWhatInstantiateSecureDecoders);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index a002f6f..c69835f 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -19,6 +19,7 @@
#define NU_PLAYER_H_
#include <media/AudioResamplerPublic.h>
+#include <media/ICrypto.h>
#include <media/MediaPlayerInterface.h>
#include <media/stagefright/foundation/AHandler.h>
@@ -50,6 +51,9 @@
void setDataSourceAsync(const sp<DataSource> &source);
+ status_t getDefaultBufferingSettings(BufferingSettings* buffering /* nonnull */);
+ status_t setBufferingSettings(const BufferingSettings& buffering);
+
void prepareAsync();
void setVideoSurfaceTextureAsync(
@@ -70,7 +74,10 @@
// Will notify the driver through "notifySeekComplete" once finished
// and needNotify is true.
- void seekToAsync(int64_t seekTimeUs, bool needNotify = false);
+ void seekToAsync(
+ int64_t seekTimeUs,
+ MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC,
+ bool needNotify = false);
status_t setVideoScalingMode(int32_t mode);
status_t getTrackInfo(Parcel* reply) const;
@@ -82,6 +89,12 @@
sp<MetaData> getFileMeta();
float getFrameRate();
+ // Modular DRM
+ status_t prepareDrm(const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId);
+ status_t releaseDrm();
+
+ const char *getDataSourceType();
+
protected:
virtual ~NuPlayer();
@@ -134,6 +147,10 @@
kWhatGetTrackInfo = 'gTrI',
kWhatGetSelectedTrack = 'gSel',
kWhatSelectTrack = 'selT',
+ kWhatGetDefaultBufferingSettings = 'gDBS',
+ kWhatSetBufferingSettings = 'sBuS',
+ kWhatPrepareDrm = 'pDrm',
+ kWhatReleaseDrm = 'rDrm',
};
wp<NuPlayerDriver> mDriver;
@@ -155,6 +172,8 @@
int32_t mVideoDecoderGeneration;
int32_t mRendererGeneration;
+ int64_t mLastStartedPlayingTimeNs;
+
int64_t mPreviousSeekTimeUs;
List<sp<Action> > mDeferredActions;
@@ -201,6 +220,8 @@
bool mPrepared;
bool mResetting;
bool mSourceStarted;
+ bool mAudioDecoderError;
+ bool mVideoDecoderError;
// Actual pause state, either as requested by client or due to buffering.
bool mPaused;
@@ -213,6 +234,22 @@
// Pause state as requested by source (internally) due to buffering
bool mPausedForBuffering;
+ // Modular DRM
+ sp<ICrypto> mCrypto;
+ bool mIsDrmProtected;
+
+ typedef enum {
+ DATA_SOURCE_TYPE_NONE,
+ DATA_SOURCE_TYPE_HTTP_LIVE,
+ DATA_SOURCE_TYPE_RTSP,
+ DATA_SOURCE_TYPE_GENERIC_URL,
+ DATA_SOURCE_TYPE_GENERIC_FD,
+ DATA_SOURCE_TYPE_MEDIA,
+ DATA_SOURCE_TYPE_STREAM,
+ } DATA_SOURCE_TYPE;
+
+ std::atomic<DATA_SOURCE_TYPE> mDataSourceType;
+
inline const sp<DecoderBase> &getDecoder(bool audio) {
return audio ? mAudioDecoder : mVideoDecoder;
}
@@ -245,7 +282,9 @@
void handleFlushComplete(bool audio, bool isDecoder);
void finishFlushIfPossible();
- void onStart(int64_t startPositionUs = -1);
+ void onStart(
+ int64_t startPositionUs = -1,
+ MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC);
void onResume();
void onPause();
@@ -263,7 +302,7 @@
void processDeferredActions();
- void performSeek(int64_t seekTimeUs);
+ void performSeek(int64_t seekTimeUs, MediaPlayerSeekMode mode);
void performDecoderFlush(FlushCommand audio, FlushCommand video);
void performReset();
void performScanSources();
@@ -282,6 +321,9 @@
void writeTrackInfo(Parcel* reply, const sp<AMessage>& format) const;
+ status_t onPrepareDrm(const sp<AMessage> &msg);
+ status_t onReleaseDrm();
+
DISALLOW_EVIL_CONSTRUCTORS(NuPlayer);
};
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
index 978d360..73b07bb 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
@@ -72,37 +72,37 @@
if (cc->mData1 >= 0x20 && cc->mData1 <= 0x7f) {
// 2 basic chars
- sprintf(tmp, "[%d]Basic: %c %c", cc->mType, cc->mData1, cc->mData2);
+ snprintf(tmp, sizeof(tmp), "[%d]Basic: %c %c", cc->mType, cc->mData1, cc->mData2);
} else if ((cc->mData1 == 0x11 || cc->mData1 == 0x19)
&& cc->mData2 >= 0x30 && cc->mData2 <= 0x3f) {
// 1 special char
- sprintf(tmp, "[%d]Special: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+ snprintf(tmp, sizeof(tmp), "[%d]Special: %02x %02x", cc->mType, cc->mData1, cc->mData2);
} else if ((cc->mData1 == 0x12 || cc->mData1 == 0x1A)
&& cc->mData2 >= 0x20 && cc->mData2 <= 0x3f){
// 1 Spanish/French char
- sprintf(tmp, "[%d]Spanish: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+ snprintf(tmp, sizeof(tmp), "[%d]Spanish: %02x %02x", cc->mType, cc->mData1, cc->mData2);
} else if ((cc->mData1 == 0x13 || cc->mData1 == 0x1B)
&& cc->mData2 >= 0x20 && cc->mData2 <= 0x3f){
// 1 Portuguese/German/Danish char
- sprintf(tmp, "[%d]German: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+ snprintf(tmp, sizeof(tmp), "[%d]German: %02x %02x", cc->mType, cc->mData1, cc->mData2);
} else if ((cc->mData1 == 0x11 || cc->mData1 == 0x19)
&& cc->mData2 >= 0x20 && cc->mData2 <= 0x2f){
// Mid-Row Codes (Table 69)
- sprintf(tmp, "[%d]Mid-row: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+ snprintf(tmp, sizeof(tmp), "[%d]Mid-row: %02x %02x", cc->mType, cc->mData1, cc->mData2);
} else if (((cc->mData1 == 0x14 || cc->mData1 == 0x1c)
&& cc->mData2 >= 0x20 && cc->mData2 <= 0x2f)
||
((cc->mData1 == 0x17 || cc->mData1 == 0x1f)
&& cc->mData2 >= 0x21 && cc->mData2 <= 0x23)){
// Misc Control Codes (Table 70)
- sprintf(tmp, "[%d]Ctrl: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+ snprintf(tmp, sizeof(tmp), "[%d]Ctrl: %02x %02x", cc->mType, cc->mData1, cc->mData2);
} else if ((cc->mData1 & 0x70) == 0x10
&& (cc->mData2 & 0x40) == 0x40
&& ((cc->mData1 & 0x07) || !(cc->mData2 & 0x20)) ) {
// Preamble Address Codes (Table 71)
- sprintf(tmp, "[%d]PAC: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+ snprintf(tmp, sizeof(tmp), "[%d]PAC: %02x %02x", cc->mType, cc->mData1, cc->mData2);
} else {
- sprintf(tmp, "[%d]Invalid: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+ snprintf(tmp, sizeof(tmp), "[%d]Invalid: %02x %02x", cc->mType, cc->mData1, cc->mData2);
}
if (out.size() > 0) {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 594128c..8fe255b 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -23,11 +23,13 @@
#include "NuPlayerCCDecoder.h"
#include "NuPlayerDecoder.h"
+#include "NuPlayerDrm.h"
#include "NuPlayerRenderer.h"
#include "NuPlayerSource.h"
#include <cutils/properties.h>
#include <media/ICrypto.h>
+#include <media/MediaCodecBuffer.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -35,7 +37,7 @@
#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
-
+#include <media/stagefright/SurfaceUtils.h>
#include <gui/Surface.h>
#include "avc_utils.h"
@@ -57,6 +59,7 @@
const sp<AMessage> ¬ify,
const sp<Source> &source,
pid_t pid,
+ uid_t uid,
const sp<Renderer> &renderer,
const sp<Surface> &surface,
const sp<CCDecoder> &ccDecoder)
@@ -66,6 +69,7 @@
mRenderer(renderer),
mCCDecoder(ccDecoder),
mPid(pid),
+ mUid(uid),
mSkipRenderingUntilMediaTimeUs(-1ll),
mNumFramesTotal(0ll),
mNumInputFramesDropped(0ll),
@@ -75,6 +79,8 @@
mIsAudio(true),
mIsVideoAVC(false),
mIsSecure(false),
+ mIsEncrypted(false),
+ mIsEncryptedObservedEarlier(false),
mFormatChangePending(false),
mTimeChangePending(false),
mFrameRateTotal(kDefaultVideoFrameRateTotal),
@@ -91,7 +97,11 @@
}
NuPlayer::Decoder::~Decoder() {
- mCodec->release();
+ // Need to stop looper first since mCodec could be accessed on the mDecoderLooper.
+ stopLooper();
+ if (mCodec != NULL) {
+ mCodec->release();
+ }
releaseAndResetMediaBuffers();
}
@@ -200,6 +210,18 @@
break;
}
+ case kWhatAudioOutputFormatChanged:
+ {
+ if (!isStaleReply(msg)) {
+ status_t err;
+ if (msg->findInt32("err", &err) && err != OK) {
+ ALOGE("Renderer reported 0x%x when changing audio output format", err);
+ handleError(err);
+ }
+ }
+ break;
+ }
+
case kWhatSetVideoSurface:
{
sp<AReplyToken> replyID;
@@ -216,21 +238,21 @@
//
// at this point MediaPlayerService::client has already connected to the
// surface, which MediaCodec does not expect
- err = native_window_api_disconnect(surface.get(), NATIVE_WINDOW_API_MEDIA);
+ err = nativeWindowDisconnect(surface.get(), "kWhatSetVideoSurface(surface)");
if (err == OK) {
err = mCodec->setSurface(surface);
ALOGI_IF(err, "codec setSurface returned: %d", err);
if (err == OK) {
// reconnect to the old surface as MPS::Client will expect to
// be able to disconnect from it.
- (void)native_window_api_connect(mSurface.get(), NATIVE_WINDOW_API_MEDIA);
+ (void)nativeWindowConnect(mSurface.get(), "kWhatSetVideoSurface(mSurface)");
mSurface = surface;
}
}
if (err != OK) {
// reconnect to the new surface on error as MPS::Client will expect to
// be able to disconnect from it.
- (void)native_window_api_connect(surface.get(), NATIVE_WINDOW_API_MEDIA);
+ (void)nativeWindowConnect(surface.get(), "kWhatSetVideoSurface(err)");
}
}
@@ -240,6 +262,13 @@
break;
}
+ case kWhatDrmReleaseCrypto:
+ {
+ ALOGV("kWhatDrmReleaseCrypto");
+ onReleaseCrypto(msg);
+ break;
+ }
+
default:
DecoderBase::onMessageReceived(msg);
break;
@@ -265,7 +294,7 @@
ALOGV("[%s] onConfigure (surface=%p)", mComponentName.c_str(), mSurface.get());
mCodec = MediaCodec::CreateByType(
- mCodecLooper, mime.c_str(), false /* encoder */, NULL /* err */, mPid);
+ mCodecLooper, mime.c_str(), false /* encoder */, NULL /* err */, mPid, mUid);
int32_t secure = 0;
if (format->findInt32("secure", &secure) && secure != 0) {
if (mCodec != NULL) {
@@ -274,7 +303,7 @@
mCodec->release();
ALOGI("[%s] creating", mComponentName.c_str());
mCodec = MediaCodec::CreateByComponentName(
- mCodecLooper, mComponentName.c_str(), NULL /* err */, mPid);
+ mCodecLooper, mComponentName.c_str(), NULL /* err */, mPid, mUid);
}
}
if (mCodec == NULL) {
@@ -290,15 +319,29 @@
status_t err;
if (mSurface != NULL) {
// disconnect from surface as MediaCodec will reconnect
- err = native_window_api_disconnect(
- mSurface.get(), NATIVE_WINDOW_API_MEDIA);
+ err = nativeWindowDisconnect(mSurface.get(), "onConfigure");
// We treat this as a warning, as this is a preparatory step.
// Codec will try to connect to the surface, which is where
// any error signaling will occur.
ALOGW_IF(err != OK, "failed to disconnect from surface: %d", err);
}
+
+ // Modular DRM
+ void *pCrypto;
+ if (!format->findPointer("crypto", &pCrypto)) {
+ pCrypto = NULL;
+ }
+ sp<ICrypto> crypto = (ICrypto*)pCrypto;
+ // non-encrypted source won't have a crypto
+ mIsEncrypted = (crypto != NULL);
+ // configure is called once; still using OR in case the behavior changes.
+ mIsEncryptedObservedEarlier = mIsEncryptedObservedEarlier || mIsEncrypted;
+ ALOGV("onConfigure mCrypto: %p (%d) mIsSecure: %d",
+ crypto.get(), (crypto != NULL ? crypto->getStrongCount() : 0), mIsSecure);
+
err = mCodec->configure(
- format, mSurface, NULL /* crypto */, 0 /* flags */);
+ format, mSurface, crypto, 0 /* flags */);
+
if (err != OK) {
ALOGE("Failed to configure %s decoder (err=%d)", mComponentName.c_str(), err);
mCodec->release();
@@ -408,17 +451,7 @@
}
void NuPlayer::Decoder::onSetRenderer(const sp<Renderer> &renderer) {
- bool hadNoRenderer = (mRenderer == NULL);
mRenderer = renderer;
- if (hadNoRenderer && mRenderer != NULL) {
- // this means that the widevine legacy source is ready
- onRequestInputBuffers();
- }
-}
-
-void NuPlayer::Decoder::onGetInputBuffers(
- Vector<sp<ABuffer> > *dstBuffers) {
- CHECK_EQ((status_t)OK, mCodec->getWidevineLegacyBuffers(dstBuffers));
}
void NuPlayer::Decoder::onResume(bool notifyComplete) {
@@ -486,8 +519,7 @@
if (mSurface != NULL) {
// reconnect to surface as MediaCodec disconnected from it
- status_t error =
- native_window_api_connect(mSurface.get(), NATIVE_WINDOW_API_MEDIA);
+ status_t error = nativeWindowConnect(mSurface.get(), "onShutdown");
ALOGW_IF(error != NO_ERROR,
"[%s] failed to connect to native window, error=%d",
mComponentName.c_str(), error);
@@ -515,9 +547,7 @@
* returns true if we should request more data
*/
bool NuPlayer::Decoder::doRequestBuffers() {
- // mRenderer is only NULL if we have a legacy widevine source that
- // is not yet ready. In this case we must not fetch input.
- if (isDiscontinuityPending() || mRenderer == NULL) {
+ if (isDiscontinuityPending()) {
return false;
}
status_t err = OK;
@@ -556,12 +586,52 @@
notify->post();
}
+status_t NuPlayer::Decoder::releaseCrypto()
+{
+ ALOGV("releaseCrypto");
+
+ sp<AMessage> msg = new AMessage(kWhatDrmReleaseCrypto, this);
+
+ sp<AMessage> response;
+ status_t status = msg->postAndAwaitResponse(&response);
+ if (status == OK && response != NULL) {
+ CHECK(response->findInt32("status", &status));
+ ALOGV("releaseCrypto ret: %d ", status);
+ } else {
+ ALOGE("releaseCrypto err: %d", status);
+ }
+
+ return status;
+}
+
+void NuPlayer::Decoder::onReleaseCrypto(const sp<AMessage>& msg)
+{
+ status_t status = INVALID_OPERATION;
+ if (mCodec != NULL) {
+ status = mCodec->releaseCrypto();
+ } else {
+ // returning OK if the codec has been already released
+ status = OK;
+ ALOGE("onReleaseCrypto No mCodec. err: %d", status);
+ }
+
+ sp<AMessage> response = new AMessage;
+ response->setInt32("status", status);
+ // Clearing the state as it's tied to crypto. mIsEncryptedObservedEarlier is sticky though
+ // and lasts for the lifetime of this codec. See its use in fetchInputData.
+ mIsEncrypted = false;
+
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ response->postReply(replyID);
+}
+
bool NuPlayer::Decoder::handleAnInputBuffer(size_t index) {
if (isDiscontinuityPending()) {
return false;
}
- sp<ABuffer> buffer;
+ sp<MediaCodecBuffer> buffer;
mCodec->getInputBuffer(index, &buffer);
if (buffer == NULL) {
@@ -628,9 +698,14 @@
int64_t timeUs,
int32_t flags) {
// CHECK_LT(bufferIx, mOutputBuffers.size());
- sp<ABuffer> buffer;
+ sp<MediaCodecBuffer> buffer;
mCodec->getOutputBuffer(index, &buffer);
+ if (buffer == NULL) {
+ handleError(UNKNOWN_ERROR);
+ return false;
+ }
+
if (index >= mOutputBuffers.size()) {
for (size_t i = mOutputBuffers.size(); i <= index; ++i) {
mOutputBuffers.add();
@@ -700,19 +775,18 @@
int64_t durationUs;
bool hasVideo = (mSource->getFormat(false /* audio */) != NULL);
if (getAudioDeepBufferSetting() // override regardless of source duration
- || (!hasVideo
- && mSource->getDuration(&durationUs) == OK
+ || (mSource->getDuration(&durationUs) == OK
&& durationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US)) {
flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
} else {
flags = AUDIO_OUTPUT_FLAG_NONE;
}
- status_t err = mRenderer->openAudioSink(
- format, false /* offloadOnly */, hasVideo, flags, NULL /* isOffloaed */);
- if (err != OK) {
- handleError(err);
- }
+ sp<AMessage> reply = new AMessage(kWhatAudioOutputFormatChanged, this);
+ reply->setInt32("generation", mBufferGeneration);
+ mRenderer->changeAudioFormat(
+ format, false /* offloadOnly */, hasVideo,
+ flags, mSource->isStreaming(), reply);
}
}
@@ -813,7 +887,20 @@
}
dropAccessUnit = false;
- if (!mIsAudio && !mIsSecure) {
+ if (!mIsAudio && !mIsEncrypted) {
+ // Extra safeguard if higher-level behavior changes. Otherwise, not required now.
+ // Preventing the buffer from being processed (and sent to codec) if this is a later
+ // round of playback but this time without prepareDrm. Or if there is a race between
+ // stop (which is not blocking) and releaseDrm allowing buffers being processed after
+ // Crypto has been released (GenericSource currently prevents this race though).
+ // Particularly doing this check before IsAVCReferenceFrame call to prevent parsing
+ // of encrypted data.
+ if (mIsEncryptedObservedEarlier) {
+ ALOGE("fetchInputData: mismatched mIsEncrypted/mIsEncryptedObservedEarlier (0/1)");
+
+ return INVALID_OPERATION;
+ }
+
int32_t layerId = 0;
bool haveLayerId = accessUnit->meta()->findInt32("temporal-layer-id", &layerId);
if (mRenderer->getVideoLateByUs() > 100000ll
@@ -865,43 +952,11 @@
size_t bufferIx;
CHECK(msg->findSize("buffer-ix", &bufferIx));
CHECK_LT(bufferIx, mInputBuffers.size());
- sp<ABuffer> codecBuffer = mInputBuffers[bufferIx];
+ sp<MediaCodecBuffer> codecBuffer = mInputBuffers[bufferIx];
sp<ABuffer> buffer;
bool hasBuffer = msg->findBuffer("buffer", &buffer);
-
- // handle widevine classic source - that fills an arbitrary input buffer
- MediaBuffer *mediaBuffer = NULL;
- if (hasBuffer) {
- mediaBuffer = (MediaBuffer *)(buffer->getMediaBufferBase());
- if (mediaBuffer != NULL) {
- // likely filled another buffer than we requested: adjust buffer index
- size_t ix;
- for (ix = 0; ix < mInputBuffers.size(); ix++) {
- const sp<ABuffer> &buf = mInputBuffers[ix];
- if (buf->data() == mediaBuffer->data()) {
- // all input buffers are dequeued on start, hence the check
- if (!mInputBufferIsDequeued[ix]) {
- ALOGV("[%s] received MediaBuffer for #%zu instead of #%zu",
- mComponentName.c_str(), ix, bufferIx);
- mediaBuffer->release();
- return false;
- }
-
- // TRICKY: need buffer for the metadata, so instead, set
- // codecBuffer to the same (though incorrect) buffer to
- // avoid a memcpy into the codecBuffer
- codecBuffer = buffer;
- codecBuffer->setRange(
- mediaBuffer->range_offset(),
- mediaBuffer->range_length());
- bufferIx = ix;
- break;
- }
- }
- CHECK(ix < mInputBuffers.size());
- }
- }
+ bool needsCopy = true;
if (buffer == NULL /* includes !hasBuffer */) {
int32_t streamErr = ERROR_END_OF_STREAM;
@@ -954,38 +1009,79 @@
flags |= MediaCodec::BUFFER_FLAG_CODECCONFIG;
}
+ // Modular DRM
+ MediaBuffer *mediaBuf = NULL;
+ NuPlayerDrm::CryptoInfo *cryptInfo = NULL;
+
// copy into codec buffer
- if (buffer != codecBuffer) {
+ if (needsCopy) {
if (buffer->size() > codecBuffer->capacity()) {
handleError(ERROR_BUFFER_TOO_SMALL);
mDequeuedInputBuffers.push_back(bufferIx);
return false;
}
- codecBuffer->setRange(0, buffer->size());
- memcpy(codecBuffer->data(), buffer->data(), buffer->size());
- }
- status_t err = mCodec->queueInputBuffer(
- bufferIx,
- codecBuffer->offset(),
- codecBuffer->size(),
- timeUs,
- flags);
+ if (buffer->data() != NULL) {
+ codecBuffer->setRange(0, buffer->size());
+ memcpy(codecBuffer->data(), buffer->data(), buffer->size());
+ } else { // No buffer->data()
+ //Modular DRM
+ mediaBuf = (MediaBuffer*)buffer->getMediaBufferBase();
+ if (mediaBuf != NULL) {
+ codecBuffer->setRange(0, mediaBuf->size());
+ memcpy(codecBuffer->data(), mediaBuf->data(), mediaBuf->size());
+
+ sp<MetaData> meta_data = mediaBuf->meta_data();
+ cryptInfo = NuPlayerDrm::getSampleCryptoInfo(meta_data);
+
+ // since getMediaBuffer() has incremented the refCount
+ mediaBuf->release();
+ } else { // No mediaBuf
+ ALOGE("onInputBufferFetched: buffer->data()/mediaBuf are NULL for %p",
+ buffer.get());
+ handleError(UNKNOWN_ERROR);
+ return false;
+ }
+ } // buffer->data()
+ } // needsCopy
+
+ status_t err;
+ AString errorDetailMsg;
+ if (cryptInfo != NULL) {
+ err = mCodec->queueSecureInputBuffer(
+ bufferIx,
+ codecBuffer->offset(),
+ cryptInfo->subSamples,
+ cryptInfo->numSubSamples,
+ cryptInfo->key,
+ cryptInfo->iv,
+ cryptInfo->mode,
+ cryptInfo->pattern,
+ timeUs,
+ flags,
+ &errorDetailMsg);
+ // synchronous call so done with cryptInfo here
+ free(cryptInfo);
+ } else {
+ err = mCodec->queueInputBuffer(
+ bufferIx,
+ codecBuffer->offset(),
+ codecBuffer->size(),
+ timeUs,
+ flags,
+ &errorDetailMsg);
+ } // no cryptInfo
+
if (err != OK) {
- if (mediaBuffer != NULL) {
- mediaBuffer->release();
- }
- ALOGE("Failed to queue input buffer for %s (err=%d)",
- mComponentName.c_str(), err);
+ ALOGE("onInputBufferFetched: queue%sInputBuffer failed for %s (err=%d, %s)",
+ (cryptInfo != NULL ? "Secure" : ""),
+ mComponentName.c_str(), err, errorDetailMsg.c_str());
handleError(err);
} else {
mInputBufferIsDequeued.editItemAt(bufferIx) = false;
- if (mediaBuffer != NULL) {
- CHECK(mMediaBuffers[bufferIx] == NULL);
- mMediaBuffers.editItemAt(bufferIx) = mediaBuffer;
- }
}
- }
+
+ } // buffer != NULL
return true;
}
@@ -998,7 +1094,7 @@
if (!mIsAudio) {
int64_t timeUs;
- sp<ABuffer> buffer = mOutputBuffers[bufferIx];
+ sp<MediaCodecBuffer> buffer = mOutputBuffers[bufferIx];
buffer->meta()->findInt64("timeUs", &timeUs);
if (mCCDecoder != NULL && mCCDecoder->isSelected()) {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
index 0c619ed..3da2f0b 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
@@ -23,10 +23,13 @@
namespace android {
+class MediaCodecBuffer;
+
struct NuPlayer::Decoder : public DecoderBase {
Decoder(const sp<AMessage> ¬ify,
const sp<Source> &source,
pid_t pid,
+ uid_t uid,
const sp<Renderer> &renderer = NULL,
const sp<Surface> &surface = NULL,
const sp<CCDecoder> &ccDecoder = NULL);
@@ -36,6 +39,8 @@
// sets the output surface of video decoders.
virtual status_t setVideoSurface(const sp<Surface> &surface);
+ virtual status_t releaseCrypto();
+
protected:
virtual ~Decoder();
@@ -44,7 +49,6 @@
virtual void onConfigure(const sp<AMessage> &format);
virtual void onSetParameters(const sp<AMessage> ¶ms);
virtual void onSetRenderer(const sp<Renderer> &renderer);
- virtual void onGetInputBuffers(Vector<sp<ABuffer> > *dstBuffers);
virtual void onResume(bool notifyComplete);
virtual void onFlush();
virtual void onShutdown(bool notifyComplete);
@@ -54,7 +58,9 @@
enum {
kWhatCodecNotify = 'cdcN',
kWhatRenderBuffer = 'rndr',
- kWhatSetVideoSurface = 'sSur'
+ kWhatSetVideoSurface = 'sSur',
+ kWhatAudioOutputFormatChanged = 'aofc',
+ kWhatDrmReleaseCrypto = 'rDrm',
};
enum {
@@ -74,8 +80,8 @@
List<sp<AMessage> > mPendingInputMessages;
- Vector<sp<ABuffer> > mInputBuffers;
- Vector<sp<ABuffer> > mOutputBuffers;
+ Vector<sp<MediaCodecBuffer> > mInputBuffers;
+ Vector<sp<MediaCodecBuffer> > mOutputBuffers;
Vector<sp<ABuffer> > mCSDsForCurrentFormat;
Vector<sp<ABuffer> > mCSDsToSubmit;
Vector<bool> mInputBufferIsDequeued;
@@ -83,6 +89,7 @@
Vector<size_t> mDequeuedInputBuffers;
const pid_t mPid;
+ const uid_t mUid;
int64_t mSkipRenderingUntilMediaTimeUs;
int64_t mNumFramesTotal;
int64_t mNumInputFramesDropped;
@@ -92,6 +99,8 @@
bool mIsAudio;
bool mIsVideoAVC;
bool mIsSecure;
+ bool mIsEncrypted;
+ bool mIsEncryptedObservedEarlier;
bool mFormatChangePending;
bool mTimeChangePending;
float mFrameRateTotal;
@@ -131,6 +140,8 @@
void notifyResumeCompleteIfNecessary();
+ void onReleaseCrypto(const sp<AMessage>& msg);
+
DISALLOW_EVIL_CONSTRUCTORS(Decoder);
};
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
index 04bb61c..d0de7b0 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
@@ -23,6 +23,7 @@
#include "NuPlayerRenderer.h"
+#include <media/MediaCodecBuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -42,8 +43,7 @@
}
NuPlayer::DecoderBase::~DecoderBase() {
- mDecoderLooper->unregisterHandler(id());
- mDecoderLooper->stop();
+ stopLooper();
}
static
@@ -72,6 +72,11 @@
mDecoderLooper->registerHandler(this);
}
+void NuPlayer::DecoderBase::stopLooper() {
+ mDecoderLooper->unregisterHandler(id());
+ mDecoderLooper->stop();
+}
+
void NuPlayer::DecoderBase::setParameters(const sp<AMessage> ¶ms) {
sp<AMessage> msg = new AMessage(kWhatSetParameters, this);
msg->setMessage("params", params);
@@ -91,14 +96,6 @@
PostAndAwaitResponse(msg, &response);
}
-status_t NuPlayer::DecoderBase::getInputBuffers(Vector<sp<ABuffer> > *buffers) const {
- sp<AMessage> msg = new AMessage(kWhatGetInputBuffers, this);
- msg->setPointer("buffers", buffers);
-
- sp<AMessage> response;
- return PostAndAwaitResponse(msg, &response);
-}
-
void NuPlayer::DecoderBase::signalFlush() {
(new AMessage(kWhatFlush, this))->post();
}
@@ -165,20 +162,6 @@
break;
}
- case kWhatGetInputBuffers:
- {
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
-
- Vector<sp<ABuffer> > *dstBuffers;
- CHECK(msg->findPointer("buffers", (void **)&dstBuffers));
-
- onGetInputBuffers(dstBuffers);
-
- (new AMessage)->postReply(replyID);
- break;
- }
-
case kWhatRequestInputBuffers:
{
mRequestInputBuffersPending = false;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
index 9966144..d44c396 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
@@ -27,6 +27,7 @@
struct ABuffer;
struct MediaCodec;
class MediaBuffer;
+class MediaCodecBuffer;
class Surface;
struct NuPlayer::DecoderBase : public AHandler {
@@ -42,7 +43,6 @@
void setRenderer(const sp<Renderer> &renderer);
virtual status_t setVideoSurface(const sp<Surface> &) { return INVALID_OPERATION; }
- status_t getInputBuffers(Vector<sp<ABuffer> > *dstBuffers) const;
void signalFlush();
void signalResume(bool notifyComplete);
void initiateShutdown();
@@ -51,6 +51,10 @@
return mStats;
}
+ virtual status_t releaseCrypto() {
+ return INVALID_OPERATION;
+ }
+
enum {
kWhatInputDiscontinuity = 'inDi',
kWhatVideoSizeChanged = 'viSC',
@@ -65,12 +69,13 @@
virtual ~DecoderBase();
+ void stopLooper();
+
virtual void onMessageReceived(const sp<AMessage> &msg);
virtual void onConfigure(const sp<AMessage> &format) = 0;
virtual void onSetParameters(const sp<AMessage> ¶ms) = 0;
virtual void onSetRenderer(const sp<Renderer> &renderer) = 0;
- virtual void onGetInputBuffers(Vector<sp<ABuffer> > *dstBuffers) = 0;
virtual void onResume(bool notifyComplete) = 0;
virtual void onFlush() = 0;
virtual void onShutdown(bool notifyComplete) = 0;
@@ -90,7 +95,6 @@
kWhatSetParameters = 'setP',
kWhatSetRenderer = 'setR',
kWhatPause = 'paus',
- kWhatGetInputBuffers = 'gInB',
kWhatRequestInputBuffers = 'reqB',
kWhatFlush = 'flus',
kWhatShutdown = 'shuD',
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
index f224635..6b05b53 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
@@ -25,6 +25,7 @@
#include "NuPlayerSource.h"
#include <media/ICrypto.h>
+#include <media/MediaCodecBuffer.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -75,7 +76,7 @@
// format is different.
status_t err = mRenderer->openAudioSink(
format, true /* offloadOnly */, hasVideo,
- AUDIO_OUTPUT_FLAG_NONE /* flags */, NULL /* isOffloaded */);
+ AUDIO_OUTPUT_FLAG_NONE /* flags */, NULL /* isOffloaded */, mSource->isStreaming());
if (err != OK) {
handleError(err);
}
@@ -92,11 +93,6 @@
"ignoring request to change renderer");
}
-void NuPlayer::DecoderPassThrough::onGetInputBuffers(
- Vector<sp<ABuffer> > * /* dstBuffers */) {
- ALOGE("onGetInputBuffers() called unexpectedly");
-}
-
bool NuPlayer::DecoderPassThrough::isStaleReply(const sp<AMessage> &msg) {
int32_t generation;
CHECK(msg->findInt32("generation", &generation));
@@ -298,6 +294,9 @@
return;
}
+ if (streamErr != ERROR_END_OF_STREAM) {
+ handleError(streamErr);
+ }
mReachedEOS = true;
if (mRenderer != NULL) {
mRenderer->queueEOS(true /* audio */, ERROR_END_OF_STREAM);
@@ -319,10 +318,9 @@
int32_t bufferSize = buffer->size();
mCachedBytes += bufferSize;
+ int64_t timeUs = 0;
+ CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
if (mSkipRenderingUntilMediaTimeUs >= 0) {
- int64_t timeUs = 0;
- CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
-
if (timeUs < mSkipRenderingUntilMediaTimeUs) {
ALOGV("[%s] dropping buffer at time %lld as requested.",
mComponentName.c_str(), (long long)timeUs);
@@ -343,7 +341,10 @@
reply->setInt32("generation", mBufferGeneration);
reply->setInt32("size", bufferSize);
- mRenderer->queueBuffer(true /* audio */, buffer, reply);
+ sp<MediaCodecBuffer> mcBuffer = new MediaCodecBuffer(nullptr, buffer);
+ mcBuffer->meta()->setInt64("timeUs", timeUs);
+
+ mRenderer->queueBuffer(true /* audio */, mcBuffer, reply);
++mPendingBuffersToDrain;
ALOGV("onInputBufferFilled: #ToDrain = %zu, cachedBytes = %zu",
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
index 5850efa..173387a 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
@@ -38,7 +38,6 @@
virtual void onConfigure(const sp<AMessage> &format);
virtual void onSetParameters(const sp<AMessage> ¶ms);
virtual void onSetRenderer(const sp<Renderer> &renderer);
- virtual void onGetInputBuffers(Vector<sp<ABuffer> > *dstBuffers);
virtual void onResume(bool notifyComplete);
virtual void onFlush();
virtual void onShutdown(bool notifyComplete);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 0f4dce9..ad788f7 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -31,8 +31,31 @@
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
+#include <media/IMediaAnalyticsService.h>
+
+static const int kDumpLockRetries = 50;
+static const int kDumpLockSleepUs = 20000;
+
namespace android {
+// key for media statistics
+static const char *kKeyPlayer = "nuplayer";
+// attrs for media statistics
+static const char *kPlayerVMime = "android.media.mediaplayer.video.mime";
+static const char *kPlayerVCodec = "android.media.mediaplayer.video.codec";
+static const char *kPlayerWidth = "android.media.mediaplayer.width";
+static const char *kPlayerHeight = "android.media.mediaplayer.height";
+static const char *kPlayerFrames = "android.media.mediaplayer.frames";
+static const char *kPlayerFramesDropped = "android.media.mediaplayer.dropped";
+static const char *kPlayerAMime = "android.media.mediaplayer.audio.mime";
+static const char *kPlayerACodec = "android.media.mediaplayer.audio.codec";
+static const char *kPlayerDuration = "android.media.mediaplayer.durationMs";
+static const char *kPlayerPlaying = "android.media.mediaplayer.playingMs";
+static const char *kPlayerError = "android.media.mediaplayer.err";
+static const char *kPlayerErrorCode = "android.media.mediaplayer.errcode";
+static const char *kPlayerDataSourceType = "android.media.mediaplayer.dataSource";
+
+
NuPlayerDriver::NuPlayerDriver(pid_t pid)
: mState(STATE_IDLE),
mIsAsyncPrepare(false),
@@ -41,20 +64,26 @@
mDurationUs(-1),
mPositionUs(-1),
mSeekInProgress(false),
+ mPlayingTimeUs(0),
mLooper(new ALooper),
+ mPlayer(new NuPlayer(pid)),
mPlayerFlags(0),
+ mAnalyticsItem(NULL),
mAtEOS(false),
mLooping(false),
mAutoLoop(false) {
- ALOGV("NuPlayerDriver(%p)", this);
+ ALOGD("NuPlayerDriver(%p) created, clientPid(%d)", this, pid);
mLooper->setName("NuPlayerDriver Looper");
+ // set up an analytics record
+ mAnalyticsItem = new MediaAnalyticsItem(kKeyPlayer);
+ mAnalyticsItem->generateSessionID();
+
mLooper->start(
false, /* runOnCallingThread */
true, /* canCallJava */
PRIORITY_AUDIO);
- mPlayer = new NuPlayer(pid);
mLooper->registerHandler(mPlayer);
mPlayer->setDriver(this);
@@ -63,6 +92,15 @@
NuPlayerDriver::~NuPlayerDriver() {
ALOGV("~NuPlayerDriver(%p)", this);
mLooper->stop();
+
+ // finalize any pending metrics, usually a no-op.
+ updateMetrics("destructor");
+ logMetrics("destructor");
+
+ if (mAnalyticsItem != NULL) {
+ delete mAnalyticsItem;
+ mAnalyticsItem = NULL;
+ }
}
status_t NuPlayerDriver::initCheck() {
@@ -183,6 +221,30 @@
return OK;
}
+status_t NuPlayerDriver::getDefaultBufferingSettings(BufferingSettings* buffering) {
+ ALOGV("getDefaultBufferingSettings(%p)", this);
+ {
+ Mutex::Autolock autoLock(mLock);
+ if (mState == STATE_IDLE) {
+ return INVALID_OPERATION;
+ }
+ }
+
+ return mPlayer->getDefaultBufferingSettings(buffering);
+}
+
+status_t NuPlayerDriver::setBufferingSettings(const BufferingSettings& buffering) {
+ ALOGV("setBufferingSettings(%p)", this);
+ {
+ Mutex::Autolock autoLock(mLock);
+ if (mState == STATE_IDLE) {
+ return INVALID_OPERATION;
+ }
+ }
+
+ return mPlayer->setBufferingSettings(buffering);
+}
+
status_t NuPlayerDriver::prepare() {
ALOGV("prepare(%p)", this);
Mutex::Autolock autoLock(mLock);
@@ -208,7 +270,8 @@
mAtEOS = false;
mState = STATE_STOPPED_AND_PREPARING;
mIsAsyncPrepare = false;
- mPlayer->seekToAsync(0, true /* needNotify */);
+ mPlayer->seekToAsync(0, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */,
+ true /* needNotify */);
while (mState == STATE_STOPPED_AND_PREPARING) {
mCondition.wait(mLock);
}
@@ -233,7 +296,8 @@
mAtEOS = false;
mState = STATE_STOPPED_AND_PREPARING;
mIsAsyncPrepare = true;
- mPlayer->seekToAsync(0, true /* needNotify */);
+ mPlayer->seekToAsync(0, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */,
+ true /* needNotify */);
return OK;
default:
return INVALID_OPERATION;
@@ -382,8 +446,8 @@
return mPlayer->getSyncSettings(sync, videoFps);
}
-status_t NuPlayerDriver::seekTo(int msec) {
- ALOGD("seekTo(%p) %d ms at state %d", this, msec, mState);
+status_t NuPlayerDriver::seekTo(int msec, MediaPlayerSeekMode mode) {
+ ALOGD("seekTo(%p) (%d ms, %d) at state %d", this, msec, mode, mState);
Mutex::Autolock autoLock(mLock);
int64_t seekTimeUs = msec * 1000ll;
@@ -398,7 +462,7 @@
mSeekInProgress = true;
// seeks can take a while, so we essentially paused
notifyListener_l(MEDIA_PAUSED);
- mPlayer->seekToAsync(seekTimeUs, true /* needNotify */);
+ mPlayer->seekToAsync(seekTimeUs, mode, true /* needNotify */);
break;
}
@@ -448,8 +512,107 @@
return OK;
}
+void NuPlayerDriver::updateMetrics(const char *where) {
+ if (where == NULL) {
+ where = "unknown";
+ }
+ ALOGV("updateMetrics(%p) from %s at state %d", this, where, mState);
+
+ // gather the final stats for this record
+ Vector<sp<AMessage>> trackStats;
+ mPlayer->getStats(&trackStats);
+
+ if (trackStats.size() > 0) {
+ for (size_t i = 0; i < trackStats.size(); ++i) {
+ const sp<AMessage> &stats = trackStats.itemAt(i);
+
+ AString mime;
+ stats->findString("mime", &mime);
+
+ AString name;
+ stats->findString("component-name", &name);
+
+ if (mime.startsWith("video/")) {
+ int32_t width, height;
+ mAnalyticsItem->setCString(kPlayerVMime, mime.c_str());
+ if (!name.empty()) {
+ mAnalyticsItem->setCString(kPlayerVCodec, name.c_str());
+ }
+
+ if (stats->findInt32("width", &width)
+ && stats->findInt32("height", &height)) {
+ mAnalyticsItem->setInt32(kPlayerWidth, width);
+ mAnalyticsItem->setInt32(kPlayerHeight, height);
+ }
+
+ int64_t numFramesTotal = 0;
+ int64_t numFramesDropped = 0;
+ stats->findInt64("frames-total", &numFramesTotal);
+ stats->findInt64("frames-dropped-output", &numFramesDropped);
+
+ mAnalyticsItem->setInt64(kPlayerFrames, numFramesTotal);
+ mAnalyticsItem->setInt64(kPlayerFramesDropped, numFramesDropped);
+
+
+ } else if (mime.startsWith("audio/")) {
+ mAnalyticsItem->setCString(kPlayerAMime, mime.c_str());
+ if (!name.empty()) {
+ mAnalyticsItem->setCString(kPlayerACodec, name.c_str());
+ }
+ }
+ }
+ }
+
+ // always provide duration and playing time, even if they have 0/unknown values.
+
+ // getDuration() uses mLock for mutex -- careful where we use it.
+ int duration_ms = -1;
+ getDuration(&duration_ms);
+ mAnalyticsItem->setInt64(kPlayerDuration, duration_ms);
+
+ mAnalyticsItem->setInt64(kPlayerPlaying, (mPlayingTimeUs+500)/1000 );
+
+ mAnalyticsItem->setCString(kPlayerDataSourceType, mPlayer->getDataSourceType());
+}
+
+
+void NuPlayerDriver::logMetrics(const char *where) {
+ if (where == NULL) {
+ where = "unknown";
+ }
+ ALOGV("logMetrics(%p) from %s at state %d", this, where, mState);
+
+ if (mAnalyticsItem == NULL || mAnalyticsItem->isEnabled() == false) {
+ return;
+ }
+
+ // log only non-empty records
+ // we always updateMetrics() before we get here
+ // and that always injects 3 fields (duration, playing time, and
+ // datasource) into the record.
+ // So the canonical "empty" record has 3 elements in it.
+ if (mAnalyticsItem->count() > 3) {
+
+ mAnalyticsItem->setFinalized(true);
+ mAnalyticsItem->selfrecord();
+
+ // re-init in case we prepare() and start() again.
+ delete mAnalyticsItem ;
+ mAnalyticsItem = new MediaAnalyticsItem("nuplayer");
+ if (mAnalyticsItem) {
+ mAnalyticsItem->generateSessionID();
+ }
+ } else {
+ ALOGV("did not have anything to record");
+ }
+}
+
status_t NuPlayerDriver::reset() {
ALOGD("reset(%p) at state %d", this, mState);
+
+ updateMetrics("reset");
+ logMetrics("reset");
+
Mutex::Autolock autoLock(mLock);
switch (mState) {
@@ -476,9 +639,7 @@
notifyListener_l(MEDIA_STOPPED);
}
- char value[PROPERTY_VALUE_MAX];
- if (property_get("persist.debug.sf.stats", value, NULL) &&
- (!strcmp("1", value) || !strcasecmp("true", value))) {
+ if (property_get_bool("persist.debug.sf.stats", false)) {
Vector<String16> args;
dump(-1, args);
}
@@ -493,6 +654,7 @@
mDurationUs = -1;
mPositionUs = -1;
mLooping = false;
+ mPlayingTimeUs = 0;
return OK;
}
@@ -569,7 +731,16 @@
return INVALID_OPERATION;
}
-status_t NuPlayerDriver::getParameter(int /* key */, Parcel * /* reply */) {
+status_t NuPlayerDriver::getParameter(int key, Parcel *reply) {
+
+ if (key == FOURCC('m','t','r','X')) {
+ // mtrX -- a play on 'metrics' (not matrix)
+ // gather current info all together, parcel it, and send it back
+ updateMetrics("api");
+ mAnalyticsItem->writeToParcel(reply);
+ return OK;
+ }
+
return INVALID_OPERATION;
}
@@ -624,6 +795,11 @@
mDurationUs = durationUs;
}
+void NuPlayerDriver::notifyMorePlayingTimeUs(int64_t playingUs) {
+ Mutex::Autolock autoLock(mLock);
+ mPlayingTimeUs += playingUs;
+}
+
void NuPlayerDriver::notifySeekComplete() {
ALOGV("notifySeekComplete(%p)", this);
Mutex::Autolock autoLock(mLock);
@@ -657,6 +833,24 @@
AString logString(" NuPlayer\n");
char buf[256] = {0};
+ bool locked = false;
+ for (int i = 0; i < kDumpLockRetries; ++i) {
+ if (mLock.tryLock() == NO_ERROR) {
+ locked = true;
+ break;
+ }
+ usleep(kDumpLockSleepUs);
+ }
+
+ if (locked) {
+ snprintf(buf, sizeof(buf), " state(%d), atEOS(%d), looping(%d), autoLoop(%d)\n",
+ mState, mAtEOS, mLooping, mAutoLoop);
+ mLock.unlock();
+ } else {
+ snprintf(buf, sizeof(buf), " NPD(%p) lock is taken\n", this);
+ }
+ logString.append(buf);
+
for (size_t i = 0; i < trackStats.size(); ++i) {
const sp<AMessage> &stats = trackStats.itemAt(i);
@@ -715,8 +909,8 @@
void NuPlayerDriver::notifyListener_l(
int msg, int ext1, int ext2, const Parcel *in) {
- ALOGD("notifyListener_l(%p), (%d, %d, %d), loop setting(%d, %d)",
- this, msg, ext1, ext2, mAutoLoop, mLooping);
+ ALOGD("notifyListener_l(%p), (%d, %d, %d, %d), loop setting(%d, %d)",
+ this, msg, ext1, ext2, (in == NULL ? -1 : (int)in->dataSize()), mAutoLoop, mLooping);
switch (msg) {
case MEDIA_PLAYBACK_COMPLETE:
{
@@ -750,6 +944,15 @@
case MEDIA_ERROR:
{
+ // when we have an error, add it to the analytics for this playback.
+ // ext1 is our primary 'error type' value. Only add ext2 when non-zero.
+ // [test against msg is due to fall through from previous switch value]
+ if (msg == MEDIA_ERROR) {
+ mAnalyticsItem->setInt32(kPlayerError, ext1);
+ if (ext2 != 0) {
+ mAnalyticsItem->setInt32(kPlayerErrorCode, ext2);
+ }
+ }
mAtEOS = true;
break;
}
@@ -774,6 +977,8 @@
}
void NuPlayerDriver::notifyPrepareCompleted(status_t err) {
+ ALOGV("notifyPrepareCompleted %d", err);
+
Mutex::Autolock autoLock(mLock);
if (mState != STATE_PREPARING) {
@@ -818,4 +1023,29 @@
mPlayerFlags = flags;
}
+// Modular DRM
+status_t NuPlayerDriver::prepareDrm(const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId)
+{
+ ALOGV("prepareDrm(%p) state: %d", this, mState);
+
+ // leaving the state verification for mediaplayer.cpp
+ status_t ret = mPlayer->prepareDrm(uuid, drmSessionId);
+
+ ALOGV("prepareDrm ret: %d", ret);
+
+ return ret;
+}
+
+status_t NuPlayerDriver::releaseDrm()
+{
+ ALOGV("releaseDrm(%p) state: %d", this, mState);
+
+ // leaving the state verification for mediaplayer.cpp
+ status_t ret = mPlayer->releaseDrm();
+
+ ALOGV("releaseDrm ret: %d", ret);
+
+ return ret;
+}
+
} // namespace android
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
index 58008f0..c5ddcb0 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
@@ -16,6 +16,7 @@
#include <media/MediaPlayerInterface.h>
+#include <media/MediaAnalyticsItem.h>
#include <media/stagefright/foundation/ABase.h>
namespace android {
@@ -43,6 +44,11 @@
virtual status_t setVideoSurfaceTexture(
const sp<IGraphicBufferProducer> &bufferProducer);
+
+ virtual status_t getDefaultBufferingSettings(
+ BufferingSettings* buffering /* nonnull */) override;
+ virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
+
virtual status_t prepare();
virtual status_t prepareAsync();
virtual status_t start();
@@ -53,7 +59,8 @@
virtual status_t getPlaybackSettings(AudioPlaybackRate *rate);
virtual status_t setSyncSettings(const AVSyncSettings &sync, float videoFpsHint);
virtual status_t getSyncSettings(AVSyncSettings *sync, float *videoFps);
- virtual status_t seekTo(int msec);
+ virtual status_t seekTo(
+ int msec, MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC);
virtual status_t getCurrentPosition(int *msec);
virtual status_t getDuration(int *msec);
virtual status_t reset();
@@ -74,11 +81,16 @@
void notifyResetComplete();
void notifySetSurfaceComplete();
void notifyDuration(int64_t durationUs);
+ void notifyMorePlayingTimeUs(int64_t timeUs);
void notifySeekComplete();
void notifySeekComplete_l();
void notifyListener(int msg, int ext1 = 0, int ext2 = 0, const Parcel *in = NULL);
void notifyFlagsChanged(uint32_t flags);
+ // Modular DRM
+ virtual status_t prepareDrm(const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId);
+ virtual status_t releaseDrm();
+
protected:
virtual ~NuPlayerDriver();
@@ -111,17 +123,23 @@
int64_t mDurationUs;
int64_t mPositionUs;
bool mSeekInProgress;
+ int64_t mPlayingTimeUs;
// <<<
sp<ALooper> mLooper;
- sp<NuPlayer> mPlayer;
+ const sp<NuPlayer> mPlayer;
sp<AudioSink> mAudioSink;
uint32_t mPlayerFlags;
+ MediaAnalyticsItem *mAnalyticsItem;
+
bool mAtEOS;
bool mLooping;
bool mAutoLoop;
+ void updateMetrics(const char *where);
+ void logMetrics(const char *where);
+
status_t prepare_l();
status_t start_l();
void notifyListener_l(int msg, int ext1 = 0, int ext2 = 0, const Parcel *in = NULL);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
new file mode 100644
index 0000000..b7c9db7
--- /dev/null
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "NuPlayerDrm"
+
+#include "NuPlayerDrm.h"
+
+#include <binder/IServiceManager.h>
+#include <media/IMediaDrmService.h>
+#include <utils/Log.h>
+
+
+namespace android {
+
+// static helpers - internal
+
+sp<IDrm> NuPlayerDrm::CreateDrm(status_t *pstatus)
+{
+ status_t &status = *pstatus;
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<IBinder> binder = sm->getService(String16("media.drm"));
+ ALOGV("CreateDrm binder %p", (binder != NULL ? binder.get() : 0));
+
+ sp<IMediaDrmService> service = interface_cast<IMediaDrmService>(binder);
+ if (service == NULL) {
+ ALOGE("CreateDrm failed at IMediaDrmService");
+ return NULL;
+ }
+
+ sp<IDrm> drm = service->makeDrm();
+ if (drm == NULL) {
+ ALOGE("CreateDrm failed at makeDrm");
+ return NULL;
+ }
+
+ // this is before plugin creation so NO_INIT is fine
+ status = drm->initCheck();
+ if (status != OK && status != NO_INIT) {
+ ALOGE("CreateDrm failed drm->initCheck(): %d", status);
+ return NULL;
+ }
+ return drm;
+}
+
+sp<ICrypto> NuPlayerDrm::createCrypto(status_t *pstatus)
+{
+ status_t &status = *pstatus;
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<IBinder> binder = sm->getService(String16("media.drm"));
+
+ sp<IMediaDrmService> service = interface_cast<IMediaDrmService>(binder);
+ if (service == NULL) {
+ status = UNKNOWN_ERROR;
+ ALOGE("CreateCrypto failed at IMediaDrmService");
+ return NULL;
+ }
+
+ sp<ICrypto> crypto = service->makeCrypto();
+ if (crypto == NULL) {
+ status = UNKNOWN_ERROR;
+ ALOGE("createCrypto failed");
+ return NULL;
+ }
+
+ // this is before plugin creation so NO_INIT is fine
+ status = crypto->initCheck();
+ if (status != OK && status != NO_INIT) {
+ ALOGE("createCrypto failed crypto->initCheck(): %d", status);
+ return NULL;
+ }
+
+ return crypto;
+}
+
+Vector<DrmUUID> NuPlayerDrm::parsePSSH(const void *pssh, size_t psshsize)
+{
+ Vector<DrmUUID> drmSchemes, empty;
+ const int DATALEN_SIZE = 4;
+
+ // the format of the buffer is 1 or more of:
+ // {
+ // 16 byte uuid
+ // 4 byte data length N
+ // N bytes of data
+ // }
+ // Determine the number of entries in the source data.
+ // Since we got the data from stagefright, we trust it is valid and properly formatted.
+
+ const uint8_t *data = (const uint8_t*)pssh;
+ size_t len = psshsize;
+ size_t numentries = 0;
+ while (len > 0) {
+ if (len < DrmUUID::UUID_SIZE) {
+ ALOGE("ParsePSSH: invalid PSSH data");
+ return empty;
+ }
+
+ const uint8_t *uuidPtr = data;
+
+ // skip uuid
+ data += DrmUUID::UUID_SIZE;
+ len -= DrmUUID::UUID_SIZE;
+
+ // get data length
+ if (len < DATALEN_SIZE) {
+ ALOGE("ParsePSSH: invalid PSSH data");
+ return empty;
+ }
+
+ uint32_t datalen = *((uint32_t*)data);
+ data += DATALEN_SIZE;
+ len -= DATALEN_SIZE;
+
+ if (len < datalen) {
+ ALOGE("ParsePSSH: invalid PSSH data");
+ return empty;
+ }
+
+ // skip the data
+ data += datalen;
+ len -= datalen;
+
+ DrmUUID _uuid(uuidPtr);
+ drmSchemes.add(_uuid);
+
+ ALOGV("ParsePSSH[%zu]: %s: %s", numentries,
+ _uuid.toHexString().string(),
+ DrmUUID::arrayToHex(data, datalen).string()
+ );
+
+ numentries++;
+ }
+
+ return drmSchemes;
+}
+
+Vector<DrmUUID> NuPlayerDrm::getSupportedDrmSchemes(const void *pssh, size_t psshsize)
+{
+ Vector<DrmUUID> psshDRMs = parsePSSH(pssh, psshsize);
+
+ Vector<DrmUUID> supportedDRMs;
+ // temporary DRM object for crypto scheme inquiry (without creating a plugin)
+ status_t status = OK;
+ sp<IDrm> drm = CreateDrm(&status);
+ if (drm != NULL) {
+ for (size_t i = 0; i < psshDRMs.size(); i++) {
+ DrmUUID uuid = psshDRMs[i];
+ if (drm->isCryptoSchemeSupported(uuid.ptr(), String8()))
+ supportedDRMs.add(uuid);
+ }
+
+ drm.clear();
+ } else {
+ ALOGE("getSupportedDrmSchemes: Can't create Drm obj: %d", status);
+ }
+
+ ALOGV("getSupportedDrmSchemes: psshDRMs: %zu supportedDRMs: %zu",
+ psshDRMs.size(), supportedDRMs.size());
+
+ return supportedDRMs;
+}
+
+// static helpers - public
+
+sp<ICrypto> NuPlayerDrm::createCryptoAndPlugin(const uint8_t uuid[16],
+ const Vector<uint8_t> &drmSessionId, status_t &status)
+{
+ // Extra check
+ if (drmSessionId.isEmpty()) {
+ status = INVALID_OPERATION;
+ ALOGE("createCryptoAndPlugin: Failed. Empty drmSessionId. status: %d", status);
+ return NULL;
+ }
+
+ status = OK;
+ sp<ICrypto> crypto = createCrypto(&status);
+ if (crypto == NULL) {
+ ALOGE("createCryptoAndPlugin: createCrypto failed. status: %d", status);
+ return NULL;
+ }
+ ALOGV("createCryptoAndPlugin: createCrypto succeeded");
+
+ status = crypto->createPlugin(uuid, drmSessionId.array(), drmSessionId.size());
+ if (status != OK) {
+ ALOGE("createCryptoAndPlugin: createCryptoPlugin failed. status: %d", status);
+ // crypto will clean itself when leaving the current scope
+ return NULL;
+ }
+
+ return crypto;
+}
+
+// Parcel has only a private copy constructor, so we pass it in rather than returning it
+void NuPlayerDrm::retrieveDrmInfo(const void *pssh, size_t psshsize, Parcel *parcel)
+{
+ // 1) PSSH bytes
+ parcel->writeUint32(psshsize);
+ parcel->writeByteArray(psshsize, (const uint8_t*)pssh);
+
+ ALOGV("retrieveDrmInfo: MEDIA_DRM_INFO PSSH: size: %zu %s", psshsize,
+ DrmUUID::arrayToHex((uint8_t*)pssh, psshsize).string());
+
+ // 2) supportedDRMs
+ Vector<DrmUUID> supportedDRMs = getSupportedDrmSchemes(pssh, psshsize);
+ parcel->writeUint32(supportedDRMs.size());
+ for (size_t i = 0; i < supportedDRMs.size(); i++) {
+ DrmUUID uuid = supportedDRMs[i];
+ parcel->writeByteArray(DrmUUID::UUID_SIZE, uuid.ptr());
+
+ ALOGV("retrieveDrmInfo: MEDIA_DRM_INFO supportedScheme[%zu] %s", i,
+ uuid.toHexString().string());
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////
+/// Helpers for NuPlayerDecoder
+////////////////////////////////////////////////////////////////////////////////////////////
+
+NuPlayerDrm::CryptoInfo *NuPlayerDrm::makeCryptoInfo(
+ int numSubSamples,
+ uint8_t key[kBlockSize],
+ uint8_t iv[kBlockSize],
+ CryptoPlugin::Mode mode,
+ size_t *clearbytes,
+ size_t *encryptedbytes)
+{
+ // size needed to store all the crypto data
+ size_t cryptosize = sizeof(CryptoInfo) +
+ sizeof(CryptoPlugin::SubSample) * numSubSamples;
+ CryptoInfo *ret = (CryptoInfo*) malloc(cryptosize);
+ if (ret == NULL) {
+ ALOGE("couldn't allocate %zu bytes", cryptosize);
+ return NULL;
+ }
+ ret->numSubSamples = numSubSamples;
+ memcpy(ret->key, key, kBlockSize);
+ memcpy(ret->iv, iv, kBlockSize);
+ ret->mode = mode;
+ ret->pattern.mEncryptBlocks = 0;
+ ret->pattern.mSkipBlocks = 0;
+ ret->subSamples = (CryptoPlugin::SubSample*)(ret + 1);
+ CryptoPlugin::SubSample *subSamples = ret->subSamples;
+
+ for (int i = 0; i < numSubSamples; i++) {
+ subSamples[i].mNumBytesOfClearData = (clearbytes == NULL) ? 0 : clearbytes[i];
+ subSamples[i].mNumBytesOfEncryptedData = (encryptedbytes == NULL) ?
+ 0 :
+ encryptedbytes[i];
+ }
+
+ return ret;
+}
+
+NuPlayerDrm::CryptoInfo *NuPlayerDrm::getSampleCryptoInfo(sp<MetaData> meta)
+{
+ uint32_t type;
+ const void *crypteddata;
+ size_t cryptedsize;
+
+ if (meta == NULL) {
+ ALOGE("getSampleCryptoInfo: Unexpected. No meta data for sample.");
+ return NULL;
+ }
+
+ if (!meta->findData(kKeyEncryptedSizes, &type, &crypteddata, &cryptedsize)) {
+ return NULL;
+ }
+ size_t numSubSamples = cryptedsize / sizeof(size_t);
+
+ if (numSubSamples <= 0) {
+ ALOGE("getSampleCryptoInfo INVALID numSubSamples: %zu", numSubSamples);
+ return NULL;
+ }
+
+ const void *cleardata;
+ size_t clearsize;
+ if (meta->findData(kKeyPlainSizes, &type, &cleardata, &clearsize)) {
+ if (clearsize != cryptedsize) {
+ // The two must be of the same length.
+ ALOGE("getSampleCryptoInfo mismatch cryptedsize: %zu != clearsize: %zu",
+ cryptedsize, clearsize);
+ return NULL;
+ }
+ }
+
+ const void *key;
+ size_t keysize;
+ if (meta->findData(kKeyCryptoKey, &type, &key, &keysize)) {
+ if (keysize != kBlockSize) {
+ ALOGE("getSampleCryptoInfo Keys must be %d bytes in length: %zu",
+ kBlockSize, keysize);
+ // Keys must be 16 bytes in length.
+ return NULL;
+ }
+ }
+
+ const void *iv;
+ size_t ivsize;
+ if (meta->findData(kKeyCryptoIV, &type, &iv, &ivsize)) {
+ if (ivsize != kBlockSize) {
+ ALOGE("getSampleCryptoInfo IV must be %d bytes in length: %zu",
+ kBlockSize, ivsize);
+ // IVs must be 16 bytes in length.
+ return NULL;
+ }
+ }
+
+ int32_t mode;
+ if (!meta->findInt32(kKeyCryptoMode, &mode)) {
+ mode = CryptoPlugin::kMode_AES_CTR;
+ }
+
+ return makeCryptoInfo(numSubSamples,
+ (uint8_t*) key,
+ (uint8_t*) iv,
+ (CryptoPlugin::Mode)mode,
+ (size_t*) cleardata,
+ (size_t*) crypteddata);
+}
+
+} // namespace android
+
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDrm.h b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.h
new file mode 100644
index 0000000..6b8a2d9
--- /dev/null
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NUPLAYER_DRM_H_
+#define NUPLAYER_DRM_H_
+
+#include <binder/Parcel.h>
+#include <media/ICrypto.h>
+#include <media/IDrm.h>
+#include <media/stagefright/MetaData.h> // for CryptoInfo
+
+
+namespace android {
+
+ struct DrmUUID {
+ static const int UUID_SIZE = 16;
+
+ DrmUUID() {
+ memset(this->uuid, 0, sizeof(uuid));
+ }
+
+ // to allow defining Vector/KeyedVector of UUID type
+ DrmUUID(const DrmUUID &a) {
+ memcpy(this->uuid, a.uuid, sizeof(uuid));
+ }
+
+ // to allow defining Vector/KeyedVector of UUID type
+ DrmUUID(const uint8_t uuid_in[UUID_SIZE]) {
+ memcpy(this->uuid, uuid_in, sizeof(uuid));
+ }
+
+ const uint8_t *ptr() const {
+ return uuid;
+ }
+
+ String8 toHexString() const {
+ return arrayToHex(uuid, UUID_SIZE);
+ }
+
+ static String8 toHexString(const uint8_t uuid_in[UUID_SIZE]) {
+ return arrayToHex(uuid_in, UUID_SIZE);
+ }
+
+ static String8 arrayToHex(const uint8_t *array, int bytes) {
+ String8 result;
+ for (int i = 0; i < bytes; i++) {
+ result.appendFormat("%02x", array[i]);
+ }
+
+ return result;
+ }
+
+ protected:
+ uint8_t uuid[UUID_SIZE];
+ };
+
+
+ struct NuPlayerDrm {
+
+ // static helpers - internal
+
+ protected:
+ static sp<IDrm> CreateDrm(status_t *pstatus);
+ static sp<ICrypto> createCrypto(status_t *pstatus);
+ static Vector<DrmUUID> parsePSSH(const void *pssh, size_t psshsize);
+ static Vector<DrmUUID> getSupportedDrmSchemes(const void *pssh, size_t psshsize);
+
+ // static helpers - public
+
+ public:
+ static sp<ICrypto> createCryptoAndPlugin(const uint8_t uuid[16],
+ const Vector<uint8_t> &drmSessionId, status_t &status);
+ // Parcel has only a private copy constructor, so we pass it in rather than returning it
+ static void retrieveDrmInfo(const void *pssh, size_t psshsize, Parcel *parcel);
+
+ ////////////////////////////////////////////////////////////////////////////////////////////
+ /// Helpers for NuPlayerDecoder
+ ////////////////////////////////////////////////////////////////////////////////////////////
+
+ static const uint8_t kBlockSize = 16; // AES_BLOCK_SIZE
+
+ struct CryptoInfo {
+ int numSubSamples;
+ uint8_t key[kBlockSize];
+ uint8_t iv[kBlockSize];
+ CryptoPlugin::Mode mode;
+ CryptoPlugin::Pattern pattern;
+ CryptoPlugin::SubSample *subSamples;
+ };
+
+ static CryptoInfo *makeCryptoInfo(
+ int numSubSamples,
+ uint8_t key[kBlockSize],
+ uint8_t iv[kBlockSize],
+ CryptoPlugin::Mode mode,
+ size_t *clearbytes,
+ size_t *encryptedbytes);
+
+ static CryptoInfo *getSampleCryptoInfo(sp<MetaData> meta);
+
+ }; // NuPlayerDrm
+
+} // android
+
+#endif //NUPLAYER_DRM_H_
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index f8453eb..758db1f 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -21,7 +21,6 @@
#include "NuPlayerRenderer.h"
#include <algorithm>
#include <cutils/properties.h>
-#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AUtils.h>
@@ -31,6 +30,7 @@
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
#include <media/stagefright/VideoFrameScheduler.h>
+#include <media/MediaCodecBuffer.h>
#include <inttypes.h>
@@ -144,9 +144,10 @@
// Try to avoid racing condition in case callback is still on.
Mutex::Autolock autoLock(mLock);
- mUseAudioCallback = false;
- flushQueue(&mAudioQueue);
- flushQueue(&mVideoQueue);
+ if (mUseAudioCallback) {
+ flushQueue(&mAudioQueue);
+ flushQueue(&mVideoQueue);
+ }
mWakeLock.clear();
mMediaClock.clear();
mVideoScheduler.clear();
@@ -156,12 +157,12 @@
void NuPlayer::Renderer::queueBuffer(
bool audio,
- const sp<ABuffer> &buffer,
+ const sp<MediaCodecBuffer> &buffer,
const sp<AMessage> ¬ifyConsumed) {
sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
msg->setInt32("queueGeneration", getQueueGeneration(audio));
msg->setInt32("audio", static_cast<int32_t>(audio));
- msg->setBuffer("buffer", buffer);
+ msg->setObject("buffer", buffer);
msg->setMessage("notifyConsumed", notifyConsumed);
msg->post();
}
@@ -297,7 +298,7 @@
++mVideoDrainGeneration;
}
- clearAnchorTime_l();
+ mMediaClock->clearAnchor();
mVideoLateByUs = 0;
mSyncQueues = false;
}
@@ -374,7 +375,8 @@
}
}
-void NuPlayer::Renderer::clearAnchorTime_l() {
+// Called on renderer looper.
+void NuPlayer::Renderer::clearAnchorTime() {
mMediaClock->clearAnchor();
mAnchorTimeMediaUs = -1;
mAnchorNumFramesWritten = -1;
@@ -395,18 +397,20 @@
bool offloadOnly,
bool hasVideo,
uint32_t flags,
- bool *isOffloaded) {
+ bool *isOffloaded,
+ bool isStreaming) {
sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
msg->setMessage("format", format);
msg->setInt32("offload-only", offloadOnly);
msg->setInt32("has-video", hasVideo);
msg->setInt32("flags", flags);
+ msg->setInt32("isStreaming", isStreaming);
sp<AMessage> response;
- msg->postAndAwaitResponse(&response);
+ status_t postStatus = msg->postAndAwaitResponse(&response);
int32_t err;
- if (!response->findInt32("err", &err)) {
+ if (postStatus != OK || response.get() == nullptr || !response->findInt32("err", &err)) {
err = INVALID_OPERATION;
} else if (err == OK && isOffloaded != NULL) {
int32_t offload;
@@ -423,6 +427,27 @@
msg->postAndAwaitResponse(&response);
}
+void NuPlayer::Renderer::changeAudioFormat(
+ const sp<AMessage> &format,
+ bool offloadOnly,
+ bool hasVideo,
+ uint32_t flags,
+ bool isStreaming,
+ const sp<AMessage> ¬ify) {
+ sp<AMessage> meta = new AMessage;
+ meta->setMessage("format", format);
+ meta->setInt32("offload-only", offloadOnly);
+ meta->setInt32("has-video", hasVideo);
+ meta->setInt32("flags", flags);
+ meta->setInt32("isStreaming", isStreaming);
+
+ sp<AMessage> msg = new AMessage(kWhatChangeAudioFormat, this);
+ msg->setInt32("queueGeneration", getQueueGeneration(true /* audio */));
+ msg->setMessage("notify", notify);
+ msg->setMessage("meta", meta);
+ msg->post();
+}
+
void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) {
switch (msg->what()) {
case kWhatOpenAudioSink:
@@ -439,7 +464,10 @@
uint32_t flags;
CHECK(msg->findInt32("flags", (int32_t *)&flags));
- status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags);
+ uint32_t isStreaming;
+ CHECK(msg->findInt32("isStreaming", (int32_t *)&isStreaming));
+
+ status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);
sp<AMessage> response = new AMessage;
response->setInt32("err", err);
@@ -470,6 +498,41 @@
break;
}
+ case kWhatChangeAudioFormat:
+ {
+ int32_t queueGeneration;
+ CHECK(msg->findInt32("queueGeneration", &queueGeneration));
+
+ sp<AMessage> notify;
+ CHECK(msg->findMessage("notify", ¬ify));
+
+ if (offloadingAudio()) {
+ ALOGW("changeAudioFormat should NOT be called in offload mode");
+ notify->setInt32("err", INVALID_OPERATION);
+ notify->post();
+ break;
+ }
+
+ sp<AMessage> meta;
+ CHECK(msg->findMessage("meta", &meta));
+
+ if (queueGeneration != getQueueGeneration(true /* audio */)
+ || mAudioQueue.empty()) {
+ onChangeAudioFormat(meta, notify);
+ break;
+ }
+
+ QueueEntry entry;
+ entry.mNotifyConsumed = notify;
+ entry.mMeta = meta;
+
+ Mutex::Autolock autoLock(mLock);
+ mAudioQueue.push_back(entry);
+ postDrainAudioQueue_l();
+
+ break;
+ }
+
case kWhatDrainAudioQueue:
{
mDrainAudioQueuePending = false;
@@ -869,7 +932,7 @@
while (it != mAudioQueue.end()) {
int32_t eos;
QueueEntry *entry = &*it++;
- if (entry->mBuffer == NULL
+ if ((entry->mBuffer == nullptr && entry->mNotifyConsumed == nullptr)
|| (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) {
itEOS = it;
foundEOS = true;
@@ -879,9 +942,14 @@
if (foundEOS) {
// post all replies before EOS and drop the samples
for (it = mAudioQueue.begin(); it != itEOS; it++) {
- if (it->mBuffer == NULL) {
- // delay doesn't matter as we don't even have an AudioTrack
- notifyEOS(true /* audio */, it->mFinalResult);
+ if (it->mBuffer == nullptr) {
+ if (it->mNotifyConsumed == nullptr) {
+ // delay doesn't matter as we don't even have an AudioTrack
+ notifyEOS(true /* audio */, it->mFinalResult);
+ } else {
+ // TAG for re-opening audio sink.
+ onChangeAudioFormat(it->mMeta, it->mNotifyConsumed);
+ }
} else {
it->mNotifyConsumed->post();
}
@@ -933,10 +1001,22 @@
while (!mAudioQueue.empty()) {
QueueEntry *entry = &*mAudioQueue.begin();
- mLastAudioBufferDrained = entry->mBufferOrdinal;
-
if (entry->mBuffer == NULL) {
+ if (entry->mNotifyConsumed != nullptr) {
+ // TAG for re-open audio sink.
+ onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
+ mAudioQueue.erase(mAudioQueue.begin());
+ continue;
+ }
+
// EOS
+ if (mPaused) {
+ // Do not notify EOS when paused.
+ // This is needed to avoid switch to next clip while in pause.
+ ALOGV("onDrainAudioQueue(): Do not notify EOS when paused");
+ return false;
+ }
+
int64_t postEOSDelayUs = 0;
if (mAudioSink->needsTrailingPadding()) {
postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
@@ -956,6 +1036,8 @@
return false;
}
+ mLastAudioBufferDrained = entry->mBufferOrdinal;
+
// ignore 0-sized buffer which could be EOS marker with no data
if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
int64_t mediaTimeUs;
@@ -1291,8 +1373,7 @@
if (!mVideoSampleReceived && !mHasAudio) {
// This will ensure that the first frame after a flush won't be used as anchor
// when renderer is in paused state, because resume can happen any time after seek.
- Mutex::Autolock autoLock(mLock);
- clearAnchorTime_l();
+ clearAnchorTime();
}
}
@@ -1368,8 +1449,9 @@
}
}
- sp<ABuffer> buffer;
- CHECK(msg->findBuffer("buffer", &buffer));
+ sp<RefBase> obj;
+ CHECK(msg->findObject("buffer", &obj));
+ sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());
sp<AMessage> notifyConsumed;
CHECK(msg->findMessage("notifyConsumed", ¬ifyConsumed));
@@ -1395,8 +1477,8 @@
return;
}
- sp<ABuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
- sp<ABuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;
+ sp<MediaCodecBuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
+ sp<MediaCodecBuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;
if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
// EOS signalled on either queue.
@@ -1501,8 +1583,8 @@
// Therefore we'll stop syncing the queues if at least one of them
// is flushed.
syncQueuesDone_l();
- clearAnchorTime_l();
}
+ clearAnchorTime();
ALOGV("flushing %s", audio ? "audio" : "video");
if (audio) {
@@ -1573,6 +1655,9 @@
if (entry->mBuffer != NULL) {
entry->mNotifyConsumed->post();
+ } else if (entry->mNotifyConsumed != nullptr) {
+ // Is it needed to open audio sink now?
+ onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
}
queue->erase(queue->begin());
@@ -1610,10 +1695,7 @@
}
CHECK(!mDrainAudioQueuePending);
mNumFramesWritten = 0;
- {
- Mutex::Autolock autoLock(mLock);
- mAnchorNumFramesWritten = -1;
- }
+ mAnchorNumFramesWritten = -1;
uint32_t written;
if (mAudioSink->getFramesWritten(&written) == OK) {
mNumFramesWritten = written;
@@ -1770,7 +1852,8 @@
const sp<AMessage> &format,
bool offloadOnly,
bool hasVideo,
- uint32_t flags) {
+ uint32_t flags,
+ bool isStreaming) {
ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
offloadOnly, offloadingAudio());
bool audioSinkChanged = false;
@@ -1823,7 +1906,7 @@
offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
offloadInfo.bit_rate = avgBitRate;
offloadInfo.has_video = hasVideo;
- offloadInfo.is_streaming = true;
+ offloadInfo.is_streaming = isStreaming;
if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
ALOGV("openAudioSink: no change in offload mode");
@@ -1961,5 +2044,30 @@
mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
}
+void NuPlayer::Renderer::onChangeAudioFormat(
+ const sp<AMessage> &meta, const sp<AMessage> ¬ify) {
+ sp<AMessage> format;
+ CHECK(meta->findMessage("format", &format));
+
+ int32_t offloadOnly;
+ CHECK(meta->findInt32("offload-only", &offloadOnly));
+
+ int32_t hasVideo;
+ CHECK(meta->findInt32("has-video", &hasVideo));
+
+ uint32_t flags;
+ CHECK(meta->findInt32("flags", (int32_t *)&flags));
+
+ uint32_t isStreaming;
+ CHECK(meta->findInt32("isStreaming", (int32_t *)&isStreaming));
+
+ status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);
+
+ if (err != OK) {
+ notify->setInt32("err", err);
+ }
+ notify->post();
+}
+
} // namespace android
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index fe7f8fa..e6850b5 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -25,9 +25,9 @@
namespace android {
-struct ABuffer;
class AWakeLock;
struct MediaClock;
+class MediaCodecBuffer;
struct VideoFrameScheduler;
struct NuPlayer::Renderer : public AHandler {
@@ -46,7 +46,7 @@
void queueBuffer(
bool audio,
- const sp<ABuffer> &buffer,
+ const sp<MediaCodecBuffer> &buffer,
const sp<AMessage> ¬ifyConsumed);
void queueEOS(bool audio, status_t finalResult);
@@ -60,8 +60,6 @@
void signalTimeDiscontinuity();
- void signalAudioSinkChanged();
-
void signalDisableOffloadAudio();
void signalEnableOffloadAudio();
@@ -78,9 +76,19 @@
bool offloadOnly,
bool hasVideo,
uint32_t flags,
- bool *isOffloaded);
+ bool *isOffloaded,
+ bool isStreaming);
void closeAudioSink();
+ // re-open audio sink after all pending audio buffers played.
+ void changeAudioFormat(
+ const sp<AMessage> &format,
+ bool offloadOnly,
+ bool hasVideo,
+ uint32_t flags,
+ bool isStreaming,
+ const sp<AMessage> ¬ify);
+
enum {
kWhatEOS = 'eos ',
kWhatFlushComplete = 'fluC',
@@ -118,14 +126,19 @@
kWhatResume = 'resm',
kWhatOpenAudioSink = 'opnA',
kWhatCloseAudioSink = 'clsA',
+ kWhatChangeAudioFormat = 'chgA',
kWhatStopAudioSink = 'stpA',
kWhatDisableOffloadAudio = 'noOA',
kWhatEnableOffloadAudio = 'enOA',
kWhatSetVideoFrameRate = 'sVFR',
};
+ // if mBuffer != nullptr, it's a buffer containing real data.
+ // else if mNotifyConsumed == nullptr, it's EOS.
+ // else it's a tag for re-opening audio sink in different format.
struct QueueEntry {
- sp<ABuffer> mBuffer;
+ sp<MediaCodecBuffer> mBuffer;
+ sp<AMessage> mMeta;
sp<AMessage> mNotifyConsumed;
size_t mOffset;
status_t mFinalResult;
@@ -220,7 +233,7 @@
int64_t getPendingAudioPlayoutDurationUs(int64_t nowUs);
void postDrainAudioQueue_l(int64_t delayUs = 0);
- void clearAnchorTime_l();
+ void clearAnchorTime();
void clearAudioFirstAnchorTime_l();
void setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs);
void setVideoLateByUs(int64_t lateUs);
@@ -256,8 +269,10 @@
const sp<AMessage> &format,
bool offloadOnly,
bool hasVideo,
- uint32_t flags);
+ uint32_t flags,
+ bool isStreaming);
void onCloseAudioSink();
+ void onChangeAudioFormat(const sp<AMessage> &meta, const sp<AMessage> ¬ify);
void notifyEOS(bool audio, status_t finalResult, int64_t delayUs = 0);
void notifyFlushComplete(bool audio);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
index 438db0c..8ba9c0d 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
@@ -20,9 +20,10 @@
#include "NuPlayer.h"
+#include <media/ICrypto.h>
+#include <media/mediaplayer.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/MetaData.h>
-#include <media/mediaplayer.h>
#include <utils/Vector.h>
namespace android {
@@ -37,8 +38,8 @@
FLAG_CAN_SEEK_FORWARD = 4, // the "10 sec forward button"
FLAG_CAN_SEEK = 8, // the "seek bar"
FLAG_DYNAMIC_DURATION = 16,
- FLAG_SECURE = 32,
- FLAG_PROTECTED = 64,
+ FLAG_SECURE = 32, // Secure codec is required.
+ FLAG_PROTECTED = 64, // The screen needs to be protected (screenshot is disabled).
};
enum {
@@ -55,6 +56,8 @@
kWhatQueueDecoderShutdown,
kWhatDrmNoLicense,
kWhatInstantiateSecureDecoders,
+ // Modular DRM
+ kWhatDrmInfo,
};
// The provides message is used to notify the player about various
@@ -63,6 +66,10 @@
: mNotify(notify) {
}
+ virtual status_t getDefaultBufferingSettings(
+ BufferingSettings* buffering /* nonnull */) = 0;
+ virtual status_t setBufferingSettings(const BufferingSettings& buffering) = 0;
+
virtual void prepareAsync() = 0;
virtual void start() = 0;
@@ -108,7 +115,9 @@
return INVALID_OPERATION;
}
- virtual status_t seekTo(int64_t /* seekTimeUs */) {
+ virtual status_t seekTo(
+ int64_t /* seekTimeUs */,
+ MediaPlayerSeekMode /* mode */ = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC) {
return INVALID_OPERATION;
}
@@ -126,6 +135,17 @@
virtual void setOffloadAudio(bool /* offload */) {}
+ // Modular DRM
+ virtual status_t prepareDrm(
+ const uint8_t /*uuid*/[16], const Vector<uint8_t> &/*drmSessionId*/,
+ sp<ICrypto> */*crypto*/) {
+ return INVALID_OPERATION;
+ }
+
+ virtual status_t releaseDrm() {
+ return INVALID_OPERATION;
+ }
+
protected:
virtual ~Source() {}
@@ -137,6 +157,8 @@
void notifyVideoSizeChanged(const sp<AMessage> &format = NULL);
void notifyInstantiateSecureDecoders(const sp<AMessage> &reply);
void notifyPrepared(status_t err = OK);
+ // Modular DRM
+ void notifyDrmInfo(const sp<ABuffer> &buffer);
private:
sp<AMessage> mNotify;
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index 79e157f..8b3d0dc 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -32,11 +32,11 @@
const int64_t kNearEOSTimeoutUs = 2000000ll; // 2 secs
-// Buffer Underflow/Prepare/StartServer/Overflow Marks
-const int64_t NuPlayer::RTSPSource::kUnderflowMarkUs = 1000000ll;
-const int64_t NuPlayer::RTSPSource::kPrepareMarkUs = 3000000ll;
-const int64_t NuPlayer::RTSPSource::kStartServerMarkUs = 5000000ll;
-const int64_t NuPlayer::RTSPSource::kOverflowMarkUs = 10000000ll;
+// Default Buffer Underflow/Prepare/StartServer/Overflow Marks
+static const int kUnderflowMarkMs = 1000; // 1 second
+static const int kPrepareMarkMs = 3000; // 3 seconds
+//static const int kStartServerMarkMs = 5000;
+static const int kOverflowMarkMs = 10000; // 10 seconds
NuPlayer::RTSPSource::RTSPSource(
const sp<AMessage> ¬ify,
@@ -62,6 +62,7 @@
mSeekGeneration(0),
mEOSTimeoutAudio(0),
mEOSTimeoutVideo(0) {
+ getDefaultBufferingSettings(&mBufferingSettings);
if (headers) {
mExtraHeaders = *headers;
@@ -83,6 +84,34 @@
}
}
+status_t NuPlayer::RTSPSource::getDefaultBufferingSettings(
+ BufferingSettings* buffering /* nonnull */) {
+ buffering->mInitialBufferingMode = BUFFERING_MODE_TIME_ONLY;
+ buffering->mRebufferingMode = BUFFERING_MODE_TIME_ONLY;
+ buffering->mInitialWatermarkMs = kPrepareMarkMs;
+ buffering->mRebufferingWatermarkLowMs = kUnderflowMarkMs;
+ buffering->mRebufferingWatermarkHighMs = kOverflowMarkMs;
+
+ return OK;
+}
+
+status_t NuPlayer::RTSPSource::setBufferingSettings(const BufferingSettings& buffering) {
+ if (mLooper == NULL) {
+ mBufferingSettings = buffering;
+ return OK;
+ }
+
+ sp<AMessage> msg = new AMessage(kWhatSetBufferingSettings, this);
+ writeToAMessage(msg, buffering);
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+ if (err == OK && response != NULL) {
+ CHECK(response->findInt32("err", &err));
+ }
+
+ return err;
+}
+
void NuPlayer::RTSPSource::prepareAsync() {
if (mIsSDP && mHTTPService == NULL) {
notifyPrepared(BAD_VALUE);
@@ -258,7 +287,7 @@
}
status_t NuPlayer::RTSPSource::getDuration(int64_t *durationUs) {
- *durationUs = 0ll;
+ *durationUs = -1ll;
int64_t audioDurationUs;
if (mAudioTrack != NULL
@@ -279,10 +308,11 @@
return OK;
}
-status_t NuPlayer::RTSPSource::seekTo(int64_t seekTimeUs) {
+status_t NuPlayer::RTSPSource::seekTo(int64_t seekTimeUs, MediaPlayerSeekMode mode) {
sp<AMessage> msg = new AMessage(kWhatPerformSeek, this);
msg->setInt32("generation", ++mSeekGeneration);
msg->setInt64("timeUs", seekTimeUs);
+ msg->setInt32("mode", mode);
sp<AMessage> response;
status_t err = msg->postAndAwaitResponse(&response);
@@ -327,7 +357,8 @@
int64_t bufferedDurationUs = src->getBufferedDurationUs(&finalResult);
// isFinished when duration is 0 checks for EOS result only
- if (bufferedDurationUs > kPrepareMarkUs || src->isFinished(/* duration */ 0)) {
+ if (bufferedDurationUs > mBufferingSettings.mInitialWatermarkMs * 1000
+ || src->isFinished(/* duration */ 0)) {
++preparedCount;
}
@@ -335,13 +366,16 @@
++overflowCount;
++finishedCount;
} else {
- if (bufferedDurationUs < kUnderflowMarkUs) {
+ if (bufferedDurationUs < mBufferingSettings.mRebufferingWatermarkLowMs * 1000) {
++underflowCount;
}
- if (bufferedDurationUs > kOverflowMarkUs) {
+ if (bufferedDurationUs > mBufferingSettings.mRebufferingWatermarkHighMs * 1000) {
++overflowCount;
}
- if (bufferedDurationUs < kStartServerMarkUs) {
+ int64_t startServerMarkUs =
+ (mBufferingSettings.mRebufferingWatermarkLowMs
+ + mBufferingSettings.mRebufferingWatermarkHighMs) / 2 * 1000ll;
+ if (bufferedDurationUs < startServerMarkUs) {
++startCount;
}
}
@@ -465,9 +499,12 @@
}
int64_t seekTimeUs;
+ int32_t mode;
CHECK(msg->findInt64("timeUs", &seekTimeUs));
+ CHECK(msg->findInt32("mode", &mode));
- performSeek(seekTimeUs);
+ // TODO: add "mode" to performSeek.
+ performSeek(seekTimeUs/*, (MediaPlayerSeekMode)mode */);
return;
} else if (msg->what() == kWhatPollBuffering) {
onPollBuffering();
@@ -475,6 +512,36 @@
} else if (msg->what() == kWhatSignalEOS) {
onSignalEOS(msg);
return;
+ } else if (msg->what() == kWhatSetBufferingSettings) {
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+
+ BufferingSettings buffering;
+ readFromAMessage(msg, &buffering);
+
+ status_t err = OK;
+ if (buffering.IsSizeBasedBufferingMode(buffering.mInitialBufferingMode)
+ || buffering.IsSizeBasedBufferingMode(buffering.mRebufferingMode)
+ || (buffering.mRebufferingWatermarkLowMs > buffering.mRebufferingWatermarkHighMs
+ && buffering.IsTimeBasedBufferingMode(buffering.mRebufferingMode))) {
+ err = BAD_VALUE;
+ } else {
+ if (buffering.mInitialBufferingMode == BUFFERING_MODE_NONE) {
+ buffering.mInitialWatermarkMs = BufferingSettings::kNoWatermark;
+ }
+ if (buffering.mRebufferingMode == BUFFERING_MODE_NONE) {
+ buffering.mRebufferingWatermarkLowMs = BufferingSettings::kNoWatermark;
+ buffering.mRebufferingWatermarkHighMs = INT32_MAX;
+ }
+
+ mBufferingSettings = buffering;
+ }
+
+ sp<AMessage> response = new AMessage;
+ response->setInt32("err", err);
+ response->postReply(replyID);
+
+ return;
}
CHECK_EQ(msg->what(), kWhatNotify);
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.h b/media/libmediaplayerservice/nuplayer/RTSPSource.h
index c7834ef..0812991 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.h
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.h
@@ -40,6 +40,10 @@
uid_t uid = 0,
bool isSDP = false);
+ virtual status_t getDefaultBufferingSettings(
+ BufferingSettings* buffering /* nonnull */) override;
+ virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
+
virtual void prepareAsync();
virtual void start();
virtual void stop();
@@ -49,7 +53,9 @@
virtual status_t dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit);
virtual status_t getDuration(int64_t *durationUs);
- virtual status_t seekTo(int64_t seekTimeUs);
+ virtual status_t seekTo(
+ int64_t seekTimeUs,
+ MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC) override;
void onMessageReceived(const sp<AMessage> &msg);
@@ -65,6 +71,7 @@
kWhatPerformSeek = 'seek',
kWhatPollBuffering = 'poll',
kWhatSignalEOS = 'eos ',
+ kWhatSetBufferingSettings = 'sBuS',
};
enum State {
@@ -79,12 +86,6 @@
kFlagIncognito = 1,
};
- // Buffer Prepare/Underflow/Overflow/Resume Marks
- static const int64_t kPrepareMarkUs;
- static const int64_t kUnderflowMarkUs;
- static const int64_t kOverflowMarkUs;
- static const int64_t kStartServerMarkUs;
-
struct TrackInfo {
sp<AnotherPacketSource> mSource;
@@ -108,6 +109,7 @@
bool mBuffering;
bool mInPreparationPhase;
bool mEOSPending;
+ BufferingSettings mBufferingSettings;
sp<ALooper> mLooper;
sp<MyHandler> mHandler;
diff --git a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
index d6b1e8c..fc0803b 100644
--- a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
@@ -51,6 +51,22 @@
}
}
+status_t NuPlayer::StreamingSource::getDefaultBufferingSettings(
+ BufferingSettings *buffering /* nonnull */) {
+ *buffering = BufferingSettings();
+ return OK;
+}
+
+status_t NuPlayer::StreamingSource::setBufferingSettings(
+ const BufferingSettings &buffering) {
+ if (buffering.mInitialBufferingMode != BUFFERING_MODE_NONE
+ || buffering.mRebufferingMode != BUFFERING_MODE_NONE) {
+ return BAD_VALUE;
+ }
+
+ return OK;
+}
+
void NuPlayer::StreamingSource::prepareAsync() {
if (mLooper == NULL) {
mLooper = new ALooper;
diff --git a/media/libmediaplayerservice/nuplayer/StreamingSource.h b/media/libmediaplayerservice/nuplayer/StreamingSource.h
index db88c7f..2e1d2b3 100644
--- a/media/libmediaplayerservice/nuplayer/StreamingSource.h
+++ b/media/libmediaplayerservice/nuplayer/StreamingSource.h
@@ -32,6 +32,10 @@
const sp<AMessage> ¬ify,
const sp<IStreamSource> &source);
+ virtual status_t getDefaultBufferingSettings(
+ BufferingSettings* buffering /* nonnull */) override;
+ virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
+
virtual void prepareAsync();
virtual void start();
diff --git a/media/libmediaplayerservice/tests/Android.mk b/media/libmediaplayerservice/tests/Android.mk
index c0b3265..0b9b85f 100644
--- a/media/libmediaplayerservice/tests/Android.mk
+++ b/media/libmediaplayerservice/tests/Android.mk
@@ -14,6 +14,7 @@
libmediaplayerservice \
libmediadrm \
libutils \
+ android.hardware.drm@1.0 \
LOCAL_C_INCLUDES := \
frameworks/av/include \
diff --git a/media/libnbaio/Android.bp b/media/libnbaio/Android.bp
index 615b541..f511876 100644
--- a/media/libnbaio/Android.bp
+++ b/media/libnbaio/Android.bp
@@ -2,16 +2,15 @@
name: "libnbaio",
srcs: [
"AudioBufferProviderSource.cpp",
- "AudioStreamOutSink.cpp",
"AudioStreamInSource.cpp",
- "NBAIO.cpp",
+ "AudioStreamOutSink.cpp",
"MonoPipe.cpp",
"MonoPipeReader.cpp",
+ "NBAIO.cpp",
+ "NBLog.cpp",
"Pipe.cpp",
"PipeReader.cpp",
"SourceAudioBufferProvider.cpp",
-
- "NBLog.cpp",
],
// libsndfile license is incompatible; uncomment to use for local debug only
@@ -33,4 +32,10 @@
"-Werror",
"-Wall",
],
+
+ include_dirs: ["system/media/audio_utils/include"],
+
+ local_include_dirs: ["include"],
+
+ export_include_dirs: ["include"],
}
diff --git a/media/libnbaio/AudioStreamInSource.cpp b/media/libnbaio/AudioStreamInSource.cpp
index 2dc3050..1054b68 100644
--- a/media/libnbaio/AudioStreamInSource.cpp
+++ b/media/libnbaio/AudioStreamInSource.cpp
@@ -19,33 +19,38 @@
#include <cutils/compiler.h>
#include <utils/Log.h>
+#include <media/audiohal/StreamHalInterface.h>
#include <media/nbaio/AudioStreamInSource.h>
namespace android {
-AudioStreamInSource::AudioStreamInSource(audio_stream_in *stream) :
+AudioStreamInSource::AudioStreamInSource(sp<StreamInHalInterface> stream) :
NBAIO_Source(),
mStream(stream),
mStreamBufferSizeBytes(0),
mFramesOverrun(0),
mOverruns(0)
{
- ALOG_ASSERT(stream != NULL);
+ ALOG_ASSERT(stream != 0);
}
AudioStreamInSource::~AudioStreamInSource()
{
+ mStream.clear();
}
ssize_t AudioStreamInSource::negotiate(const NBAIO_Format offers[], size_t numOffers,
NBAIO_Format counterOffers[], size_t& numCounterOffers)
{
if (!Format_isValid(mFormat)) {
- mStreamBufferSizeBytes = mStream->common.get_buffer_size(&mStream->common);
- audio_format_t streamFormat = mStream->common.get_format(&mStream->common);
- uint32_t sampleRate = mStream->common.get_sample_rate(&mStream->common);
- audio_channel_mask_t channelMask =
- (audio_channel_mask_t) mStream->common.get_channels(&mStream->common);
+ status_t result;
+ result = mStream->getBufferSize(&mStreamBufferSizeBytes);
+ if (result != OK) return result;
+ audio_format_t streamFormat;
+ uint32_t sampleRate;
+ audio_channel_mask_t channelMask;
+ result = mStream->getAudioProperties(&sampleRate, &channelMask, &streamFormat);
+ if (result != OK) return result;
mFormat = Format_from_SR_C(sampleRate,
audio_channel_count_from_in_mask(channelMask), streamFormat);
mFrameSize = Format_frameSize(mFormat);
@@ -55,11 +60,14 @@
int64_t AudioStreamInSource::framesOverrun()
{
- uint32_t framesOverrun = mStream->get_input_frames_lost(mStream);
- if (framesOverrun > 0) {
+ uint32_t framesOverrun;
+ status_t result = mStream->getInputFramesLost(&framesOverrun);
+ if (result == OK && framesOverrun > 0) {
mFramesOverrun += framesOverrun;
// FIXME only increment for contiguous ranges
++mOverruns;
+ } else if (result != OK) {
+ ALOGE("Error when retrieving lost frames count from HAL: %d", result);
}
return mFramesOverrun;
}
@@ -69,12 +77,14 @@
if (CC_UNLIKELY(!Format_isValid(mFormat))) {
return NEGOTIATE;
}
- ssize_t bytesRead = mStream->read(mStream, buffer, count * mFrameSize);
- if (bytesRead > 0) {
+ size_t bytesRead;
+ status_t result = mStream->read(buffer, count * mFrameSize, &bytesRead);
+ if (result == OK && bytesRead > 0) {
size_t framesRead = bytesRead / mFrameSize;
mFramesRead += framesRead;
return framesRead;
} else {
+ ALOGE_IF(result != OK, "Error while reading data from HAL: %d", result);
return bytesRead;
}
}
diff --git a/media/libnbaio/AudioStreamOutSink.cpp b/media/libnbaio/AudioStreamOutSink.cpp
index ee44678..cbff87d 100644
--- a/media/libnbaio/AudioStreamOutSink.cpp
+++ b/media/libnbaio/AudioStreamOutSink.cpp
@@ -18,31 +18,36 @@
//#define LOG_NDEBUG 0
#include <utils/Log.h>
+#include <media/audiohal/StreamHalInterface.h>
#include <media/nbaio/AudioStreamOutSink.h>
namespace android {
-AudioStreamOutSink::AudioStreamOutSink(audio_stream_out *stream) :
+AudioStreamOutSink::AudioStreamOutSink(sp<StreamOutHalInterface> stream) :
NBAIO_Sink(),
mStream(stream),
mStreamBufferSizeBytes(0)
{
- ALOG_ASSERT(stream != NULL);
+ ALOG_ASSERT(stream != 0);
}
AudioStreamOutSink::~AudioStreamOutSink()
{
+ mStream.clear();
}
ssize_t AudioStreamOutSink::negotiate(const NBAIO_Format offers[], size_t numOffers,
NBAIO_Format counterOffers[], size_t& numCounterOffers)
{
if (!Format_isValid(mFormat)) {
- mStreamBufferSizeBytes = mStream->common.get_buffer_size(&mStream->common);
- audio_format_t streamFormat = mStream->common.get_format(&mStream->common);
- uint32_t sampleRate = mStream->common.get_sample_rate(&mStream->common);
- audio_channel_mask_t channelMask =
- (audio_channel_mask_t) mStream->common.get_channels(&mStream->common);
+ status_t result;
+ result = mStream->getBufferSize(&mStreamBufferSizeBytes);
+ if (result != OK) return result;
+ audio_format_t streamFormat;
+ uint32_t sampleRate;
+ audio_channel_mask_t channelMask;
+ result = mStream->getAudioProperties(&sampleRate, &channelMask, &streamFormat);
+ if (result != OK) return result;
mFormat = Format_from_SR_C(sampleRate,
audio_channel_count_from_out_mask(channelMask), streamFormat);
mFrameSize = Format_frameSize(mFormat);
@@ -56,25 +61,24 @@
return NEGOTIATE;
}
ALOG_ASSERT(Format_isValid(mFormat));
- ssize_t ret = mStream->write(mStream, buffer, count * mFrameSize);
- if (ret > 0) {
- ret /= mFrameSize;
- mFramesWritten += ret;
+ size_t written;
+ status_t ret = mStream->write(buffer, count * mFrameSize, &written);
+ if (ret == OK && written > 0) {
+ written /= mFrameSize;
+ mFramesWritten += written;
+ return written;
} else {
// FIXME verify HAL implementations are returning the correct error codes e.g. WOULD_BLOCK
+ ALOGE_IF(ret != OK, "Error while writing data to HAL: %d", ret);
+ return ret;
}
- return ret;
}
status_t AudioStreamOutSink::getTimestamp(ExtendedTimestamp ×tamp)
{
- if (mStream->get_presentation_position == NULL) {
- return INVALID_OPERATION;
- }
-
uint64_t position64;
struct timespec time;
- if (mStream->get_presentation_position(mStream, &position64, &time) != OK) {
+ if (mStream->getPresentationPosition(&position64, &time) != OK) {
return INVALID_OPERATION;
}
timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = position64;
diff --git a/media/libnbaio/MonoPipe.cpp b/media/libnbaio/MonoPipe.cpp
index 8d1cb0f..3c5df1a 100644
--- a/media/libnbaio/MonoPipe.cpp
+++ b/media/libnbaio/MonoPipe.cpp
@@ -19,7 +19,6 @@
#define LOG_TAG "MonoPipe"
//#define LOG_NDEBUG 0
-#include <cutils/atomic.h>
#include <cutils/compiler.h>
#include <utils/Log.h>
#include <utils/Trace.h>
@@ -32,11 +31,11 @@
MonoPipe::MonoPipe(size_t reqFrames, const NBAIO_Format& format, bool writeCanBlock) :
NBAIO_Sink(format),
- mReqFrames(reqFrames),
+ // TODO fifo now supports non-power-of-2 buffer sizes, so could remove the roundup
mMaxFrames(roundup(reqFrames)),
mBuffer(malloc(mMaxFrames * Format_frameSize(format))),
- mFront(0),
- mRear(0),
+ mFifo(mMaxFrames, Format_frameSize(format), mBuffer, true /*throttlesWriter*/),
+ mFifoWriter(mFifo),
mWriteTsValid(false),
// mWriteTs
mSetpoint((reqFrames * 11) / 16),
@@ -53,14 +52,14 @@
free(mBuffer);
}
-ssize_t MonoPipe::availableToWrite() const
+ssize_t MonoPipe::availableToWrite()
{
if (CC_UNLIKELY(!mNegotiated)) {
return NEGOTIATE;
}
- // uses mMaxFrames not mReqFrames, so allows "over-filling" the pipe beyond requested limit
- ssize_t ret = mMaxFrames - (mRear - android_atomic_acquire_load(&mFront));
- ALOG_ASSERT((0 <= ret) && (ret <= mMaxFrames));
+ // uses mMaxFrames not reqFrames, so allows "over-filling" the pipe beyond requested limit
+ ssize_t ret = mFifoWriter.available();
+ ALOG_ASSERT(ret <= mMaxFrames);
return ret;
}
@@ -71,38 +70,33 @@
}
size_t totalFramesWritten = 0;
while (count > 0) {
- // can't return a negative value, as we already checked for !mNegotiated
- size_t avail = availableToWrite();
- size_t written = avail;
- if (CC_LIKELY(written > count)) {
- written = count;
- }
- size_t rear = mRear & (mMaxFrames - 1);
- size_t part1 = mMaxFrames - rear;
- if (part1 > written) {
- part1 = written;
- }
- if (CC_LIKELY(part1 > 0)) {
- memcpy((char *) mBuffer + (rear * mFrameSize), buffer, part1 * mFrameSize);
- if (CC_UNLIKELY(rear + part1 == mMaxFrames)) {
- size_t part2 = written - part1;
- if (CC_LIKELY(part2 > 0)) {
- memcpy(mBuffer, (char *) buffer + (part1 * mFrameSize), part2 * mFrameSize);
- }
+ ssize_t actual = mFifoWriter.write(buffer, count);
+ ALOG_ASSERT(actual <= count);
+ if (actual < 0) {
+ if (totalFramesWritten == 0) {
+ return actual;
}
- android_atomic_release_store(written + mRear, &mRear);
- totalFramesWritten += written;
+ break;
}
+ size_t written = (size_t) actual;
+ totalFramesWritten += written;
if (!mWriteCanBlock || mIsShutdown) {
break;
}
count -= written;
buffer = (char *) buffer + (written * mFrameSize);
+ // TODO Replace this whole section by audio_util_fifo's setpoint feature.
// Simulate blocking I/O by sleeping at different rates, depending on a throttle.
// The throttle tries to keep the mean pipe depth near the setpoint, with a slight jitter.
uint32_t ns;
if (written > 0) {
- size_t filled = (mMaxFrames - avail) + written;
+ ssize_t avail = mFifoWriter.available();
+ ALOG_ASSERT(avail <= mMaxFrames);
+ if (avail < 0) {
+ // don't return avail as status, because totalFramesWritten > 0
+ break;
+ }
+ size_t filled = mMaxFrames - (size_t) avail;
// FIXME cache these values to avoid re-computation
if (filled <= mSetpoint / 2) {
// pipe is (nearly) empty, fill quickly
diff --git a/media/libnbaio/MonoPipeReader.cpp b/media/libnbaio/MonoPipeReader.cpp
index 01dc524..a9b4d18 100644
--- a/media/libnbaio/MonoPipeReader.cpp
+++ b/media/libnbaio/MonoPipeReader.cpp
@@ -25,7 +25,7 @@
MonoPipeReader::MonoPipeReader(MonoPipe* pipe) :
NBAIO_Source(pipe->mFormat),
- mPipe(pipe)
+ mPipe(pipe), mFifoReader(mPipe->mFifo, true /*throttlesWriter*/)
{
}
@@ -38,38 +38,21 @@
if (CC_UNLIKELY(!mNegotiated)) {
return NEGOTIATE;
}
- ssize_t ret = android_atomic_acquire_load(&mPipe->mRear) - mPipe->mFront;
- ALOG_ASSERT((0 <= ret) && ((size_t) ret <= mPipe->mMaxFrames));
+ ssize_t ret = mFifoReader.available();
+ ALOG_ASSERT(ret <= mPipe->mMaxFrames);
return ret;
}
ssize_t MonoPipeReader::read(void *buffer, size_t count)
{
// count == 0 is unlikely and not worth checking for explicitly; will be handled automatically
- ssize_t red = availableToRead();
- if (CC_UNLIKELY(red <= 0)) {
- return red;
+ ssize_t actual = mFifoReader.read(buffer, count);
+ ALOG_ASSERT(actual <= count);
+ if (CC_UNLIKELY(actual <= 0)) {
+ return actual;
}
- if (CC_LIKELY((size_t) red > count)) {
- red = count;
- }
- size_t front = mPipe->mFront & (mPipe->mMaxFrames - 1);
- size_t part1 = mPipe->mMaxFrames - front;
- if (part1 > (size_t) red) {
- part1 = red;
- }
- if (CC_LIKELY(part1 > 0)) {
- memcpy(buffer, (char *) mPipe->mBuffer + (front * mFrameSize), part1 * mFrameSize);
- if (CC_UNLIKELY(front + part1 == mPipe->mMaxFrames)) {
- size_t part2 = red - part1;
- if (CC_LIKELY(part2 > 0)) {
- memcpy((char *) buffer + (part1 * mFrameSize), mPipe->mBuffer, part2 * mFrameSize);
- }
- }
- android_atomic_release_store(red + mPipe->mFront, &mPipe->mFront);
- mFramesRead += red;
- }
- return red;
+ mFramesRead += (size_t) actual;
+ return actual;
}
void MonoPipeReader::onTimestamp(const ExtendedTimestamp ×tamp)
diff --git a/media/libnbaio/NBLog.cpp b/media/libnbaio/NBLog.cpp
index 4d14904..9cccfc4 100644
--- a/media/libnbaio/NBLog.cpp
+++ b/media/libnbaio/NBLog.cpp
@@ -17,17 +17,21 @@
#define LOG_TAG "NBLog"
//#define LOG_NDEBUG 0
+#include <climits>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
+#include <sys/prctl.h>
#include <time.h>
#include <new>
-#include <cutils/atomic.h>
+#include <audio_utils/roundup.h>
#include <media/nbaio/NBLog.h>
#include <utils/Log.h>
#include <utils/String8.h>
+#include <queue>
+
namespace android {
int NBLog::Entry::readAt(size_t offset) const
@@ -47,6 +51,144 @@
// ---------------------------------------------------------------------------
+NBLog::FormatEntry::FormatEntry(const uint8_t *entry) : mEntry(entry) {
+ ALOGW_IF(entry[offsetof(struct entry, type)] != EVENT_START_FMT,
+ "Created format entry with invalid event type %d", entry[offsetof(struct entry, type)]);
+}
+
+NBLog::FormatEntry::FormatEntry(const NBLog::FormatEntry::iterator &it) : FormatEntry(it.ptr) {}
+
+const char *NBLog::FormatEntry::formatString() const {
+ return (const char*) mEntry + offsetof(entry, data);
+}
+
+size_t NBLog::FormatEntry::formatStringLength() const {
+ return mEntry[offsetof(entry, length)];
+}
+
+NBLog::FormatEntry::iterator NBLog::FormatEntry::args() const {
+ auto it = begin();
+ // skip start fmt
+ ++it;
+ // skip timestamp
+ ++it;
+ // Skip author if present
+ if (it->type == EVENT_AUTHOR) {
+ ++it;
+ }
+ return it;
+}
+
+timespec NBLog::FormatEntry::timestamp() const {
+ auto it = begin();
+ // skip start fmt
+ ++it;
+ return it.payload<timespec>();
+}
+
+pid_t NBLog::FormatEntry::author() const {
+ auto it = begin();
+ // skip start fmt
+ ++it;
+ // skip timestamp
+ ++it;
+ // if there is an author entry, return it, return -1 otherwise
+ if (it->type == EVENT_AUTHOR) {
+ return it.payload<int>();
+ }
+ return -1;
+}
+
+NBLog::FormatEntry::iterator NBLog::FormatEntry::copyWithAuthor(
+ std::unique_ptr<audio_utils_fifo_writer> &dst, int author) const {
+ auto it = begin();
+ // copy fmt start entry
+ it.copyTo(dst);
+ // copy timestamp
+ (++it).copyTo(dst);
+ // insert author entry
+ size_t authorEntrySize = NBLog::Entry::kOverhead + sizeof(author);
+ uint8_t authorEntry[authorEntrySize];
+ authorEntry[offsetof(entry, type)] = EVENT_AUTHOR;
+ authorEntry[offsetof(entry, length)] =
+ authorEntry[authorEntrySize + NBLog::Entry::kPreviousLengthOffset] =
+ sizeof(author);
+ *(int*) (&authorEntry[offsetof(entry, data)]) = author;
+ dst->write(authorEntry, authorEntrySize);
+ // copy rest of entries
+ while ((++it)->type != EVENT_END_FMT) {
+ it.copyTo(dst);
+ }
+ it.copyTo(dst);
+ ++it;
+ return it;
+}
+
+void NBLog::FormatEntry::iterator::copyTo(std::unique_ptr<audio_utils_fifo_writer> &dst) const {
+ size_t length = ptr[offsetof(entry, length)] + NBLog::Entry::kOverhead;
+ dst->write(ptr, length);
+}
+
+void NBLog::FormatEntry::iterator::copyData(uint8_t *dst) const {
+ memcpy((void*) dst, ptr + offsetof(entry, data), ptr[offsetof(entry, length)]);
+}
+
+NBLog::FormatEntry::iterator NBLog::FormatEntry::begin() const {
+ return iterator(mEntry);
+}
+
+NBLog::FormatEntry::iterator::iterator()
+ : ptr(nullptr) {}
+
+NBLog::FormatEntry::iterator::iterator(const uint8_t *entry)
+ : ptr(entry) {}
+
+NBLog::FormatEntry::iterator::iterator(const NBLog::FormatEntry::iterator &other)
+ : ptr(other.ptr) {}
+
+const NBLog::FormatEntry::entry& NBLog::FormatEntry::iterator::operator*() const {
+ return *(entry*) ptr;
+}
+
+const NBLog::FormatEntry::entry* NBLog::FormatEntry::iterator::operator->() const {
+ return (entry*) ptr;
+}
+
+NBLog::FormatEntry::iterator& NBLog::FormatEntry::iterator::operator++() {
+ ptr += ptr[offsetof(entry, length)] + NBLog::Entry::kOverhead;
+ return *this;
+}
+
+NBLog::FormatEntry::iterator& NBLog::FormatEntry::iterator::operator--() {
+ ptr -= ptr[NBLog::Entry::kPreviousLengthOffset] + NBLog::Entry::kOverhead;
+ return *this;
+}
+
+NBLog::FormatEntry::iterator NBLog::FormatEntry::iterator::next() const {
+ iterator aux(*this);
+ return ++aux;
+}
+
+NBLog::FormatEntry::iterator NBLog::FormatEntry::iterator::prev() const {
+ iterator aux(*this);
+ return --aux;
+}
+
+int NBLog::FormatEntry::iterator::operator-(const NBLog::FormatEntry::iterator &other) const {
+ return ptr - other.ptr;
+}
+
+bool NBLog::FormatEntry::iterator::operator!=(const iterator &other) const {
+ return ptr != other.ptr;
+}
+
+bool NBLog::FormatEntry::iterator::hasConsistentLength() const {
+ return ptr[offsetof(entry, length)] == ptr[ptr[offsetof(entry, length)] +
+ NBLog::Entry::kOverhead + NBLog::Entry::kPreviousLengthOffset];
+}
+
+// ---------------------------------------------------------------------------
+
#if 0 // FIXME see note in NBLog.h
NBLog::Timeline::Timeline(size_t size, void *shared)
: mSize(roundup(size)), mOwn(shared == NULL),
@@ -67,25 +209,50 @@
/*static*/
size_t NBLog::Timeline::sharedSize(size_t size)
{
+ // TODO fifo now supports non-power-of-2 buffer sizes, so could remove the roundup
return sizeof(Shared) + roundup(size);
}
// ---------------------------------------------------------------------------
NBLog::Writer::Writer()
- : mSize(0), mShared(NULL), mRear(0), mEnabled(false)
+ : mShared(NULL), mFifo(NULL), mFifoWriter(NULL), mEnabled(false), mPidTag(NULL), mPidTagSize(0)
{
}
-NBLog::Writer::Writer(size_t size, void *shared)
- : mSize(roundup(size)), mShared((Shared *) shared), mRear(0), mEnabled(mShared != NULL)
+NBLog::Writer::Writer(void *shared, size_t size)
+ : mShared((Shared *) shared),
+ mFifo(mShared != NULL ?
+ new audio_utils_fifo(size, sizeof(uint8_t),
+ mShared->mBuffer, mShared->mRear, NULL /*throttlesFront*/) : NULL),
+ mFifoWriter(mFifo != NULL ? new audio_utils_fifo_writer(*mFifo) : NULL),
+ mEnabled(mFifoWriter != NULL)
{
+ // caching pid and process name
+ pid_t id = ::getpid();
+ char procName[16];
+ int status = prctl(PR_GET_NAME, procName);
+ if (status) { // error getting process name
+ procName[0] = '\0';
+ }
+ size_t length = strlen(procName);
+ mPidTagSize = length + sizeof(pid_t);
+ mPidTag = new char[mPidTagSize];
+ memcpy(mPidTag, &id, sizeof(pid_t));
+ memcpy(mPidTag + sizeof(pid_t), procName, length);
}
-NBLog::Writer::Writer(size_t size, const sp<IMemory>& iMemory)
- : mSize(roundup(size)), mShared(iMemory != 0 ? (Shared *) iMemory->pointer() : NULL),
- mIMemory(iMemory), mRear(0), mEnabled(mShared != NULL)
+NBLog::Writer::Writer(const sp<IMemory>& iMemory, size_t size)
+ : Writer(iMemory != 0 ? (Shared *) iMemory->pointer() : NULL, size)
{
+ mIMemory = iMemory;
+}
+
+NBLog::Writer::~Writer()
+{
+ delete mFifoWriter;
+ delete mFifo;
+ delete[] mPidTag;
}
void NBLog::Writer::log(const char *string)
@@ -93,9 +260,10 @@
if (!mEnabled) {
return;
}
+ LOG_ALWAYS_FATAL_IF(string == NULL, "Attempted to log NULL string");
size_t length = strlen(string);
- if (length > 255) {
- length = 255;
+ if (length > Entry::kMaxLength) {
+ length = Entry::kMaxLength;
}
log(EVENT_STRING, string, length);
}
@@ -116,7 +284,7 @@
if (!mEnabled) {
return;
}
- char buffer[256];
+ char buffer[Entry::kMaxLength + 1 /*NUL*/];
int length = vsnprintf(buffer, sizeof(buffer), fmt, ap);
if (length >= (int) sizeof(buffer)) {
length = sizeof(buffer) - 1;
@@ -135,16 +303,130 @@
}
struct timespec ts;
if (!clock_gettime(CLOCK_MONOTONIC, &ts)) {
- log(EVENT_TIMESTAMP, &ts, sizeof(struct timespec));
+ log(EVENT_TIMESTAMP, &ts, sizeof(ts));
}
}
-void NBLog::Writer::logTimestamp(const struct timespec& ts)
+void NBLog::Writer::logTimestamp(const struct timespec &ts)
{
if (!mEnabled) {
return;
}
- log(EVENT_TIMESTAMP, &ts, sizeof(struct timespec));
+ log(EVENT_TIMESTAMP, &ts, sizeof(ts));
+}
+
+void NBLog::Writer::logInteger(const int x)
+{
+ if (!mEnabled) {
+ return;
+ }
+ log(EVENT_INTEGER, &x, sizeof(x));
+}
+
+void NBLog::Writer::logFloat(const float x)
+{
+ if (!mEnabled) {
+ return;
+ }
+ log(EVENT_FLOAT, &x, sizeof(x));
+}
+
+void NBLog::Writer::logPID()
+{
+ if (!mEnabled) {
+ return;
+ }
+ log(EVENT_PID, mPidTag, mPidTagSize);
+}
+
+void NBLog::Writer::logStart(const char *fmt)
+{
+ if (!mEnabled) {
+ return;
+ }
+ size_t length = strlen(fmt);
+ if (length > Entry::kMaxLength) {
+ length = Entry::kMaxLength;
+ }
+ log(EVENT_START_FMT, fmt, length);
+}
+
+void NBLog::Writer::logEnd()
+{
+ if (!mEnabled) {
+ return;
+ }
+ Entry entry = Entry(EVENT_END_FMT, NULL, 0);
+ log(&entry, true);
+}
+
+void NBLog::Writer::logFormat(const char *fmt, ...)
+{
+ if (!mEnabled) {
+ return;
+ }
+
+ va_list ap;
+ va_start(ap, fmt);
+ Writer::logVFormat(fmt, ap);
+ va_end(ap);
+}
+
+void NBLog::Writer::logVFormat(const char *fmt, va_list argp)
+{
+ if (!mEnabled) {
+ return;
+ }
+ Writer::logStart(fmt);
+ int i;
+ double f;
+ char* s;
+ struct timespec t;
+ Writer::logTimestamp();
+ for (const char *p = fmt; *p != '\0'; p++) {
+ // TODO: implement more complex formatting such as %.3f
+ if (*p != '%') {
+ continue;
+ }
+ switch(*++p) {
+ case 's': // string
+ s = va_arg(argp, char *);
+ Writer::log(s);
+ break;
+
+ case 't': // timestamp
+ t = va_arg(argp, struct timespec);
+ Writer::logTimestamp(t);
+ break;
+
+ case 'd': // integer
+ i = va_arg(argp, int);
+ Writer::logInteger(i);
+ break;
+
+ case 'f': // float
+ f = va_arg(argp, double); // float arguments are promoted to double in vararg lists
+ Writer::logFloat((float)f);
+ break;
+
+ case 'p': // pid
+ Writer::logPID();
+ break;
+
+ // the "%\0" case finishes parsing
+ case '\0':
+ --p;
+ break;
+
+ case '%':
+ break;
+
+ default:
+ ALOGW("NBLog Writer parsed invalid format specifier: %c", *p);
+ break;
+ }
+ }
+ Writer::logEnd();
}
void NBLog::Writer::log(Event event, const void *data, size_t length)
@@ -152,12 +434,19 @@
if (!mEnabled) {
return;
}
- if (data == NULL || length > 255) {
+ if (data == NULL || length > Entry::kMaxLength) {
+ // TODO Perhaps it makes sense to display truncated data or at least a
+ // message that the data is too long? The current behavior can create
+ // a confusion for a programmer debugging their code.
return;
}
switch (event) {
case EVENT_STRING:
case EVENT_TIMESTAMP:
+ case EVENT_INTEGER:
+ case EVENT_FLOAT:
+ case EVENT_PID:
+ case EVENT_START_FMT:
break;
case EVENT_RESERVED:
default:
@@ -176,26 +465,16 @@
log(entry->mEvent, entry->mData, entry->mLength);
return;
}
- size_t rear = mRear & (mSize - 1);
- size_t written = mSize - rear; // written = number of bytes that have been written so far
- size_t need = entry->mLength + 3; // mEvent, mLength, data[length], mLength
- // need = number of bytes remaining to write
- if (written > need) {
- written = need;
- }
- size_t i;
+ size_t need = entry->mLength + Entry::kOverhead; // mEvent, mLength, data[length], mLength
+ // need = number of bytes remaining to write
+
// FIXME optimize this using memcpy for the data part of the Entry.
// The Entry could have a method copyTo(ptr, offset, size) to optimize the copy.
- for (i = 0; i < written; ++i) {
- mShared->mBuffer[rear + i] = entry->readAt(i);
+ uint8_t temp[Entry::kMaxLength + Entry::kOverhead];
+ for (size_t i = 0; i < need; i++) {
+ temp[i] = entry->readAt(i);
}
- if (rear + written == mSize && (need -= written) > 0) {
- for (i = 0; i < need; ++i) {
- mShared->mBuffer[i] = entry->readAt(written + i);
- }
- written += need;
- }
- android_atomic_release_store(mRear += written, &mShared->mRear);
+ mFifoWriter->write(temp, need);
}
bool NBLog::Writer::isEnabled() const
@@ -217,8 +496,8 @@
{
}
-NBLog::LockedWriter::LockedWriter(size_t size, void *shared)
- : Writer(size, shared)
+NBLog::LockedWriter::LockedWriter(void *shared, size_t size)
+ : Writer(shared, size)
{
}
@@ -252,12 +531,43 @@
Writer::logTimestamp();
}
-void NBLog::LockedWriter::logTimestamp(const struct timespec& ts)
+void NBLog::LockedWriter::logTimestamp(const struct timespec &ts)
{
Mutex::Autolock _l(mLock);
Writer::logTimestamp(ts);
}
+void NBLog::LockedWriter::logInteger(const int x)
+{
+ Mutex::Autolock _l(mLock);
+ Writer::logInteger(x);
+}
+
+void NBLog::LockedWriter::logFloat(const float x)
+{
+ Mutex::Autolock _l(mLock);
+ Writer::logFloat(x);
+}
+
+void NBLog::LockedWriter::logPID()
+{
+ Mutex::Autolock _l(mLock);
+ Writer::logPID();
+}
+
+void NBLog::LockedWriter::logStart(const char *fmt)
+{
+ Mutex::Autolock _l(mLock);
+ Writer::logStart(fmt);
+}
+
+
+void NBLog::LockedWriter::logEnd()
+{
+ Mutex::Autolock _l(mLock);
+ Writer::logEnd();
+}
+
bool NBLog::LockedWriter::isEnabled() const
{
Mutex::Autolock _l(mLock);
@@ -272,82 +582,144 @@
// ---------------------------------------------------------------------------
-NBLog::Reader::Reader(size_t size, const void *shared)
- : mSize(roundup(size)), mShared((const Shared *) shared), mFront(0)
+NBLog::Reader::Reader(const void *shared, size_t size)
+ : mShared((/*const*/ Shared *) shared), /*mIMemory*/
+ mFd(-1), mIndent(0),
+ mFifo(mShared != NULL ?
+ new audio_utils_fifo(size, sizeof(uint8_t),
+ mShared->mBuffer, mShared->mRear, NULL /*throttlesFront*/) : NULL),
+ mFifoReader(mFifo != NULL ? new audio_utils_fifo_reader(*mFifo) : NULL)
{
}
-NBLog::Reader::Reader(size_t size, const sp<IMemory>& iMemory)
- : mSize(roundup(size)), mShared(iMemory != 0 ? (const Shared *) iMemory->pointer() : NULL),
- mIMemory(iMemory), mFront(0)
+NBLog::Reader::Reader(const sp<IMemory>& iMemory, size_t size)
+ : Reader(iMemory != 0 ? (Shared *) iMemory->pointer() : NULL, size)
{
+ mIMemory = iMemory;
}
-void NBLog::Reader::dump(int fd, size_t indent)
+NBLog::Reader::~Reader()
{
- int32_t rear = android_atomic_acquire_load(&mShared->mRear);
- size_t avail = rear - mFront;
- if (avail == 0) {
- return;
+ delete mFifoReader;
+ delete mFifo;
+}
+
+uint8_t *NBLog::Reader::findLastEntryOfType(uint8_t *front, uint8_t *back, uint8_t type) {
+ while (back + Entry::kPreviousLengthOffset >= front) {
+ uint8_t *prev = back - back[Entry::kPreviousLengthOffset] - Entry::kOverhead;
+ if (prev < front || prev + prev[offsetof(FormatEntry::entry, length)] +
+ Entry::kOverhead != back) {
+
+ // prev points to an out of limits or inconsistent entry
+ return nullptr;
+ }
+ if (prev[offsetof(FormatEntry::entry, type)] == type) {
+ return prev;
+ }
+ back = prev;
}
- size_t lost = 0;
- if (avail > mSize) {
- lost = avail - mSize;
- mFront += lost;
- avail = mSize;
- }
- size_t remaining = avail; // remaining = number of bytes left to read
- size_t front = mFront & (mSize - 1);
- size_t read = mSize - front; // read = number of bytes that have been read so far
- if (read > remaining) {
- read = remaining;
+ return nullptr; // no entry found
+}
+
+std::unique_ptr<NBLog::Reader::Snapshot> NBLog::Reader::getSnapshot()
+{
+ if (mFifoReader == NULL) {
+ return std::unique_ptr<NBLog::Reader::Snapshot>(new Snapshot());
}
// make a copy to avoid race condition with writer
- uint8_t *copy = new uint8_t[avail];
- // copy first part of circular buffer up until the wraparound point
- memcpy(copy, &mShared->mBuffer[front], read);
- if (front + read == mSize) {
- if ((remaining -= read) > 0) {
- // copy second part of circular buffer starting at beginning
- memcpy(&copy[read], mShared->mBuffer, remaining);
- read += remaining;
- // remaining = 0 but not necessary
+ size_t capacity = mFifo->capacity();
+
+ // This emulates the behaviour of audio_utils_fifo_reader::read, but without incrementing the
+ // reader index. The index is incremented after handling corruption, to after the last complete
+ // entry of the buffer
+ size_t lost;
+ audio_utils_iovec iovec[2];
+ ssize_t availToRead = mFifoReader->obtain(iovec, capacity, NULL /*timeout*/, &lost);
+ if (availToRead <= 0) {
+ return std::unique_ptr<NBLog::Reader::Snapshot>(new Snapshot());
+ }
+
+ std::unique_ptr<Snapshot> snapshot(new Snapshot(availToRead));
+ memcpy(snapshot->mData, (const char *) mFifo->buffer() + iovec[0].mOffset, iovec[0].mLength);
+ if (iovec[1].mLength > 0) {
+ memcpy(snapshot->mData + (iovec[0].mLength),
+ (const char *) mFifo->buffer() + iovec[1].mOffset, iovec[1].mLength);
+ }
+
+ // Handle corrupted buffer
+ // Potentially, a buffer has corrupted data on both beginning (due to overflow) and end
+ // (due to incomplete format entry). But even if the end format entry is incomplete,
+ // it ends in a complete entry (which is not an END_FMT). So is safe to traverse backwards.
+ // TODO: handle client corruption (in the middle of a buffer)
+
+ uint8_t *back = snapshot->mData + availToRead;
+ uint8_t *front = snapshot->mData;
+
+ // Find last END_FMT. <back> is sitting on an entry which might be the middle of a FormatEntry.
+ // We go backwards until we find an EVENT_END_FMT.
+ uint8_t *lastEnd = findLastEntryOfType(front, back, EVENT_END_FMT);
+ if (lastEnd == nullptr) {
+ snapshot->mEnd = snapshot->mBegin = FormatEntry::iterator(front);
+ } else {
+ // end of snapshot points to after last END_FMT entry
+ snapshot->mEnd = FormatEntry::iterator(lastEnd + Entry::kOverhead);
+ // find first START_FMT
+ uint8_t *firstStart = nullptr;
+ uint8_t *firstStartTmp = lastEnd;
+ while ((firstStartTmp = findLastEntryOfType(front, firstStartTmp, EVENT_START_FMT))
+ != nullptr) {
+ firstStart = firstStartTmp;
+ }
+ // firstStart is null if no START_FMT entry was found before lastEnd
+ if (firstStart == nullptr) {
+ snapshot->mBegin = snapshot->mEnd;
+ } else {
+ snapshot->mBegin = FormatEntry::iterator(firstStart);
}
}
- mFront += read;
- size_t i = avail;
- Event event;
- size_t length;
+
+ // advance fifo reader index to after last entry read.
+ mFifoReader->release(snapshot->mEnd - front);
+
+ snapshot->mLost = lost;
+ return snapshot;
+
+}
+
+void NBLog::Reader::dump(int fd, size_t indent, NBLog::Reader::Snapshot &snapshot)
+{
+#if 0
struct timespec ts;
time_t maxSec = -1;
- while (i >= 3) {
- length = copy[i - 1];
- if (length + 3 > i || copy[i - length - 2] != length) {
+ while (entry - start >= (int) Entry::kOverhead) {
+ if (prevEntry - start < 0 || !prevEntry.hasConsistentLength()) {
break;
}
- event = (Event) copy[i - length - 3];
- if (event == EVENT_TIMESTAMP) {
- if (length != sizeof(struct timespec)) {
+ if (prevEntry->type == EVENT_TIMESTAMP) {
+ if (prevEntry->length != sizeof(struct timespec)) {
// corrupt
break;
}
- memcpy(&ts, &copy[i - length - 1], sizeof(struct timespec));
+ prevEntry.copyData((uint8_t*) &ts);
if (ts.tv_sec > maxSec) {
maxSec = ts.tv_sec;
}
}
- i -= length + 3;
+ --entry;
+ --prevEntry;
}
+#endif
mFd = fd;
mIndent = indent;
String8 timestamp, body;
- lost += i;
+ size_t lost = snapshot.lost() + (snapshot.begin() - FormatEntry::iterator(snapshot.data()));
if (lost > 0) {
body.appendFormat("warning: lost %zu bytes worth of events", lost);
// TODO timestamp empty here, only other choice to wait for the first timestamp event in the
// log to push it out. Consider keeping the timestamp/body between calls to readAt().
dumpLine(timestamp, body);
}
+#if 0
size_t width = 1;
while (maxSec >= 10) {
++width;
@@ -357,30 +729,28 @@
timestamp.appendFormat("[%*s]", (int) width + 4, "");
}
bool deferredTimestamp = false;
- while (i < avail) {
- event = (Event) copy[i];
- length = copy[i + 1];
- const void *data = &copy[i + 2];
- size_t advance = length + 3;
- switch (event) {
+#endif
+ for (auto entry = snapshot.begin(); entry != snapshot.end();) {
+ switch (entry->type) {
+#if 0
case EVENT_STRING:
- body.appendFormat("%.*s", (int) length, (const char *) data);
+ body.appendFormat("%.*s", (int) entry.length(), entry.data());
break;
case EVENT_TIMESTAMP: {
// already checked that length == sizeof(struct timespec);
- memcpy(&ts, data, sizeof(struct timespec));
+ entry.copyData((const uint8_t*) &ts);
long prevNsec = ts.tv_nsec;
long deltaMin = LONG_MAX;
long deltaMax = -1;
long deltaTotal = 0;
- size_t j = i;
+ auto aux(entry);
for (;;) {
- j += sizeof(struct timespec) + 3;
- if (j >= avail || (Event) copy[j] != EVENT_TIMESTAMP) {
+ ++aux;
+ if (end - aux >= 0 || aux.type() != EVENT_TIMESTAMP) {
break;
}
struct timespec tsNext;
- memcpy(&tsNext, &copy[j + 2], sizeof(struct timespec));
+ aux.copyData((const uint8_t*) &tsNext);
if (tsNext.tv_sec != ts.tv_sec) {
break;
}
@@ -397,7 +767,7 @@
deltaTotal += delta;
prevNsec = tsNext.tv_nsec;
}
- size_t n = (j - i) / (sizeof(struct timespec) + 3);
+ size_t n = (aux - entry) / (sizeof(struct timespec) + 3 /*Entry::kOverhead?*/);
if (deferredTimestamp) {
dumpLine(timestamp, body);
deferredTimestamp = false;
@@ -408,34 +778,58 @@
(int) ts.tv_sec, (int) (ts.tv_nsec / 1000000),
(int) ((ts.tv_nsec + deltaTotal) / 1000000),
(int) (deltaMin / 1000000), (int) (deltaMax / 1000000));
- i = j;
- advance = 0;
+ entry = aux;
+ // advance = 0;
break;
}
timestamp.appendFormat("[%d.%03d]", (int) ts.tv_sec,
(int) (ts.tv_nsec / 1000000));
deferredTimestamp = true;
- } break;
+ }
+ break;
+ case EVENT_INTEGER:
+ appendInt(&body, entry.data());
+ break;
+ case EVENT_FLOAT:
+ appendFloat(&body, entry.data());
+ break;
+ case EVENT_PID:
+ appendPID(&body, entry.data(), entry.length());
+ break;
+#endif
+ case EVENT_START_FMT:
+ // right now, this is the only supported case
+ entry = handleFormat(FormatEntry(entry), &timestamp, &body);
+ break;
+ case EVENT_END_FMT:
+ body.appendFormat("warning: got to end format event");
+ ++entry;
+ break;
case EVENT_RESERVED:
default:
- body.appendFormat("warning: unknown event %d", event);
+ body.appendFormat("warning: unexpected event %d", entry->type);
+ ++entry;
break;
}
- i += advance;
if (!body.isEmpty()) {
dumpLine(timestamp, body);
- deferredTimestamp = false;
+ // deferredTimestamp = false;
}
}
- if (deferredTimestamp) {
- dumpLine(timestamp, body);
- }
- // FIXME it would be more efficient to put a char mCopy[256] as a member variable of the dumper
- delete[] copy;
+ // if (deferredTimestamp) {
+ // dumpLine(timestamp, body);
+ // }
}
-void NBLog::Reader::dumpLine(const String8& timestamp, String8& body)
+void NBLog::Reader::dump(int fd, size_t indent)
+{
+ // get a snapshot, dump it
+ std::unique_ptr<Snapshot> snap = getSnapshot();
+ dump(fd, indent, *snap);
+}
+
+void NBLog::Reader::dumpLine(const String8 &timestamp, String8 &body)
{
if (mFd >= 0) {
dprintf(mFd, "%.*s%s %s\n", mIndent, "", timestamp.string(), body.string());
@@ -450,4 +844,234 @@
return iMemory != 0 && mIMemory != 0 && iMemory->pointer() == mIMemory->pointer();
}
+void NBLog::appendTimestamp(String8 *body, const void *data) {
+ struct timespec ts;
+ memcpy(&ts, data, sizeof(struct timespec));
+ body->appendFormat("[%d.%03d]", (int) ts.tv_sec,
+ (int) (ts.tv_nsec / 1000000));
+}
+
+void NBLog::appendInt(String8 *body, const void *data) {
+ int x = *((int*) data);
+ body->appendFormat("<%d>", x);
+}
+
+void NBLog::appendFloat(String8 *body, const void *data) {
+ float f;
+ memcpy(&f, data, sizeof(float));
+ body->appendFormat("<%f>", f);
+}
+
+void NBLog::appendPID(String8 *body, const void* data, size_t length) {
+ pid_t id = *((pid_t*) data);
+ char * name = &((char*) data)[sizeof(pid_t)];
+ body->appendFormat("<PID: %d, name: %.*s>", id, (int) (length - sizeof(pid_t)), name);
+}
+
+NBLog::FormatEntry::iterator NBLog::Reader::handleFormat(const FormatEntry &fmtEntry,
+ String8 *timestamp,
+ String8 *body) {
+ // log timestamp
+ struct timespec ts = fmtEntry.timestamp();
+ timestamp->clear();
+ timestamp->appendFormat("[%d.%03d]", (int) ts.tv_sec,
+ (int) (ts.tv_nsec / 1000000));
+
+ // log author (if present)
+ handleAuthor(fmtEntry, body);
+
+ // log string
+ NBLog::FormatEntry::iterator arg = fmtEntry.args();
+
+ const char* fmt = fmtEntry.formatString();
+ size_t fmt_length = fmtEntry.formatStringLength();
+
+ for (size_t fmt_offset = 0; fmt_offset < fmt_length; ++fmt_offset) {
+ if (fmt[fmt_offset] != '%') {
+ body->append(&fmt[fmt_offset], 1); // TODO optimize to write consecutive strings at once
+ continue;
+ }
+ // case "%%""
+ if (fmt[++fmt_offset] == '%') {
+ body->append("%");
+ continue;
+ }
+ // case "%\0"
+ if (fmt_offset == fmt_length) {
+ continue;
+ }
+
+ NBLog::Event event = (NBLog::Event) arg->type;
+ size_t length = arg->length;
+
+ // TODO check length for event type is correct
+
+ if (event == EVENT_END_FMT) {
+ break;
+ }
+
+ // TODO: implement more complex formatting such as %.3f
+ const uint8_t *datum = arg->data; // pointer to the current event args
+ switch(fmt[fmt_offset])
+ {
+ case 's': // string
+ ALOGW_IF(event != EVENT_STRING,
+ "NBLog Reader incompatible event for string specifier: %d", event);
+ body->append((const char*) datum, length);
+ break;
+
+ case 't': // timestamp
+ ALOGW_IF(event != EVENT_TIMESTAMP,
+ "NBLog Reader incompatible event for timestamp specifier: %d", event);
+ appendTimestamp(body, datum);
+ break;
+
+ case 'd': // integer
+ ALOGW_IF(event != EVENT_INTEGER,
+ "NBLog Reader incompatible event for integer specifier: %d", event);
+ appendInt(body, datum);
+ break;
+
+ case 'f': // float
+ ALOGW_IF(event != EVENT_FLOAT,
+ "NBLog Reader incompatible event for float specifier: %d", event);
+ appendFloat(body, datum);
+ break;
+
+ case 'p': // pid
+ ALOGW_IF(event != EVENT_PID,
+ "NBLog Reader incompatible event for pid specifier: %d", event);
+ appendPID(body, datum, length);
+ break;
+
+ default:
+ ALOGW("NBLog Reader encountered unknown character %c", fmt[fmt_offset]);
+ }
+ ++arg;
+ }
+ ALOGW_IF(arg->type != EVENT_END_FMT, "Expected end of format, got %d", arg->type);
+ ++arg;
+ return arg;
+}
+
+// ---------------------------------------------------------------------------
+
+NBLog::Merger::Merger(const void *shared, size_t size):
+ mShared((Shared *) shared),
+ mFifo(mShared != NULL ?
+ new audio_utils_fifo(size, sizeof(uint8_t),
+ mShared->mBuffer, mShared->mRear, NULL /*throttlesFront*/) : NULL),
+ mFifoWriter(mFifo != NULL ? new audio_utils_fifo_writer(*mFifo) : NULL)
+ {}
+
+void NBLog::Merger::addReader(const NBLog::NamedReader &reader) {
+ mNamedReaders.push_back(reader);
+}
+
+// items placed in priority queue during merge
+// composed by a timestamp and the index of the snapshot where the timestamp came from
+struct MergeItem
+{
+ struct timespec ts;
+ int index;
+ MergeItem(struct timespec ts, int index): ts(ts), index(index) {}
+};
+
+// operators needed for priority queue in merge
+bool operator>(const struct timespec &t1, const struct timespec &t2) {
+ return t1.tv_sec > t2.tv_sec || (t1.tv_sec == t2.tv_sec && t1.tv_nsec > t2.tv_nsec);
+}
+
+bool operator>(const struct MergeItem &i1, const struct MergeItem &i2) {
+ return i1.ts > i2.ts ||
+ (i1.ts.tv_sec == i2.ts.tv_sec && i1.ts.tv_nsec == i2.ts.tv_nsec && i1.index > i2.index);
+}
+
+// Merge registered readers, sorted by timestamp
+void NBLog::Merger::merge() {
+ int nLogs = mNamedReaders.size();
+ std::vector<std::unique_ptr<NBLog::Reader::Snapshot>> snapshots(nLogs);
+ std::vector<NBLog::FormatEntry::iterator> offsets(nLogs);
+ for (int i = 0; i < nLogs; ++i) {
+ snapshots[i] = mNamedReaders[i].reader()->getSnapshot();
+ offsets[i] = snapshots[i]->begin();
+ }
+ // initialize offsets
+ // TODO custom heap implementation could allow to update top, improving performance
+ // for bursty buffers
+ std::priority_queue<MergeItem, std::vector<MergeItem>, std::greater<MergeItem>> timestamps;
+ for (int i = 0; i < nLogs; ++i)
+ {
+ if (offsets[i] != snapshots[i]->end()) {
+ timespec ts = FormatEntry(offsets[i]).timestamp();
+ timestamps.emplace(ts, i);
+ }
+ }
+
+ while (!timestamps.empty()) {
+ // find minimum timestamp
+ int index = timestamps.top().index;
+ // copy it to the log, increasing offset
+ offsets[index] = FormatEntry(offsets[index]).copyWithAuthor(mFifoWriter, index);
+ // update data structures
+ timestamps.pop();
+ if (offsets[index] != snapshots[index]->end()) {
+ timespec ts = FormatEntry(offsets[index]).timestamp();
+ timestamps.emplace(ts, index);
+ }
+ }
+}
+
+const std::vector<NBLog::NamedReader> *NBLog::Merger::getNamedReaders() const {
+ return &mNamedReaders;
+}
+
+NBLog::MergeReader::MergeReader(const void *shared, size_t size, Merger &merger)
+ : Reader(shared, size), mNamedReaders(merger.getNamedReaders()) {}
+
+size_t NBLog::MergeReader::handleAuthor(const NBLog::FormatEntry &fmtEntry, String8 *body) {
+ int author = fmtEntry.author();
+ const char* name = (*mNamedReaders)[author].name();
+ body->appendFormat("%s: ", name);
+ return NBLog::Entry::kOverhead + sizeof(author);
+}
+
+NBLog::MergeThread::MergeThread(NBLog::Merger &merger)
+ : mMerger(merger),
+ mTimeoutUs(0) {}
+
+NBLog::MergeThread::~MergeThread() {
+ // set exit flag, set timeout to 0 to force threadLoop to exit and wait for the thread to join
+ requestExit();
+ setTimeoutUs(0);
+ join();
+}
+
+bool NBLog::MergeThread::threadLoop() {
+ bool doMerge;
+ {
+ AutoMutex _l(mMutex);
+ // If mTimeoutUs is negative, wait on the condition variable until it's positive.
+ // If it's positive, wait kThreadSleepPeriodUs and then merge
+ nsecs_t waitTime = mTimeoutUs > 0 ? kThreadSleepPeriodUs * 1000 : LLONG_MAX;
+ mCond.waitRelative(mMutex, waitTime);
+ doMerge = mTimeoutUs > 0;
+ mTimeoutUs -= kThreadSleepPeriodUs;
+ }
+ if (doMerge) {
+ mMerger.merge();
+ }
+ return true;
+}
+
+void NBLog::MergeThread::wakeup() {
+ setTimeoutUs(kThreadWakeupPeriodUs);
+}
+
+void NBLog::MergeThread::setTimeoutUs(int time) {
+ AutoMutex _l(mMutex);
+ mTimeoutUs = time;
+ mCond.signal();
+}
+
} // namespace android
diff --git a/media/libnbaio/Pipe.cpp b/media/libnbaio/Pipe.cpp
index 13f211d..39df3f4 100644
--- a/media/libnbaio/Pipe.cpp
+++ b/media/libnbaio/Pipe.cpp
@@ -27,9 +27,11 @@
Pipe::Pipe(size_t maxFrames, const NBAIO_Format& format, void *buffer) :
NBAIO_Sink(format),
+ // TODO fifo now supports non-power-of-2 buffer sizes, so could remove the roundup
mMaxFrames(roundup(maxFrames)),
mBuffer(buffer == NULL ? malloc(mMaxFrames * Format_frameSize(format)) : buffer),
- mRear(0),
+ mFifo(mMaxFrames, Format_frameSize(format), mBuffer, false /*throttlesWriter*/),
+ mFifoWriter(mFifo),
mReaders(0),
mFreeBufferInDestructor(buffer == NULL)
{
@@ -49,25 +51,13 @@
if (CC_UNLIKELY(!mNegotiated)) {
return NEGOTIATE;
}
- // write() is not multi-thread safe w.r.t. itself, so no mutex or atomic op needed to read mRear
- size_t rear = mRear & (mMaxFrames - 1);
- size_t written = mMaxFrames - rear;
- if (CC_LIKELY(written > count)) {
- written = count;
+ ssize_t actual = mFifoWriter.write(buffer, count);
+ ALOG_ASSERT(actual <= count);
+ if (actual <= 0) {
+ return actual;
}
- memcpy((char *) mBuffer + (rear * mFrameSize), buffer, written * mFrameSize);
- if (CC_UNLIKELY(rear + written == mMaxFrames)) {
- if (CC_UNLIKELY((count -= written) > rear)) {
- count = rear;
- }
- if (CC_LIKELY(count > 0)) {
- memcpy(mBuffer, (char *) buffer + (written * mFrameSize), count * mFrameSize);
- written += count;
- }
- }
- android_atomic_release_store(written + mRear, &mRear);
- mFramesWritten += written;
- return written;
+ mFramesWritten += (size_t) actual;
+ return actual;
}
} // namespace android
diff --git a/media/libnbaio/PipeReader.cpp b/media/libnbaio/PipeReader.cpp
index a879647..2486b76 100644
--- a/media/libnbaio/PipeReader.cpp
+++ b/media/libnbaio/PipeReader.cpp
@@ -18,6 +18,7 @@
//#define LOG_NDEBUG 0
#include <cutils/compiler.h>
+#include <cutils/atomic.h>
#include <utils/Log.h>
#include <media/nbaio/PipeReader.h>
@@ -25,9 +26,7 @@
PipeReader::PipeReader(Pipe& pipe) :
NBAIO_Source(pipe.mFormat),
- mPipe(pipe),
- // any data already in the pipe is not visible to this PipeReader
- mFront(android_atomic_acquire_load(&pipe.mRear)),
+ mPipe(pipe), mFifoReader(mPipe.mFifo, false /*throttlesWriter*/, true /*flush*/),
mFramesOverrun(0),
mOverruns(0)
{
@@ -50,51 +49,50 @@
if (CC_UNLIKELY(!mNegotiated)) {
return NEGOTIATE;
}
- int32_t rear = android_atomic_acquire_load(&mPipe.mRear);
- // read() is not multi-thread safe w.r.t. itself, so no mutex or atomic op needed to read mFront
- size_t avail = rear - mFront;
- if (CC_UNLIKELY(avail > mPipe.mMaxFrames)) {
- // Discard 1/16 of the most recent data in pipe to avoid another overrun immediately
- int32_t oldFront = mFront;
- mFront = rear - mPipe.mMaxFrames + (mPipe.mMaxFrames >> 4);
- mFramesOverrun += (size_t) (mFront - oldFront);
+ size_t lost;
+ ssize_t avail = mFifoReader.available(&lost);
+ if (avail == -EOVERFLOW || lost > 0) {
+ mFramesOverrun += lost;
++mOverruns;
- return OVERRUN;
+ avail = OVERRUN;
}
return avail;
}
ssize_t PipeReader::read(void *buffer, size_t count)
{
- ssize_t avail = availableToRead();
- if (CC_UNLIKELY(avail <= 0)) {
- return avail;
+ size_t lost;
+ ssize_t actual = mFifoReader.read(buffer, count, NULL /*timeout*/, &lost);
+ ALOG_ASSERT(actual <= count);
+ if (actual == -EOVERFLOW || lost > 0) {
+ mFramesOverrun += lost;
+ ++mOverruns;
+ actual = OVERRUN;
}
- // An overrun can occur from here on and be silently ignored,
- // but it will be caught at next read()
- if (CC_LIKELY(count > (size_t) avail)) {
- count = avail;
+ if (actual <= 0) {
+ return actual;
}
- size_t front = mFront & (mPipe.mMaxFrames - 1);
- size_t red = mPipe.mMaxFrames - front;
- if (CC_LIKELY(red > count)) {
- red = count;
+ mFramesRead += (size_t) actual;
+ return actual;
+}
+
+ssize_t PipeReader::flush()
+{
+ if (CC_UNLIKELY(!mNegotiated)) {
+ return NEGOTIATE;
}
- // In particular, an overrun during the memcpy will result in reading corrupt data
- memcpy(buffer, (char *) mPipe.mBuffer + (front * mFrameSize), red * mFrameSize);
- // We could re-read the rear pointer here to detect the corruption, but why bother?
- if (CC_UNLIKELY(front + red == mPipe.mMaxFrames)) {
- if (CC_UNLIKELY((count -= red) > front)) {
- count = front;
- }
- if (CC_LIKELY(count > 0)) {
- memcpy((char *) buffer + (red * mFrameSize), mPipe.mBuffer, count * mFrameSize);
- red += count;
- }
+ size_t lost;
+ ssize_t flushed = mFifoReader.flush(&lost);
+ if (flushed == -EOVERFLOW || lost > 0) {
+ mFramesOverrun += lost;
+ ++mOverruns;
+ flushed = OVERRUN;
}
- mFront += red;
- mFramesRead += red;
- return red;
+ if (flushed <= 0) {
+ return flushed;
+ }
+ mFramesRead += (size_t) flushed; // we consider flushed frames as read, but not lost frames
+ return flushed;
}
} // namespace android
diff --git a/include/media/nbaio/AudioBufferProviderSource.h b/media/libnbaio/include/AudioBufferProviderSource.h
similarity index 100%
rename from include/media/nbaio/AudioBufferProviderSource.h
rename to media/libnbaio/include/AudioBufferProviderSource.h
diff --git a/media/libnbaio/include/AudioStreamInSource.h b/media/libnbaio/include/AudioStreamInSource.h
new file mode 100644
index 0000000..508e0fe
--- /dev/null
+++ b/media/libnbaio/include/AudioStreamInSource.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_STREAM_IN_SOURCE_H
+#define ANDROID_AUDIO_STREAM_IN_SOURCE_H
+
+#include "NBAIO.h"
+
+namespace android {
+
+class StreamInHalInterface;
+
+// not multi-thread safe
+class AudioStreamInSource : public NBAIO_Source {
+
+public:
+ AudioStreamInSource(sp<StreamInHalInterface> stream);
+ virtual ~AudioStreamInSource();
+
+ // NBAIO_Port interface
+
+ virtual ssize_t negotiate(const NBAIO_Format offers[], size_t numOffers,
+ NBAIO_Format counterOffers[], size_t& numCounterOffers);
+ //virtual NBAIO_Format format() const;
+
+ // NBAIO_Sink interface
+
+ //virtual size_t framesRead() const;
+ virtual int64_t framesOverrun();
+ virtual int64_t overruns() { (void) framesOverrun(); return mOverruns; }
+
+ // This is an over-estimate, and could dupe the caller into making a blocking read()
+ // FIXME Use an audio HAL API to query the buffer filling status when it's available.
+ virtual ssize_t availableToRead() { return mStreamBufferSizeBytes / mFrameSize; }
+
+ virtual ssize_t read(void *buffer, size_t count);
+
+ // NBAIO_Sink end
+
+#if 0 // until necessary
+ sp<StreamInHalInterface> stream() const { return mStream; }
+#endif
+
+private:
+ sp<StreamInHalInterface> mStream;
+ size_t mStreamBufferSizeBytes; // as reported by get_buffer_size()
+ int64_t mFramesOverrun;
+ int64_t mOverruns;
+};
+
+} // namespace android
+
+#endif // ANDROID_AUDIO_STREAM_IN_SOURCE_H
diff --git a/media/libnbaio/include/AudioStreamOutSink.h b/media/libnbaio/include/AudioStreamOutSink.h
new file mode 100644
index 0000000..56a2a38
--- /dev/null
+++ b/media/libnbaio/include/AudioStreamOutSink.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_STREAM_OUT_SINK_H
+#define ANDROID_AUDIO_STREAM_OUT_SINK_H
+
+#include "NBAIO.h"
+
+namespace android {
+
+class StreamOutHalInterface;
+
+// not multi-thread safe
+class AudioStreamOutSink : public NBAIO_Sink {
+
+public:
+ AudioStreamOutSink(sp<StreamOutHalInterface> stream);
+ virtual ~AudioStreamOutSink();
+
+ // NBAIO_Port interface
+
+ virtual ssize_t negotiate(const NBAIO_Format offers[], size_t numOffers,
+ NBAIO_Format counterOffers[], size_t& numCounterOffers);
+ //virtual NBAIO_Format format();
+
+ // NBAIO_Sink interface
+
+ //virtual size_t framesWritten() const;
+ //virtual size_t framesUnderrun() const;
+ //virtual size_t underruns() const;
+
+ // This is an over-estimate, and could dupe the caller into making a blocking write()
+ // FIXME Use an audio HAL API to query the buffer emptying status when it's available.
+ virtual ssize_t availableToWrite() { return mStreamBufferSizeBytes / mFrameSize; }
+
+ virtual ssize_t write(const void *buffer, size_t count);
+
+ virtual status_t getTimestamp(ExtendedTimestamp &timestamp);
+
+ // NBAIO_Sink end
+
+#if 0 // until necessary
+ sp<StreamOutHalInterface> stream() const { return mStream; }
+#endif
+
+private:
+ sp<StreamOutHalInterface> mStream;
+ size_t mStreamBufferSizeBytes; // as reported by get_buffer_size()
+};
+
+} // namespace android
+
+#endif // ANDROID_AUDIO_STREAM_OUT_SINK_H
diff --git a/media/libnbaio/include/LibsndfileSink.h b/media/libnbaio/include/LibsndfileSink.h
new file mode 100644
index 0000000..97a57e03
--- /dev/null
+++ b/media/libnbaio/include/LibsndfileSink.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_LIBSNDFILE_SINK_H
+#define ANDROID_AUDIO_LIBSNDFILE_SINK_H
+
+#include "NBAIO.h"
+#include "sndfile.h"
+
+// Implementation of NBAIO_Sink that wraps a libsndfile opened in SFM_WRITE mode
+
+namespace android {
+
+class LibsndfileSink : public NBAIO_Sink {
+
+public:
+ LibsndfileSink(SNDFILE *sndfile, const SF_INFO &sfinfo);
+ virtual ~LibsndfileSink();
+
+ // NBAIO_Port interface
+
+ //virtual ssize_t negotiate(const NBAIO_Format offers[], size_t numOffers,
+ // NBAIO_Format counterOffers[], size_t& numCounterOffers);
+ //virtual NBAIO_Format format() const;
+
+ // NBAIO_Sink interface
+
+ //virtual size_t framesWritten() const;
+ //virtual size_t framesUnderrun() const;
+ //virtual size_t underruns() const;
+ //virtual ssize_t availableToWrite();
+ virtual ssize_t write(const void *buffer, size_t count);
+ //virtual ssize_t writeVia(writeVia_t via, size_t total, void *user, size_t block);
+
+private:
+ SNDFILE * mSndfile;
+};
+
+} // namespace android
+
+#endif // ANDROID_AUDIO_LIBSNDFILE_SINK_H
diff --git a/include/media/nbaio/LibsndfileSource.h b/media/libnbaio/include/LibsndfileSource.h
similarity index 100%
rename from include/media/nbaio/LibsndfileSource.h
rename to media/libnbaio/include/LibsndfileSource.h
diff --git a/media/libnbaio/include/MonoPipe.h b/media/libnbaio/include/MonoPipe.h
new file mode 100644
index 0000000..60ae92e
--- /dev/null
+++ b/media/libnbaio/include/MonoPipe.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_MONO_PIPE_H
+#define ANDROID_AUDIO_MONO_PIPE_H
+
+#include <time.h>
+#include <audio_utils/fifo.h>
+#include <media/SingleStateQueue.h>
+#include "NBAIO.h"
+
+namespace android {
+
+typedef SingleStateQueue<ExtendedTimestamp> ExtendedTimestampSingleStateQueue;
+
+// MonoPipe is similar to Pipe except:
+// - supports only a single reader, called MonoPipeReader
+// - write() cannot overrun; instead it will return a short actual count if insufficient space
+// - write() can optionally block if the pipe is full
+// Like Pipe, it is not multi-thread safe for either writer or reader
+// but writer and reader can be different threads.
+class MonoPipe : public NBAIO_Sink {
+
+ friend class MonoPipeReader;
+
+public:
+ // reqFrames will be rounded up to a power of 2, and all slots are available. Must be >= 2.
+ // Note: whatever shares this object with another thread needs to do so in an SMP-safe way (like
+ // creating it the object before creating the other thread, or storing the object with a
+ // release_store). Otherwise the other thread could see a partially-constructed object.
+ MonoPipe(size_t reqFrames, const NBAIO_Format& format, bool writeCanBlock = false);
+ virtual ~MonoPipe();
+
+ // NBAIO_Port interface
+
+ //virtual ssize_t negotiate(const NBAIO_Format offers[], size_t numOffers,
+ // NBAIO_Format counterOffers[], size_t& numCounterOffers);
+ //virtual NBAIO_Format format() const;
+
+ // NBAIO_Sink interface
+
+ //virtual int64_t framesWritten() const;
+ //virtual int64_t framesUnderrun() const;
+ //virtual int64_t underruns() const;
+
+ // returns n where 0 <= n <= mMaxFrames, or a negative status_t
+ // including the private status codes in NBAIO.h
+ virtual ssize_t availableToWrite();
+
+ virtual ssize_t write(const void *buffer, size_t count);
+ //virtual ssize_t writeVia(writeVia_t via, size_t total, void *user, size_t block);
+
+ // average number of frames present in the pipe under normal conditions.
+ // See throttling mechanism in MonoPipe::write()
+ size_t getAvgFrames() const { return mSetpoint; }
+ void setAvgFrames(size_t setpoint);
+ size_t maxFrames() const { return mMaxFrames; }
+
+ // Set the shutdown state for the write side of a pipe.
+ // This may be called by an unrelated thread. When shutdown state is 'true',
+ // a write that would otherwise block instead returns a short transfer count.
+ // There is no guarantee how long it will take for the shutdown to be recognized,
+ // but it will not be an unbounded amount of time.
+ // The state can be restored to normal by calling shutdown(false).
+ void shutdown(bool newState = true);
+
+ // Return true if the write side of a pipe is currently shutdown.
+ bool isShutdown();
+
+ // Return NO_ERROR if there is a timestamp available
+ status_t getTimestamp(ExtendedTimestamp &timestamp);
+
+private:
+ const size_t mMaxFrames; // as requested in constructor, rounded up to a power of 2
+ void * const mBuffer;
+ audio_utils_fifo mFifo;
+ audio_utils_fifo_writer mFifoWriter;
+ bool mWriteTsValid; // whether mWriteTs is valid
+ struct timespec mWriteTs; // time that the previous write() completed
+ size_t mSetpoint; // target value for pipe fill depth
+ const bool mWriteCanBlock; // whether write() should block if the pipe is full
+
+ bool mIsShutdown; // whether shutdown(true) was called, no barriers are needed
+
+ ExtendedTimestampSingleStateQueue::Shared mTimestampShared;
+ ExtendedTimestampSingleStateQueue::Mutator mTimestampMutator;
+ ExtendedTimestampSingleStateQueue::Observer mTimestampObserver;
+};
+
+} // namespace android
+
+#endif // ANDROID_AUDIO_MONO_PIPE_H
diff --git a/media/libnbaio/include/MonoPipeReader.h b/media/libnbaio/include/MonoPipeReader.h
new file mode 100644
index 0000000..0776ecd
--- /dev/null
+++ b/media/libnbaio/include/MonoPipeReader.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_MONO_PIPE_READER_H
+#define ANDROID_AUDIO_MONO_PIPE_READER_H
+
+#include "MonoPipe.h"
+
+namespace android {
+
+// MonoPipeReader is safe for only a single reader thread
+class MonoPipeReader : public NBAIO_Source {
+
+public:
+
+ // Construct a MonoPipeReader and associate it with a MonoPipe;
+ // any data already in the pipe is visible to this MonoPipeReader.
+ // There can be only a single MonoPipeReader per MonoPipe.
+ // FIXME make this constructor a factory method of MonoPipe.
+ MonoPipeReader(MonoPipe* pipe);
+ virtual ~MonoPipeReader();
+
+ // NBAIO_Port interface
+
+ //virtual ssize_t negotiate(const NBAIO_Format offers[], size_t numOffers,
+ // NBAIO_Format counterOffers[], size_t& numCounterOffers);
+ //virtual NBAIO_Format format() const;
+
+ // NBAIO_Source interface
+
+ //virtual size_t framesRead() const;
+ //virtual size_t framesOverrun();
+ //virtual size_t overruns();
+
+ virtual ssize_t availableToRead();
+
+ virtual ssize_t read(void *buffer, size_t count);
+
+ virtual void onTimestamp(const ExtendedTimestamp &timestamp);
+
+ // NBAIO_Source end
+
+#if 0 // until necessary
+ MonoPipe* pipe() const { return mPipe; }
+#endif
+
+private:
+ MonoPipe * const mPipe;
+ audio_utils_fifo_reader mFifoReader;
+};
+
+} // namespace android
+
+#endif // ANDROID_AUDIO_MONO_PIPE_READER_H
diff --git a/media/libnbaio/include/NBAIO.h b/media/libnbaio/include/NBAIO.h
new file mode 100644
index 0000000..f8ec38b
--- /dev/null
+++ b/media/libnbaio/include/NBAIO.h
@@ -0,0 +1,334 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_NBAIO_H
+#define ANDROID_AUDIO_NBAIO_H
+
+// Non-blocking audio I/O interface
+//
+// This header file has the abstract interfaces only. Concrete implementation classes are declared
+// elsewhere. Implementations _should_ be non-blocking for all methods, especially read() and
+// write(), but this is not enforced. In general, implementations do not need to be multi-thread
+// safe, and any exceptions are noted in the particular implementation.
+
+#include <limits.h>
+#include <stdlib.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+#include <media/AudioTimestamp.h>
+#include <system/audio.h>
+
+namespace android {
+
+// In addition to the usual status_t
+enum {
+ NEGOTIATE = (UNKNOWN_ERROR + 0x100), // Must (re-)negotiate format. For negotiate() only,
+ // the offeree doesn't accept offers, and proposes
+ // counter-offers
+ OVERRUN = (UNKNOWN_ERROR + 0x101), // availableToRead(), read(), or readVia() detected
+ // lost input due to overrun; an event is counted and
+ // the caller should re-try
+ UNDERRUN = (UNKNOWN_ERROR + 0x102), // availableToWrite(), write(), or writeVia() detected
+ // a gap in output due to underrun (not being called
+ // often enough, or with enough data); an event is
+ // counted and the caller should re-try
+};
+
+// Negotiation of format is based on the data provider and data sink, or the data consumer and
+// data source, exchanging prioritized arrays of offers and counter-offers until a single offer is
+// mutually agreed upon. Each offer is an NBAIO_Format. For simplicity and performance,
+// NBAIO_Format is a typedef that ties together the most important combinations of the various
+// attributes, rather than a struct with separate fields for format, sample rate, channel count,
+// interleave, packing, alignment, etc. The reason is that NBAIO_Format tries to abstract out only
+// the combinations that are actually needed within AudioFlinger. If the list of combinations grows
+// too large, then this decision should be re-visited.
+// Sample rate and channel count are explicit, PCM interleaved 16-bit is assumed.
+struct NBAIO_Format {
+// FIXME make this a class, and change Format_... global methods to class methods
+//private:
+ unsigned mSampleRate;
+ unsigned mChannelCount;
+ audio_format_t mFormat;
+ size_t mFrameSize;
+};
+
+extern const NBAIO_Format Format_Invalid;
+
+// Return the frame size of an NBAIO_Format in bytes
+size_t Format_frameSize(const NBAIO_Format& format);
+
+// Convert a sample rate in Hz and channel count to an NBAIO_Format
+// FIXME rename
+NBAIO_Format Format_from_SR_C(unsigned sampleRate, unsigned channelCount, audio_format_t format);
+
+// Return the sample rate in Hz of an NBAIO_Format
+unsigned Format_sampleRate(const NBAIO_Format& format);
+
+// Return the channel count of an NBAIO_Format
+unsigned Format_channelCount(const NBAIO_Format& format);
+
+// Callbacks used by NBAIO_Sink::writeVia() and NBAIO_Source::readVia() below.
+typedef ssize_t (*writeVia_t)(void *user, void *buffer, size_t count);
+typedef ssize_t (*readVia_t)(void *user, const void *buffer, size_t count);
+
+// Check whether an NBAIO_Format is valid
+bool Format_isValid(const NBAIO_Format& format);
+
+// Compare two NBAIO_Format values
+bool Format_isEqual(const NBAIO_Format& format1, const NBAIO_Format& format2);
+
+// Abstract class (interface) representing a data port.
+class NBAIO_Port : public RefBase {
+
+public:
+
+ // negotiate() must be called first. The purpose of negotiate() is to check compatibility of
+ // formats, not to automatically adapt if they are incompatible. It's the responsibility of
+ // whoever sets up the graph connections to make sure formats are compatible, and this method
+ // just verifies that. The edges are "dumb" and don't attempt to adapt to bad connections.
+ // How it works: offerer proposes an array of formats, in descending order of preference from
+ // offers[0] to offers[numOffers - 1]. If offeree accepts one of these formats, it returns
+ // the index of that offer. Otherwise, offeree sets numCounterOffers to the number of
+ // counter-offers (up to a maximum of the entry value of numCounterOffers), fills in the
+ // provided array counterOffers[] with its counter-offers, in descending order of preference
+ // from counterOffers[0] to counterOffers[numCounterOffers - 1], and returns NEGOTIATE.
+ // Note that since the offerer allocates space for counter-offers, but only the offeree knows
+ // how many counter-offers it has, there may be insufficient space for all counter-offers.
+ // In that case, the offeree sets numCounterOffers to the requested number of counter-offers
+ // (which is greater than the entry value of numCounterOffers), fills in as many of the most
+ // important counterOffers as will fit, and returns NEGOTIATE. As this implies a re-allocation,
+ // it should be used as a last resort. It is preferable for the offerer to simply allocate a
+ // larger space to begin with, and/or for the offeree to tolerate a smaller space than desired.
+ // Alternatively, the offerer can pass NULL for offers and counterOffers, and zero for
+ // numOffers. This indicates that it has not allocated space for any counter-offers yet.
+ // In this case, the offeree should set numCounterOffers appropriately and return NEGOTIATE.
+ // Then the offerer will allocate the correct amount of memory and retry.
+ // Format_Invalid is not allowed as either an offer or counter-offer.
+ // Returns:
+ // >= 0 Offer accepted.
+ // NEGOTIATE No offer accepted, and counter-offer(s) optionally made. See above for details.
+ virtual ssize_t negotiate(const NBAIO_Format offers[], size_t numOffers,
+ NBAIO_Format counterOffers[], size_t& numCounterOffers);
+
+ // Return the current negotiated format, or Format_Invalid if negotiation has not been done,
+ // or if re-negotiation is required.
+ virtual NBAIO_Format format() const { return mNegotiated ? mFormat : Format_Invalid; }
+
+protected:
+ NBAIO_Port(const NBAIO_Format& format) : mNegotiated(false), mFormat(format),
+ mFrameSize(Format_frameSize(format)) { }
+ virtual ~NBAIO_Port() { }
+
+ // Implementations are free to ignore these if they don't need them
+
+ bool mNegotiated; // mNegotiated implies (mFormat != Format_Invalid)
+ NBAIO_Format mFormat; // (mFormat != Format_Invalid) does not imply mNegotiated
+ size_t mFrameSize; // assign in parallel with any assignment to mFormat
+};
+
+// Abstract class (interface) representing a non-blocking data sink, for use by a data provider.
+class NBAIO_Sink : public NBAIO_Port {
+
+public:
+
+ // For the next two APIs:
+ // 32 bits rolls over after 27 hours at 44.1 kHz; if that concerns you then poll periodically.
+
+ // Return the number of frames written successfully since construction.
+ virtual int64_t framesWritten() const { return mFramesWritten; }
+
+ // Number of frames lost due to underrun since construction.
+ virtual int64_t framesUnderrun() const { return 0; }
+
+ // Number of underruns since construction, where a set of contiguous lost frames is one event.
+ virtual int64_t underruns() const { return 0; }
+
+ // Estimate of number of frames that could be written successfully now without blocking.
+ // When a write() is actually attempted, the implementation is permitted to return a smaller or
+ // larger transfer count, however it will make a good faith effort to give an accurate estimate.
+ // Errors:
+ // NEGOTIATE (Re-)negotiation is needed.
+ // UNDERRUN write() has not been called frequently enough, or with enough frames to keep up.
+ // An underrun event is counted, and the caller should re-try this operation.
+ // WOULD_BLOCK Determining how many frames can be written without blocking would itself block.
+ virtual ssize_t availableToWrite() {
+ if (!mNegotiated) {
+ return NEGOTIATE;
+ }
+ return SSIZE_MAX;
+ }
+
+ // Transfer data to sink from single input buffer. Implies a copy.
+ // Inputs:
+ // buffer Non-NULL buffer owned by provider.
+ // count Maximum number of frames to transfer.
+ // Return value:
+ // > 0 Number of frames successfully transferred prior to first error.
+ // = 0 Count was zero.
+ // < 0 status_t error occurred prior to the first frame transfer.
+ // Errors:
+ // NEGOTIATE (Re-)negotiation is needed.
+ // WOULD_BLOCK No frames can be transferred without blocking.
+ // UNDERRUN write() has not been called frequently enough, or with enough frames to keep up.
+ // An underrun event is counted, and the caller should re-try this operation.
+ virtual ssize_t write(const void *buffer, size_t count) = 0;
+
+ // Transfer data to sink using a series of callbacks. More suitable for zero-fill, synthesis,
+ // and non-contiguous transfers (e.g. circular buffer or writev).
+ // Inputs:
+ // via Callback function that the sink will call as many times as needed to consume data.
+ // total Estimate of the number of frames the provider has available. This is an estimate,
+ // and it can provide a different number of frames during the series of callbacks.
+ // user Arbitrary void * reserved for data provider.
+ // block Number of frames per block, that is a suggested value for 'count' in each callback.
+ // Zero means no preference. This parameter is a hint only, and may be ignored.
+ // Return value:
+ // > 0 Total number of frames successfully transferred prior to first error.
+ // = 0 Count was zero.
+ // < 0 status_t error occurred prior to the first frame transfer.
+ // Errors:
+ // NEGOTIATE (Re-)negotiation is needed.
+ // WOULD_BLOCK No frames can be transferred without blocking.
+ // UNDERRUN write() has not been called frequently enough, or with enough frames to keep up.
+ // An underrun event is counted, and the caller should re-try this operation.
+ //
+ // The 'via' callback is called by the data sink as follows:
+ // Inputs:
+ // user Arbitrary void * reserved for data provider.
+ // buffer Non-NULL buffer owned by sink that callback should fill in with data,
+ // up to a maximum of 'count' frames.
+ // count Maximum number of frames to transfer during this callback.
+ // Return value:
+ // > 0 Number of frames successfully transferred during this callback prior to first error.
+ // = 0 Count was zero.
+ // < 0 status_t error occurred prior to the first frame transfer during this callback.
+ virtual ssize_t writeVia(writeVia_t via, size_t total, void *user, size_t block = 0);
+
+ // Returns NO_ERROR if a timestamp is available. The timestamp includes the total number
+ // of frames presented to an external observer, together with the value of CLOCK_MONOTONIC
+ // as of this presentation count. The timestamp parameter is undefined if error is returned.
+ virtual status_t getTimestamp(ExtendedTimestamp& /*timestamp*/) { return INVALID_OPERATION; }
+
+protected:
+ NBAIO_Sink(const NBAIO_Format& format = Format_Invalid) : NBAIO_Port(format), mFramesWritten(0)
+ { }
+ virtual ~NBAIO_Sink() { }
+
+ // Implementations are free to ignore these if they don't need them
+ int64_t mFramesWritten;
+};
+
+// Abstract class (interface) representing a non-blocking data source, for use by a data consumer.
+class NBAIO_Source : public NBAIO_Port {
+
+public:
+
+ // For the next two APIs:
+ // 32 bits rolls over after 27 hours at 44.1 kHz; if that concerns you then poll periodically.
+
+ // Number of frames read successfully since construction.
+ virtual int64_t framesRead() const { return mFramesRead; }
+
+ // Number of frames lost due to overrun since construction.
+ // Not const because implementations may need to do I/O.
+ virtual int64_t framesOverrun() /*const*/ { return 0; }
+
+ // Number of overruns since construction, where a set of contiguous lost frames is one event.
+ // Not const because implementations may need to do I/O.
+ virtual int64_t overruns() /*const*/ { return 0; }
+
+ // Estimate of number of frames that could be read successfully now.
+ // When a read() is actually attempted, the implementation is permitted to return a smaller or
+ // larger transfer count, however it will make a good faith effort to give an accurate estimate.
+ // Errors:
+ // NEGOTIATE (Re-)negotiation is needed.
+ // OVERRUN One or more frames were lost due to overrun, try again to read more recent data.
+ // WOULD_BLOCK Determining how many frames can be read without blocking would itself block.
+ virtual ssize_t availableToRead() { return SSIZE_MAX; }
+
+ // Transfer data from source into single destination buffer. Implies a copy.
+ // Inputs:
+ // buffer Non-NULL destination buffer owned by consumer.
+ // count Maximum number of frames to transfer.
+ // Return value:
+ // > 0 Number of frames successfully transferred prior to first error.
+ // = 0 Count was zero.
+ // < 0 status_t error occurred prior to the first frame transfer.
+ // Errors:
+ // NEGOTIATE (Re-)negotiation is needed.
+ // WOULD_BLOCK No frames can be transferred without blocking.
+ // OVERRUN read() has not been called frequently enough, or with enough frames to keep up.
+ // One or more frames were lost due to overrun, try again to read more recent data.
+ virtual ssize_t read(void *buffer, size_t count) = 0;
+
+ // Flush data from buffer. There is no notion of overrun as all data is dropped.
+ // Flushed frames also count towards frames read.
+ //
+ // Return value:
+ // >= 0 Number of frames successfully flushed
+ // < 0 status_t error occurred
+ // Errors:
+ // NEGOTIATE (Re-)negotiation is needed.
+ // INVALID_OPERATION Not implemented
+ virtual ssize_t flush() { return INVALID_OPERATION; }
+
+ // Transfer data from source using a series of callbacks. More suitable for zero-fill,
+ // synthesis, and non-contiguous transfers (e.g. circular buffer or readv).
+ // Inputs:
+ // via Callback function that the source will call as many times as needed to provide data.
+ // total Estimate of the number of frames the consumer desires. This is an estimate,
+ // and it can consume a different number of frames during the series of callbacks.
+ // user Arbitrary void * reserved for data consumer.
+ // block Number of frames per block, that is a suggested value for 'count' in each callback.
+ // Zero means no preference. This parameter is a hint only, and may be ignored.
+ // Return value:
+ // > 0 Total number of frames successfully transferred prior to first error.
+ // = 0 Count was zero.
+ // < 0 status_t error occurred prior to the first frame transfer.
+ // Errors:
+ // NEGOTIATE (Re-)negotiation is needed.
+ // WOULD_BLOCK No frames can be transferred without blocking.
+ // OVERRUN read() has not been called frequently enough, or with enough frames to keep up.
+ // One or more frames were lost due to overrun, try again to read more recent data.
+ //
+ // The 'via' callback is called by the data source as follows:
+ // Inputs:
+ // user Arbitrary void * reserved for data consumer.
+ // dest Non-NULL buffer owned by source that callback should consume data from,
+ // up to a maximum of 'count' frames.
+ // count Maximum number of frames to transfer during this callback.
+ // Return value:
+ // > 0 Number of frames successfully transferred during this callback prior to first error.
+ // = 0 Count was zero.
+ // < 0 status_t error occurred prior to the first frame transfer during this callback.
+ virtual ssize_t readVia(readVia_t via, size_t total, void *user, size_t block = 0);
+
+ // Invoked asynchronously by corresponding sink when a new timestamp is available.
+ // Default implementation ignores the timestamp.
+ virtual void onTimestamp(const ExtendedTimestamp& /*timestamp*/) { }
+
+protected:
+ NBAIO_Source(const NBAIO_Format& format = Format_Invalid) : NBAIO_Port(format), mFramesRead(0)
+ { }
+ virtual ~NBAIO_Source() { }
+
+ // Implementations are free to ignore these if they don't need them
+ int64_t mFramesRead;
+};
+
+} // namespace android
+
+#endif // ANDROID_AUDIO_NBAIO_H
diff --git a/media/libnbaio/include/NBLog.h b/media/libnbaio/include/NBLog.h
new file mode 100644
index 0000000..bcebe9e
--- /dev/null
+++ b/media/libnbaio/include/NBLog.h
@@ -0,0 +1,484 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Non-blocking event logger intended for safe communication between processes via shared memory
+
+#ifndef ANDROID_MEDIA_NBLOG_H
+#define ANDROID_MEDIA_NBLOG_H
+
+#include <binder/IMemory.h>
+#include <audio_utils/fifo.h>
+#include <utils/Mutex.h>
+#include <utils/threads.h>
+
+#include <vector>
+
+namespace android {
+
+class String8;
+
+class NBLog {
+
+public:
+
+class Writer;
+class Reader;
+
+private:
+
+enum Event {
+ EVENT_RESERVED,
+ EVENT_STRING, // ASCII string, not NUL-terminated
+ // TODO: make timestamp optional
+ EVENT_TIMESTAMP, // clock_gettime(CLOCK_MONOTONIC)
+ EVENT_INTEGER, // integer value entry
+ EVENT_FLOAT, // floating point value entry
+ EVENT_PID, // process ID and process name
+ EVENT_AUTHOR, // author index (present in merged logs) tracks entry's original log
+ EVENT_START_FMT, // logFormat start event: entry includes format string, following
+ // entries contain format arguments
+ EVENT_END_FMT, // end of logFormat argument list
+};
+
+
+// ---------------------------------------------------------------------------
+// API for handling format entry operations
+
+// a formatted entry has the following structure:
+// * START_FMT entry, containing the format string
+// * TIMESTAMP entry
+// * author entry of the thread that generated it (optional, present in merged log)
+// * format arg1
+// * format arg2
+// * ...
+// * END_FMT entry
+
+class FormatEntry {
+public:
+ // build a Format Entry starting in the given pointer
+ class iterator;
+ explicit FormatEntry(const uint8_t *entry);
+ explicit FormatEntry(const iterator &it);
+ virtual ~FormatEntry() {}
+
+ // entry representation in memory
+ struct entry {
+ const uint8_t type;
+ const uint8_t length;
+ const uint8_t data[0];
+ };
+
+ // entry tail representation (after data)
+ struct ending {
+ uint8_t length;
+ uint8_t next[0];
+ };
+
+ // entry iterator
+ class iterator {
+ public:
+ iterator();
+ iterator(const uint8_t *entry);
+ iterator(const iterator &other);
+
+ // dereference underlying entry
+ const entry& operator*() const;
+ const entry* operator->() const;
+ // advance to next entry
+ iterator& operator++(); // ++i
+ // back to previous entry
+ iterator& operator--(); // --i
+ iterator next() const;
+ iterator prev() const;
+ bool operator!=(const iterator &other) const;
+ int operator-(const iterator &other) const;
+
+ bool hasConsistentLength() const;
+ void copyTo(std::unique_ptr<audio_utils_fifo_writer> &dst) const;
+ void copyData(uint8_t *dst) const;
+
+ template<typename T>
+ inline const T& payload() {
+ return *reinterpret_cast<const T *>(ptr + offsetof(entry, data));
+ }
+
+ private:
+ friend class FormatEntry;
+ const uint8_t *ptr;
+ };
+
+ // Entry's format string
+ const char* formatString() const;
+
+ // Entry's format string length
+ size_t formatStringLength() const;
+
+ // Format arguments (excluding format string, timestamp and author)
+ iterator args() const;
+
+ // get format entry timestamp
+ timespec timestamp() const;
+
+ // entry's author index (-1 if none present)
+ // a Merger has a vector of Readers, author simply points to the index of the
+ // Reader that originated the entry
+ int author() const;
+
+ // copy entry, adding author before timestamp, returns size of original entry
+ iterator copyWithAuthor(std::unique_ptr<audio_utils_fifo_writer> &dst, int author) const;
+
+ iterator begin() const;
+
+private:
+ // copies ordinary entry from src to dst, and returns length of entry
+ // size_t copyEntry(audio_utils_fifo_writer *dst, const iterator &it);
+ const uint8_t *mEntry;
+};
+
+// ---------------------------------------------------------------------------
+
+// representation of a single log entry in private memory
+struct Entry {
+ Entry(Event event, const void *data, size_t length)
+ : mEvent(event), mLength(length), mData(data) { }
+ /*virtual*/ ~Entry() { }
+
+ int readAt(size_t offset) const;
+
+private:
+ friend class Writer;
+ Event mEvent; // event type
+ uint8_t mLength; // length of additional data, 0 <= mLength <= kMaxLength
+ const void *mData; // event type-specific data
+ static const size_t kMaxLength = 255;
+public:
+ // mEvent, mLength, mData[...], duplicate mLength
+ static const size_t kOverhead = sizeof(FormatEntry::entry) + sizeof(FormatEntry::ending);
+ // ending length of previous entry
+ static const size_t kPreviousLengthOffset = - sizeof(FormatEntry::ending) +
+ offsetof(FormatEntry::ending, length);
+};
+
+// representation of a single log entry in shared memory
+// byte[0] mEvent
+// byte[1] mLength
+// byte[2] mData[0]
+// ...
+// byte[2+i] mData[i]
+// ...
+// byte[2+mLength-1] mData[mLength-1]
+// byte[2+mLength] duplicate copy of mLength to permit reverse scan
+// byte[3+mLength] start of next log entry
+
+ static void appendInt(String8 *body, const void *data);
+ static void appendFloat(String8 *body, const void *data);
+ static void appendPID(String8 *body, const void *data, size_t length);
+ static void appendTimestamp(String8 *body, const void *data);
+ static size_t fmtEntryLength(const uint8_t *data);
+
+public:
+
+// Located in shared memory, must be POD.
+// Exactly one process must explicitly call the constructor or use placement new.
+// Since this is a POD, the destructor is empty and unnecessary to call it explicitly.
+struct Shared {
+ Shared() /* mRear initialized via default constructor */ { }
+ /*virtual*/ ~Shared() { }
+
+ audio_utils_fifo_index mRear; // index one byte past the end of most recent Entry
+ char mBuffer[0]; // circular buffer for entries
+};
+
+public:
+
+// ---------------------------------------------------------------------------
+
+// FIXME Timeline was intended to wrap Writer and Reader, but isn't actually used yet.
+// For now it is just a namespace for sharedSize().
+class Timeline : public RefBase {
+public:
+#if 0
+ Timeline(size_t size, void *shared = NULL);
+ virtual ~Timeline();
+#endif
+
+ // Input parameter 'size' is the desired size of the timeline in byte units.
+ // Returns the size rounded up to a power-of-2, plus the constant size overhead for indices.
+ static size_t sharedSize(size_t size);
+
+#if 0
+private:
+ friend class Writer;
+ friend class Reader;
+
+ const size_t mSize; // circular buffer size in bytes, must be a power of 2
+ bool mOwn; // whether I own the memory at mShared
+ Shared* const mShared; // pointer to shared memory
+#endif
+};
+
+// ---------------------------------------------------------------------------
+
+// Writer is thread-safe with respect to Reader, but not with respect to multiple threads
+// calling Writer methods. If you need multi-thread safety for writing, use LockedWriter.
+class Writer : public RefBase {
+public:
+ Writer(); // dummy nop implementation without shared memory
+
+ // Input parameter 'size' is the desired size of the timeline in byte units.
+ // The size of the shared memory must be at least Timeline::sharedSize(size).
+ Writer(void *shared, size_t size);
+ Writer(const sp<IMemory>& iMemory, size_t size);
+
+ virtual ~Writer();
+
+ virtual void log(const char *string);
+ virtual void logf(const char *fmt, ...) __attribute__ ((format (printf, 2, 3)));
+ virtual void logvf(const char *fmt, va_list ap);
+ virtual void logTimestamp();
+ virtual void logTimestamp(const struct timespec &ts);
+ virtual void logInteger(const int x);
+ virtual void logFloat(const float x);
+ virtual void logPID();
+ virtual void logFormat(const char *fmt, ...);
+ virtual void logVFormat(const char *fmt, va_list ap);
+ virtual void logStart(const char *fmt);
+ virtual void logEnd();
+
+
+ virtual bool isEnabled() const;
+
+ // return value for all of these is the previous isEnabled()
+ virtual bool setEnabled(bool enabled); // but won't enable if no shared memory
+ bool enable() { return setEnabled(true); }
+ bool disable() { return setEnabled(false); }
+
+ sp<IMemory> getIMemory() const { return mIMemory; }
+
+private:
+ // 0 <= length <= kMaxLength
+ void log(Event event, const void *data, size_t length);
+ void log(const Entry *entry, bool trusted = false);
+
+ Shared* const mShared; // raw pointer to shared memory
+ sp<IMemory> mIMemory; // ref-counted version, initialized in constructor and then const
+ audio_utils_fifo * const mFifo; // FIFO itself,
+ // non-NULL unless constructor fails
+ audio_utils_fifo_writer * const mFifoWriter; // used to write to FIFO,
+ // non-NULL unless dummy constructor used
+ bool mEnabled; // whether to actually log
+
+ // cached pid and process name to use in %p format specifier
+ // total tag length is mPidTagSize and process name is not zero terminated
+ char *mPidTag;
+ size_t mPidTagSize;
+};
+
+// ---------------------------------------------------------------------------
+
+// Similar to Writer, but safe for multiple threads to call concurrently
+class LockedWriter : public Writer {
+public:
+ LockedWriter();
+ LockedWriter(void *shared, size_t size);
+
+ virtual void log(const char *string);
+ virtual void logf(const char *fmt, ...) __attribute__ ((format (printf, 2, 3)));
+ virtual void logvf(const char *fmt, va_list ap);
+ virtual void logTimestamp();
+ virtual void logTimestamp(const struct timespec &ts);
+ virtual void logInteger(const int x);
+ virtual void logFloat(const float x);
+ virtual void logPID();
+ virtual void logStart(const char *fmt);
+ virtual void logEnd();
+
+ virtual bool isEnabled() const;
+ virtual bool setEnabled(bool enabled);
+
+private:
+ mutable Mutex mLock;
+};
+
+// ---------------------------------------------------------------------------
+
+class Reader : public RefBase {
+public:
+
+ // A snapshot of a reader's buffer
+ class Snapshot {
+ public:
+ Snapshot() : mData(NULL), mLost(0) {}
+
+ Snapshot(size_t bufferSize) : mData(new uint8_t[bufferSize]) {}
+
+ ~Snapshot() { delete[] mData; }
+
+ // copy of the buffer
+ uint8_t *data() const { return mData; }
+
+ // amount of data lost (given by audio_utils_fifo_reader)
+ size_t lost() const { return mLost; }
+
+ // iterator to beginning of readable segment of snapshot
+ // data between begin and end has valid entries
+ FormatEntry::iterator begin() { return mBegin; }
+
+ // iterator to end of readable segment of snapshot
+ FormatEntry::iterator end() { return mEnd; }
+
+
+ private:
+ friend class Reader;
+ uint8_t *mData;
+ size_t mLost;
+ FormatEntry::iterator mBegin;
+ FormatEntry::iterator mEnd;
+ };
+
+ // Input parameter 'size' is the desired size of the timeline in byte units.
+ // The size of the shared memory must be at least Timeline::sharedSize(size).
+ Reader(const void *shared, size_t size);
+ Reader(const sp<IMemory>& iMemory, size_t size);
+
+ virtual ~Reader();
+
+ // get snapshot of the reader's fifo buffer, effectively consuming the buffer
+ std::unique_ptr<Snapshot> getSnapshot();
+ // dump a particular snapshot of the reader
+ void dump(int fd, size_t indent, Snapshot & snap);
+ // dump the current content of the reader's buffer
+ void dump(int fd, size_t indent = 0);
+ bool isIMemory(const sp<IMemory>& iMemory) const;
+
+private:
+ /*const*/ Shared* const mShared; // raw pointer to shared memory, actually const but not
+ // declared as const because audio_utils_fifo() constructor
+ sp<IMemory> mIMemory; // ref-counted version, assigned only in constructor
+ int mFd; // file descriptor
+ int mIndent; // indentation level
+ audio_utils_fifo * const mFifo; // FIFO itself,
+ // non-NULL unless constructor fails
+ audio_utils_fifo_reader * const mFifoReader; // used to read from FIFO,
+ // non-NULL unless constructor fails
+
+ void dumpLine(const String8& timestamp, String8& body);
+
+ FormatEntry::iterator handleFormat(const FormatEntry &fmtEntry,
+ String8 *timestamp,
+ String8 *body);
+ // dummy method for handling absent author entry
+ virtual size_t handleAuthor(const FormatEntry& /*fmtEntry*/, String8* /*body*/) { return 0; }
+
+ // Searches for the last entry of type <type> in the range [front, back)
+ // back has to be entry-aligned. Returns nullptr if none encountered.
+ static uint8_t *findLastEntryOfType(uint8_t *front, uint8_t *back, uint8_t type);
+
+ static const size_t kSquashTimestamp = 5; // squash this many or more adjacent timestamps
+};
+
+// Wrapper for a reader with a name. Contains a pointer to the reader and a pointer to the name
+class NamedReader {
+public:
+ NamedReader() { mName[0] = '\0'; } // for Vector
+ NamedReader(const sp<NBLog::Reader>& reader, const char *name) :
+ mReader(reader)
+ { strlcpy(mName, name, sizeof(mName)); }
+ ~NamedReader() { }
+ const sp<NBLog::Reader>& reader() const { return mReader; }
+ const char* name() const { return mName; }
+
+private:
+ sp<NBLog::Reader> mReader;
+ static const size_t kMaxName = 32;
+ char mName[kMaxName];
+};
+
+// ---------------------------------------------------------------------------
+
+class Merger : public RefBase {
+public:
+ Merger(const void *shared, size_t size);
+
+ virtual ~Merger() {}
+
+ void addReader(const NamedReader &reader);
+ // TODO add removeReader
+ void merge();
+ const std::vector<NamedReader> *getNamedReaders() const;
+private:
+ // vector of the readers the merger is supposed to merge from.
+ // every reader reads from a writer's buffer
+ std::vector<NamedReader> mNamedReaders;
+ Shared * const mShared;
+ std::unique_ptr<audio_utils_fifo> mFifo;
+ std::unique_ptr<audio_utils_fifo_writer> mFifoWriter;
+
+ static struct timespec getTimestamp(const uint8_t *data);
+};
+
+class MergeReader : public Reader {
+public:
+ MergeReader(const void *shared, size_t size, Merger &merger);
+private:
+ const std::vector<NamedReader> *mNamedReaders;
+ // handle author entry by looking up the author's name and appending it to the body
+ // returns number of bytes read from fmtEntry
+ size_t handleAuthor(const FormatEntry &fmtEntry, String8 *body);
+};
+
+// MergeThread is a thread that contains a Merger. It works as a retriggerable one-shot:
+// when triggered, it awakes for a lapse of time, during which it periodically merges; if
+// retriggered, the timeout is reset.
+// The thread is triggered on AudioFlinger binder activity.
+class MergeThread : public Thread {
+public:
+ MergeThread(Merger &merger);
+ virtual ~MergeThread() override;
+
+ // Reset timeout and activate thread to merge periodically if it's idle
+ void wakeup();
+
+ // Set timeout period until the merging thread goes idle again
+ void setTimeoutUs(int time);
+
+private:
+ virtual bool threadLoop() override;
+
+ // the merger who actually does the work of merging the logs
+ Merger& mMerger;
+
+ // mutex for the condition variable
+ Mutex mMutex;
+
+ // condition variable to activate merging on timeout >= 0
+ Condition mCond;
+
+ // time left until the thread blocks again (in microseconds)
+ int mTimeoutUs;
+
+ // merging period when the thread is awake
+ static const int kThreadSleepPeriodUs = 1000000 /*1s*/;
+
+ // initial timeout value when triggered
+ static const int kThreadWakeupPeriodUs = 3000000 /*3s*/;
+};
+
+}; // class NBLog
+
+} // namespace android
+
+#endif // ANDROID_MEDIA_NBLOG_H
diff --git a/media/libnbaio/include/Pipe.h b/media/libnbaio/include/Pipe.h
new file mode 100644
index 0000000..58b9750
--- /dev/null
+++ b/media/libnbaio/include/Pipe.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_PIPE_H
+#define ANDROID_AUDIO_PIPE_H
+
+#include <audio_utils/fifo.h>
+#include "NBAIO.h"
+
+namespace android {
+
+// Pipe is multi-thread safe for readers (see PipeReader), but safe for only a single writer thread.
+// It cannot UNDERRUN on write, unless we allow designation of a master reader that provides the
+// time-base. Readers can be added and removed dynamically, and it's OK to have no readers.
+class Pipe : public NBAIO_Sink {
+
+ friend class PipeReader;
+
+public:
+ // maxFrames will be rounded up to a power of 2, and all slots are available. Must be >= 2.
+ // buffer is an optional parameter specifying the virtual address of the pipe buffer,
+ // which must be of size roundup(maxFrames) * Format_frameSize(format) bytes.
+ Pipe(size_t maxFrames, const NBAIO_Format& format, void *buffer = NULL);
+
+ // If a buffer was specified in the constructor, it is not automatically freed by destructor.
+ virtual ~Pipe();
+
+ // NBAIO_Port interface
+
+ //virtual ssize_t negotiate(const NBAIO_Format offers[], size_t numOffers,
+ // NBAIO_Format counterOffers[], size_t& numCounterOffers);
+ //virtual NBAIO_Format format() const;
+
+ // NBAIO_Sink interface
+
+ //virtual int64_t framesWritten() const;
+ //virtual int64_t framesUnderrun() const;
+ //virtual int64_t underruns() const;
+
+ // The write side of a pipe permits overruns; flow control is the caller's responsibility.
+ // It doesn't return +infinity because that would guarantee an overrun.
+ virtual ssize_t availableToWrite() { return mMaxFrames; }
+
+ virtual ssize_t write(const void *buffer, size_t count);
+ //virtual ssize_t writeVia(writeVia_t via, size_t total, void *user, size_t block);
+
+private:
+ const size_t mMaxFrames; // always a power of 2
+ void * const mBuffer;
+ audio_utils_fifo mFifo;
+ audio_utils_fifo_writer mFifoWriter;
+ volatile int32_t mReaders; // number of PipeReader clients currently attached to this Pipe
+ const bool mFreeBufferInDestructor;
+};
+
+} // namespace android
+
+#endif // ANDROID_AUDIO_PIPE_H
diff --git a/media/libnbaio/include/PipeReader.h b/media/libnbaio/include/PipeReader.h
new file mode 100644
index 0000000..70ecb34
--- /dev/null
+++ b/media/libnbaio/include/PipeReader.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_PIPE_READER_H
+#define ANDROID_AUDIO_PIPE_READER_H
+
+#include "Pipe.h"
+
+namespace android {
+
+// PipeReader is safe for only a single thread
+class PipeReader : public NBAIO_Source {
+
+public:
+
+ // Construct a PipeReader and associate it with a Pipe
+ // FIXME make this constructor a factory method of Pipe.
+ PipeReader(Pipe& pipe);
+ virtual ~PipeReader();
+
+ // NBAIO_Port interface
+
+ //virtual ssize_t negotiate(const NBAIO_Format offers[], size_t numOffers,
+ // NBAIO_Format counterOffers[], size_t& numCounterOffers);
+ //virtual NBAIO_Format format() const;
+
+ // NBAIO_Source interface
+
+ //virtual size_t framesRead() const;
+ virtual int64_t framesOverrun() { return mFramesOverrun; }
+ virtual int64_t overruns() { return mOverruns; }
+
+ virtual ssize_t availableToRead();
+
+ virtual ssize_t read(void *buffer, size_t count);
+
+ virtual ssize_t flush();
+
+ // NBAIO_Source end
+
+#if 0 // until necessary
+ Pipe& pipe() const { return mPipe; }
+#endif
+
+private:
+ Pipe& mPipe;
+ audio_utils_fifo_reader mFifoReader;
+ int64_t mFramesOverrun;
+ int64_t mOverruns;
+};
+
+} // namespace android
+
+#endif // ANDROID_AUDIO_PIPE_READER_H
diff --git a/include/media/nbaio/SourceAudioBufferProvider.h b/media/libnbaio/include/SourceAudioBufferProvider.h
similarity index 100%
rename from include/media/nbaio/SourceAudioBufferProvider.h
rename to media/libnbaio/include/SourceAudioBufferProvider.h
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index c0aac16..f6107fd 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -44,6 +44,10 @@
#include <media/stagefright/PersistentSurface.h>
#include <media/stagefright/SurfaceUtils.h>
#include <media/hardware/HardwareAPI.h>
+#include <media/OMXBuffer.h>
+#include <media/omx/1.0/WOmxNode.h>
+
+#include <hidlmemory/mapping.h>
#include <OMX_AudioExt.h>
#include <OMX_VideoExt.h>
@@ -52,11 +56,19 @@
#include <OMX_AsString.h>
#include "include/avc_utils.h"
+#include "include/ACodecBufferChannel.h"
#include "include/DataConverter.h"
+#include "include/SecureBuffer.h"
+#include "include/SharedMemoryBuffer.h"
#include "omx/OMXUtils.h"
+#include <android/hidl/allocator/1.0/IAllocator.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+
namespace android {
+using binder::Status;
+
enum {
kMaxIndicesToCheck = 32, // used when enumerating supported formats and profiles
};
@@ -90,6 +102,21 @@
}
}
+static inline status_t statusFromBinderStatus(const Status &status) {
+ if (status.isOk()) {
+ return OK;
+ }
+ status_t err;
+ if ((err = status.serviceSpecificErrorCode()) != OK) {
+ return err;
+ }
+ if ((err = status.transactionError()) != OK) {
+ return err;
+ }
+ // Other exception
+ return UNKNOWN_ERROR;
+}
+
// checks and converts status_t to a non-side-effect status_t
static inline status_t makeNoSideEffectStatus(status_t err) {
switch (err) {
@@ -136,15 +163,10 @@
}
sp<AMessage> notify = mNotify->dup();
- bool first = true;
sp<MessageList> msgList = new MessageList();
for (std::list<omx_message>::const_iterator it = messages.cbegin();
it != messages.cend(); ++it) {
const omx_message &omx_msg = *it;
- if (first) {
- notify->setInt32("node", omx_msg.node);
- first = false;
- }
sp<AMessage> msg = new AMessage;
msg->setInt32("type", omx_msg.type);
@@ -229,6 +251,7 @@
virtual PortMode getPortMode(OMX_U32 portIndex);
+ virtual void stateExited();
virtual bool onMessageReceived(const sp<AMessage> &msg);
virtual bool onOMXEvent(OMX_EVENTTYPE event, OMX_U32 data1, OMX_U32 data2);
@@ -266,7 +289,9 @@
////////////////////////////////////////////////////////////////////////////////
-struct ACodec::DeathNotifier : public IBinder::DeathRecipient {
+struct ACodec::DeathNotifier :
+ public IBinder::DeathRecipient,
+ public ::android::hardware::hidl_death_recipient {
explicit DeathNotifier(const sp<AMessage> &notify)
: mNotify(notify) {
}
@@ -275,6 +300,12 @@
mNotify->post();
}
+ virtual void serviceDied(
+ uint64_t /* cookie */,
+ const wp<::android::hidl::base::V1_0::IBase>& /* who */) {
+ mNotify->post();
+ }
+
protected:
virtual ~DeathNotifier() {}
@@ -495,8 +526,7 @@
ACodec::ACodec()
: mSampleRate(0),
- mQuirks(0),
- mNode(0),
+ mNodeGeneration(0),
mUsingNativeWindow(false),
mNativeWindowUsageBits(0),
mLastNativeWindowDataSpace(HAL_DATASPACE_UNKNOWN),
@@ -512,20 +542,20 @@
mChannelMaskPresent(false),
mChannelMask(0),
mDequeueCounter(0),
- mInputMetadataType(kMetadataBufferTypeInvalid),
- mOutputMetadataType(kMetadataBufferTypeInvalid),
- mLegacyAdaptiveExperiment(false),
mMetadataBuffersToSubmit(0),
mNumUndequeuedBuffers(0),
mRepeatFrameDelayUs(-1ll),
mMaxPtsGapUs(-1ll),
mMaxFps(-1),
- mTimePerFrameUs(-1ll),
- mTimePerCaptureUs(-1ll),
+ mFps(-1.0),
+ mCaptureFps(-1.0),
mCreateInputBuffersSuspended(false),
+ mLatency(0),
mTunneled(false),
mDescribeColorAspectsIndex((OMX_INDEXTYPE)0),
- mDescribeHDRStaticInfoIndex((OMX_INDEXTYPE)0) {
+ mDescribeHDRStaticInfoIndex((OMX_INDEXTYPE)0),
+ mStateGeneration(0),
+ mVendorExtensionsStatus(kExtensionsUnchecked) {
mUninitializedState = new UninitializedState(this);
mLoadedState = new LoadedState(this);
mLoadedToIdleState = new LoadedToIdleState(this);
@@ -542,24 +572,34 @@
mPortEOS[kPortIndexInput] = mPortEOS[kPortIndexOutput] = false;
mInputEOSResult = OK;
+ mPortMode[kPortIndexInput] = IOMX::kPortModePresetByteBuffer;
+ mPortMode[kPortIndexOutput] = IOMX::kPortModePresetByteBuffer;
+
memset(&mLastNativeWindowCrop, 0, sizeof(mLastNativeWindowCrop));
changeState(mUninitializedState);
+
+ mTrebleFlag = false;
}
ACodec::~ACodec() {
}
-void ACodec::setNotificationMessage(const sp<AMessage> &msg) {
- mNotify = msg;
-}
-
void ACodec::initiateSetup(const sp<AMessage> &msg) {
msg->setWhat(kWhatSetup);
msg->setTarget(this);
msg->post();
}
+std::shared_ptr<BufferChannelBase> ACodec::getBufferChannel() {
+ if (!mBufferChannel) {
+ mBufferChannel = std::make_shared<ACodecBufferChannel>(
+ new AMessage(kWhatInputBufferFilled, this),
+ new AMessage(kWhatOutputBufferDrained, this));
+ }
+ return mBufferChannel;
+}
+
void ACodec::signalSetParameters(const sp<AMessage> &params) {
sp<AMessage> msg = new AMessage(kWhatSetParameters, this);
msg->setMessage("params", params);
@@ -677,8 +717,7 @@
int usageBits = 0;
// no need to reconnect as we will not dequeue all buffers
status_t err = setupNativeWindowSizeFormatAndUsage(
- nativeWindow, &usageBits,
- !storingMetadataInDecodedBuffers() || mLegacyAdaptiveExperiment /* reconnect */);
+ nativeWindow, &usageBits, !storingMetadataInDecodedBuffers());
if (err != OK) {
return err;
}
@@ -728,7 +767,6 @@
const BufferInfo &info = buffers[i];
// skip undequeued buffers for meta data mode
if (storingMetadataInDecodedBuffers()
- && !mLegacyAdaptiveExperiment
&& info.mStatus == BufferInfo::OWNED_BY_NATIVE_WINDOW) {
ALOGV("skipping buffer");
continue;
@@ -745,7 +783,7 @@
}
// cancel undequeued buffers to new surface
- if (!storingMetadataInDecodedBuffers() || mLegacyAdaptiveExperiment) {
+ if (!storingMetadataInDecodedBuffers()) {
for (size_t i = 0; i < buffers.size(); ++i) {
BufferInfo &info = buffers.editItemAt(i);
if (info.mStatus == BufferInfo::OWNED_BY_NATIVE_WINDOW) {
@@ -775,10 +813,29 @@
return OK;
}
+status_t ACodec::setPortMode(int32_t portIndex, IOMX::PortMode mode) {
+ status_t err = mOMXNode->setPortMode(portIndex, mode);
+ if (err != OK) {
+ ALOGE("[%s] setPortMode on %s to %s failed w/ err %d",
+ mComponentName.c_str(),
+ portIndex == kPortIndexInput ? "input" : "output",
+ asString(mode),
+ err);
+ return err;
+ }
+
+ mPortMode[portIndex] = mode;
+ return OK;
+}
+
status_t ACodec::allocateBuffersOnPort(OMX_U32 portIndex) {
CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
- CHECK(mDealer[portIndex] == NULL);
+ if (getTrebleFlag()) {
+ CHECK(mAllocator[portIndex] == NULL);
+ } else {
+ CHECK(mDealer[portIndex] == NULL);
+ }
CHECK(mBuffers[portIndex].isEmpty());
status_t err;
@@ -793,30 +850,21 @@
InitOMXParams(&def);
def.nPortIndex = portIndex;
- err = mOMX->getParameter(
- mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+ err = mOMXNode->getParameter(
+ OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err == OK) {
- MetadataBufferType type =
- portIndex == kPortIndexOutput ? mOutputMetadataType : mInputMetadataType;
+ const IOMX::PortMode &mode = mPortMode[portIndex];
size_t bufSize = def.nBufferSize;
- if (type == kMetadataBufferTypeANWBuffer) {
+ // Always allocate VideoNativeMetadata if using ANWBuffer.
+ // OMX might use gralloc source internally, but we don't share
+ // metadata buffer with OMX, OMX has its own headers.
+ if (mode == IOMX::kPortModeDynamicANWBuffer) {
bufSize = sizeof(VideoNativeMetadata);
- } else if (type == kMetadataBufferTypeNativeHandleSource) {
+ } else if (mode == IOMX::kPortModeDynamicNativeHandle) {
bufSize = sizeof(VideoNativeHandleMetadata);
}
- // If using gralloc or native source input metadata buffers, allocate largest
- // metadata size as we prefer to generate native source metadata, but component
- // may require gralloc source. For camera source, allocate at least enough
- // size for native metadata buffers.
- size_t allottedSize = bufSize;
- if (portIndex == kPortIndexInput && type == kMetadataBufferTypeANWBuffer) {
- bufSize = max(sizeof(VideoGrallocMetadata), sizeof(VideoNativeMetadata));
- } else if (portIndex == kPortIndexInput && type == kMetadataBufferTypeCameraSource) {
- bufSize = max(bufSize, sizeof(VideoNativeMetadata));
- }
-
size_t conversionBufferSize = 0;
sp<DataConverter> converter = mConverter[portIndex];
@@ -831,9 +879,9 @@
size_t alignment = MemoryDealer::getAllocationAlignment();
- ALOGV("[%s] Allocating %u buffers of size %zu/%zu (from %u using %s) on %s port",
+ ALOGV("[%s] Allocating %u buffers of size %zu (from %u using %s) on %s port",
mComponentName.c_str(),
- def.nBufferCountActual, bufSize, allottedSize, def.nBufferSize, asString(type),
+ def.nBufferCountActual, bufSize, def.nBufferSize, asString(mode),
portIndex == kPortIndexInput ? "input" : "output");
// verify buffer sizes to avoid overflow in align()
@@ -850,74 +898,127 @@
return NO_MEMORY;
}
- size_t totalSize = def.nBufferCountActual * (alignedSize + alignedConvSize);
- mDealer[portIndex] = new MemoryDealer(totalSize, "ACodec");
-
- for (OMX_U32 i = 0; i < def.nBufferCountActual && err == OK; ++i) {
- sp<IMemory> mem = mDealer[portIndex]->allocate(bufSize);
- if (mem == NULL || mem->pointer() == NULL) {
- return NO_MEMORY;
+ if (mode != IOMX::kPortModePresetSecureBuffer) {
+ if (getTrebleFlag()) {
+ mAllocator[portIndex] = TAllocator::getService("ashmem");
+ if (mAllocator[portIndex] == nullptr) {
+ ALOGE("hidl allocator on port %d is null",
+ (int)portIndex);
+ return NO_MEMORY;
+ }
+ } else {
+ size_t totalSize = def.nBufferCountActual *
+ (alignedSize + alignedConvSize);
+ mDealer[portIndex] = new MemoryDealer(totalSize, "ACodec");
}
+ }
+
+ const sp<AMessage> &format =
+ portIndex == kPortIndexInput ? mInputFormat : mOutputFormat;
+ for (OMX_U32 i = 0; i < def.nBufferCountActual && err == OK; ++i) {
+ hidl_memory hidlMemToken;
+ sp<TMemory> hidlMem;
+ sp<IMemory> mem;
BufferInfo info;
info.mStatus = BufferInfo::OWNED_BY_US;
info.mFenceFd = -1;
info.mRenderInfo = NULL;
- info.mNativeHandle = NULL;
+ info.mGraphicBuffer = NULL;
+ info.mNewGraphicBuffer = false;
- uint32_t requiresAllocateBufferBit =
- (portIndex == kPortIndexInput)
- ? kRequiresAllocateBufferOnInputPorts
- : kRequiresAllocateBufferOnOutputPorts;
-
- if (portIndex == kPortIndexInput && (mFlags & kFlagIsSecure)) {
- mem.clear();
-
+ if (mode == IOMX::kPortModePresetSecureBuffer) {
void *ptr = NULL;
sp<NativeHandle> native_handle;
- err = mOMX->allocateSecureBuffer(
- mNode, portIndex, bufSize, &info.mBufferID,
+ err = mOMXNode->allocateSecureBuffer(
+ portIndex, bufSize, &info.mBufferID,
&ptr, &native_handle);
- // TRICKY: this representation is unorthodox, but ACodec requires
- // an ABuffer with a proper size to validate range offsets and lengths.
- // Since mData is never referenced for secure input, it is used to store
- // either the pointer to the secure buffer, or the opaque handle as on
- // some devices ptr is actually an opaque handle, not a pointer.
-
- // TRICKY2: use native handle as the base of the ABuffer if received one,
- // because Widevine source only receives these base addresses.
- const native_handle_t *native_handle_ptr =
- native_handle == NULL ? NULL : native_handle->handle();
- info.mData = new ABuffer(
- ptr != NULL ? ptr : (void *)native_handle_ptr, bufSize);
- info.mNativeHandle = native_handle;
+ info.mData = (native_handle == NULL)
+ ? new SecureBuffer(format, ptr, bufSize)
+ : new SecureBuffer(format, native_handle, bufSize);
info.mCodecData = info.mData;
- } else if (mQuirks & requiresAllocateBufferBit) {
- err = mOMX->allocateBufferWithBackup(
- mNode, portIndex, mem, &info.mBufferID, allottedSize);
} else {
- err = mOMX->useBuffer(mNode, portIndex, mem, &info.mBufferID, allottedSize);
- }
+ if (getTrebleFlag()) {
+ bool success;
+ auto transStatus = mAllocator[portIndex]->allocate(
+ bufSize,
+ [&success, &hidlMemToken](
+ bool s,
+ hidl_memory const& m) {
+ success = s;
+ hidlMemToken = m;
+ });
- if (mem != NULL) {
- info.mCodecData = new ABuffer(mem->pointer(), bufSize);
- info.mCodecRef = mem;
+ if (!transStatus.isOk()) {
+ ALOGE("hidl's AshmemAllocator failed at the "
+ "transport: %s",
+ transStatus.description().c_str());
+ return NO_MEMORY;
+ }
+ if (!success) {
+ return NO_MEMORY;
+ }
+ hidlMem = mapMemory(hidlMemToken);
- if (type == kMetadataBufferTypeANWBuffer) {
- ((VideoNativeMetadata *)mem->pointer())->nFenceFd = -1;
+ err = mOMXNode->useBuffer(
+ portIndex, hidlMemToken, &info.mBufferID);
+ } else {
+ mem = mDealer[portIndex]->allocate(bufSize);
+ if (mem == NULL || mem->pointer() == NULL) {
+ return NO_MEMORY;
+ }
+
+ err = mOMXNode->useBuffer(
+ portIndex, mem, &info.mBufferID);
+ }
+
+ if (mode == IOMX::kPortModeDynamicANWBuffer) {
+ VideoNativeMetadata* metaData = (VideoNativeMetadata*)(
+ getTrebleFlag() ?
+ (void*)hidlMem->getPointer() : mem->pointer());
+ metaData->nFenceFd = -1;
+ }
+
+ if (getTrebleFlag()) {
+ info.mCodecData = new SharedMemoryBuffer(
+ format, hidlMem);
+ info.mCodecRef = hidlMem;
+ } else {
+ info.mCodecData = new SharedMemoryBuffer(
+ format, mem);
+ info.mCodecRef = mem;
}
// if we require conversion, allocate conversion buffer for client use;
// otherwise, reuse codec buffer
if (mConverter[portIndex] != NULL) {
CHECK_GT(conversionBufferSize, (size_t)0);
- mem = mDealer[portIndex]->allocate(conversionBufferSize);
- if (mem == NULL|| mem->pointer() == NULL) {
- return NO_MEMORY;
+ if (getTrebleFlag()) {
+ bool success;
+ mAllocator[portIndex]->allocate(
+ conversionBufferSize,
+ [&success, &hidlMemToken](
+ bool s,
+ hidl_memory const& m) {
+ success = s;
+ hidlMemToken = m;
+ });
+ if (!success) {
+ return NO_MEMORY;
+ }
+ hidlMem = mapMemory(hidlMemToken);
+ info.mData = new SharedMemoryBuffer(format, hidlMem);
+ info.mMemRef = hidlMem;
+ } else {
+ mem = mDealer[portIndex]->allocate(
+ conversionBufferSize);
+ if (mem == NULL|| mem->pointer() == NULL) {
+ return NO_MEMORY;
+ }
+ info.mData = new SharedMemoryBuffer(format, mem);
+ info.mMemRef = mem;
}
- info.mData = new ABuffer(mem->pointer(), conversionBufferSize);
- info.mMemRef = mem;
} else {
info.mData = info.mCodecData;
info.mMemRef = info.mCodecRef;
@@ -933,20 +1034,17 @@
return err;
}
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatBuffersAllocated);
-
- notify->setInt32("portIndex", portIndex);
-
- sp<PortDescription> desc = new PortDescription;
-
+ std::vector<ACodecBufferChannel::BufferAndId> array(mBuffers[portIndex].size());
for (size_t i = 0; i < mBuffers[portIndex].size(); ++i) {
- const BufferInfo &info = mBuffers[portIndex][i];
- desc->addBuffer(info.mBufferID, info.mData, info.mNativeHandle, info.mMemRef);
+ array[i] = {mBuffers[portIndex][i].mData, mBuffers[portIndex][i].mBufferID};
}
-
- notify->setObject("portDesc", desc);
- notify->post();
+ if (portIndex == kPortIndexInput) {
+ mBufferChannel->setInputBufferArray(array);
+ } else if (portIndex == kPortIndexOutput) {
+ mBufferChannel->setOutputBufferArray(array);
+ } else {
+ TRESPASS();
+ }
return OK;
}
@@ -958,15 +1056,43 @@
InitOMXParams(&def);
def.nPortIndex = kPortIndexOutput;
- status_t err = mOMX->getParameter(
- mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+ status_t err = mOMXNode->getParameter(
+ OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err != OK) {
return err;
}
+ OMX_INDEXTYPE index;
+ err = mOMXNode->getExtensionIndex(
+ "OMX.google.android.index.AndroidNativeBufferConsumerUsage",
+ &index);
+
+ if (err != OK) {
+ // allow failure
+ err = OK;
+ } else {
+ int usageBits = 0;
+ if (nativeWindow->query(
+ nativeWindow,
+ NATIVE_WINDOW_CONSUMER_USAGE_BITS,
+ &usageBits) == OK) {
+ OMX_PARAM_U32TYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = kPortIndexOutput;
+ params.nU32 = (OMX_U32)usageBits;
+
+ err = mOMXNode->setParameter(index, &params, sizeof(params));
+
+ if (err != OK) {
+ ALOGE("Fail to set AndroidNativeBufferConsumerUsage: %d", err);
+ return err;
+ }
+ }
+ }
+
OMX_U32 usage = 0;
- err = mOMX->getGraphicBufferUsage(mNode, kPortIndexOutput, &usage);
+ err = mOMXNode->getGraphicBufferUsage(kPortIndexOutput, &usage);
if (err != 0) {
ALOGW("querying usage flags from OMX IL component failed: %d", err);
// XXX: Currently this error is logged, but not fatal.
@@ -1003,12 +1129,13 @@
InitOMXParams(&def);
def.nPortIndex = kPortIndexOutput;
- status_t err = mOMX->getParameter(
- mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+ status_t err = mOMXNode->getParameter(
+ OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err == OK) {
err = setupNativeWindowSizeFormatAndUsage(
- mNativeWindow.get(), &mNativeWindowUsageBits, preregister /* reconnect */);
+ mNativeWindow.get(), &mNativeWindowUsageBits,
+ preregister && !mTunneled /* reconnect */);
}
if (err != OK) {
mNativeWindowUsageBits = 0;
@@ -1021,8 +1148,8 @@
if (mTunneled) {
ALOGV("Tunneled Playback: skipping native window buffer allocation.");
def.nBufferCountActual = 0;
- err = mOMX->setParameter(
- mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+ err = mOMXNode->setParameter(
+ OMX_IndexParamPortDefinition, &def, sizeof(def));
*minUndequeuedBuffers = 0;
*bufferCount = 0;
@@ -1057,8 +1184,8 @@
OMX_U32 newBufferCount =
def.nBufferCountMin + *minUndequeuedBuffers + extraBuffers;
def.nBufferCountActual = newBufferCount;
- err = mOMX->setParameter(
- mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+ err = mOMXNode->setParameter(
+ OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err == OK) {
*minUndequeuedBuffers += extraBuffers;
@@ -1088,6 +1215,10 @@
}
status_t ACodec::allocateOutputBuffersFromNativeWindow() {
+ // This method only handles the non-metadata mode (or simulating legacy
+ // mode with metadata, which is transparent to ACodec).
+ CHECK(!storingMetadataInDecodedBuffers());
+
OMX_U32 bufferCount, bufferSize, minUndequeuedBuffers;
status_t err = configureOutputBuffersFromNativeWindow(
&bufferCount, &bufferSize, &minUndequeuedBuffers, true /* preregister */);
@@ -1095,10 +1226,8 @@
return err;
mNumUndequeuedBuffers = minUndequeuedBuffers;
- if (!storingMetadataInDecodedBuffers()) {
- static_cast<Surface*>(mNativeWindow.get())
- ->getIGraphicBufferProducer()->allowAllocation(true);
- }
+ static_cast<Surface*>(mNativeWindow.get())
+ ->getIGraphicBufferProducer()->allowAllocation(true);
ALOGV("[%s] Allocating %u buffers from a native window of size %u on "
"output port",
@@ -1114,20 +1243,25 @@
break;
}
- sp<GraphicBuffer> graphicBuffer(new GraphicBuffer(buf, false));
+ sp<GraphicBuffer> graphicBuffer(GraphicBuffer::from(buf));
BufferInfo info;
info.mStatus = BufferInfo::OWNED_BY_US;
info.mFenceFd = fenceFd;
info.mIsReadFence = false;
info.mRenderInfo = NULL;
- info.mData = new ABuffer(NULL /* data */, bufferSize /* capacity */);
- info.mCodecData = info.mData;
info.mGraphicBuffer = graphicBuffer;
+ info.mNewGraphicBuffer = false;
+
+ // TODO: We shouldn't need to create MediaCodecBuffer. In metadata mode
+ // OMX doesn't use the shared memory buffer, but some code still
+ // access info.mData. Create an ABuffer as a placeholder.
+ info.mData = new MediaCodecBuffer(mOutputFormat, new ABuffer(bufferSize));
+ info.mCodecData = info.mData;
+
mBuffers[kPortIndexOutput].push(info);
IOMX::buffer_id bufferId;
- err = mOMX->useGraphicBuffer(mNode, kPortIndexOutput, graphicBuffer,
- &bufferId);
+ err = mOMXNode->useBuffer(kPortIndexOutput, graphicBuffer, &bufferId);
if (err != 0) {
ALOGE("registering GraphicBuffer %u with OMX IL component failed: "
"%d", i, err);
@@ -1144,9 +1278,9 @@
OMX_U32 cancelStart;
OMX_U32 cancelEnd;
- if (err != 0) {
+ if (err != OK) {
// If an error occurred while dequeuing we need to cancel any buffers
- // that were dequeued.
+ // that were dequeued. Also cancel all if we're in legacy metadata mode.
cancelStart = 0;
cancelEnd = mBuffers[kPortIndexOutput].size();
} else {
@@ -1165,102 +1299,47 @@
}
}
- if (!storingMetadataInDecodedBuffers()) {
- static_cast<Surface*>(mNativeWindow.get())
- ->getIGraphicBufferProducer()->allowAllocation(false);
- }
+ static_cast<Surface*>(mNativeWindow.get())
+ ->getIGraphicBufferProducer()->allowAllocation(false);
return err;
}
status_t ACodec::allocateOutputMetadataBuffers() {
+ CHECK(storingMetadataInDecodedBuffers());
+
OMX_U32 bufferCount, bufferSize, minUndequeuedBuffers;
status_t err = configureOutputBuffersFromNativeWindow(
&bufferCount, &bufferSize, &minUndequeuedBuffers,
- mLegacyAdaptiveExperiment /* preregister */);
- if (err != 0)
+ false /* preregister */);
+ if (err != OK)
return err;
mNumUndequeuedBuffers = minUndequeuedBuffers;
ALOGV("[%s] Allocating %u meta buffers on output port",
mComponentName.c_str(), bufferCount);
- size_t bufSize = mOutputMetadataType == kMetadataBufferTypeANWBuffer ?
- sizeof(struct VideoNativeMetadata) : sizeof(struct VideoGrallocMetadata);
- size_t totalSize = bufferCount * align(bufSize, MemoryDealer::getAllocationAlignment());
- mDealer[kPortIndexOutput] = new MemoryDealer(totalSize, "ACodec");
-
- // Dequeue buffers and send them to OMX
for (OMX_U32 i = 0; i < bufferCount; i++) {
BufferInfo info;
info.mStatus = BufferInfo::OWNED_BY_NATIVE_WINDOW;
info.mFenceFd = -1;
info.mRenderInfo = NULL;
info.mGraphicBuffer = NULL;
+ info.mNewGraphicBuffer = false;
info.mDequeuedAt = mDequeueCounter;
- sp<IMemory> mem = mDealer[kPortIndexOutput]->allocate(bufSize);
- if (mem == NULL || mem->pointer() == NULL) {
- return NO_MEMORY;
- }
- if (mOutputMetadataType == kMetadataBufferTypeANWBuffer) {
- ((VideoNativeMetadata *)mem->pointer())->nFenceFd = -1;
- }
- info.mData = new ABuffer(mem->pointer(), mem->size());
- info.mMemRef = mem;
- info.mCodecData = info.mData;
- info.mCodecRef = mem;
+ info.mData = new MediaCodecBuffer(mOutputFormat, new ABuffer(bufferSize));
- // we use useBuffer for metadata regardless of quirks
- err = mOMX->useBuffer(
- mNode, kPortIndexOutput, mem, &info.mBufferID, mem->size());
+ // Initialize fence fd to -1 to avoid warning in freeBuffer().
+ ((VideoNativeMetadata *)info.mData->base())->nFenceFd = -1;
+
+ info.mCodecData = info.mData;
+
+ err = mOMXNode->useBuffer(kPortIndexOutput, OMXBuffer::sPreset, &info.mBufferID);
mBuffers[kPortIndexOutput].push(info);
- ALOGV("[%s] allocated meta buffer with ID %u (pointer = %p)",
- mComponentName.c_str(), info.mBufferID, mem->pointer());
- }
-
- if (mLegacyAdaptiveExperiment) {
- // preallocate and preregister buffers
- static_cast<Surface *>(mNativeWindow.get())
- ->getIGraphicBufferProducer()->allowAllocation(true);
-
- ALOGV("[%s] Allocating %u buffers from a native window of size %u on "
- "output port",
- mComponentName.c_str(), bufferCount, bufferSize);
-
- // Dequeue buffers then cancel them all
- for (OMX_U32 i = 0; i < bufferCount; i++) {
- BufferInfo *info = &mBuffers[kPortIndexOutput].editItemAt(i);
-
- ANativeWindowBuffer *buf;
- int fenceFd;
- err = mNativeWindow->dequeueBuffer(mNativeWindow.get(), &buf, &fenceFd);
- if (err != 0) {
- ALOGE("dequeueBuffer failed: %s (%d)", strerror(-err), -err);
- break;
- }
-
- sp<GraphicBuffer> graphicBuffer(new GraphicBuffer(buf, false));
- mOMX->updateGraphicBufferInMeta(
- mNode, kPortIndexOutput, graphicBuffer, info->mBufferID);
- info->mStatus = BufferInfo::OWNED_BY_US;
- info->setWriteFence(fenceFd, "allocateOutputMetadataBuffers for legacy");
- info->mGraphicBuffer = graphicBuffer;
- }
-
- for (OMX_U32 i = 0; i < mBuffers[kPortIndexOutput].size(); i++) {
- BufferInfo *info = &mBuffers[kPortIndexOutput].editItemAt(i);
- if (info->mStatus == BufferInfo::OWNED_BY_US) {
- status_t error = cancelBufferToNativeWindow(info);
- if (err == OK) {
- err = error;
- }
- }
- }
-
- static_cast<Surface*>(mNativeWindow.get())
- ->getIGraphicBufferProducer()->allowAllocation(false);
+ ALOGV("[%s] allocated meta buffer with ID %u",
+ mComponentName.c_str(), info.mBufferID);
}
mMetadataBuffersToSubmit = bufferCount - minUndequeuedBuffers;
@@ -1278,17 +1357,11 @@
}
ALOGV("[%s] submitting output meta buffer ID %u for graphic buffer %p",
- mComponentName.c_str(), info->mBufferID, info->mGraphicBuffer.get());
+ mComponentName.c_str(), info->mBufferID, info->mGraphicBuffer->handle);
--mMetadataBuffersToSubmit;
info->checkWriteFence("submitOutputMetadataBuffer");
- status_t err = mOMX->fillBuffer(mNode, info->mBufferID, info->mFenceFd);
- info->mFenceFd = -1;
- if (err == OK) {
- info->mStatus = BufferInfo::OWNED_BY_COMPONENT;
- }
-
- return err;
+ return fillBuffer(info);
}
status_t ACodec::waitForFence(int fd, const char *dbg ) {
@@ -1364,8 +1437,6 @@
}
void ACodec::notifyOfRenderedFrames(bool dropIncomplete, FrameRenderTracker::Info *until) {
- sp<AMessage> msg = mNotify->dup();
- msg->setInt32("what", CodecBase::kWhatOutputFramesRendered);
std::list<FrameRenderTracker::Info> done =
mRenderTracker.checkFencesAndGetRenderedFrames(until, dropIncomplete);
@@ -1381,9 +1452,7 @@
}
}
- if (MediaCodec::CreateFramesRenderedMessage(done, msg)) {
- msg->post();
- }
+ mCallback->onOutputFramesRendered(done);
}
ACodec::BufferInfo *ACodec::dequeueBufferFromNativeWindow() {
@@ -1429,7 +1498,11 @@
break;
}
- ALOGV("dequeued buffer %p", info->mGraphicBuffer->getNativeBuffer());
+ ALOGV("dequeued buffer #%u with age %u, graphicBuffer %p",
+ (unsigned)(info - &mBuffers[kPortIndexOutput][0]),
+ mDequeueCounter - info->mDequeuedAt,
+ info->mGraphicBuffer->handle);
+
info->mStatus = BufferInfo::OWNED_BY_US;
info->setWriteFence(fenceFd, "dequeueBufferFromNativeWindow");
updateRenderInfoForDequeuedBuffer(buf, fenceFd, info);
@@ -1442,7 +1515,7 @@
// same is possible in meta mode, in which case, it will be treated
// as a normal buffer, which is not desirable.
// TODO: fix this.
- if (!stale && (!storingMetadataInDecodedBuffers() || mLegacyAdaptiveExperiment)) {
+ if (!stale && !storingMetadataInDecodedBuffers()) {
ALOGI("dequeued unrecognized (stale) buffer %p. discarding", buf);
stale = true;
}
@@ -1474,39 +1547,29 @@
CHECK(storingMetadataInDecodedBuffers());
// discard buffer in LRU info and replace with new buffer
- oldest->mGraphicBuffer = new GraphicBuffer(buf, false);
+ oldest->mGraphicBuffer = GraphicBuffer::from(buf);
+ oldest->mNewGraphicBuffer = true;
oldest->mStatus = BufferInfo::OWNED_BY_US;
oldest->setWriteFence(fenceFd, "dequeueBufferFromNativeWindow for oldest");
mRenderTracker.untrackFrame(oldest->mRenderInfo);
oldest->mRenderInfo = NULL;
- mOMX->updateGraphicBufferInMeta(
- mNode, kPortIndexOutput, oldest->mGraphicBuffer,
- oldest->mBufferID);
-
- if (mOutputMetadataType == kMetadataBufferTypeGrallocSource) {
- VideoGrallocMetadata *grallocMeta =
- reinterpret_cast<VideoGrallocMetadata *>(oldest->mData->base());
- ALOGV("replaced oldest buffer #%u with age %u (%p/%p stored in %p)",
- (unsigned)(oldest - &mBuffers[kPortIndexOutput][0]),
- mDequeueCounter - oldest->mDequeuedAt,
- (void *)(uintptr_t)grallocMeta->pHandle,
- oldest->mGraphicBuffer->handle, oldest->mData->base());
- } else if (mOutputMetadataType == kMetadataBufferTypeANWBuffer) {
- VideoNativeMetadata *nativeMeta =
- reinterpret_cast<VideoNativeMetadata *>(oldest->mData->base());
- ALOGV("replaced oldest buffer #%u with age %u (%p/%p stored in %p)",
- (unsigned)(oldest - &mBuffers[kPortIndexOutput][0]),
- mDequeueCounter - oldest->mDequeuedAt,
- (void *)(uintptr_t)nativeMeta->pBuffer,
- oldest->mGraphicBuffer->getNativeBuffer(), oldest->mData->base());
- }
+ ALOGV("replaced oldest buffer #%u with age %u, graphicBuffer %p",
+ (unsigned)(oldest - &mBuffers[kPortIndexOutput][0]),
+ mDequeueCounter - oldest->mDequeuedAt,
+ oldest->mGraphicBuffer->handle);
updateRenderInfoForDequeuedBuffer(buf, fenceFd, oldest);
return oldest;
}
status_t ACodec::freeBuffersOnPort(OMX_U32 portIndex) {
+ if (portIndex == kPortIndexInput) {
+ mBufferChannel->setInputBufferArray({});
+ } else {
+ mBufferChannel->setOutputBufferArray({});
+ }
+
status_t err = OK;
for (size_t i = mBuffers[portIndex].size(); i > 0;) {
i--;
@@ -1516,8 +1579,11 @@
}
}
- // clear mDealer even on an error
- mDealer[portIndex].clear();
+ if (getTrebleFlag()) {
+ mAllocator[portIndex].clear();
+ } else {
+ mDealer[portIndex].clear();
+ }
return err;
}
@@ -1547,11 +1613,9 @@
status_t err = OK;
// there should not be any fences in the metadata
- MetadataBufferType type =
- portIndex == kPortIndexOutput ? mOutputMetadataType : mInputMetadataType;
- if (type == kMetadataBufferTypeANWBuffer && info->mData != NULL
- && info->mData->size() >= sizeof(VideoNativeMetadata)) {
- int fenceFd = ((VideoNativeMetadata *)info->mData->data())->nFenceFd;
+ if (mPortMode[portIndex] == IOMX::kPortModeDynamicANWBuffer && info->mCodecData != NULL
+ && info->mCodecData->size() >= sizeof(VideoNativeMetadata)) {
+ int fenceFd = ((VideoNativeMetadata *)info->mCodecData->base())->nFenceFd;
if (fenceFd >= 0) {
ALOGW("unreleased fence (%d) in %s metadata buffer %zu",
fenceFd, portIndex == kPortIndexInput ? "input" : "output", i);
@@ -1566,7 +1630,7 @@
// fall through
case BufferInfo::OWNED_BY_NATIVE_WINDOW:
- err = mOMX->freeBuffer(mNode, portIndex, info->mBufferID);
+ err = mOMXNode->freeBuffer(portIndex, info->mBufferID);
break;
default:
@@ -1584,7 +1648,7 @@
info->mRenderInfo = NULL;
}
- // remove buffer even if mOMX->freeBuffer fails
+ // remove buffer even if mOMXNode->freeBuffer fails
mBuffers[portIndex].removeAt(i);
return err;
}
@@ -1606,13 +1670,37 @@
return NULL;
}
+status_t ACodec::fillBuffer(BufferInfo *info) {
+ status_t err;
+ // Even in dynamic ANW buffer mode, if the graphic buffer is not changing,
+ // send sPreset instead of the same graphic buffer, so that OMX server
+ // side doesn't update the meta. In theory it should make no difference,
+ // however when the same buffer is parcelled again, a new handle could be
+ // created on server side, and some decoder doesn't recognize the handle
+ // even if it's the same buffer.
+ if (!storingMetadataInDecodedBuffers() || !info->mNewGraphicBuffer) {
+ err = mOMXNode->fillBuffer(
+ info->mBufferID, OMXBuffer::sPreset, info->mFenceFd);
+ } else {
+ err = mOMXNode->fillBuffer(
+ info->mBufferID, info->mGraphicBuffer, info->mFenceFd);
+ }
+
+ info->mNewGraphicBuffer = false;
+ info->mFenceFd = -1;
+ if (err == OK) {
+ info->mStatus = BufferInfo::OWNED_BY_COMPONENT;
+ }
+ return err;
+}
+
status_t ACodec::setComponentRole(
bool isEncoder, const char *mime) {
const char *role = GetComponentRole(isEncoder, mime);
if (role == NULL) {
return BAD_VALUE;
}
- status_t err = SetComponentRole(mOMX, mNode, role);
+ status_t err = SetComponentRole(mOMXNode, role);
if (err != OK) {
ALOGW("[%s] Failed to set standard component role '%s'.",
mComponentName.c_str(), role);
@@ -1633,8 +1721,8 @@
mIsEncoder = encoder;
- mInputMetadataType = kMetadataBufferTypeInvalid;
- mOutputMetadataType = kMetadataBufferTypeInvalid;
+ mPortMode[kPortIndexInput] = IOMX::kPortModePresetByteBuffer;
+ mPortMode[kPortIndexOutput] = IOMX::kPortModePresetByteBuffer;
status_t err = setComponentRole(encoder /* isEncoder */, mime);
@@ -1661,23 +1749,23 @@
if (encoder
&& msg->findInt32("android._input-metadata-buffer-type", &storeMeta)
&& storeMeta != kMetadataBufferTypeInvalid) {
- mInputMetadataType = (MetadataBufferType)storeMeta;
- err = mOMX->storeMetaDataInBuffers(
- mNode, kPortIndexInput, OMX_TRUE, &mInputMetadataType);
+ IOMX::PortMode mode;
+ if (storeMeta == kMetadataBufferTypeNativeHandleSource) {
+ mode = IOMX::kPortModeDynamicNativeHandle;
+ } else if (storeMeta == kMetadataBufferTypeANWBuffer ||
+ storeMeta == kMetadataBufferTypeGrallocSource) {
+ mode = IOMX::kPortModeDynamicANWBuffer;
+ } else {
+ return BAD_VALUE;
+ }
+ err = setPortMode(kPortIndexInput, mode);
if (err != OK) {
- ALOGE("[%s] storeMetaDataInBuffers (input) failed w/ err %d",
- mComponentName.c_str(), err);
-
return err;
- } else if (storeMeta == kMetadataBufferTypeANWBuffer
- && mInputMetadataType == kMetadataBufferTypeGrallocSource) {
- // IOMX translates ANWBuffers to gralloc source already.
- mInputMetadataType = (MetadataBufferType)storeMeta;
}
uint32_t usageBits;
- if (mOMX->getParameter(
- mNode, (OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits,
+ if (mOMXNode->getParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits,
&usageBits, sizeof(usageBits)) == OK) {
inputFormat->setInt32(
"using-sw-read-often", !!(usageBits & GRALLOC_USAGE_SW_READ_OFTEN));
@@ -1689,18 +1777,15 @@
&& msg->findInt32("prepend-sps-pps-to-idr-frames", &prependSPSPPS)
&& prependSPSPPS != 0) {
OMX_INDEXTYPE index;
- err = mOMX->getExtensionIndex(
- mNode,
- "OMX.google.android.index.prependSPSPPSToIDRFrames",
- &index);
+ err = mOMXNode->getExtensionIndex(
+ "OMX.google.android.index.prependSPSPPSToIDRFrames", &index);
if (err == OK) {
PrependSPSPPSToIDRFramesParams params;
InitOMXParams(¶ms);
params.bEnable = OMX_TRUE;
- err = mOMX->setParameter(
- mNode, index, ¶ms, sizeof(params));
+ err = mOMXNode->setParameter(index, ¶ms, sizeof(params));
}
if (err != OK) {
@@ -1720,12 +1805,14 @@
OMX_BOOL enable = (OMX_BOOL) (prependSPSPPS
&& msg->findInt32("android._store-metadata-in-buffers-output", &storeMeta)
&& storeMeta != 0);
+ if (mFlags & kFlagIsSecure) {
+ enable = OMX_TRUE;
+ }
- mOutputMetadataType = kMetadataBufferTypeNativeHandleSource;
- err = mOMX->storeMetaDataInBuffers(mNode, kPortIndexOutput, enable, &mOutputMetadataType);
+ err = setPortMode(kPortIndexOutput, enable ?
+ IOMX::kPortModePresetSecureBuffer : IOMX::kPortModePresetByteBuffer);
if (err != OK) {
- ALOGE("[%s] storeMetaDataInBuffers (output) failed w/ err %d",
- mComponentName.c_str(), err);
+ return err;
}
if (!msg->findInt64(
@@ -1734,16 +1821,20 @@
mRepeatFrameDelayUs = -1ll;
}
+ // only allow 32-bit value, since we pass it as U32 to OMX.
if (!msg->findInt64("max-pts-gap-to-encoder", &mMaxPtsGapUs)) {
mMaxPtsGapUs = -1ll;
+ } else if (mMaxPtsGapUs > INT32_MAX || mMaxPtsGapUs < 0) {
+ ALOGW("Unsupported value for max pts gap %lld", (long long) mMaxPtsGapUs);
+ mMaxPtsGapUs = -1ll;
}
if (!msg->findFloat("max-fps-to-encoder", &mMaxFps)) {
mMaxFps = -1;
}
- if (!msg->findInt64("time-lapse", &mTimePerCaptureUs)) {
- mTimePerCaptureUs = -1ll;
+ if (!msg->findDouble("time-lapse-fps", &mCaptureFps)) {
+ mCaptureFps = -1.0;
}
if (!msg->findInt32(
@@ -1758,7 +1849,6 @@
bool haveNativeWindow = msg->findObject("native-window", &obj)
&& obj != NULL && video && !encoder;
mUsingNativeWindow = haveNativeWindow;
- mLegacyAdaptiveExperiment = false;
if (video && !encoder) {
inputFormat->setInt32("adaptive-playback", false);
@@ -1774,10 +1864,13 @@
if (mFlags & kFlagIsSecure) {
// use native_handles for secure input buffers
- err = mOMX->enableNativeBuffers(
- mNode, kPortIndexInput, OMX_FALSE /* graphic */, OMX_TRUE);
- ALOGI_IF(err != OK, "falling back to non-native_handles");
- err = OK; // ignore error for now
+ err = setPortMode(kPortIndexInput, IOMX::kPortModePresetSecureBuffer);
+
+ if (err != OK) {
+ ALOGI("falling back to non-native_handles");
+ setPortMode(kPortIndexInput, IOMX::kPortModePresetByteBuffer);
+ err = OK; // ignore error for now
+ }
}
}
if (haveNativeWindow) {
@@ -1791,8 +1884,8 @@
OMX_CONFIG_BOOLEANTYPE config;
InitOMXParams(&config);
config.bEnabled = (OMX_BOOL)enabled;
- status_t temp = mOMX->setConfig(
- mNode, (OMX_INDEXTYPE)OMX_IndexConfigAutoFramerateConversion,
+ status_t temp = mOMXNode->setConfig(
+ (OMX_INDEXTYPE)OMX_IndexConfigAutoFramerateConversion,
&config, sizeof(config));
if (temp == OK) {
outputFormat->setInt32("auto-frc", enabled);
@@ -1823,8 +1916,8 @@
if (msg->findInt32("max-width", &maxWidth) &&
msg->findInt32("max-height", &maxHeight)) {
- err = mOMX->prepareForAdaptivePlayback(
- mNode, kPortIndexOutput, OMX_TRUE, maxWidth, maxHeight);
+ err = mOMXNode->prepareForAdaptivePlayback(
+ kPortIndexOutput, OMX_TRUE, maxWidth, maxHeight);
if (err != OK) {
ALOGW("[%s] prepareForAdaptivePlayback failed w/ err %d",
mComponentName.c_str(), err);
@@ -1849,14 +1942,8 @@
return err;
}
- // Always try to enable dynamic output buffers on native surface
- mOutputMetadataType = kMetadataBufferTypeANWBuffer;
- err = mOMX->storeMetaDataInBuffers(
- mNode, kPortIndexOutput, OMX_TRUE, &mOutputMetadataType);
+ err = setPortMode(kPortIndexOutput, IOMX::kPortModeDynamicANWBuffer);
if (err != OK) {
- ALOGE("[%s] storeMetaDataInBuffers failed w/ err %d",
- mComponentName.c_str(), err);
-
// if adaptive playback has been requested, try JB fallback
// NOTE: THIS FALLBACK MECHANISM WILL BE REMOVED DUE TO ITS
// LARGE MEMORY REQUIREMENT
@@ -1886,9 +1973,8 @@
ALOGV("[%s] prepareForAdaptivePlayback(%dx%d)",
mComponentName.c_str(), maxWidth, maxHeight);
- err = mOMX->prepareForAdaptivePlayback(
- mNode, kPortIndexOutput, OMX_TRUE, maxWidth,
- maxHeight);
+ err = mOMXNode->prepareForAdaptivePlayback(
+ kPortIndexOutput, OMX_TRUE, maxWidth, maxHeight);
ALOGW_IF(err != OK,
"[%s] prepareForAdaptivePlayback failed w/ err %d",
mComponentName.c_str(), err);
@@ -1902,12 +1988,9 @@
// allow failure
err = OK;
} else {
- ALOGV("[%s] storeMetaDataInBuffers succeeded",
- mComponentName.c_str());
+ ALOGV("[%s] setPortMode on output to %s succeeded",
+ mComponentName.c_str(), asString(IOMX::kPortModeDynamicANWBuffer));
CHECK(storingMetadataInDecodedBuffers());
- mLegacyAdaptiveExperiment = ADebug::isExperimentEnabled(
- "legacy-adaptive", !msg->contains("no-experiments"));
-
inputFormat->setInt32("adaptive-playback", true);
}
@@ -1936,6 +2019,12 @@
if (haveNativeWindow && mComponentName.startsWith("OMX.google.")) {
usingSwRenderer = true;
haveNativeWindow = false;
+ (void)setPortMode(kPortIndexOutput, IOMX::kPortModePresetByteBuffer);
+ } else if (haveNativeWindow && !storingMetadataInDecodedBuffers()) {
+ err = setPortMode(kPortIndexOutput, IOMX::kPortModePresetANWBuffer);
+ if (err != OK) {
+ return err;
+ }
}
if (encoder) {
@@ -1950,17 +2039,8 @@
if (haveNativeWindow) {
mNativeWindow = static_cast<Surface *>(obj.get());
- }
- // initialize native window now to get actual output format
- // TODO: this is needed for some encoders even though they don't use native window
- err = initNativeWindow();
- if (err != OK) {
- return err;
- }
-
- // fallback for devices that do not handle flex-YUV for native buffers
- if (haveNativeWindow) {
+ // fallback for devices that do not handle flex-YUV for native buffers
int32_t requestedColorFormat = OMX_COLOR_FormatUnused;
if (msg->findInt32("color-format", &requestedColorFormat) &&
requestedColorFormat == OMX_COLOR_FormatYUV420Flexible) {
@@ -1977,7 +2057,7 @@
ALOGD("[%s] Requested output format %#x and got %#x.",
mComponentName.c_str(), requestedColorFormat, colorFormat);
if (!IsFlexibleColorFormat(
- mOMX, mNode, colorFormat, haveNativeWindow, &flexibleEquivalent)
+ mOMXNode, colorFormat, haveNativeWindow, &flexibleEquivalent)
|| flexibleEquivalent != (OMX_U32)requestedColorFormat) {
// device did not handle flex-YUV request for native window, fall back
// to SW renderer
@@ -1986,18 +2066,10 @@
mNativeWindowUsageBits = 0;
haveNativeWindow = false;
usingSwRenderer = true;
- if (storingMetadataInDecodedBuffers()) {
- err = mOMX->storeMetaDataInBuffers(
- mNode, kPortIndexOutput, OMX_FALSE, &mOutputMetadataType);
- mOutputMetadataType = kMetadataBufferTypeInvalid; // just in case
- // TODO: implement adaptive-playback support for bytebuffer mode.
- // This is done by SW codecs, but most HW codecs don't support it.
- inputFormat->setInt32("adaptive-playback", false);
- }
- if (err == OK) {
- err = mOMX->enableNativeBuffers(
- mNode, kPortIndexOutput, OMX_TRUE /* graphic */, OMX_FALSE);
- }
+ // TODO: implement adaptive-playback support for bytebuffer mode.
+ // This is done by SW codecs, but most HW codecs don't support it.
+ err = setPortMode(kPortIndexOutput, IOMX::kPortModePresetByteBuffer);
+ inputFormat->setInt32("adaptive-playback", false);
if (mFlags & kFlagIsGrallocUsageProtected) {
// fallback is not supported for protected playback
err = PERMISSION_DENIED;
@@ -2177,13 +2249,16 @@
int32_t maxInputSize;
if (msg->findInt32("max-input-size", &maxInputSize)) {
err = setMinBufferSize(kPortIndexInput, (size_t)maxInputSize);
+ err = OK; // ignore error
} else if (!strcmp("OMX.Nvidia.aac.decoder", mComponentName.c_str())) {
err = setMinBufferSize(kPortIndexInput, 8192); // XXX
+ err = OK; // ignore error
}
int32_t priority;
if (msg->findInt32("priority", &priority)) {
err = setPriority(priority);
+ err = OK; // ignore error
}
int32_t rateInt = -1;
@@ -2194,11 +2269,18 @@
}
if (rateFloat > 0) {
err = setOperatingRate(rateFloat, video);
+ err = OK; // ignore errors
+ }
+
+ if (err == OK) {
+ err = setVendorParameters(msg);
+ if (err != OK) {
+ return err;
+ }
}
// NOTE: both mBaseOutputFormat and mOutputFormat are outputFormat to signal first frame.
mBaseOutputFormat = outputFormat;
- // trigger a kWhatOutputFormatChanged msg on first buffer
mLastOutputFormat.clear();
err = getPortFormat(kPortIndexInput, inputFormat);
@@ -2231,6 +2313,30 @@
return err;
}
+status_t ACodec::setLatency(uint32_t latency) {
+ OMX_PARAM_U32TYPE config;
+ InitOMXParams(&config);
+ config.nPortIndex = kPortIndexInput;
+ config.nU32 = (OMX_U32)latency;
+ status_t err = mOMXNode->setConfig(
+ (OMX_INDEXTYPE)OMX_IndexConfigLatency,
+ &config, sizeof(config));
+ return err;
+}
+
+status_t ACodec::getLatency(uint32_t *latency) {
+ OMX_PARAM_U32TYPE config;
+ InitOMXParams(&config);
+ config.nPortIndex = kPortIndexInput;
+ status_t err = mOMXNode->getConfig(
+ (OMX_INDEXTYPE)OMX_IndexConfigLatency,
+ &config, sizeof(config));
+ if (err == OK) {
+ *latency = config.nU32;
+ }
+ return err;
+}
+
status_t ACodec::setPriority(int32_t priority) {
if (priority < 0) {
return BAD_VALUE;
@@ -2238,8 +2344,8 @@
OMX_PARAM_U32TYPE config;
InitOMXParams(&config);
config.nU32 = (OMX_U32)priority;
- status_t temp = mOMX->setConfig(
- mNode, (OMX_INDEXTYPE)OMX_IndexConfigPriority,
+ status_t temp = mOMXNode->setConfig(
+ (OMX_INDEXTYPE)OMX_IndexConfigPriority,
&config, sizeof(config));
if (temp != OK) {
ALOGI("codec does not support config priority (err %d)", temp);
@@ -2266,8 +2372,8 @@
OMX_PARAM_U32TYPE config;
InitOMXParams(&config);
config.nU32 = rate;
- status_t err = mOMX->setConfig(
- mNode, (OMX_INDEXTYPE)OMX_IndexConfigOperatingRate,
+ status_t err = mOMXNode->setConfig(
+ (OMX_INDEXTYPE)OMX_IndexConfigOperatingRate,
&config, sizeof(config));
if (err != OK) {
ALOGI("codec does not support config operating rate (err %d)", err);
@@ -2279,8 +2385,8 @@
OMX_VIDEO_CONFIG_ANDROID_INTRAREFRESHTYPE params;
InitOMXParams(¶ms);
params.nPortIndex = kPortIndexOutput;
- status_t err = mOMX->getConfig(
- mNode, (OMX_INDEXTYPE)OMX_IndexConfigAndroidIntraRefresh, ¶ms, sizeof(params));
+ status_t err = mOMXNode->getConfig(
+ (OMX_INDEXTYPE)OMX_IndexConfigAndroidIntraRefresh, ¶ms, sizeof(params));
if (err == OK) {
*intraRefreshPeriod = params.nRefreshPeriod;
return OK;
@@ -2291,8 +2397,8 @@
InitOMXParams(&refreshParams);
refreshParams.nPortIndex = kPortIndexOutput;
refreshParams.eRefreshMode = OMX_VIDEO_IntraRefreshCyclic;
- err = mOMX->getParameter(
- mNode, OMX_IndexParamVideoIntraRefresh, &refreshParams, sizeof(refreshParams));
+ err = mOMXNode->getParameter(
+ OMX_IndexParamVideoIntraRefresh, &refreshParams, sizeof(refreshParams));
if (err != OK || refreshParams.nCirMBs == 0) {
*intraRefreshPeriod = 0;
return OK;
@@ -2304,8 +2410,8 @@
InitOMXParams(&def);
OMX_VIDEO_PORTDEFINITIONTYPE *video_def = &def.format.video;
def.nPortIndex = kPortIndexOutput;
- err = mOMX->getParameter(
- mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+ err = mOMXNode->getParameter(
+ OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err != OK) {
*intraRefreshPeriod = 0;
return err;
@@ -2323,8 +2429,8 @@
InitOMXParams(¶ms);
params.nPortIndex = kPortIndexOutput;
params.nRefreshPeriod = intraRefreshPeriod;
- status_t err = mOMX->setConfig(
- mNode, (OMX_INDEXTYPE)OMX_IndexConfigAndroidIntraRefresh, ¶ms, sizeof(params));
+ status_t err = mOMXNode->setConfig(
+ (OMX_INDEXTYPE)OMX_IndexConfigAndroidIntraRefresh, ¶ms, sizeof(params));
if (err == OK) {
return OK;
}
@@ -2351,8 +2457,8 @@
InitOMXParams(&def);
OMX_VIDEO_PORTDEFINITIONTYPE *video_def = &def.format.video;
def.nPortIndex = kPortIndexOutput;
- err = mOMX->getParameter(
- mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+ err = mOMXNode->getParameter(
+ OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err != OK) {
return err;
}
@@ -2362,8 +2468,9 @@
refreshParams.nCirMBs = divUp((divUp(width, 16u) * divUp(height, 16u)), intraRefreshPeriod);
}
- err = mOMX->setParameter(mNode, OMX_IndexParamVideoIntraRefresh,
- &refreshParams, sizeof(refreshParams));
+ err = mOMXNode->setParameter(
+ OMX_IndexParamVideoIntraRefresh,
+ &refreshParams, sizeof(refreshParams));
if (err != OK) {
return err;
}
@@ -2406,9 +2513,9 @@
InitOMXParams(&layerParams);
layerParams.nPortIndex = kPortIndexOutput;
- status_t err = mOMX->getParameter(
- mNode, (OMX_INDEXTYPE)OMX_IndexParamAndroidVideoTemporalLayering,
- &layerParams, sizeof(layerParams));
+ status_t err = mOMXNode->getParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamAndroidVideoTemporalLayering,
+ &layerParams, sizeof(layerParams));
if (err != OK) {
return err;
@@ -2428,8 +2535,8 @@
layerConfig.nBLayerCountActual = numBLayers;
layerConfig.bBitrateRatiosSpecified = OMX_FALSE;
- err = mOMX->setConfig(
- mNode, (OMX_INDEXTYPE)OMX_IndexConfigAndroidVideoTemporalLayering,
+ err = mOMXNode->setConfig(
+ (OMX_INDEXTYPE)OMX_IndexConfigAndroidVideoTemporalLayering,
&layerConfig, sizeof(layerConfig));
} else {
layerParams.ePattern = pattern;
@@ -2437,8 +2544,8 @@
layerParams.nBLayerCountActual = numBLayers;
layerParams.bBitrateRatiosSpecified = OMX_FALSE;
- err = mOMX->setParameter(
- mNode, (OMX_INDEXTYPE)OMX_IndexParamAndroidVideoTemporalLayering,
+ err = mOMXNode->setParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamAndroidVideoTemporalLayering,
&layerParams, sizeof(layerParams));
}
@@ -2455,8 +2562,8 @@
return err;
}
- err = mOMX->getParameter(
- mNode, (OMX_INDEXTYPE)OMX_IndexParamAndroidVideoTemporalLayering,
+ err = mOMXNode->getParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamAndroidVideoTemporalLayering,
&layerParams, sizeof(layerParams));
if (err == OK) {
@@ -2479,8 +2586,8 @@
InitOMXParams(&def);
def.nPortIndex = portIndex;
- status_t err = mOMX->getParameter(
- mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+ status_t err = mOMXNode->getParameter(
+ OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err != OK) {
return err;
@@ -2492,15 +2599,15 @@
def.nBufferSize = size;
- err = mOMX->setParameter(
- mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+ err = mOMXNode->setParameter(
+ OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err != OK) {
return err;
}
- err = mOMX->getParameter(
- mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+ err = mOMXNode->getParameter(
+ OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err != OK) {
return err;
@@ -2522,9 +2629,8 @@
format.nPortIndex = portIndex;
for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
format.nIndex = index;
- status_t err = mOMX->getParameter(
- mNode, OMX_IndexParamAudioPortFormat,
- &format, sizeof(format));
+ status_t err = mOMXNode->getParameter(
+ OMX_IndexParamAudioPortFormat, &format, sizeof(format));
if (err != OK) {
return err;
@@ -2542,8 +2648,8 @@
}
}
- return mOMX->setParameter(
- mNode, OMX_IndexParamAudioPortFormat, &format, sizeof(format));
+ return mOMXNode->setParameter(
+ OMX_IndexParamAudioPortFormat, &format, sizeof(format));
}
status_t ACodec::setupAACCodec(
@@ -2575,8 +2681,8 @@
InitOMXParams(&def);
def.nPortIndex = kPortIndexOutput;
- err = mOMX->getParameter(
- mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+ err = mOMXNode->getParameter(
+ OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err != OK) {
return err;
@@ -2585,8 +2691,8 @@
def.format.audio.bFlagErrorConcealment = OMX_TRUE;
def.format.audio.eEncoding = OMX_AUDIO_CodingAAC;
- err = mOMX->setParameter(
- mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+ err = mOMXNode->setParameter(
+ OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err != OK) {
return err;
@@ -2596,8 +2702,8 @@
InitOMXParams(&profile);
profile.nPortIndex = kPortIndexOutput;
- err = mOMX->getParameter(
- mNode, OMX_IndexParamAudioAac, &profile, sizeof(profile));
+ err = mOMXNode->getParameter(
+ OMX_IndexParamAudioAac, &profile, sizeof(profile));
if (err != OK) {
return err;
@@ -2644,8 +2750,8 @@
}
- err = mOMX->setParameter(
- mNode, OMX_IndexParamAudioAac, &profile, sizeof(profile));
+ err = mOMXNode->setParameter(
+ OMX_IndexParamAudioAac, &profile, sizeof(profile));
if (err != OK) {
return err;
@@ -2658,8 +2764,8 @@
InitOMXParams(&profile);
profile.nPortIndex = kPortIndexInput;
- err = mOMX->getParameter(
- mNode, OMX_IndexParamAudioAac, &profile, sizeof(profile));
+ err = mOMXNode->getParameter(
+ OMX_IndexParamAudioAac, &profile, sizeof(profile));
if (err != OK) {
return err;
@@ -2683,10 +2789,12 @@
presentation.nEncodedTargetLevel = drc.encodedTargetLevel;
presentation.nPCMLimiterEnable = pcmLimiterEnable;
- status_t res = mOMX->setParameter(mNode, OMX_IndexParamAudioAac, &profile, sizeof(profile));
+ status_t res = mOMXNode->setParameter(
+ OMX_IndexParamAudioAac, &profile, sizeof(profile));
if (res == OK) {
// optional parameters, will not cause configuration failure
- mOMX->setParameter(mNode, (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAacPresentation,
+ mOMXNode->setParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAacPresentation,
&presentation, sizeof(presentation));
} else {
ALOGW("did not set AudioAndroidAacPresentation due to error %d when setting AudioAac", res);
@@ -2713,11 +2821,8 @@
InitOMXParams(&def);
def.nPortIndex = kPortIndexInput;
- err = mOMX->getParameter(
- mNode,
- (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3,
- &def,
- sizeof(def));
+ err = mOMXNode->getParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3, &def, sizeof(def));
if (err != OK) {
return err;
@@ -2726,11 +2831,8 @@
def.nChannels = numChannels;
def.nSampleRate = sampleRate;
- return mOMX->setParameter(
- mNode,
- (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3,
- &def,
- sizeof(def));
+ return mOMXNode->setParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3, &def, sizeof(def));
}
status_t ACodec::setupEAC3Codec(
@@ -2751,11 +2853,8 @@
InitOMXParams(&def);
def.nPortIndex = kPortIndexInput;
- err = mOMX->getParameter(
- mNode,
- (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidEac3,
- &def,
- sizeof(def));
+ err = mOMXNode->getParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidEac3, &def, sizeof(def));
if (err != OK) {
return err;
@@ -2764,11 +2863,8 @@
def.nChannels = numChannels;
def.nSampleRate = sampleRate;
- return mOMX->setParameter(
- mNode,
- (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidEac3,
- &def,
- sizeof(def));
+ return mOMXNode->setParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidEac3, &def, sizeof(def));
}
static OMX_AUDIO_AMRBANDMODETYPE pickModeFromBitRate(
@@ -2821,8 +2917,8 @@
InitOMXParams(&def);
def.nPortIndex = encoder ? kPortIndexOutput : kPortIndexInput;
- status_t err =
- mOMX->getParameter(mNode, OMX_IndexParamAudioAmr, &def, sizeof(def));
+ status_t err = mOMXNode->getParameter(
+ OMX_IndexParamAudioAmr, &def, sizeof(def));
if (err != OK) {
return err;
@@ -2831,8 +2927,8 @@
def.eAMRFrameFormat = OMX_AUDIO_AMRFrameFormatFSF;
def.eAMRBandMode = pickModeFromBitRate(isWAMR, bitrate);
- err = mOMX->setParameter(
- mNode, OMX_IndexParamAudioAmr, &def, sizeof(def));
+ err = mOMXNode->setParameter(
+ OMX_IndexParamAudioAmr, &def, sizeof(def));
if (err != OK) {
return err;
@@ -2862,13 +2958,13 @@
def.nPortIndex = kPortIndexOutput;
// configure compression level
- status_t err = mOMX->getParameter(mNode, OMX_IndexParamAudioFlac, &def, sizeof(def));
+ status_t err = mOMXNode->getParameter(OMX_IndexParamAudioFlac, &def, sizeof(def));
if (err != OK) {
ALOGE("setupFlacCodec(): Error %d getting OMX_IndexParamAudioFlac parameter", err);
return err;
}
def.nCompressionLevel = compressionLevel;
- err = mOMX->setParameter(mNode, OMX_IndexParamAudioFlac, &def, sizeof(def));
+ err = mOMXNode->setParameter(OMX_IndexParamAudioFlac, &def, sizeof(def));
if (err != OK) {
ALOGE("setupFlacCodec(): Error %d setting OMX_IndexParamAudioFlac parameter", err);
return err;
@@ -2887,8 +2983,8 @@
InitOMXParams(&def);
def.nPortIndex = portIndex;
- status_t err = mOMX->getParameter(
- mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+ status_t err = mOMXNode->getParameter(
+ OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err != OK) {
return err;
@@ -2896,8 +2992,8 @@
def.format.audio.eEncoding = OMX_AUDIO_CodingPCM;
- err = mOMX->setParameter(
- mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+ err = mOMXNode->setParameter(
+ OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err != OK) {
return err;
@@ -2907,8 +3003,8 @@
InitOMXParams(&pcmParams);
pcmParams.nPortIndex = portIndex;
- err = mOMX->getParameter(
- mNode, OMX_IndexParamAudioPcm, &pcmParams, sizeof(pcmParams));
+ err = mOMXNode->getParameter(
+ OMX_IndexParamAudioPcm, &pcmParams, sizeof(pcmParams));
if (err != OK) {
return err;
@@ -2939,15 +3035,15 @@
return OMX_ErrorNone;
}
- err = mOMX->setParameter(
- mNode, OMX_IndexParamAudioPcm, &pcmParams, sizeof(pcmParams));
+ err = mOMXNode->setParameter(
+ OMX_IndexParamAudioPcm, &pcmParams, sizeof(pcmParams));
// if we could not set up raw format to non-16-bit, try with 16-bit
// NOTE: we will also verify this via readback, in case codec ignores these fields
if (err != OK && encoding != kAudioEncodingPcm16bit) {
pcmParams.eNumData = OMX_NumericalDataSigned;
pcmParams.nBitPerSample = 16;
- err = mOMX->setParameter(
- mNode, OMX_IndexParamAudioPcm, &pcmParams, sizeof(pcmParams));
+ err = mOMXNode->setParameter(
+ OMX_IndexParamAudioPcm, &pcmParams, sizeof(pcmParams));
}
return err;
}
@@ -2956,8 +3052,8 @@
int32_t audioHwSync, const sp<ANativeWindow> &nativeWindow) {
native_handle_t* sidebandHandle;
- status_t err = mOMX->configureVideoTunnelMode(
- mNode, kPortIndexOutput, OMX_TRUE, audioHwSync, &sidebandHandle);
+ status_t err = mOMXNode->configureVideoTunnelMode(
+ kPortIndexOutput, OMX_TRUE, audioHwSync, &sidebandHandle);
if (err != OK) {
ALOGE("configureVideoTunnelMode failed! (err %d).", err);
return err;
@@ -2986,8 +3082,8 @@
for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
format.nIndex = index;
- status_t err = mOMX->getParameter(
- mNode, OMX_IndexParamVideoPortFormat,
+ status_t err = mOMXNode->getParameter(
+ OMX_IndexParamVideoPortFormat,
&format, sizeof(format));
if (err != OK) {
@@ -2998,7 +3094,7 @@
OMX_U32 flexibleEquivalent;
if (compressionFormat == OMX_VIDEO_CodingUnused
&& IsFlexibleColorFormat(
- mOMX, mNode, format.eColorFormat, usingNativeBuffers, &flexibleEquivalent)
+ mOMXNode, format.eColorFormat, usingNativeBuffers, &flexibleEquivalent)
&& colorFormat == flexibleEquivalent) {
ALOGI("[%s] using color format %#x in place of %#x",
mComponentName.c_str(), format.eColorFormat, colorFormat);
@@ -3041,9 +3137,8 @@
return UNKNOWN_ERROR;
}
- status_t err = mOMX->setParameter(
- mNode, OMX_IndexParamVideoPortFormat,
- &format, sizeof(format));
+ status_t err = mOMXNode->setParameter(
+ OMX_IndexParamVideoPortFormat, &format, sizeof(format));
return err;
}
@@ -3073,9 +3168,8 @@
for (OMX_U32 index = 0; ; ++index) {
format.nIndex = index;
- status_t err = mOMX->getParameter(
- mNode, OMX_IndexParamVideoPortFormat,
- &format, sizeof(format));
+ status_t err = mOMXNode->getParameter(
+ OMX_IndexParamVideoPortFormat, &format, sizeof(format));
if (err != OK) {
// no more formats, pick legacy format if found
if (legacyFormat.eColorFormat != OMX_COLOR_FormatUnused) {
@@ -3102,15 +3196,14 @@
OMX_U32 flexibleEquivalent;
if (legacyFormat.eColorFormat == OMX_COLOR_FormatUnused
&& IsFlexibleColorFormat(
- mOMX, mNode, format.eColorFormat, false /* usingNativeBuffers */,
+ mOMXNode, format.eColorFormat, false /* usingNativeBuffers */,
&flexibleEquivalent)
&& flexibleEquivalent == OMX_COLOR_FormatYUV420Flexible) {
memcpy(&legacyFormat, &format, sizeof(format));
}
}
- return mOMX->setParameter(
- mNode, OMX_IndexParamVideoPortFormat,
- &format, sizeof(format));
+ return mOMXNode->setParameter(
+ OMX_IndexParamVideoPortFormat, &format, sizeof(format));
}
static const struct VideoCodingMapEntry {
@@ -3166,14 +3259,14 @@
status_t err;
ALOGD("Setting [%s] %s port buffer number: %d", mComponentName.c_str(),
portIndex == kPortIndexInput ? "input" : "output", bufferNum);
- err = mOMX->getParameter(
- mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+ err = mOMXNode->getParameter(
+ OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err != OK) {
return err;
}
def.nBufferCountActual = bufferNum;
- err = mOMX->setParameter(
- mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+ err = mOMXNode->setParameter(
+ OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err != OK) {
// Component could reject this request.
ALOGW("Fail to set [%s] %s port buffer number: %d", mComponentName.c_str(),
@@ -3204,11 +3297,9 @@
params.nPortIndex = kPortIndexInput;
// Check if VP9 decoder advertises supported profiles.
params.nProfileIndex = 0;
- status_t err = mOMX->getParameter(
- mNode,
+ status_t err = mOMXNode->getParameter(
OMX_IndexParamVideoProfileLevelQuerySupported,
- ¶ms,
- sizeof(params));
+ ¶ms, sizeof(params));
mIsLegacyVP9Decoder = err != OK;
}
@@ -3297,8 +3388,8 @@
}
status_t ACodec::initDescribeColorAspectsIndex() {
- status_t err = mOMX->getExtensionIndex(
- mNode, "OMX.google.android.index.describeColorAspects", &mDescribeColorAspectsIndex);
+ status_t err = mOMXNode->getExtensionIndex(
+ "OMX.google.android.index.describeColorAspects", &mDescribeColorAspectsIndex);
if (err != OK) {
mDescribeColorAspectsIndex = (OMX_INDEXTYPE)0;
}
@@ -3308,7 +3399,7 @@
status_t ACodec::setCodecColorAspects(DescribeColorAspectsParams ¶ms, bool verify) {
status_t err = ERROR_UNSUPPORTED;
if (mDescribeColorAspectsIndex) {
- err = mOMX->setConfig(mNode, mDescribeColorAspectsIndex, ¶ms, sizeof(params));
+ err = mOMXNode->setConfig(mDescribeColorAspectsIndex, ¶ms, sizeof(params));
}
ALOGV("[%s] setting color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
mComponentName.c_str(),
@@ -3353,7 +3444,7 @@
status_t ACodec::getCodecColorAspects(DescribeColorAspectsParams ¶ms) {
status_t err = ERROR_UNSUPPORTED;
if (mDescribeColorAspectsIndex) {
- err = mOMX->getConfig(mNode, mDescribeColorAspectsIndex, ¶ms, sizeof(params));
+ err = mOMXNode->getConfig(mDescribeColorAspectsIndex, ¶ms, sizeof(params));
}
ALOGV("[%s] got color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
mComponentName.c_str(),
@@ -3575,8 +3666,8 @@
}
status_t ACodec::initDescribeHDRStaticInfoIndex() {
- status_t err = mOMX->getExtensionIndex(
- mNode, "OMX.google.android.index.describeHDRStaticInfo", &mDescribeHDRStaticInfoIndex);
+ status_t err = mOMXNode->getExtensionIndex(
+ "OMX.google.android.index.describeHDRStaticInfo", &mDescribeHDRStaticInfoIndex);
if (err != OK) {
mDescribeHDRStaticInfoIndex = (OMX_INDEXTYPE)0;
}
@@ -3586,7 +3677,7 @@
status_t ACodec::setHDRStaticInfo(const DescribeHDRStaticInfoParams ¶ms) {
status_t err = ERROR_UNSUPPORTED;
if (mDescribeHDRStaticInfoIndex) {
- err = mOMX->setConfig(mNode, mDescribeHDRStaticInfoIndex, ¶ms, sizeof(params));
+ err = mOMXNode->setConfig(mDescribeHDRStaticInfoIndex, ¶ms, sizeof(params));
}
const HDRStaticInfo *info = ¶ms.sInfo;
@@ -3607,7 +3698,7 @@
status_t ACodec::getHDRStaticInfo(DescribeHDRStaticInfoParams ¶ms) {
status_t err = ERROR_UNSUPPORTED;
if (mDescribeHDRStaticInfoIndex) {
- err = mOMX->getConfig(mNode, mDescribeHDRStaticInfoIndex, ¶ms, sizeof(params));
+ err = mOMXNode->getConfig(mDescribeHDRStaticInfoIndex, ¶ms, sizeof(params));
}
ALOGW_IF(err == ERROR_UNSUPPORTED && mDescribeHDRStaticInfoIndex,
@@ -3646,8 +3737,8 @@
def.nPortIndex = kPortIndexInput;
- err = mOMX->getParameter(
- mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+ err = mOMXNode->getParameter(
+ OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err != OK) {
return err;
@@ -3679,17 +3770,18 @@
def.nBufferSize = (video_def->nStride * video_def->nSliceHeight * 3) / 2;
- float frameRate;
- if (!msg->findFloat("frame-rate", &frameRate)) {
+ float framerate;
+ if (!msg->findFloat("frame-rate", &framerate)) {
int32_t tmp;
if (!msg->findInt32("frame-rate", &tmp)) {
return INVALID_OPERATION;
}
- frameRate = (float)tmp;
- mTimePerFrameUs = (int64_t) (1000000.0f / frameRate);
+ mFps = (double)tmp;
+ } else {
+ mFps = (double)framerate;
}
- video_def->xFramerate = (OMX_U32)(frameRate * 65536.0f);
+ video_def->xFramerate = (OMX_U32)(mFps * 65536);
video_def->eCompressionFormat = OMX_VIDEO_CodingUnused;
// this is redundant as it was already set up in setVideoPortFormatType
// FIXME for now skip this only for flexible YUV formats
@@ -3697,8 +3789,8 @@
video_def->eColorFormat = colorFormat;
}
- err = mOMX->setParameter(
- mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+ err = mOMXNode->setParameter(
+ OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err != OK) {
ALOGE("[%s] failed to set input port definition parameters.",
@@ -3728,8 +3820,8 @@
def.nPortIndex = kPortIndexOutput;
- err = mOMX->getParameter(
- mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+ err = mOMXNode->getParameter(
+ OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err != OK) {
return err;
@@ -3742,8 +3834,8 @@
video_def->eCompressionFormat = compressionFormat;
video_def->eColorFormat = OMX_COLOR_FormatUnused;
- err = mOMX->setParameter(
- mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+ err = mOMXNode->setParameter(
+ OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err != OK) {
ALOGE("[%s] failed to set output port definition parameters.",
@@ -3763,6 +3855,8 @@
}
}
+ configureEncoderLatency(msg);
+
switch (compressionFormat) {
case OMX_VIDEO_CodingMPEG4:
err = setupMPEG4EncoderParameters(msg);
@@ -3872,9 +3966,8 @@
params.nAirRef = ref;
}
- status_t err = mOMX->setParameter(
- mNode, OMX_IndexParamVideoIntraRefresh,
- &params, sizeof(params));
+ status_t err = mOMXNode->setParameter(
+ OMX_IndexParamVideoIntraRefresh, &params, sizeof(params));
return err;
}
@@ -3937,8 +4030,8 @@
InitOMXParams(&mpeg4type);
mpeg4type.nPortIndex = kPortIndexOutput;
- status_t err = mOMX->getParameter(
- mNode, OMX_IndexParamVideoMpeg4, &mpeg4type, sizeof(mpeg4type));
+ status_t err = mOMXNode->getParameter(
+ OMX_IndexParamVideoMpeg4, &mpeg4type, sizeof(mpeg4type));
if (err != OK) {
return err;
@@ -3980,8 +4073,8 @@
mpeg4type.eLevel = static_cast<OMX_VIDEO_MPEG4LEVELTYPE>(level);
}
- err = mOMX->setParameter(
- mNode, OMX_IndexParamVideoMpeg4, &mpeg4type, sizeof(mpeg4type));
+ err = mOMXNode->setParameter(
+ OMX_IndexParamVideoMpeg4, &mpeg4type, sizeof(mpeg4type));
if (err != OK) {
return err;
@@ -4019,8 +4112,8 @@
InitOMXParams(&h263type);
h263type.nPortIndex = kPortIndexOutput;
- status_t err = mOMX->getParameter(
- mNode, OMX_IndexParamVideoH263, &h263type, sizeof(h263type));
+ status_t err = mOMXNode->getParameter(
+ OMX_IndexParamVideoH263, &h263type, sizeof(h263type));
if (err != OK) {
return err;
@@ -4057,8 +4150,8 @@
h263type.nPictureHeaderRepetition = 0;
h263type.nGOBHeaderInterval = 0;
- err = mOMX->setParameter(
- mNode, OMX_IndexParamVideoH263, &h263type, sizeof(h263type));
+ err = mOMXNode->setParameter(
+ OMX_IndexParamVideoH263, &h263type, sizeof(h263type));
if (err != OK) {
return err;
@@ -4159,8 +4252,8 @@
InitOMXParams(&h264type);
h264type.nPortIndex = kPortIndexOutput;
- err = mOMX->getParameter(
- mNode, OMX_IndexParamVideoAvc, &h264type, sizeof(h264type));
+ err = mOMXNode->getParameter(
+ OMX_IndexParamVideoAvc, &h264type, sizeof(h264type));
if (err != OK) {
return err;
@@ -4223,7 +4316,7 @@
h264type.nSliceHeaderSpacing = 0;
h264type.bUseHadamard = OMX_TRUE;
h264type.nRefFrames = 2;
- h264type.nBFrames = 1;
+ h264type.nBFrames = mLatency == 0 ? 1 : std::min(1U, mLatency - 1);
h264type.nPFrames = setPFramesSpacing(iFrameInterval, frameRate, h264type.nBFrames);
h264type.nAllowedPictureTypes =
OMX_VIDEO_PictureTypeI | OMX_VIDEO_PictureTypeP | OMX_VIDEO_PictureTypeB;
@@ -4249,8 +4342,8 @@
h264type.bMBAFF = OMX_FALSE;
h264type.eLoopFilterMode = OMX_VIDEO_AVCLoopFilterEnable;
- err = mOMX->setParameter(
- mNode, OMX_IndexParamVideoAvc, &h264type, sizeof(h264type));
+ err = mOMXNode->setParameter(
+ OMX_IndexParamVideoAvc, &h264type, sizeof(h264type));
if (err != OK) {
return err;
@@ -4266,8 +4359,8 @@
OMX_VIDEO_PARAM_ANDROID_TEMPORALLAYERINGTYPE layering;
InitOMXParams(&layering);
layering.nPortIndex = kPortIndexOutput;
- if (mOMX->getParameter(
- mNode, (OMX_INDEXTYPE)OMX_IndexParamAndroidVideoTemporalLayering,
+ if (mOMXNode->getParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamAndroidVideoTemporalLayering,
&layering, sizeof(layering)) == OK
&& layering.eSupportedPatterns
&& layering.nBLayerCountMax == 0) {
@@ -4275,8 +4368,8 @@
h264type.nPFrames = setPFramesSpacing(iFrameInterval, frameRate, h264type.nBFrames);
h264type.nAllowedPictureTypes &= ~OMX_VIDEO_PictureTypeB;
ALOGI("disabling B-frames");
- err = mOMX->setParameter(
- mNode, OMX_IndexParamVideoAvc, &h264type, sizeof(h264type));
+ err = mOMXNode->setParameter(
+ OMX_IndexParamVideoAvc, &h264type, sizeof(h264type));
if (err != OK) {
return err;
@@ -4311,8 +4404,8 @@
hevcType.nPortIndex = kPortIndexOutput;
status_t err = OK;
- err = mOMX->getParameter(
- mNode, (OMX_INDEXTYPE)OMX_IndexParamVideoHevc, &hevcType, sizeof(hevcType));
+ err = mOMXNode->getParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamVideoHevc, &hevcType, sizeof(hevcType));
if (err != OK) {
return err;
}
@@ -4335,8 +4428,8 @@
// TODO: finer control?
hevcType.nKeyFrameInterval = setPFramesSpacing(iFrameInterval, frameRate) + 1;
- err = mOMX->setParameter(
- mNode, (OMX_INDEXTYPE)OMX_IndexParamVideoHevc, &hevcType, sizeof(hevcType));
+ err = mOMXNode->setParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamVideoHevc, &hevcType, sizeof(hevcType));
if (err != OK) {
return err;
}
@@ -4404,8 +4497,8 @@
OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE vp8type;
InitOMXParams(&vp8type);
vp8type.nPortIndex = kPortIndexOutput;
- status_t err = mOMX->getParameter(
- mNode, (OMX_INDEXTYPE)OMX_IndexParamVideoAndroidVp8Encoder,
+ status_t err = mOMXNode->getParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamVideoAndroidVp8Encoder,
&vp8type, sizeof(vp8type));
if (err == OK) {
@@ -4425,8 +4518,8 @@
vp8type.nMaxQuantizer = 63;
}
- err = mOMX->setParameter(
- mNode, (OMX_INDEXTYPE)OMX_IndexParamVideoAndroidVp8Encoder,
+ err = mOMXNode->setParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamVideoAndroidVp8Encoder,
&vp8type, sizeof(vp8type));
if (err != OK) {
ALOGW("Extended VP8 parameters set failed: %d", err);
@@ -4450,11 +4543,9 @@
for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
params.nProfileIndex = index;
- status_t err = mOMX->getParameter(
- mNode,
+ status_t err = mOMXNode->getParameter(
OMX_IndexParamVideoProfileLevelQuerySupported,
- &params,
- sizeof(params));
+ &params, sizeof(params));
if (err != OK) {
return err;
@@ -4482,9 +4573,8 @@
InitOMXParams(&bitrateType);
bitrateType.nPortIndex = kPortIndexOutput;
- status_t err = mOMX->getParameter(
- mNode, OMX_IndexParamVideoBitrate,
- &bitrateType, sizeof(bitrateType));
+ status_t err = mOMXNode->getParameter(
+ OMX_IndexParamVideoBitrate, &bitrateType, sizeof(bitrateType));
if (err != OK) {
return err;
@@ -4493,9 +4583,31 @@
bitrateType.eControlRate = bitrateMode;
bitrateType.nTargetBitrate = bitrate;
- return mOMX->setParameter(
- mNode, OMX_IndexParamVideoBitrate,
- &bitrateType, sizeof(bitrateType));
+ return mOMXNode->setParameter(
+ OMX_IndexParamVideoBitrate, &bitrateType, sizeof(bitrateType));
+}
+
+void ACodec::configureEncoderLatency(const sp<AMessage> &msg) {
+ if (!mIsEncoder || !mIsVideo) {
+ return;
+ }
+
+ int32_t latency = 0, bitrateMode;
+ if (msg->findInt32("latency", &latency) && latency > 0) {
+ status_t err = setLatency(latency);
+ if (err != OK) {
+ ALOGW("[%s] failed setLatency. Failure is fine since this key is optional",
+ mComponentName.c_str());
+ err = OK;
+ } else {
+ mLatency = latency;
+ }
+ } else if ((!msg->findInt32("bitrate-mode", &bitrateMode) &&
+ bitrateMode == OMX_Video_ControlRateConstant)) {
+ // default the latency to be 1 if latency key is not specified or unsupported and bitrateMode
+ // is CBR.
+ mLatency = 1;
+ }
}
status_t ACodec::setupErrorCorrectionParameters() {
@@ -4503,8 +4615,8 @@
InitOMXParams(&errorCorrectionType);
errorCorrectionType.nPortIndex = kPortIndexOutput;
- status_t err = mOMX->getParameter(
- mNode, OMX_IndexParamVideoErrorCorrection,
+ status_t err = mOMXNode->getParameter(
+ OMX_IndexParamVideoErrorCorrection,
&errorCorrectionType, sizeof(errorCorrectionType));
if (err != OK) {
@@ -4517,8 +4629,8 @@
errorCorrectionType.bEnableDataPartitioning = OMX_FALSE;
errorCorrectionType.bEnableRVLC = OMX_FALSE;
- return mOMX->setParameter(
- mNode, OMX_IndexParamVideoErrorCorrection,
+ return mOMXNode->setParameter(
+ OMX_IndexParamVideoErrorCorrection,
&errorCorrectionType, sizeof(errorCorrectionType));
}
@@ -4532,8 +4644,8 @@
OMX_VIDEO_PORTDEFINITIONTYPE *video_def = &def.format.video;
- status_t err = mOMX->getParameter(
- mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+ status_t err = mOMXNode->getParameter(
+ OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err != OK) {
return err;
}
@@ -4562,21 +4674,12 @@
}
}
- err = mOMX->setParameter(
- mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+ err = mOMXNode->setParameter(
+ OMX_IndexParamPortDefinition, &def, sizeof(def));
return err;
}
-status_t ACodec::initNativeWindow() {
- if (mNativeWindow != NULL) {
- return mOMX->enableNativeBuffers(mNode, kPortIndexOutput, OMX_TRUE /* graphic */, OMX_TRUE);
- }
-
- mOMX->enableNativeBuffers(mNode, kPortIndexOutput, OMX_TRUE /* graphic */, OMX_FALSE);
- return OK;
-}
-
size_t ACodec::countBuffersOwnedByComponent(OMX_U32 portIndex) const {
size_t n = 0;
@@ -4661,7 +4764,7 @@
InitOMXParams(&def);
def.nPortIndex = portIndex;
- status_t err = mOMX->getParameter(mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+ status_t err = mOMXNode->getParameter(OMX_IndexParamPortDefinition, &def, sizeof(def));
if (err != OK) {
return err;
}
@@ -4695,7 +4798,7 @@
describeParams.nSliceHeight = videoDef->nSliceHeight;
describeParams.bUsingNativeBuffers = OMX_FALSE;
- if (DescribeColorFormat(mOMX, mNode, describeParams)) {
+ if (DescribeColorFormat(mOMXNode, describeParams)) {
notify->setBuffer(
"image-data",
ABuffer::CreateAsCopy(
@@ -4720,8 +4823,7 @@
InitOMXParams(&rect);
rect.nPortIndex = portIndex;
- if (mOMX->getConfig(
- mNode,
+ if (mOMXNode->getConfig(
(portIndex == kPortIndexOutput ?
OMX_IndexConfigCommonOutputCrop :
OMX_IndexConfigCommonInputCrop),
@@ -4766,6 +4868,10 @@
if (mConfigFormat->contains("hdr-static-info")) {
(void)getHDRStaticInfoForVideoCodec(kPortIndexInput, notify);
}
+ uint32_t latency = 0;
+ if (mIsEncoder && getLatency(&latency) == OK && latency > 0) {
+ notify->setInt32("latency", latency);
+ }
}
break;
@@ -4777,8 +4883,7 @@
OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE vp8type;
InitOMXParams(&vp8type);
vp8type.nPortIndex = kPortIndexOutput;
- status_t err = mOMX->getParameter(
- mNode,
+ status_t err = mOMXNode->getParameter(
(OMX_INDEXTYPE)OMX_IndexParamVideoAndroidVp8Encoder,
&vp8type,
sizeof(vp8type));
@@ -4847,8 +4952,8 @@
InitOMXParams(&params);
params.nPortIndex = portIndex;
- err = mOMX->getParameter(
- mNode, OMX_IndexParamAudioPcm, &params, sizeof(params));
+ err = mOMXNode->getParameter(
+ OMX_IndexParamAudioPcm, &params, sizeof(params));
if (err != OK) {
return err;
}
@@ -4895,8 +5000,8 @@
InitOMXParams(&params);
params.nPortIndex = portIndex;
- err = mOMX->getParameter(
- mNode, OMX_IndexParamAudioAac, &params, sizeof(params));
+ err = mOMXNode->getParameter(
+ OMX_IndexParamAudioAac, &params, sizeof(params));
if (err != OK) {
return err;
}
@@ -4913,8 +5018,8 @@
InitOMXParams(&params);
params.nPortIndex = portIndex;
- err = mOMX->getParameter(
- mNode, OMX_IndexParamAudioAmr, &params, sizeof(params));
+ err = mOMXNode->getParameter(
+ OMX_IndexParamAudioAmr, &params, sizeof(params));
if (err != OK) {
return err;
}
@@ -4936,8 +5041,8 @@
InitOMXParams(&params);
params.nPortIndex = portIndex;
- err = mOMX->getParameter(
- mNode, OMX_IndexParamAudioFlac, &params, sizeof(params));
+ err = mOMXNode->getParameter(
+ OMX_IndexParamAudioFlac, &params, sizeof(params));
if (err != OK) {
return err;
}
@@ -4954,8 +5059,8 @@
InitOMXParams(&params);
params.nPortIndex = portIndex;
- err = mOMX->getParameter(
- mNode, OMX_IndexParamAudioMp3, &params, sizeof(params));
+ err = mOMXNode->getParameter(
+ OMX_IndexParamAudioMp3, &params, sizeof(params));
if (err != OK) {
return err;
}
@@ -4972,8 +5077,8 @@
InitOMXParams(&params);
params.nPortIndex = portIndex;
- err = mOMX->getParameter(
- mNode, OMX_IndexParamAudioVorbis, &params, sizeof(params));
+ err = mOMXNode->getParameter(
+ OMX_IndexParamAudioVorbis, &params, sizeof(params));
if (err != OK) {
return err;
}
@@ -4990,8 +5095,8 @@
InitOMXParams(&params);
params.nPortIndex = portIndex;
- err = mOMX->getParameter(
- mNode, (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3,
+ err = mOMXNode->getParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3,
&params, sizeof(params));
if (err != OK) {
return err;
@@ -5009,8 +5114,8 @@
InitOMXParams(&params);
params.nPortIndex = portIndex;
- err = mOMX->getParameter(
- mNode, (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidEac3,
+ err = mOMXNode->getParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidEac3,
&params, sizeof(params));
if (err != OK) {
return err;
@@ -5028,8 +5133,8 @@
InitOMXParams(&params);
params.nPortIndex = portIndex;
- err = mOMX->getParameter(
- mNode, (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidOpus,
+ err = mOMXNode->getParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidOpus,
&params, sizeof(params));
if (err != OK) {
return err;
@@ -5047,8 +5152,8 @@
InitOMXParams(&params);
params.nPortIndex = portIndex;
- err = mOMX->getParameter(
- mNode, (OMX_INDEXTYPE)OMX_IndexParamAudioPcm, &params, sizeof(params));
+ err = mOMXNode->getParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamAudioPcm, &params, sizeof(params));
if (err != OK) {
return err;
}
@@ -5074,8 +5179,8 @@
InitOMXParams(&params);
params.nPortIndex = portIndex;
- err = mOMX->getParameter(
- mNode, OMX_IndexParamAudioPcm, &params, sizeof(params));
+ err = mOMXNode->getParameter(
+ OMX_IndexParamAudioPcm, &params, sizeof(params));
if (err != OK) {
return err;
}
@@ -5099,7 +5204,7 @@
return BAD_TYPE;
}
- return OK;
+ return getVendorParameters(portIndex, notify);
}
void ACodec::onDataSpaceChanged(android_dataspace dataSpace, const ColorAspects &aspects) {
@@ -5177,25 +5282,6 @@
}
}
-void ACodec::addKeyFormatChangesToRenderBufferNotification(sp<AMessage> &notify) {
- AString mime;
- CHECK(mOutputFormat->findString("mime", &mime));
-
- if (mime == MEDIA_MIMETYPE_VIDEO_RAW && mNativeWindow != NULL) {
- // notify renderer of the crop change and dataspace change
- // NOTE: native window uses extended right-bottom coordinate
- int32_t left, top, right, bottom;
- if (mOutputFormat->findRect("crop", &left, &top, &right, &bottom)) {
- notify->setRect("crop", left, top, right + 1, bottom + 1);
- }
-
- int32_t dataSpace;
- if (mOutputFormat->findInt32("android._dataspace", &dataSpace)) {
- notify->setInt32("dataspace", dataSpace);
- }
- }
-}
-
void ACodec::sendFormatChange() {
AString mime;
CHECK(mOutputFormat->findString("mime", &mime));
@@ -5218,18 +5304,11 @@
mSkipCutBuffer = new SkipCutBuffer(mEncoderDelay, mEncoderPadding, channelCount);
}
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatOutputFormatChanged);
- notify->setMessage("format", mOutputFormat);
- notify->post();
-
// mLastOutputFormat is not used when tunneled; doing this just to stay consistent
mLastOutputFormat = mOutputFormat;
}
void ACodec::signalError(OMX_ERRORTYPE error, status_t internalError) {
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatError);
ALOGE("signalError(omxError %#x, internalError %d)", error, internalError);
if (internalError == UNKNOWN_ERROR) { // find better error code
@@ -5242,15 +5321,7 @@
}
mFatalError = true;
-
- notify->setInt32("err", internalError);
- notify->setInt32("actionCode", ACTION_CODE_FATAL); // could translate from OMX error.
- notify->post();
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-ACodec::PortDescription::PortDescription() {
+ mCallback->onError(internalError, ACTION_CODE_FATAL);
}
status_t ACodec::requestIDRFrame() {
@@ -5264,42 +5335,12 @@
params.nPortIndex = kPortIndexOutput;
params.IntraRefreshVOP = OMX_TRUE;
- return mOMX->setConfig(
- mNode,
+ return mOMXNode->setConfig(
OMX_IndexConfigVideoIntraVOPRefresh,
&params,
sizeof(params));
}
-void ACodec::PortDescription::addBuffer(
- IOMX::buffer_id id, const sp<ABuffer> &buffer,
- const sp<NativeHandle> &handle, const sp<RefBase> &memRef) {
- mBufferIDs.push_back(id);
- mBuffers.push_back(buffer);
- mHandles.push_back(handle);
- mMemRefs.push_back(memRef);
-}
-
-size_t ACodec::PortDescription::countBuffers() {
- return mBufferIDs.size();
-}
-
-IOMX::buffer_id ACodec::PortDescription::bufferIDAt(size_t index) const {
- return mBufferIDs.itemAt(index);
-}
-
-sp<ABuffer> ACodec::PortDescription::bufferAt(size_t index) const {
- return mBuffers.itemAt(index);
-}
-
-sp<NativeHandle> ACodec::PortDescription::handleAt(size_t index) const {
- return mHandles.itemAt(index);
-}
-
-sp<RefBase> ACodec::PortDescription::memRefAt(size_t index) const {
- return mMemRefs.itemAt(index);
-}
-
////////////////////////////////////////////////////////////////////////////////
ACodec::BaseState::BaseState(ACodec *codec, const sp<AState> &parentState)
@@ -5312,6 +5353,10 @@
return KEEP_BUFFERS;
}
+void ACodec::BaseState::stateExited() {
+ ++mCodec->mStateGeneration;
+}
+
bool ACodec::BaseState::onMessageReceived(const sp<AMessage> &msg) {
switch (msg->what()) {
case kWhatInputBufferFilled:
@@ -5372,6 +5417,7 @@
{
// This will result in kFlagSawMediaServerDie handling in MediaCodec.
ALOGE("OMX/mediaserver died, signalling error!");
+ mCodec->mGraphicBufferSource.clear();
mCodec->signalError(OMX_ErrorResourcesLost, DEAD_OBJECT);
break;
}
@@ -5380,12 +5426,18 @@
{
ALOGI("[%s] forcing the release of codec",
mCodec->mComponentName.c_str());
- status_t err = mCodec->mOMX->freeNode(mCodec->mNode);
+ status_t err = mCodec->mOMXNode->freeNode();
ALOGE_IF("[%s] failed to release codec instance: err=%d",
mCodec->mComponentName.c_str(), err);
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatShutdownCompleted);
- notify->post();
+ mCodec->mCallback->onReleaseCompleted();
+
+ mCodec->changeState(mCodec->mUninitializedState);
+ break;
+ }
+
+ case ACodec::kWhatForceStateTransition:
+ {
+ ALOGV("Already transitioned --- ignore");
break;
}
@@ -5399,16 +5451,17 @@
bool ACodec::BaseState::checkOMXMessage(const sp<AMessage> &msg) {
// there is a possibility that this is an outstanding message for a
// codec that we have already destroyed
- if (mCodec->mNode == 0) {
+ if (mCodec->mOMXNode == NULL) {
ALOGI("ignoring message as already freed component: %s",
msg->debugString().c_str());
return false;
}
- IOMX::node_id nodeID;
- CHECK(msg->findInt32("node", (int32_t*)&nodeID));
- if (nodeID != mCodec->mNode) {
- ALOGE("Unexpected message for nodeID: %u, should have been %u", nodeID, mCodec->mNode);
+ int32_t generation;
+ CHECK(msg->findInt32("generation", (int32_t*)&generation));
+ if (generation != mCodec->mNodeGeneration) {
+ ALOGW("Unexpected message for component: %s, gen %u, cur %u",
+ msg->debugString().c_str(), generation, mCodec->mNodeGeneration);
return false;
}
return true;
@@ -5526,11 +5579,7 @@
bool ACodec::BaseState::onOMXEvent(
OMX_EVENTTYPE event, OMX_U32 data1, OMX_U32 data2) {
if (event == OMX_EventDataSpaceChanged) {
- ColorAspects aspects;
- aspects.mRange = (ColorAspects::Range)((data2 >> 24) & 0xFF);
- aspects.mPrimaries = (ColorAspects::Primaries)((data2 >> 16) & 0xFF);
- aspects.mMatrixCoeffs = (ColorAspects::MatrixCoeffs)((data2 >> 8) & 0xFF);
- aspects.mTransfer = (ColorAspects::Transfer)(data2 & 0xFF);
+ ColorAspects aspects = ColorUtils::unpackToColorAspects(data2);
mCodec->onDataSpaceChanged((android_dataspace)data1, aspects);
return true;
@@ -5612,46 +5661,28 @@
CHECK_EQ((int)info->mStatus, (int)BufferInfo::OWNED_BY_US);
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatFillThisBuffer);
- notify->setInt32("buffer-id", info->mBufferID);
-
- info->mData->meta()->clear();
- notify->setBuffer("buffer", info->mData);
-
- sp<AMessage> reply = new AMessage(kWhatInputBufferFilled, mCodec);
- reply->setInt32("buffer-id", info->mBufferID);
-
- notify->setMessage("reply", reply);
-
- notify->post();
-
+ info->mData->setFormat(mCodec->mInputFormat);
+ mCodec->mBufferChannel->fillThisBuffer(info->mBufferID);
+ info->mData.clear();
info->mStatus = BufferInfo::OWNED_BY_UPSTREAM;
}
void ACodec::BaseState::onInputBufferFilled(const sp<AMessage> &msg) {
IOMX::buffer_id bufferID;
CHECK(msg->findInt32("buffer-id", (int32_t*)&bufferID));
- sp<ABuffer> buffer;
+ sp<MediaCodecBuffer> buffer;
int32_t err = OK;
bool eos = false;
PortMode mode = getPortMode(kPortIndexInput);
-
- if (!msg->findBuffer("buffer", &buffer)) {
- /* these are unfilled buffers returned by client */
- CHECK(msg->findInt32("err", &err));
-
- if (err == OK) {
- /* buffers with no errors are returned on MediaCodec.flush */
- mode = KEEP_BUFFERS;
- } else {
- ALOGV("[%s] saw error %d instead of an input buffer",
- mCodec->mComponentName.c_str(), err);
- eos = true;
- }
-
- buffer.clear();
+ int32_t discarded = 0;
+ if (msg->findInt32("discarded", &discarded) && discarded) {
+ // these are unfilled buffers returned by client
+ // buffers are returned on MediaCodec.flush
+ mode = KEEP_BUFFERS;
}
+ sp<RefBase> obj;
+ CHECK(msg->findObject("buffer", &obj));
+ buffer = static_cast<MediaCodecBuffer *>(obj.get());
int32_t tmp;
if (buffer != NULL && buffer->meta()->findInt32("eos", &tmp) && tmp) {
@@ -5669,6 +5700,7 @@
}
info->mStatus = BufferInfo::OWNED_BY_US;
+ info->mData = buffer;
switch (mode) {
case KEEP_BUFFERS:
@@ -5696,7 +5728,6 @@
OMX_U32 flags = OMX_BUFFERFLAG_ENDOFFRAME;
- MetadataBufferType metaType = mCodec->mInputMetadataType;
int32_t isCSD = 0;
if (buffer->meta()->findInt32("csd", &isCSD) && isCSD != 0) {
if (mCodec->mIsLegacyVP9Decoder) {
@@ -5706,18 +5737,19 @@
break;
}
flags |= OMX_BUFFERFLAG_CODECCONFIG;
- metaType = kMetadataBufferTypeInvalid;
}
if (eos) {
flags |= OMX_BUFFERFLAG_EOS;
}
- if (buffer != info->mCodecData) {
+ size_t size = buffer->size();
+ size_t offset = buffer->offset();
+ if (buffer->base() != info->mCodecData->base()) {
ALOGV("[%s] Needs to copy input data for buffer %u. (%p != %p)",
mCodec->mComponentName.c_str(),
bufferID,
- buffer.get(), info->mCodecData.get());
+ buffer->base(), info->mCodecData->base());
sp<DataConverter> converter = mCodec->mConverter[kPortIndexInput];
if (converter == NULL || isCSD) {
@@ -5728,6 +5760,9 @@
mCodec->signalError(OMX_ErrorUndefined, err);
return;
}
+ size = info->mCodecData->size();
+ } else {
+ info->mCodecData->setRange(offset, size);
}
if (flags & OMX_BUFFERFLAG_CODECCONFIG) {
@@ -5768,54 +5803,52 @@
info->checkReadFence("onInputBufferFilled");
status_t err2 = OK;
- switch (metaType) {
- case kMetadataBufferTypeInvalid:
+ switch (mCodec->mPortMode[kPortIndexInput]) {
+ case IOMX::kPortModePresetByteBuffer:
+ case IOMX::kPortModePresetANWBuffer:
+ case IOMX::kPortModePresetSecureBuffer:
+ {
+ err2 = mCodec->mOMXNode->emptyBuffer(
+ bufferID, info->mCodecData, flags, timeUs, info->mFenceFd);
+ }
break;
#ifndef OMX_ANDROID_COMPILE_AS_32BIT_ON_64BIT_PLATFORMS
- case kMetadataBufferTypeNativeHandleSource:
+ case IOMX::kPortModeDynamicNativeHandle:
if (info->mCodecData->size() >= sizeof(VideoNativeHandleMetadata)) {
VideoNativeHandleMetadata *vnhmd =
(VideoNativeHandleMetadata*)info->mCodecData->base();
- err2 = mCodec->mOMX->updateNativeHandleInMeta(
- mCodec->mNode, kPortIndexInput,
- NativeHandle::create(vnhmd->pHandle, false /* ownsHandle */),
- bufferID);
+ sp<NativeHandle> handle = NativeHandle::create(
+ vnhmd->pHandle, false /* ownsHandle */);
+ err2 = mCodec->mOMXNode->emptyBuffer(
+ bufferID, handle, flags, timeUs, info->mFenceFd);
}
break;
- case kMetadataBufferTypeANWBuffer:
+ case IOMX::kPortModeDynamicANWBuffer:
if (info->mCodecData->size() >= sizeof(VideoNativeMetadata)) {
VideoNativeMetadata *vnmd = (VideoNativeMetadata*)info->mCodecData->base();
- err2 = mCodec->mOMX->updateGraphicBufferInMeta(
- mCodec->mNode, kPortIndexInput,
- new GraphicBuffer(vnmd->pBuffer, false /* keepOwnership */),
- bufferID);
+ sp<GraphicBuffer> graphicBuffer = GraphicBuffer::from(vnmd->pBuffer);
+ err2 = mCodec->mOMXNode->emptyBuffer(
+ bufferID, graphicBuffer, flags, timeUs, info->mFenceFd);
}
break;
#endif
default:
ALOGW("Can't marshall %s data in %zu sized buffers in %zu-bit mode",
- asString(metaType), info->mCodecData->size(),
+ asString(mCodec->mPortMode[kPortIndexInput]),
+ info->mCodecData->size(),
sizeof(buffer_handle_t) * 8);
err2 = ERROR_UNSUPPORTED;
break;
}
- if (err2 == OK) {
- err2 = mCodec->mOMX->emptyBuffer(
- mCodec->mNode,
- bufferID,
- 0,
- info->mCodecData->size(),
- flags,
- timeUs,
- info->mFenceFd);
- }
info->mFenceFd = -1;
if (err2 != OK) {
mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err2));
return;
}
info->mStatus = BufferInfo::OWNED_BY_COMPONENT;
+ // Hold the reference while component is using the buffer.
+ info->mData = buffer;
if (!eos && err == OK) {
getMoreInputDataIfPossible();
@@ -5839,14 +5872,8 @@
mCodec->mComponentName.c_str(), bufferID);
info->checkReadFence("onInputBufferFilled");
- status_t err2 = mCodec->mOMX->emptyBuffer(
- mCodec->mNode,
- bufferID,
- 0,
- 0,
- OMX_BUFFERFLAG_EOS,
- 0,
- info->mFenceFd);
+ status_t err2 = mCodec->mOMXNode->emptyBuffer(
+ bufferID, OMXBuffer::sPreset, OMX_BUFFERFLAG_EOS, 0, info->mFenceFd);
info->mFenceFd = -1;
if (err2 != OK) {
mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err2));
@@ -5970,56 +5997,46 @@
ALOGV("[%s] calling fillBuffer %u",
mCodec->mComponentName.c_str(), info->mBufferID);
- err = mCodec->mOMX->fillBuffer(mCodec->mNode, info->mBufferID, info->mFenceFd);
- info->mFenceFd = -1;
+ err = mCodec->fillBuffer(info);
if (err != OK) {
mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
return true;
}
-
- info->mStatus = BufferInfo::OWNED_BY_COMPONENT;
break;
}
- sp<AMessage> reply =
- new AMessage(kWhatOutputBufferDrained, mCodec);
+ sp<MediaCodecBuffer> buffer = info->mData;
if (mCodec->mOutputFormat != mCodec->mLastOutputFormat && rangeLength > 0) {
// pretend that output format has changed on the first frame (we used to do this)
if (mCodec->mBaseOutputFormat == mCodec->mOutputFormat) {
mCodec->onOutputFormatChanged(mCodec->mOutputFormat);
}
- mCodec->addKeyFormatChangesToRenderBufferNotification(reply);
mCodec->sendFormatChange();
- } else if (rangeLength > 0 && mCodec->mNativeWindow != NULL) {
- // If potentially rendering onto a surface, always save key format data (crop &
- // data space) so that we can set it if and once the buffer is rendered.
- mCodec->addKeyFormatChangesToRenderBufferNotification(reply);
}
+ buffer->setFormat(mCodec->mOutputFormat);
- if (mCodec->usingMetadataOnEncoderOutput()) {
+ if (mCodec->usingSecureBufferOnEncoderOutput()) {
native_handle_t *handle = NULL;
- VideoNativeHandleMetadata &nativeMeta =
- *(VideoNativeHandleMetadata *)info->mData->data();
- if (info->mData->size() >= sizeof(nativeMeta)
- && nativeMeta.eType == kMetadataBufferTypeNativeHandleSource) {
+ sp<SecureBuffer> secureBuffer = static_cast<SecureBuffer *>(buffer.get());
+ if (secureBuffer != NULL) {
#ifdef OMX_ANDROID_COMPILE_AS_32BIT_ON_64BIT_PLATFORMS
// handle is only valid on 32-bit/mediaserver process
handle = NULL;
#else
- handle = (native_handle_t *)nativeMeta.pHandle;
+ handle = (native_handle_t *)secureBuffer->getDestinationPointer();
#endif
}
- info->mData->meta()->setPointer("handle", handle);
- info->mData->meta()->setInt32("rangeOffset", rangeOffset);
- info->mData->meta()->setInt32("rangeLength", rangeLength);
- } else if (info->mData == info->mCodecData) {
- info->mData->setRange(rangeOffset, rangeLength);
+ buffer->meta()->setPointer("handle", handle);
+ buffer->meta()->setInt32("rangeOffset", rangeOffset);
+ buffer->meta()->setInt32("rangeLength", rangeLength);
+ } else if (buffer->base() == info->mCodecData->base()) {
+ buffer->setRange(rangeOffset, rangeLength);
} else {
info->mCodecData->setRange(rangeOffset, rangeLength);
// in this case we know that mConverter is not null
status_t err = mCodec->mConverter[kPortIndexOutput]->convert(
- info->mCodecData, info->mData);
+ info->mCodecData, buffer);
if (err != OK) {
mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
return true;
@@ -6034,32 +6051,20 @@
#endif
if (mCodec->mSkipCutBuffer != NULL) {
- mCodec->mSkipCutBuffer->submit(info->mData);
+ mCodec->mSkipCutBuffer->submit(buffer);
}
- info->mData->meta()->setInt64("timeUs", timeUs);
+ buffer->meta()->setInt64("timeUs", timeUs);
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatDrainThisBuffer);
- notify->setInt32("buffer-id", info->mBufferID);
- notify->setBuffer("buffer", info->mData);
- notify->setInt32("flags", flags);
+ info->mData.clear();
- reply->setInt32("buffer-id", info->mBufferID);
-
- notify->setMessage("reply", reply);
-
- notify->post();
+ mCodec->mBufferChannel->drainThisBuffer(info->mBufferID, flags);
info->mStatus = BufferInfo::OWNED_BY_DOWNSTREAM;
if (flags & OMX_BUFFERFLAG_EOS) {
ALOGV("[%s] saw output EOS", mCodec->mComponentName.c_str());
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatEOS);
- notify->setInt32("err", mCodec->mInputEOSResult);
- notify->post();
-
+ mCodec->mCallback->onEos(mCodec->mInputEOSResult);
mCodec->mPortEOS[kPortIndexOutput] = true;
}
break;
@@ -6084,6 +6089,12 @@
void ACodec::BaseState::onOutputBufferDrained(const sp<AMessage> &msg) {
IOMX::buffer_id bufferID;
CHECK(msg->findInt32("buffer-id", (int32_t*)&bufferID));
+ sp<RefBase> obj;
+ CHECK(msg->findObject("buffer", &obj));
+ sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());
+ int32_t discarded = 0;
+ msg->findInt32("discarded", &discarded);
+
ssize_t index;
BufferInfo *info = mCodec->findBufferByID(kPortIndexOutput, bufferID, &index);
BufferInfo::Status status = BufferInfo::getSafeStatus(info);
@@ -6093,34 +6104,38 @@
mCodec->signalError(OMX_ErrorUndefined, FAILED_TRANSACTION);
return;
}
-
- android_native_rect_t crop;
- if (msg->findRect("crop", &crop.left, &crop.top, &crop.right, &crop.bottom)
- && memcmp(&crop, &mCodec->mLastNativeWindowCrop, sizeof(crop)) != 0) {
- mCodec->mLastNativeWindowCrop = crop;
- status_t err = native_window_set_crop(mCodec->mNativeWindow.get(), &crop);
- ALOGW_IF(err != NO_ERROR, "failed to set crop: %d", err);
- }
-
- int32_t dataSpace;
- if (msg->findInt32("dataspace", &dataSpace)
- && dataSpace != mCodec->mLastNativeWindowDataSpace) {
- status_t err = native_window_set_buffers_data_space(
- mCodec->mNativeWindow.get(), (android_dataspace)dataSpace);
- mCodec->mLastNativeWindowDataSpace = dataSpace;
- ALOGW_IF(err != NO_ERROR, "failed to set dataspace: %d", err);
- }
-
+ info->mData = buffer;
int32_t render;
if (mCodec->mNativeWindow != NULL
&& msg->findInt32("render", &render) && render != 0
- && info->mData != NULL && info->mData->size() != 0) {
+ && !discarded && buffer->size() != 0) {
ATRACE_NAME("render");
// The client wants this buffer to be rendered.
+ android_native_rect_t crop;
+ if (buffer->format()->findRect("crop", &crop.left, &crop.top, &crop.right, &crop.bottom)) {
+ // NOTE: native window uses extended right-bottom coordinate
+ ++crop.right;
+ ++crop.bottom;
+ if (memcmp(&crop, &mCodec->mLastNativeWindowCrop, sizeof(crop)) != 0) {
+ mCodec->mLastNativeWindowCrop = crop;
+ status_t err = native_window_set_crop(mCodec->mNativeWindow.get(), &crop);
+ ALOGW_IF(err != NO_ERROR, "failed to set crop: %d", err);
+ }
+ }
+
+ int32_t dataSpace;
+ if (buffer->format()->findInt32("android._dataspace", &dataSpace)
+ && dataSpace != mCodec->mLastNativeWindowDataSpace) {
+ status_t err = native_window_set_buffers_data_space(
+ mCodec->mNativeWindow.get(), (android_dataspace)dataSpace);
+ mCodec->mLastNativeWindowDataSpace = dataSpace;
+ ALOGW_IF(err != NO_ERROR, "failed to set dataspace: %d", err);
+ }
+
// save buffers sent to the surface so we can get render time when they return
int64_t mediaTimeUs = -1;
- info->mData->meta()->findInt64("timeUs", &mediaTimeUs);
+ buffer->meta()->findInt64("timeUs", &mediaTimeUs);
if (mediaTimeUs >= 0) {
mCodec->mRenderTracker.onFrameQueued(
mediaTimeUs, info->mGraphicBuffer, new Fence(::dup(info->mFenceFd)));
@@ -6129,7 +6144,7 @@
int64_t timestampNs = 0;
if (!msg->findInt64("timestampNs", ×tampNs)) {
// use media timestamp if client did not request a specific render timestamp
- if (info->mData->meta()->findInt64("timeUs", ×tampNs)) {
+ if (buffer->meta()->findInt64("timeUs", ×tampNs)) {
ALOGV("using buffer PTS of %lld", (long long)timestampNs);
timestampNs *= 1000;
}
@@ -6153,8 +6168,7 @@
info->mIsReadFence = false;
}
} else {
- if (mCodec->mNativeWindow != NULL &&
- (info->mData == NULL || info->mData->size() != 0)) {
+ if (mCodec->mNativeWindow != NULL && (discarded || buffer->size() != 0)) {
// move read fence into write fence to avoid clobbering
info->mIsReadFence = false;
ATRACE_NAME("frame-drop");
@@ -6192,12 +6206,8 @@
ALOGV("[%s] calling fillBuffer %u",
mCodec->mComponentName.c_str(), info->mBufferID);
info->checkWriteFence("onOutputBufferDrained::RESUBMIT_BUFFERS");
- status_t err = mCodec->mOMX->fillBuffer(
- mCodec->mNode, info->mBufferID, info->mFenceFd);
- info->mFenceFd = -1;
- if (err == OK) {
- info->mStatus = BufferInfo::OWNED_BY_COMPONENT;
- } else {
+ status_t err = mCodec->fillBuffer(info);
+ if (err != OK) {
mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
}
}
@@ -6230,19 +6240,26 @@
ALOGV("Now uninitialized");
if (mDeathNotifier != NULL) {
- mCodec->mNodeBinder->unlinkToDeath(mDeathNotifier);
+ if (mCodec->mOMXNode != NULL) {
+ if (mCodec->getTrebleFlag()) {
+ auto tOmxNode = mCodec->mOMXNode->getHalInterface();
+ tOmxNode->unlinkToDeath(mDeathNotifier);
+ } else {
+ sp<IBinder> binder = IInterface::asBinder(mCodec->mOMXNode);
+ binder->unlinkToDeath(mDeathNotifier);
+ }
+ }
mDeathNotifier.clear();
}
mCodec->mUsingNativeWindow = false;
mCodec->mNativeWindow.clear();
mCodec->mNativeWindowUsageBits = 0;
- mCodec->mNode = 0;
mCodec->mOMX.clear();
- mCodec->mQuirks = 0;
+ mCodec->mOMXNode.clear();
mCodec->mFlags = 0;
- mCodec->mInputMetadataType = kMetadataBufferTypeInvalid;
- mCodec->mOutputMetadataType = kMetadataBufferTypeInvalid;
+ mCodec->mPortMode[kPortIndexInput] = IOMX::kPortModePresetByteBuffer;
+ mCodec->mPortMode[kPortIndexOutput] = IOMX::kPortModePresetByteBuffer;
mCodec->mConverter[0].clear();
mCodec->mConverter[1].clear();
mCodec->mComponentName.clear();
@@ -6274,21 +6291,18 @@
"keepComponentAllocated", &keepComponentAllocated));
ALOGW_IF(keepComponentAllocated,
"cannot keep component allocated on shutdown in Uninitialized state");
-
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatShutdownCompleted);
- notify->post();
-
+ if (keepComponentAllocated) {
+ mCodec->mCallback->onStopCompleted();
+ } else {
+ mCodec->mCallback->onReleaseCompleted();
+ }
handled = true;
break;
}
case ACodec::kWhatFlush:
{
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatFlushCompleted);
- notify->post();
-
+ mCodec->mCallback->onFlushCompleted();
handled = true;
break;
}
@@ -6318,13 +6332,15 @@
bool ACodec::UninitializedState::onAllocateComponent(const sp<AMessage> &msg) {
ALOGV("onAllocateComponent");
- CHECK(mCodec->mNode == 0);
+ CHECK(mCodec->mOMXNode == NULL);
OMXClient client;
- if (client.connect() != OK) {
+ bool trebleFlag;
+ if (client.connect(&trebleFlag) != OK) {
mCodec->signalError(OMX_ErrorUndefined, NO_INIT);
return false;
}
+ mCodec->setTrebleFlag(trebleFlag);
sp<IOMX> omx = client.interface();
@@ -6335,7 +6351,6 @@
AString mime;
AString componentName;
- uint32_t quirks = 0;
int32_t encoder = false;
if (msg->findString("componentName", &componentName)) {
sp<IMediaCodecList> list = MediaCodecList::getInstance();
@@ -6357,18 +6372,17 @@
}
sp<CodecObserver> observer = new CodecObserver;
- IOMX::node_id node = 0;
+ sp<IOMXNode> omxNode;
status_t err = NAME_NOT_FOUND;
for (size_t matchIndex = 0; matchIndex < matchingCodecs.size();
++matchIndex) {
componentName = matchingCodecs[matchIndex];
- quirks = MediaCodecList::getQuirksFor(componentName.c_str());
pid_t tid = gettid();
int prevPriority = androidGetThreadPriority(tid);
androidSetThreadPriority(tid, ANDROID_PRIORITY_FOREGROUND);
- err = omx->allocateNode(componentName.c_str(), observer, &mCodec->mNodeBinder, &node);
+ err = omx->allocateNode(componentName.c_str(), observer, &omxNode);
androidSetThreadPriority(tid, prevPriority);
if (err == OK) {
@@ -6377,10 +6391,10 @@
ALOGW("Allocating component '%s' failed, try next one.", componentName.c_str());
}
- node = 0;
+ omxNode = NULL;
}
- if (node == 0) {
+ if (omxNode == NULL) {
if (!mime.empty()) {
ALOGE("Unable to instantiate a %scoder for type '%s' with err %#x.",
encoder ? "en" : "de", mime.c_str(), err);
@@ -6393,14 +6407,21 @@
}
mDeathNotifier = new DeathNotifier(notify);
- if (mCodec->mNodeBinder == NULL ||
- mCodec->mNodeBinder->linkToDeath(mDeathNotifier) != OK) {
- // This was a local binder, if it dies so do we, we won't care
- // about any notifications in the afterlife.
- mDeathNotifier.clear();
+ if (mCodec->getTrebleFlag()) {
+ auto tOmxNode = omxNode->getHalInterface();
+ if (!tOmxNode->linkToDeath(mDeathNotifier, 0)) {
+ mDeathNotifier.clear();
+ }
+ } else {
+ if (IInterface::asBinder(omxNode)->linkToDeath(mDeathNotifier) != OK) {
+ // This was a local binder, if it dies so do we, we won't care
+ // about any notifications in the afterlife.
+ mDeathNotifier.clear();
+ }
}
notify = new AMessage(kWhatOMXMessageList, mCodec);
+ notify->setInt32("generation", ++mCodec->mNodeGeneration);
observer->setNotificationMessage(notify);
mCodec->mComponentName = componentName;
@@ -6413,17 +6434,9 @@
mCodec->mFlags |= kFlagPushBlankBuffersToNativeWindowOnShutdown;
}
- mCodec->mQuirks = quirks;
mCodec->mOMX = omx;
- mCodec->mNode = node;
-
- {
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatComponentAllocated);
- notify->setString("componentName", mCodec->mComponentName.c_str());
- notify->post();
- }
-
+ mCodec->mOMXNode = omxNode;
+ mCodec->mCallback->onComponentAllocated(mCodec->mComponentName.c_str());
mCodec->changeState(mCodec->mLoadedState);
return true;
@@ -6449,6 +6462,7 @@
mCodec->mInputFormat.clear();
mCodec->mOutputFormat.clear();
mCodec->mBaseOutputFormat.clear();
+ mCodec->mGraphicBufferSource.clear();
if (mCodec->mShutdownInProgress) {
bool keepComponentAllocated = mCodec->mKeepComponentAllocated;
@@ -6465,15 +6479,17 @@
void ACodec::LoadedState::onShutdown(bool keepComponentAllocated) {
if (!keepComponentAllocated) {
- (void)mCodec->mOMX->freeNode(mCodec->mNode);
+ (void)mCodec->mOMXNode->freeNode();
mCodec->changeState(mCodec->mUninitializedState);
}
if (mCodec->mExplicitShutdown) {
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatShutdownCompleted);
- notify->post();
+ if (keepComponentAllocated) {
+ mCodec->mCallback->onStopCompleted();
+ } else {
+ mCodec->mCallback->onReleaseCompleted();
+ }
mCodec->mExplicitShutdown = false;
}
}
@@ -6525,10 +6541,7 @@
case ACodec::kWhatFlush:
{
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatFlushCompleted);
- notify->post();
-
+ mCodec->mCallback->onFlushCompleted();
handled = true;
break;
}
@@ -6544,7 +6557,7 @@
const sp<AMessage> &msg) {
ALOGV("onConfigureComponent");
- CHECK(mCodec->mNode != 0);
+ CHECK(mCodec->mOMXNode != NULL);
status_t err = OK;
AString mime;
@@ -6561,109 +6574,99 @@
return false;
}
- {
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatComponentConfigured);
- notify->setMessage("input-format", mCodec->mInputFormat);
- notify->setMessage("output-format", mCodec->mOutputFormat);
- notify->post();
- }
+ mCodec->mCallback->onComponentConfigured(mCodec->mInputFormat, mCodec->mOutputFormat);
return true;
}
status_t ACodec::LoadedState::setupInputSurface() {
- status_t err = OK;
+ if (mCodec->mGraphicBufferSource == NULL) {
+ return BAD_VALUE;
+ }
+
+ android_dataspace dataSpace;
+ status_t err =
+ mCodec->setInitialColorAspectsForVideoEncoderSurfaceAndGetDataSpace(&dataSpace);
+ if (err != OK) {
+ ALOGE("Failed to get default data space");
+ return err;
+ }
+
+ err = statusFromBinderStatus(
+ mCodec->mGraphicBufferSource->configure(mCodec->mOMXNode, dataSpace));
+ if (err != OK) {
+ ALOGE("[%s] Unable to configure for node (err %d)",
+ mCodec->mComponentName.c_str(), err);
+ return err;
+ }
if (mCodec->mRepeatFrameDelayUs > 0ll) {
- err = mCodec->mOMX->setInternalOption(
- mCodec->mNode,
- kPortIndexInput,
- IOMX::INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY,
- &mCodec->mRepeatFrameDelayUs,
- sizeof(mCodec->mRepeatFrameDelayUs));
+ err = statusFromBinderStatus(
+ mCodec->mGraphicBufferSource->setRepeatPreviousFrameDelayUs(
+ mCodec->mRepeatFrameDelayUs));
if (err != OK) {
ALOGE("[%s] Unable to configure option to repeat previous "
"frames (err %d)",
- mCodec->mComponentName.c_str(),
- err);
+ mCodec->mComponentName.c_str(), err);
return err;
}
}
if (mCodec->mMaxPtsGapUs > 0ll) {
- err = mCodec->mOMX->setInternalOption(
- mCodec->mNode,
- kPortIndexInput,
- IOMX::INTERNAL_OPTION_MAX_TIMESTAMP_GAP,
- &mCodec->mMaxPtsGapUs,
- sizeof(mCodec->mMaxPtsGapUs));
+ OMX_PARAM_U32TYPE maxPtsGapParams;
+ InitOMXParams(&maxPtsGapParams);
+ maxPtsGapParams.nPortIndex = kPortIndexInput;
+ maxPtsGapParams.nU32 = (uint32_t) mCodec->mMaxPtsGapUs;
+
+ err = mCodec->mOMXNode->setParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamMaxFrameDurationForBitrateControl,
+ &maxPtsGapParams, sizeof(maxPtsGapParams));
if (err != OK) {
ALOGE("[%s] Unable to configure max timestamp gap (err %d)",
- mCodec->mComponentName.c_str(),
- err);
+ mCodec->mComponentName.c_str(), err);
return err;
}
}
if (mCodec->mMaxFps > 0) {
- err = mCodec->mOMX->setInternalOption(
- mCodec->mNode,
- kPortIndexInput,
- IOMX::INTERNAL_OPTION_MAX_FPS,
- &mCodec->mMaxFps,
- sizeof(mCodec->mMaxFps));
+ err = statusFromBinderStatus(
+ mCodec->mGraphicBufferSource->setMaxFps(mCodec->mMaxFps));
if (err != OK) {
ALOGE("[%s] Unable to configure max fps (err %d)",
- mCodec->mComponentName.c_str(),
- err);
+ mCodec->mComponentName.c_str(), err);
return err;
}
}
- if (mCodec->mTimePerCaptureUs > 0ll
- && mCodec->mTimePerFrameUs > 0ll) {
- int64_t timeLapse[2];
- timeLapse[0] = mCodec->mTimePerFrameUs;
- timeLapse[1] = mCodec->mTimePerCaptureUs;
- err = mCodec->mOMX->setInternalOption(
- mCodec->mNode,
- kPortIndexInput,
- IOMX::INTERNAL_OPTION_TIME_LAPSE,
- &timeLapse[0],
- sizeof(timeLapse));
+ if (mCodec->mCaptureFps > 0. && mCodec->mFps > 0.) {
+ err = statusFromBinderStatus(
+ mCodec->mGraphicBufferSource->setTimeLapseConfig(
+ mCodec->mFps, mCodec->mCaptureFps));
if (err != OK) {
ALOGE("[%s] Unable to configure time lapse (err %d)",
- mCodec->mComponentName.c_str(),
- err);
+ mCodec->mComponentName.c_str(), err);
return err;
}
}
if (mCodec->mCreateInputBuffersSuspended) {
- bool suspend = true;
- err = mCodec->mOMX->setInternalOption(
- mCodec->mNode,
- kPortIndexInput,
- IOMX::INTERNAL_OPTION_SUSPEND,
- &suspend,
- sizeof(suspend));
+ err = statusFromBinderStatus(
+ mCodec->mGraphicBufferSource->setSuspend(true, -1));
if (err != OK) {
ALOGE("[%s] Unable to configure option to suspend (err %d)",
- mCodec->mComponentName.c_str(),
- err);
+ mCodec->mComponentName.c_str(), err);
return err;
}
}
uint32_t usageBits;
- if (mCodec->mOMX->getParameter(
- mCodec->mNode, (OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits,
+ if (mCodec->mOMXNode->getParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits,
&usageBits, sizeof(usageBits)) == OK) {
mCodec->mInputFormat->setInt32(
"using-sw-read-often", !!(usageBits & GRALLOC_USAGE_SW_READ_OFTEN));
@@ -6671,9 +6674,14 @@
sp<ABuffer> colorAspectsBuffer;
if (mCodec->mInputFormat->findBuffer("android._color-aspects", &colorAspectsBuffer)) {
- err = mCodec->mOMX->setInternalOption(
- mCodec->mNode, kPortIndexInput, IOMX::INTERNAL_OPTION_COLOR_ASPECTS,
- colorAspectsBuffer->base(), colorAspectsBuffer->capacity());
+ if (colorAspectsBuffer->size() != sizeof(ColorAspects)) {
+ return INVALID_OPERATION;
+ }
+
+ err = statusFromBinderStatus(
+ mCodec->mGraphicBufferSource->setColorAspects(ColorUtils::packToU32(
+ *(ColorAspects *)colorAspectsBuffer->base())));
+
if (err != OK) {
ALOGE("[%s] Unable to configure color aspects (err %d)",
mCodec->mComponentName.c_str(), err);
@@ -6687,33 +6695,18 @@
const sp<AMessage> & /* msg */) {
ALOGV("onCreateInputSurface");
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatInputSurfaceCreated);
-
- android_dataspace dataSpace;
- status_t err =
- mCodec->setInitialColorAspectsForVideoEncoderSurfaceAndGetDataSpace(&dataSpace);
- notify->setMessage("input-format", mCodec->mInputFormat);
- notify->setMessage("output-format", mCodec->mOutputFormat);
-
sp<IGraphicBufferProducer> bufferProducer;
- if (err == OK) {
- mCodec->mInputMetadataType = kMetadataBufferTypeANWBuffer;
- err = mCodec->mOMX->createInputSurface(
- mCodec->mNode, kPortIndexInput, dataSpace, &bufferProducer,
- &mCodec->mInputMetadataType);
- // framework uses ANW buffers internally instead of gralloc handles
- if (mCodec->mInputMetadataType == kMetadataBufferTypeGrallocSource) {
- mCodec->mInputMetadataType = kMetadataBufferTypeANWBuffer;
- }
- }
+ status_t err = mCodec->mOMX->createInputSurface(
+ &bufferProducer, &mCodec->mGraphicBufferSource);
if (err == OK) {
err = setupInputSurface();
}
if (err == OK) {
- notify->setObject("input-surface",
+ mCodec->mCallback->onInputSurfaceCreated(
+ mCodec->mInputFormat,
+ mCodec->mOutputFormat,
new BufferProducerWrapper(bufferProducer));
} else {
// Can't use mCodec->signalError() here -- MediaCodec won't forward
@@ -6721,59 +6714,37 @@
// send a kWhatInputSurfaceCreated with an error value instead.
ALOGE("[%s] onCreateInputSurface returning error %d",
mCodec->mComponentName.c_str(), err);
- notify->setInt32("err", err);
+ mCodec->mCallback->onInputSurfaceCreationFailed(err);
}
- notify->post();
}
-void ACodec::LoadedState::onSetInputSurface(
- const sp<AMessage> &msg) {
+void ACodec::LoadedState::onSetInputSurface(const sp<AMessage> &msg) {
ALOGV("onSetInputSurface");
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatInputSurfaceAccepted);
-
sp<RefBase> obj;
CHECK(msg->findObject("input-surface", &obj));
sp<PersistentSurface> surface = static_cast<PersistentSurface *>(obj.get());
+ mCodec->mGraphicBufferSource = surface->getBufferSource();
- android_dataspace dataSpace;
- status_t err =
- mCodec->setInitialColorAspectsForVideoEncoderSurfaceAndGetDataSpace(&dataSpace);
- notify->setMessage("input-format", mCodec->mInputFormat);
- notify->setMessage("output-format", mCodec->mOutputFormat);
+ status_t err = setupInputSurface();
if (err == OK) {
- mCodec->mInputMetadataType = kMetadataBufferTypeANWBuffer;
- err = mCodec->mOMX->setInputSurface(
- mCodec->mNode, kPortIndexInput, surface->getBufferConsumer(),
- &mCodec->mInputMetadataType);
- // framework uses ANW buffers internally instead of gralloc handles
- if (mCodec->mInputMetadataType == kMetadataBufferTypeGrallocSource) {
- mCodec->mInputMetadataType = kMetadataBufferTypeANWBuffer;
- }
- }
-
- if (err == OK) {
- surface->getBufferConsumer()->setDefaultBufferDataSpace(dataSpace);
- err = setupInputSurface();
- }
-
- if (err != OK) {
+ mCodec->mCallback->onInputSurfaceAccepted(
+ mCodec->mInputFormat, mCodec->mOutputFormat);
+ } else {
// Can't use mCodec->signalError() here -- MediaCodec won't forward
// the error through because it's in the "configured" state. We
// send a kWhatInputSurfaceAccepted with an error value instead.
ALOGE("[%s] onSetInputSurface returning error %d",
mCodec->mComponentName.c_str(), err);
- notify->setInt32("err", err);
+ mCodec->mCallback->onInputSurfaceDeclined(err);
}
- notify->post();
}
void ACodec::LoadedState::onStart() {
ALOGV("onStart");
- status_t err = mCodec->mOMX->sendCommand(mCodec->mNode, OMX_CommandStateSet, OMX_StateIdle);
+ status_t err = mCodec->mOMXNode->sendCommand(OMX_CommandStateSet, OMX_StateIdle);
if (err != OK) {
mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
} else {
@@ -6798,8 +6769,8 @@
mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
- mCodec->mOMX->sendCommand(
- mCodec->mNode, OMX_CommandStateSet, OMX_StateLoaded);
+ mCodec->mOMXNode->sendCommand(
+ OMX_CommandStateSet, OMX_StateLoaded);
if (mCodec->allYourBuffersAreBelongToUs(kPortIndexInput)) {
mCodec->freeBuffersOnPort(kPortIndexInput);
}
@@ -6813,12 +6784,18 @@
status_t ACodec::LoadedToIdleState::allocateBuffers() {
status_t err = mCodec->allocateBuffersOnPort(kPortIndexInput);
-
if (err != OK) {
return err;
}
- return mCodec->allocateBuffersOnPort(kPortIndexOutput);
+ err = mCodec->allocateBuffersOnPort(kPortIndexOutput);
+ if (err != OK) {
+ return err;
+ }
+
+ mCodec->mCallback->onStartCompleted();
+
+ return OK;
}
bool ACodec::LoadedToIdleState::onMessageReceived(const sp<AMessage> &msg) {
@@ -6845,9 +6822,7 @@
case kWhatFlush:
{
// We haven't even started yet, so we're flushed alright...
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatFlushCompleted);
- notify->post();
+ mCodec->mCallback->onFlushCompleted();
return true;
}
@@ -6871,8 +6846,8 @@
}
if (err == OK) {
- err = mCodec->mOMX->sendCommand(
- mCodec->mNode, OMX_CommandStateSet, OMX_StateExecuting);
+ err = mCodec->mOMXNode->sendCommand(
+ OMX_CommandStateSet, OMX_StateExecuting);
}
if (err != OK) {
@@ -6917,10 +6892,7 @@
case kWhatFlush:
{
// We haven't even started yet, so we're flushed alright...
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatFlushCompleted);
- notify->post();
-
+ mCodec->mCallback->onFlushCompleted();
return true;
}
@@ -7015,14 +6987,11 @@
ALOGV("[%s] calling fillBuffer %u", mCodec->mComponentName.c_str(), info->mBufferID);
info->checkWriteFence("submitRegularOutputBuffers");
- status_t err = mCodec->mOMX->fillBuffer(mCodec->mNode, info->mBufferID, info->mFenceFd);
- info->mFenceFd = -1;
+ status_t err = mCodec->fillBuffer(info);
if (err != OK) {
failed = true;
break;
}
-
- info->mStatus = BufferInfo::OWNED_BY_COMPONENT;
}
if (failed) {
@@ -7062,7 +7031,6 @@
void ACodec::ExecutingState::stateEntered() {
ALOGV("[%s] Now Executing", mCodec->mComponentName.c_str());
-
mCodec->mRenderTracker.clear(systemTime(CLOCK_MONOTONIC));
mCodec->processDeferredMessages();
}
@@ -7083,8 +7051,8 @@
mActive = false;
- status_t err = mCodec->mOMX->sendCommand(
- mCodec->mNode, OMX_CommandStateSet, OMX_StateIdle);
+ status_t err = mCodec->mOMXNode->sendCommand(
+ OMX_CommandStateSet, OMX_StateIdle);
if (err != OK) {
if (keepComponentAllocated) {
mCodec->signalError(OMX_ErrorUndefined, FAILED_TRANSACTION);
@@ -7110,7 +7078,7 @@
mActive = false;
- status_t err = mCodec->mOMX->sendCommand(mCodec->mNode, OMX_CommandFlush, OMX_ALL);
+ status_t err = mCodec->mOMXNode->sendCommand(OMX_CommandFlush, OMX_ALL);
if (err != OK) {
mCodec->signalError(OMX_ErrorUndefined, FAILED_TRANSACTION);
} else {
@@ -7193,8 +7161,7 @@
configParams.nPortIndex = kPortIndexOutput;
configParams.nEncodeBitrate = videoBitrate;
- status_t err = mOMX->setConfig(
- mNode,
+ status_t err = mOMXNode->setConfig(
OMX_IndexConfigVideoBitrate,
&configParams,
sizeof(configParams));
@@ -7209,12 +7176,14 @@
int64_t timeOffsetUs;
if (params->findInt64("time-offset-us", &timeOffsetUs)) {
- status_t err = mOMX->setInternalOption(
- mNode,
- kPortIndexInput,
- IOMX::INTERNAL_OPTION_TIME_OFFSET,
- &timeOffsetUs,
- sizeof(timeOffsetUs));
+ if (mGraphicBufferSource == NULL) {
+ ALOGE("[%s] Invalid to set input buffer time offset without surface",
+ mComponentName.c_str());
+ return INVALID_OPERATION;
+ }
+
+ status_t err = statusFromBinderStatus(
+ mGraphicBufferSource->setTimeOffsetUs(timeOffsetUs));
if (err != OK) {
ALOGE("[%s] Unable to set input buffer time offset (err %d)",
@@ -7226,13 +7195,14 @@
int64_t skipFramesBeforeUs;
if (params->findInt64("skip-frames-before", &skipFramesBeforeUs)) {
- status_t err =
- mOMX->setInternalOption(
- mNode,
- kPortIndexInput,
- IOMX::INTERNAL_OPTION_START_TIME,
- &skipFramesBeforeUs,
- sizeof(skipFramesBeforeUs));
+ if (mGraphicBufferSource == NULL) {
+ ALOGE("[%s] Invalid to set start time without surface",
+ mComponentName.c_str());
+ return INVALID_OPERATION;
+ }
+
+ status_t err = statusFromBinderStatus(
+ mGraphicBufferSource->setStartTimeUs(skipFramesBeforeUs));
if (err != OK) {
ALOGE("Failed to set parameter 'skip-frames-before' (err %d)", err);
@@ -7242,15 +7212,16 @@
int32_t dropInputFrames;
if (params->findInt32("drop-input-frames", &dropInputFrames)) {
- bool suspend = dropInputFrames != 0;
+ if (mGraphicBufferSource == NULL) {
+ ALOGE("[%s] Invalid to set suspend without surface",
+ mComponentName.c_str());
+ return INVALID_OPERATION;
+ }
- status_t err =
- mOMX->setInternalOption(
- mNode,
- kPortIndexInput,
- IOMX::INTERNAL_OPTION_SUSPEND,
- &suspend,
- sizeof(suspend));
+ int64_t suspendStartTimeUs = -1;
+ (void) params->findInt64("drop-start-time-us", &suspendStartTimeUs);
+ status_t err = statusFromBinderStatus(
+ mGraphicBufferSource->setSuspend(dropInputFrames != 0, suspendStartTimeUs));
if (err != OK) {
ALOGE("Failed to set parameter 'drop-input-frames' (err %d)", err);
@@ -7258,6 +7229,22 @@
}
}
+ int64_t stopTimeUs;
+ if (params->findInt64("stop-time-us", &stopTimeUs)) {
+ if (mGraphicBufferSource == NULL) {
+ ALOGE("[%s] Invalid to set stop time without surface",
+ mComponentName.c_str());
+ return INVALID_OPERATION;
+ }
+ status_t err = statusFromBinderStatus(
+ mGraphicBufferSource->setStopTimeUs(stopTimeUs));
+
+ if (err != OK) {
+ ALOGE("Failed to set parameter 'stop-time-us' (err %d)", err);
+ return err;
+ }
+ }
+
int32_t dummy;
if (params->findInt32("request-sync", &dummy)) {
status_t err = requestIDRFrame();
@@ -7288,23 +7275,409 @@
}
}
+ int32_t latency = 0;
+ if (params->findInt32("latency", &latency) && latency > 0) {
+ status_t err = setLatency(latency);
+ if (err != OK) {
+ ALOGI("[%s] failed setLatency. Failure is fine since this key is optional",
+ mComponentName.c_str());
+ err = OK;
+ }
+ }
+
status_t err = configureTemporalLayers(params, false /* inConfigure */, mOutputFormat);
if (err != OK) {
err = OK; // ignore failure
}
- return err;
+ return setVendorParameters(params);
+}
+
+// Removes trailing tags matching |tag| from |key| (e.g. a settings name). |minLength| specifies
+// the minimum number of characters to keep in |key| (even if it has trailing tags).
+// (Used to remove trailing 'value' tags in settings names, e.g. to normalize
+// 'vendor.settingsX.value' to 'vendor.settingsX')
+static void removeTrailingTags(char *key, size_t minLength, const char *tag) {
+ size_t length = strlen(key);
+ size_t tagLength = strlen(tag);
+ while (length > minLength + tagLength
+ && !strcmp(key + length - tagLength, tag)
+ && key[length - tagLength - 1] == '.') {
+ length -= tagLength + 1;
+ key[length] = '\0';
+ }
+}
+
+/**
+ * Struct encompassing a vendor extension config structure and a potential error status (in case
+ * the structure is null). Used to iterate through vendor extensions.
+ */
+struct VendorExtension {
+ OMX_CONFIG_ANDROID_VENDOR_EXTENSIONTYPE *config; // structure does not own config
+ status_t status;
+
+ // create based on an error status
+ VendorExtension(status_t s_ = NO_INIT) : config(nullptr), status(s_) { }
+
+ // create based on a successfully retrieved config structure
+ VendorExtension(OMX_CONFIG_ANDROID_VENDOR_EXTENSIONTYPE *c_) : config(c_), status(OK) { }
+};
+
+// class VendorExtensions;
+/**
+ * Forward iterator to enumerate vendor extensions supported by an OMX component.
+ */
+class VendorExtensionIterator {
+//private:
+ static constexpr size_t kLastIndex = ~(size_t)0; // last index marker
+
+ sp<IOMXNode> mNode; // component
+ size_t mIndex; // current android extension index
+ std::unique_ptr<uint8_t[]> mBacking; // current extension's backing
+ VendorExtension mCurrent; // current extension
+
+ VendorExtensionIterator(const sp<IOMXNode> &node, size_t index)
+ : mNode(node),
+ mIndex(index) {
+ mCurrent = retrieve();
+ }
+
+ friend class VendorExtensions;
+
+public:
+ // copy constructor
+ VendorExtensionIterator(const VendorExtensionIterator &it)
+ : VendorExtensionIterator(it.mNode, it.mIndex) { }
+
+ // retrieves the current extension pointed to by this iterator
+ VendorExtension retrieve() {
+ if (mIndex == kLastIndex) {
+ return NO_INIT;
+ }
+
+ // try with one param first, then retry if extension needs more than 1 param
+ for (size_t paramSizeUsed = 1;; ) {
+ if (paramSizeUsed > OMX_MAX_ANDROID_VENDOR_PARAMCOUNT) {
+ return BAD_VALUE; // this prevents overflow in the following formula
+ }
+
+ size_t size = sizeof(OMX_CONFIG_ANDROID_VENDOR_EXTENSIONTYPE) +
+ (paramSizeUsed - 1) * sizeof(OMX_CONFIG_ANDROID_VENDOR_EXTENSIONTYPE::param);
+ mBacking.reset(new uint8_t[size]);
+ if (!mBacking) {
+ return NO_MEMORY;
+ }
+
+ OMX_CONFIG_ANDROID_VENDOR_EXTENSIONTYPE *config =
+ reinterpret_cast<OMX_CONFIG_ANDROID_VENDOR_EXTENSIONTYPE *>(mBacking.get());
+
+ InitOMXParams(config);
+ config->nSize = size;
+ config->nIndex = mIndex;
+ config->nParamSizeUsed = paramSizeUsed;
+ status_t err = mNode->getConfig(
+ (OMX_INDEXTYPE)OMX_IndexConfigAndroidVendorExtension, config, size);
+ if (err == OK && config->nParamCount > paramSizeUsed && paramSizeUsed == 1) {
+ // reallocate if we need a bigger config
+ paramSizeUsed = config->nParamCount;
+ continue;
+ } else if (err == NOT_ENOUGH_DATA
+ || (err != OK && mIndex == 0)) {
+ // stop iterator on no-more signal, or if index is not at all supported
+ mIndex = kLastIndex;
+ return NO_INIT;
+ } else if (err != OK) {
+ return err;
+ } else if (paramSizeUsed != config->nParamSizeUsed) {
+ return BAD_VALUE; // component shall not modify size of nParam
+ }
+
+ return config;
+ }
+ }
+
+ // returns extension pointed to by this iterator
+ VendorExtension operator*() {
+ return mCurrent;
+ }
+
+ // prefix increment: move to next extension
+ VendorExtensionIterator &operator++() { // prefix
+ if (mIndex != kLastIndex) {
+ ++mIndex;
+ mCurrent = retrieve();
+ }
+ return *this;
+ }
+
+ // iterator equality operators
+ bool operator==(const VendorExtensionIterator &o) {
+ return mNode == o.mNode && mIndex == o.mIndex;
+ }
+
+ bool operator!=(const VendorExtensionIterator &o) {
+ return !(*this == o);
+ }
+};
+
+/**
+ * Iterable container for vendor extensions provided by a component
+ */
+class VendorExtensions {
+//private:
+ sp<IOMXNode> mNode;
+
+public:
+ VendorExtensions(const sp<IOMXNode> &node)
+ : mNode(node) {
+ }
+
+ VendorExtensionIterator begin() {
+ return VendorExtensionIterator(mNode, 0);
+ }
+
+ VendorExtensionIterator end() {
+ return VendorExtensionIterator(mNode, VendorExtensionIterator::kLastIndex);
+ }
+};
+
+status_t ACodec::setVendorParameters(const sp<AMessage> ¶ms) {
+ std::map<std::string, std::string> vendorKeys; // maps reduced name to actual name
+ constexpr char prefix[] = "vendor.";
+ constexpr size_t prefixLength = sizeof(prefix) - 1;
+ // longest possible vendor param name
+ char reducedKey[OMX_MAX_STRINGNAME_SIZE + OMX_MAX_STRINGVALUE_SIZE];
+
+ // identify all vendor keys to speed up search later and to detect vendor keys
+ for (size_t i = params->countEntries(); i; --i) {
+ AMessage::Type keyType;
+ const char* key = params->getEntryNameAt(i - 1, &keyType);
+ if (key != nullptr && !strncmp(key, prefix, prefixLength)
+ // it is safe to limit format keys to the max vendor param size as we only
+ // shorten parameter names by removing any trailing 'value' tags, and we
+ // already remove the vendor prefix.
+ && strlen(key + prefixLength) < sizeof(reducedKey)
+ && (keyType == AMessage::kTypeInt32
+ || keyType == AMessage::kTypeInt64
+ || keyType == AMessage::kTypeString)) {
+ strcpy(reducedKey, key + prefixLength);
+ removeTrailingTags(reducedKey, 0, "value");
+ auto existingKey = vendorKeys.find(reducedKey);
+ if (existingKey != vendorKeys.end()) {
+ ALOGW("[%s] vendor parameter '%s' aliases parameter '%s'",
+ mComponentName.c_str(), key, existingKey->second.c_str());
+ // ignore for now
+ }
+ vendorKeys.emplace(reducedKey, key);
+ }
+ }
+
+ // don't bother component if we don't have vendor extensions as they may not have implemented
+ // the android vendor extension support, which will lead to unnecessary OMX failure logs.
+ if (vendorKeys.empty()) {
+ return OK;
+ }
+
+ char key[sizeof(OMX_CONFIG_ANDROID_VENDOR_EXTENSIONTYPE::cName) +
+ sizeof(OMX_CONFIG_ANDROID_VENDOR_PARAMTYPE::cKey)];
+
+ status_t finalError = OK;
+
+ // don't try again if component does not have vendor extensions
+ if (mVendorExtensionsStatus == kExtensionsNone) {
+ return OK;
+ }
+
+ for (VendorExtension ext : VendorExtensions(mOMXNode)) {
+ OMX_CONFIG_ANDROID_VENDOR_EXTENSIONTYPE *config = ext.config;
+ if (config == nullptr) {
+ return ext.status;
+ }
+
+ mVendorExtensionsStatus = kExtensionsExist;
+
+ config->cName[sizeof(config->cName) - 1] = '\0'; // null-terminate name
+ strcpy(key, (const char *)config->cName);
+ size_t nameLength = strlen(key);
+ key[nameLength] = '.';
+
+ // don't set vendor extension if client has not provided any of its parameters
+ // or if client simply unsets parameters that are already unset
+ bool needToSet = false;
+ for (size_t paramIndex = 0; paramIndex < config->nParamCount; ++paramIndex) {
+ // null-terminate param key
+ config->param[paramIndex].cKey[sizeof(config->param[0].cKey) - 1] = '\0';
+ strcpy(key + nameLength + 1, (const char *)config->param[paramIndex].cKey);
+ removeTrailingTags(key, nameLength, "value");
+ auto existingKey = vendorKeys.find(key);
+
+ // don't touch (e.g. change) parameters that are not specified by client
+ if (existingKey == vendorKeys.end()) {
+ continue;
+ }
+
+ bool wasSet = config->param[paramIndex].bSet;
+ switch (config->param[paramIndex].eValueType) {
+ case OMX_AndroidVendorValueInt32:
+ {
+ int32_t value;
+ config->param[paramIndex].bSet =
+ (OMX_BOOL)params->findInt32(existingKey->second.c_str(), &value);
+ if (config->param[paramIndex].bSet) {
+ config->param[paramIndex].nInt32 = value;
+ }
+ break;
+ }
+ case OMX_AndroidVendorValueInt64:
+ {
+ int64_t value;
+ config->param[paramIndex].bSet =
+ (OMX_BOOL)params->findAsInt64(existingKey->second.c_str(), &value);
+ if (config->param[paramIndex].bSet) {
+ config->param[paramIndex].nInt64 = value;
+ }
+ break;
+ }
+ case OMX_AndroidVendorValueString:
+ {
+ AString value;
+ config->param[paramIndex].bSet =
+ (OMX_BOOL)params->findString(existingKey->second.c_str(), &value);
+ if (config->param[paramIndex].bSet) {
+ strncpy((char *)config->param[paramIndex].cString, value.c_str(),
+ sizeof(OMX_CONFIG_ANDROID_VENDOR_PARAMTYPE::cString));
+ }
+ break;
+ }
+ default:
+ ALOGW("[%s] vendor parameter '%s' is not a supported value",
+ mComponentName.c_str(), key);
+ continue;
+ }
+ if (config->param[paramIndex].bSet || wasSet) {
+ needToSet = true;
+ }
+ }
+
+ if (needToSet) {
+ status_t err = mOMXNode->setConfig(
+ (OMX_INDEXTYPE)OMX_IndexConfigAndroidVendorExtension,
+ config, config->nSize);
+ if (err != OK) {
+ key[nameLength] = '\0';
+ ALOGW("[%s] failed to set vendor extension '%s'", mComponentName.c_str(), key);
+ // try to set each extension, and return first failure
+ if (finalError == OK) {
+ finalError = err;
+ }
+ }
+ }
+ }
+
+ if (mVendorExtensionsStatus == kExtensionsUnchecked) {
+ mVendorExtensionsStatus = kExtensionsNone;
+ }
+
+ return finalError;
+}
+
+status_t ACodec::getVendorParameters(OMX_U32 portIndex, sp<AMessage> &format) {
+ constexpr char prefix[] = "vendor.";
+ constexpr size_t prefixLength = sizeof(prefix) - 1;
+ char key[sizeof(OMX_CONFIG_ANDROID_VENDOR_EXTENSIONTYPE::cName) +
+ sizeof(OMX_CONFIG_ANDROID_VENDOR_PARAMTYPE::cKey) + prefixLength];
+ strcpy(key, prefix);
+
+ // don't try again if component does not have vendor extensions
+ if (mVendorExtensionsStatus == kExtensionsNone) {
+ return OK;
+ }
+
+ for (VendorExtension ext : VendorExtensions(mOMXNode)) {
+ OMX_CONFIG_ANDROID_VENDOR_EXTENSIONTYPE *config = ext.config;
+ if (config == nullptr) {
+ return ext.status;
+ }
+
+ mVendorExtensionsStatus = kExtensionsExist;
+
+ if (config->eDir != (portIndex == kPortIndexInput ? OMX_DirInput : OMX_DirOutput)) {
+ continue;
+ }
+
+ config->cName[sizeof(config->cName) - 1] = '\0'; // null-terminate name
+ strcpy(key + prefixLength, (const char *)config->cName);
+ size_t nameLength = strlen(key);
+ key[nameLength] = '.';
+
+ for (size_t paramIndex = 0; paramIndex < config->nParamCount; ++paramIndex) {
+ // null-terminate param key
+ config->param[paramIndex].cKey[sizeof(config->param[0].cKey) - 1] = '\0';
+ strcpy(key + nameLength + 1, (const char *)config->param[paramIndex].cKey);
+ removeTrailingTags(key, nameLength, "value");
+ if (config->param[paramIndex].bSet) {
+ switch (config->param[paramIndex].eValueType) {
+ case OMX_AndroidVendorValueInt32:
+ {
+ format->setInt32(key, config->param[paramIndex].nInt32);
+ break;
+ }
+ case OMX_AndroidVendorValueInt64:
+ {
+ format->setInt64(key, config->param[paramIndex].nInt64);
+ break;
+ }
+ case OMX_AndroidVendorValueString:
+ {
+ config->param[paramIndex].cString[OMX_MAX_STRINGVALUE_SIZE - 1] = '\0';
+ format->setString(key, (const char *)config->param[paramIndex].cString);
+ break;
+ }
+ default:
+ ALOGW("vendor parameter %s is not a supported value", key);
+ continue;
+ }
+ }
+ }
+ }
+
+ if (mVendorExtensionsStatus == kExtensionsUnchecked) {
+ mVendorExtensionsStatus = kExtensionsNone;
+ }
+
+ return OK;
}
void ACodec::onSignalEndOfInputStream() {
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatSignaledInputEOS);
-
- status_t err = mOMX->signalEndOfInputStream(mNode);
- if (err != OK) {
- notify->setInt32("err", err);
+ status_t err = INVALID_OPERATION;
+ if (mGraphicBufferSource != NULL) {
+ err = statusFromBinderStatus(mGraphicBufferSource->signalEndOfInputStream());
}
- notify->post();
+ mCallback->onSignaledInputEOS(err);
+}
+
+void ACodec::forceStateTransition(int generation) {
+ if (generation != mStateGeneration) {
+ ALOGV("Ignoring stale force state transition message: #%d (now #%d)",
+ generation, mStateGeneration);
+ return;
+ }
+ ALOGE("State machine stuck");
+ // Error must have already been signalled to the client.
+
+ // Deferred messages will be handled at LoadedState at the end of the
+ // transition.
+ mShutdownInProgress = true;
+ // No shutdown complete callback at the end of the transition.
+ mExplicitShutdown = false;
+ mKeepComponentAllocated = true;
+
+ status_t err = mOMXNode->sendCommand(OMX_CommandStateSet, OMX_StateIdle);
+ if (err != OK) {
+ // TODO: do some recovery here.
+ } else {
+ changeState(mExecutingToIdleState);
+ }
}
bool ACodec::ExecutingState::onOMXFrameRendered(int64_t mediaTimeUs, nsecs_t systemNano) {
@@ -7323,8 +7696,7 @@
if (data2 == 0 || data2 == OMX_IndexParamPortDefinition) {
mCodec->mMetadataBuffersToSubmit = 0;
- CHECK_EQ(mCodec->mOMX->sendCommand(
- mCodec->mNode,
+ CHECK_EQ(mCodec->mOMXNode->sendCommand(
OMX_CommandPortDisable, kPortIndexOutput),
(status_t)OK);
@@ -7374,7 +7746,14 @@
switch (msg->what()) {
case kWhatFlush:
- case kWhatShutdown:
+ case kWhatShutdown: {
+ if (mCodec->mFatalError) {
+ sp<AMessage> msg = new AMessage(ACodec::kWhatForceStateTransition, mCodec);
+ msg->setInt32("generation", mCodec->mStateGeneration);
+ msg->post(3000000);
+ }
+ // fall-through
+ }
case kWhatResume:
case kWhatSetParameters:
{
@@ -7387,6 +7766,16 @@
break;
}
+ case kWhatForceStateTransition:
+ {
+ int32_t generation = 0;
+ CHECK(msg->findInt32("generation", &generation));
+ mCodec->forceStateTransition(generation);
+
+ handled = true;
+ break;
+ }
+
default:
handled = BaseState::onMessageReceived(msg);
break;
@@ -7425,31 +7814,28 @@
mCodec->mBuffers[kPortIndexOutput].size());
err = FAILED_TRANSACTION;
} else {
- mCodec->mDealer[kPortIndexOutput].clear();
+ if (mCodec->getTrebleFlag()) {
+ mCodec->mAllocator[kPortIndexOutput].clear();
+ } else {
+ mCodec->mDealer[kPortIndexOutput].clear();
+ }
}
if (err == OK) {
- err = mCodec->mOMX->sendCommand(
- mCodec->mNode, OMX_CommandPortEnable, kPortIndexOutput);
+ err = mCodec->mOMXNode->sendCommand(
+ OMX_CommandPortEnable, kPortIndexOutput);
}
if (err == OK) {
err = mCodec->allocateBuffersOnPort(kPortIndexOutput);
ALOGE_IF(err != OK, "Failed to allocate output port buffers after port "
"reconfiguration: (%d)", err);
+ mCodec->mCallback->onOutputBuffersChanged();
}
if (err != OK) {
mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
-
- // This is technically not correct, but appears to be
- // the only way to free the component instance.
- // Controlled transitioning from excecuting->idle
- // and idle->loaded seem impossible probably because
- // the output port never finishes re-enabling.
- mCodec->mShutdownInProgress = true;
- mCodec->mKeepComponentAllocated = false;
- mCodec->changeState(mCodec->mLoadedState);
+ ALOGE("Error occurred while disabling the output port");
}
return true;
@@ -7474,7 +7860,7 @@
}
default:
- return false;
+ return BaseState::onOMXEvent(event, data1, data2);
}
}
@@ -7499,8 +7885,7 @@
case kWhatShutdown:
{
- // We're already doing that...
-
+ mCodec->deferMessage(msg);
handled = true;
break;
}
@@ -7555,8 +7940,8 @@
void ACodec::ExecutingToIdleState::changeStateIfWeOwnAllBuffers() {
if (mComponentNowIdle && mCodec->allYourBuffersAreBelongToUs()) {
- status_t err = mCodec->mOMX->sendCommand(
- mCodec->mNode, OMX_CommandStateSet, OMX_StateLoaded);
+ status_t err = mCodec->mOMXNode->sendCommand(
+ OMX_CommandStateSet, OMX_StateLoaded);
if (err == OK) {
err = mCodec->freeBuffersOnPort(kPortIndexInput);
status_t err2 = mCodec->freeBuffersOnPort(kPortIndexOutput);
@@ -7609,8 +7994,7 @@
switch (msg->what()) {
case kWhatShutdown:
{
- // We're already doing that...
-
+ mCodec->deferMessage(msg);
handled = true;
break;
}
@@ -7678,6 +8062,11 @@
case kWhatShutdown:
{
mCodec->deferMessage(msg);
+ if (mCodec->mFatalError) {
+ sp<AMessage> msg = new AMessage(ACodec::kWhatForceStateTransition, mCodec);
+ msg->setInt32("generation", mCodec->mStateGeneration);
+ msg->post(3000000);
+ }
break;
}
@@ -7688,6 +8077,16 @@
break;
}
+ case kWhatForceStateTransition:
+ {
+ int32_t generation = 0;
+ CHECK(msg->findInt32("generation", &generation));
+ mCodec->forceStateTransition(generation);
+
+ handled = true;
+ break;
+ }
+
default:
handled = BaseState::onMessageReceived(msg);
break;
@@ -7742,7 +8141,7 @@
{
sp<AMessage> msg = new AMessage(kWhatOMXMessage, mCodec);
msg->setInt32("type", omx_message::EVENT);
- msg->setInt32("node", mCodec->mNode);
+ msg->setInt32("generation", mCodec->mNodeGeneration);
msg->setInt32("event", event);
msg->setInt32("data1", data1);
msg->setInt32("data2", data2);
@@ -7784,9 +8183,7 @@
mCodec->mRenderTracker.clear(systemTime(CLOCK_MONOTONIC));
- sp<AMessage> notify = mCodec->mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatFlushCompleted);
- notify->post();
+ mCodec->mCallback->onFlushCompleted();
mCodec->mPortEOS[kPortIndexInput] =
mCodec->mPortEOS[kPortIndexOutput] = false;
@@ -7818,17 +8215,17 @@
sp<IOMX> omx = client.interface();
sp<CodecObserver> observer = new CodecObserver;
- IOMX::node_id node = 0;
+ sp<IOMXNode> omxNode;
- err = omx->allocateNode(name.c_str(), observer, NULL, &node);
+ err = omx->allocateNode(name.c_str(), observer, &omxNode);
if (err != OK) {
client.disconnect();
return err;
}
- err = SetComponentRole(omx, node, role);
+ err = SetComponentRole(omxNode, role);
if (err != OK) {
- omx->freeNode(node);
+ omxNode->freeNode();
client.disconnect();
return err;
}
@@ -7843,8 +8240,8 @@
for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
param.nProfileIndex = index;
- status_t err = omx->getParameter(
- node, OMX_IndexParamVideoProfileLevelQuerySupported,
+ status_t err = omxNode->getParameter(
+ OMX_IndexParamVideoProfileLevelQuerySupported,
&param, sizeof(param));
if (err != OK) {
break;
@@ -7867,8 +8264,8 @@
Vector<uint32_t> supportedColors; // shadow copy to check for duplicates
for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
portFormat.nIndex = index;
- status_t err = omx->getParameter(
- node, OMX_IndexParamVideoPortFormat,
+ status_t err = omxNode->getParameter(
+ OMX_IndexParamVideoPortFormat,
&portFormat, sizeof(portFormat));
if (err != OK) {
break;
@@ -7876,7 +8273,7 @@
OMX_U32 flexibleEquivalent;
if (IsFlexibleColorFormat(
- omx, node, portFormat.eColorFormat, false /* usingNativeWindow */,
+ omxNode, portFormat.eColorFormat, false /* usingNativeWindow */,
&flexibleEquivalent)) {
bool marked = false;
for (size_t i = 0; i < supportedColors.size(); ++i) {
@@ -7906,8 +8303,8 @@
param.nPortIndex = isEncoder ? kPortIndexOutput : kPortIndexInput;
for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
param.nProfileIndex = index;
- status_t err = omx->getParameter(
- node, (OMX_INDEXTYPE)OMX_IndexParamAudioProfileQuerySupported,
+ status_t err = omxNode->getParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamAudioProfileQuerySupported,
&param, sizeof(param));
if (err != OK) {
break;
@@ -7931,15 +8328,15 @@
if (isVideo && !isEncoder) {
native_handle_t *sidebandHandle = NULL;
- if (omx->configureVideoTunnelMode(
- node, kPortIndexOutput, OMX_TRUE, 0, &sidebandHandle) == OK) {
+ if (omxNode->configureVideoTunnelMode(
+ kPortIndexOutput, OMX_TRUE, 0, &sidebandHandle) == OK) {
// tunneled playback includes adaptive playback
builder->addFlags(MediaCodecInfo::Capabilities::kFlagSupportsAdaptivePlayback
| MediaCodecInfo::Capabilities::kFlagSupportsTunneledPlayback);
- } else if (omx->storeMetaDataInBuffers(
- node, kPortIndexOutput, OMX_TRUE) == OK ||
- omx->prepareForAdaptivePlayback(
- node, kPortIndexOutput, OMX_TRUE,
+ } else if (omxNode->setPortMode(
+ kPortIndexOutput, IOMX::kPortModeDynamicANWBuffer) == OK ||
+ omxNode->prepareForAdaptivePlayback(
+ kPortIndexOutput, OMX_TRUE,
1280 /* width */, 720 /* height */) == OK) {
builder->addFlags(MediaCodecInfo::Capabilities::kFlagSupportsAdaptivePlayback);
}
@@ -7950,15 +8347,15 @@
InitOMXParams(&params);
params.nPortIndex = kPortIndexOutput;
// TODO: should we verify if fallback is supported?
- if (omx->getConfig(
- node, (OMX_INDEXTYPE)OMX_IndexConfigAndroidIntraRefresh,
+ if (omxNode->getConfig(
+ (OMX_INDEXTYPE)OMX_IndexConfigAndroidIntraRefresh,
&params, sizeof(params)) == OK) {
builder->addFlags(MediaCodecInfo::Capabilities::kFlagSupportsIntraRefresh);
}
}
*caps = builder;
- omx->freeNode(node);
+ omxNode->freeNode();
client.disconnect();
return OK;
}
@@ -8027,4 +8424,12 @@
return OK;
}
+void ACodec::setTrebleFlag(bool trebleFlag) {
+ mTrebleFlag = trebleFlag;
+}
+
+bool ACodec::getTrebleFlag() const {
+ return mTrebleFlag;
+}
+
} // namespace android
diff --git a/media/libstagefright/ACodecBufferChannel.cpp b/media/libstagefright/ACodecBufferChannel.cpp
new file mode 100644
index 0000000..0d9696f
--- /dev/null
+++ b/media/libstagefright/ACodecBufferChannel.cpp
@@ -0,0 +1,383 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ACodecBufferChannel"
+#include <utils/Log.h>
+
+#include <numeric>
+
+#include <android/media/IDescrambler.h>
+#include <binder/MemoryDealer.h>
+#include <media/openmax/OMX_Core.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/MediaCodecBuffer.h>
+#include <system/window.h>
+
+#include "include/ACodecBufferChannel.h"
+#include "include/SecureBuffer.h"
+#include "include/SharedMemoryBuffer.h"
+
+namespace android {
+using binder::Status;
+using MediaDescrambler::DescrambleInfo;
+using BufferInfo = ACodecBufferChannel::BufferInfo;
+using BufferInfoIterator = std::vector<const BufferInfo>::const_iterator;
+
+ACodecBufferChannel::~ACodecBufferChannel() {
+ if (mCrypto != nullptr && mDealer != nullptr && mHeapSeqNum >= 0) {
+ mCrypto->unsetHeap(mHeapSeqNum);
+ }
+}
+
+static BufferInfoIterator findClientBuffer(
+ const std::shared_ptr<const std::vector<const BufferInfo>> &array,
+ const sp<MediaCodecBuffer> &buffer) {
+ return std::find_if(
+ array->begin(), array->end(),
+ [buffer](const BufferInfo &info) { return info.mClientBuffer == buffer; });
+}
+
+static BufferInfoIterator findBufferId(
+ const std::shared_ptr<const std::vector<const BufferInfo>> &array,
+ IOMX::buffer_id bufferId) {
+ return std::find_if(
+ array->begin(), array->end(),
+ [bufferId](const BufferInfo &info) { return bufferId == info.mBufferId; });
+}
+
+ACodecBufferChannel::BufferInfo::BufferInfo(
+ const sp<MediaCodecBuffer> &buffer,
+ IOMX::buffer_id bufferId,
+ const sp<IMemory> &sharedEncryptedBuffer)
+ : mClientBuffer(
+ (sharedEncryptedBuffer == nullptr)
+ ? buffer
+ : new SharedMemoryBuffer(buffer->format(), sharedEncryptedBuffer)),
+ mCodecBuffer(buffer),
+ mBufferId(bufferId),
+ mSharedEncryptedBuffer(sharedEncryptedBuffer) {
+}
+
+ACodecBufferChannel::ACodecBufferChannel(
+ const sp<AMessage> &inputBufferFilled, const sp<AMessage> &outputBufferDrained)
+ : mInputBufferFilled(inputBufferFilled),
+ mOutputBufferDrained(outputBufferDrained),
+ mHeapSeqNum(-1) {
+}
+
+status_t ACodecBufferChannel::queueInputBuffer(const sp<MediaCodecBuffer> &buffer) {
+ if (mDealer != nullptr) {
+ return -ENOSYS;
+ }
+ std::shared_ptr<const std::vector<const BufferInfo>> array(
+ std::atomic_load(&mInputBuffers));
+ BufferInfoIterator it = findClientBuffer(array, buffer);
+ if (it == array->end()) {
+ return -ENOENT;
+ }
+ ALOGV("queueInputBuffer #%d", it->mBufferId);
+ sp<AMessage> msg = mInputBufferFilled->dup();
+ msg->setObject("buffer", it->mCodecBuffer);
+ msg->setInt32("buffer-id", it->mBufferId);
+ msg->post();
+ return OK;
+}
+
+status_t ACodecBufferChannel::queueSecureInputBuffer(
+ const sp<MediaCodecBuffer> &buffer, bool secure, const uint8_t *key,
+ const uint8_t *iv, CryptoPlugin::Mode mode, CryptoPlugin::Pattern pattern,
+ const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
+ AString *errorDetailMsg) {
+ if (!hasCryptoOrDescrambler() || mDealer == nullptr) {
+ return -ENOSYS;
+ }
+ std::shared_ptr<const std::vector<const BufferInfo>> array(
+ std::atomic_load(&mInputBuffers));
+ BufferInfoIterator it = findClientBuffer(array, buffer);
+ if (it == array->end()) {
+ return -ENOENT;
+ }
+
+ ICrypto::DestinationBuffer destination;
+ if (secure) {
+ sp<SecureBuffer> secureData =
+ static_cast<SecureBuffer *>(it->mCodecBuffer.get());
+ destination.mType = secureData->getDestinationType();
+ if (destination.mType != ICrypto::kDestinationTypeNativeHandle) {
+ return BAD_VALUE;
+ }
+ destination.mHandle =
+ static_cast<native_handle_t *>(secureData->getDestinationPointer());
+ } else {
+ destination.mType = ICrypto::kDestinationTypeSharedMemory;
+ destination.mSharedMemory = mDecryptDestination;
+ }
+
+ ICrypto::SourceBuffer source;
+ source.mSharedMemory = it->mSharedEncryptedBuffer;
+ source.mHeapSeqNum = mHeapSeqNum;
+
+ ssize_t result = -1;
+ if (mCrypto != NULL) {
+ result = mCrypto->decrypt(key, iv, mode, pattern,
+ source, it->mClientBuffer->offset(),
+ subSamples, numSubSamples, destination, errorDetailMsg);
+ } else {
+ DescrambleInfo descrambleInfo;
+ descrambleInfo.dstType = destination.mType ==
+ ICrypto::kDestinationTypeSharedMemory ?
+ DescrambleInfo::kDestinationTypeVmPointer :
+ DescrambleInfo::kDestinationTypeNativeHandle;
+ descrambleInfo.scramblingControl = key != NULL ?
+ (DescramblerPlugin::ScramblingControl)key[0] :
+ DescramblerPlugin::kScrambling_Unscrambled;
+ descrambleInfo.numSubSamples = numSubSamples;
+ descrambleInfo.subSamples = (DescramblerPlugin::SubSample *)subSamples;
+ descrambleInfo.srcMem = it->mSharedEncryptedBuffer;
+ descrambleInfo.srcOffset = 0;
+ descrambleInfo.dstPtr = NULL;
+ descrambleInfo.dstOffset = 0;
+
+ int32_t descrambleResult = -1;
+ Status status = mDescrambler->descramble(descrambleInfo, &descrambleResult);
+
+ if (status.isOk()) {
+ result = descrambleResult;
+ }
+
+ if (result < 0) {
+ ALOGE("descramble failed, exceptionCode=%d, err=%d, result=%zd",
+ status.exceptionCode(), status.transactionError(), result);
+ } else {
+ ALOGV("descramble succeeded, result=%zd", result);
+ }
+
+ if (result > 0 && destination.mType == ICrypto::kDestinationTypeSharedMemory) {
+ memcpy(destination.mSharedMemory->pointer(),
+ (uint8_t*)it->mSharedEncryptedBuffer->pointer(), result);
+ }
+ }
+
+ if (result < 0) {
+ return result;
+ }
+
+ if (destination.mType == ICrypto::kDestinationTypeSharedMemory) {
+ memcpy(it->mCodecBuffer->base(), destination.mSharedMemory->pointer(), result);
+ }
+
+ it->mCodecBuffer->setRange(0, result);
+
+ // Copy metadata from client to codec buffer.
+ it->mCodecBuffer->meta()->clear();
+ int64_t timeUs;
+ CHECK(it->mClientBuffer->meta()->findInt64("timeUs", &timeUs));
+ it->mCodecBuffer->meta()->setInt64("timeUs", timeUs);
+ int32_t eos;
+ if (it->mClientBuffer->meta()->findInt32("eos", &eos)) {
+ it->mCodecBuffer->meta()->setInt32("eos", eos);
+ }
+ int32_t csd;
+ if (it->mClientBuffer->meta()->findInt32("csd", &csd)) {
+ it->mCodecBuffer->meta()->setInt32("csd", csd);
+ }
+
+ ALOGV("queueSecureInputBuffer #%d", it->mBufferId);
+ sp<AMessage> msg = mInputBufferFilled->dup();
+ msg->setObject("buffer", it->mCodecBuffer);
+ msg->setInt32("buffer-id", it->mBufferId);
+ msg->post();
+ return OK;
+}
+
+status_t ACodecBufferChannel::renderOutputBuffer(
+ const sp<MediaCodecBuffer> &buffer, int64_t timestampNs) {
+ std::shared_ptr<const std::vector<const BufferInfo>> array(
+ std::atomic_load(&mOutputBuffers));
+ BufferInfoIterator it = findClientBuffer(array, buffer);
+ if (it == array->end()) {
+ return -ENOENT;
+ }
+
+ ALOGV("renderOutputBuffer #%d", it->mBufferId);
+ sp<AMessage> msg = mOutputBufferDrained->dup();
+ msg->setObject("buffer", buffer);
+ msg->setInt32("buffer-id", it->mBufferId);
+ msg->setInt32("render", true);
+ msg->setInt64("timestampNs", timestampNs);
+ msg->post();
+ return OK;
+}
+
+status_t ACodecBufferChannel::discardBuffer(const sp<MediaCodecBuffer> &buffer) {
+ std::shared_ptr<const std::vector<const BufferInfo>> array(
+ std::atomic_load(&mInputBuffers));
+ bool input = true;
+ BufferInfoIterator it = findClientBuffer(array, buffer);
+ if (it == array->end()) {
+ array = std::atomic_load(&mOutputBuffers);
+ input = false;
+ it = findClientBuffer(array, buffer);
+ if (it == array->end()) {
+ return -ENOENT;
+ }
+ }
+ ALOGV("discardBuffer #%d", it->mBufferId);
+ sp<AMessage> msg = input ? mInputBufferFilled->dup() : mOutputBufferDrained->dup();
+ msg->setObject("buffer", it->mCodecBuffer);
+ msg->setInt32("buffer-id", it->mBufferId);
+ msg->setInt32("discarded", true);
+ msg->post();
+ return OK;
+}
+
+void ACodecBufferChannel::getInputBufferArray(Vector<sp<MediaCodecBuffer>> *array) {
+ std::shared_ptr<const std::vector<const BufferInfo>> inputBuffers(
+ std::atomic_load(&mInputBuffers));
+ array->clear();
+ for (const BufferInfo &elem : *inputBuffers) {
+ array->push_back(elem.mClientBuffer);
+ }
+}
+
+void ACodecBufferChannel::getOutputBufferArray(Vector<sp<MediaCodecBuffer>> *array) {
+ std::shared_ptr<const std::vector<const BufferInfo>> outputBuffers(
+ std::atomic_load(&mOutputBuffers));
+ array->clear();
+ for (const BufferInfo &elem : *outputBuffers) {
+ array->push_back(elem.mClientBuffer);
+ }
+}
+
+sp<MemoryDealer> ACodecBufferChannel::makeMemoryDealer(size_t heapSize) {
+ sp<MemoryDealer> dealer;
+ if (mDealer != nullptr && mCrypto != nullptr && mHeapSeqNum >= 0) {
+ mCrypto->unsetHeap(mHeapSeqNum);
+ }
+ dealer = new MemoryDealer(heapSize, "ACodecBufferChannel");
+ if (mCrypto != nullptr) {
+ int32_t seqNum = mCrypto->setHeap(dealer->getMemoryHeap());
+ if (seqNum >= 0) {
+ mHeapSeqNum = seqNum;
+ ALOGD("setHeap returned mHeapSeqNum=%d", mHeapSeqNum);
+ } else {
+ mHeapSeqNum = -1;
+ ALOGD("setHeap failed, setting mHeapSeqNum=-1");
+ }
+ }
+ return dealer;
+}
+
+void ACodecBufferChannel::setInputBufferArray(const std::vector<BufferAndId> &array) {
+ if (hasCryptoOrDescrambler()) {
+ size_t totalSize = std::accumulate(
+ array.begin(), array.end(), 0u,
+ [alignment = MemoryDealer::getAllocationAlignment()]
+ (size_t sum, const BufferAndId& elem) {
+ return sum + align(elem.mBuffer->capacity(), alignment);
+ });
+ size_t maxSize = std::accumulate(
+ array.begin(), array.end(), 0u,
+ [alignment = MemoryDealer::getAllocationAlignment()]
+ (size_t max, const BufferAndId& elem) {
+ return std::max(max, align(elem.mBuffer->capacity(), alignment));
+ });
+ size_t destinationBufferSize = maxSize;
+ size_t heapSize = totalSize + destinationBufferSize;
+ if (heapSize > 0) {
+ mDealer = makeMemoryDealer(heapSize);
+ mDecryptDestination = mDealer->allocate(destinationBufferSize);
+ }
+ }
+ std::vector<const BufferInfo> inputBuffers;
+ for (const BufferAndId &elem : array) {
+ sp<IMemory> sharedEncryptedBuffer;
+ if (hasCryptoOrDescrambler()) {
+ sharedEncryptedBuffer = mDealer->allocate(elem.mBuffer->capacity());
+ }
+ inputBuffers.emplace_back(elem.mBuffer, elem.mBufferId, sharedEncryptedBuffer);
+ }
+ std::atomic_store(
+ &mInputBuffers,
+ std::make_shared<const std::vector<const BufferInfo>>(inputBuffers));
+}
+
+void ACodecBufferChannel::setOutputBufferArray(const std::vector<BufferAndId> &array) {
+ std::vector<const BufferInfo> outputBuffers;
+ for (const BufferAndId &elem : array) {
+ outputBuffers.emplace_back(elem.mBuffer, elem.mBufferId, nullptr);
+ }
+ std::atomic_store(
+ &mOutputBuffers,
+ std::make_shared<const std::vector<const BufferInfo>>(outputBuffers));
+}
+
+void ACodecBufferChannel::fillThisBuffer(IOMX::buffer_id bufferId) {
+ ALOGV("fillThisBuffer #%d", bufferId);
+ std::shared_ptr<const std::vector<const BufferInfo>> array(
+ std::atomic_load(&mInputBuffers));
+ BufferInfoIterator it = findBufferId(array, bufferId);
+
+ if (it == array->end()) {
+ ALOGE("fillThisBuffer: unrecognized buffer #%d", bufferId);
+ return;
+ }
+ if (it->mClientBuffer != it->mCodecBuffer) {
+ it->mClientBuffer->setFormat(it->mCodecBuffer->format());
+ }
+
+ mCallback->onInputBufferAvailable(
+ std::distance(array->begin(), it),
+ it->mClientBuffer);
+}
+
+void ACodecBufferChannel::drainThisBuffer(
+ IOMX::buffer_id bufferId,
+ OMX_U32 omxFlags) {
+ ALOGV("drainThisBuffer #%d", bufferId);
+ std::shared_ptr<const std::vector<const BufferInfo>> array(
+ std::atomic_load(&mOutputBuffers));
+ BufferInfoIterator it = findBufferId(array, bufferId);
+
+ if (it == array->end()) {
+ ALOGE("drainThisBuffer: unrecognized buffer #%d", bufferId);
+ return;
+ }
+ if (it->mClientBuffer != it->mCodecBuffer) {
+ it->mClientBuffer->setFormat(it->mCodecBuffer->format());
+ }
+
+ uint32_t flags = 0;
+ if (omxFlags & OMX_BUFFERFLAG_SYNCFRAME) {
+ flags |= MediaCodec::BUFFER_FLAG_SYNCFRAME;
+ }
+ if (omxFlags & OMX_BUFFERFLAG_CODECCONFIG) {
+ flags |= MediaCodec::BUFFER_FLAG_CODECCONFIG;
+ }
+ if (omxFlags & OMX_BUFFERFLAG_EOS) {
+ flags |= MediaCodec::BUFFER_FLAG_EOS;
+ }
+ it->mClientBuffer->meta()->setInt32("flags", flags);
+
+ mCallback->onOutputBufferAvailable(
+ std::distance(array->begin(), it),
+ it->mClientBuffer);
+}
+
+} // namespace android
diff --git a/media/libstagefright/AMRExtractor.cpp b/media/libstagefright/AMRExtractor.cpp
index 0e98db8..2892520 100644
--- a/media/libstagefright/AMRExtractor.cpp
+++ b/media/libstagefright/AMRExtractor.cpp
@@ -259,7 +259,7 @@
int64_t seekTimeUs;
ReadOptions::SeekMode mode;
- if (options && options->getSeekTo(&seekTimeUs, &mode)) {
+ if (mOffsetTableLength > 0 && options && options->getSeekTo(&seekTimeUs, &mode)) {
size_t size;
int64_t seekFrame = seekTimeUs / 20000ll; // 20ms per frame.
mCurrentTimeUs = seekFrame * 20000ll;
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index 685189c..5b8a0d1 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -9,20 +9,20 @@
srcs: [
"ACodec.cpp",
+ "ACodecBufferChannel.cpp",
"AACExtractor.cpp",
"AACWriter.cpp",
"AMRExtractor.cpp",
"AMRWriter.cpp",
"AudioPlayer.cpp",
"AudioSource.cpp",
+ "BufferImpl.cpp",
"CallbackDataSource.cpp",
"CameraSource.cpp",
"CameraSourceTimeLapse.cpp",
- "CodecBase.cpp",
"DataConverter.cpp",
"DataSource.cpp",
"DataURISource.cpp",
- "DRMExtractor.cpp",
"ESDS.cpp",
"FileSource.cpp",
"FLACExtractor.cpp",
@@ -63,7 +63,6 @@
"VBRISeeker.cpp",
"VideoFrameScheduler.cpp",
"WAVExtractor.cpp",
- "WVMExtractor.cpp",
"XINGSeeker.cpp",
"avc_utils.cpp",
],
@@ -77,6 +76,7 @@
"libaudioutils",
"libbinder",
"libcamera_client",
+ "libcrypto",
"libcutils",
"libdl",
"libdrmframework",
@@ -85,6 +85,7 @@
"liblog",
"libmedia",
"libaudioclient",
+ "libmediametrics",
"libmediautils",
"libnetd_client",
"libsonivox",
@@ -92,10 +93,19 @@
"libui",
"libutils",
"libvorbisidec",
+ "libmediadrm",
+ "libnativewindow",
+ "libmedia_helper",
"libstagefright_foundation",
"libdl",
"libRScpp",
+ "libhidlbase",
+ "libhidlmemory",
+ "android.hidl.allocator@1.0",
+ "android.hidl.memory@1.0",
+ "android.hardware.media.omx@1.0",
+ "libstagefright_xmlparser@1.0",
],
static_libs: [
@@ -111,7 +121,6 @@
"libstagefright_mpeg2ts",
"libstagefright_id3",
"libFLAC",
- "libmedia_helper",
],
export_shared_lib_headers: ["libmedia"],
@@ -135,10 +144,14 @@
},
sanitize: {
+ cfi: true,
misc_undefined: [
"unsigned-integer-overflow",
"signed-integer-overflow",
],
+ diag: {
+ cfi: true,
+ },
},
}
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index efdee77..4ccd2d0 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -62,6 +62,8 @@
mPrevSampleTimeUs(0),
mInitialReadTimeUs(0),
mNumFramesReceived(0),
+ mNumFramesSkipped(0),
+ mNumFramesLost(0),
mNumClientOwnedBuffers(0) {
ALOGV("sampleRate: %u, outSampleRate: %u, channelCount: %u",
sampleRate, outSampleRate, channelCount);
@@ -295,11 +297,27 @@
}
status_t AudioSource::dataCallback(const AudioRecord::Buffer& audioBuffer) {
- int64_t timeUs = systemTime() / 1000ll;
- // Estimate the real sampling time of the 1st sample in this buffer
- // from AudioRecord's latency. (Apply this adjustment first so that
- // the start time logic is not affected.)
- timeUs -= mRecord->latency() * 1000LL;
+ int64_t timeUs, position, timeNs;
+ ExtendedTimestamp ts;
+ ExtendedTimestamp::Location location;
+ const int32_t usPerSec = 1000000;
+
+ if (mRecord->getTimestamp(&ts) == OK &&
+ ts.getBestTimestamp(&position, &timeNs, ExtendedTimestamp::TIMEBASE_MONOTONIC,
+ &location) == OK) {
+ // Use audio timestamp.
+ timeUs = timeNs / 1000 -
+ (position - mNumFramesSkipped -
+ mNumFramesReceived + mNumFramesLost) * usPerSec / mSampleRate;
+ } else {
+ // This should not happen in normal case.
+ ALOGW("Failed to get audio timestamp, fallback to use systemclock");
+ timeUs = systemTime() / 1000ll;
+ // Estimate the real sampling time of the 1st sample in this buffer
+ // from AudioRecord's latency. (Apply this adjustment first so that
+ // the start time logic is not affected.)
+ timeUs -= mRecord->latency() * 1000LL;
+ }
ALOGV("dataCallbackTimestamp: %" PRId64 " us", timeUs);
Mutex::Autolock autoLock(mLock);
@@ -308,10 +326,15 @@
return OK;
}
+ const size_t bufferSize = audioBuffer.size;
+
// Drop retrieved and previously lost audio data.
if (mNumFramesReceived == 0 && timeUs < mStartTimeUs) {
(void) mRecord->getInputFramesLost();
- ALOGV("Drop audio data at %" PRId64 "/%" PRId64 " us", timeUs, mStartTimeUs);
+ int64_t receievedFrames = bufferSize / mRecord->frameSize();
+ ALOGV("Drop audio data(%" PRId64 " frames) at %" PRId64 "/%" PRId64 " us",
+ receievedFrames, timeUs, mStartTimeUs);
+ mNumFramesSkipped += receievedFrames;
return OK;
}
@@ -320,11 +343,7 @@
// Initial delay
if (mStartTimeUs > 0) {
mStartTimeUs = timeUs - mStartTimeUs;
- } else {
- // Assume latency is constant.
- mStartTimeUs += mRecord->latency() * 1000;
}
-
mPrevSampleTimeUs = mStartTimeUs;
}
@@ -354,6 +373,7 @@
MediaBuffer *lostAudioBuffer = new MediaBuffer(bufferSize);
memset(lostAudioBuffer->data(), 0, bufferSize);
lostAudioBuffer->set_range(0, bufferSize);
+ mNumFramesLost += bufferSize / mRecord->frameSize();
queueInputBuffer_l(lostAudioBuffer, timeUs);
}
@@ -362,7 +382,6 @@
return OK;
}
- const size_t bufferSize = audioBuffer.size;
MediaBuffer *buffer = new MediaBuffer(bufferSize);
memcpy((uint8_t *) buffer->data(),
audioBuffer.i16, audioBuffer.size);
diff --git a/media/libstagefright/BufferImpl.cpp b/media/libstagefright/BufferImpl.cpp
new file mode 100644
index 0000000..fee3739
--- /dev/null
+++ b/media/libstagefright/BufferImpl.cpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "BufferImpl"
+#include <utils/Log.h>
+
+#include <binder/IMemory.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/ICrypto.h>
+#include <utils/NativeHandle.h>
+
+#include "include/SecureBuffer.h"
+#include "include/SharedMemoryBuffer.h"
+
+namespace android {
+
+SharedMemoryBuffer::SharedMemoryBuffer(const sp<AMessage> &format, const sp<IMemory> &mem)
+ : MediaCodecBuffer(format, new ABuffer(mem->pointer(), mem->size())),
+ mMemory(mem) {
+}
+
+SharedMemoryBuffer::SharedMemoryBuffer(const sp<AMessage> &format, const sp<TMemory> &mem)
+ : MediaCodecBuffer(format, new ABuffer(mem->getPointer(), mem->getSize())),
+ mTMemory(mem) {
+}
+
+SecureBuffer::SecureBuffer(const sp<AMessage> &format, const void *ptr, size_t size)
+ : MediaCodecBuffer(format, new ABuffer(nullptr, size)),
+ mPointer(ptr) {
+}
+
+SecureBuffer::SecureBuffer(
+ const sp<AMessage> &format, const sp<NativeHandle> &handle, size_t size)
+ : MediaCodecBuffer(format, new ABuffer(nullptr, size)),
+ mPointer(nullptr),
+ mHandle(handle) {
+}
+
+void *SecureBuffer::getDestinationPointer() {
+ return (void *)(mHandle == nullptr ? mPointer : mHandle->handle());
+}
+
+ICrypto::DestinationType SecureBuffer::getDestinationType() {
+ return ICrypto::kDestinationTypeNativeHandle;
+}
+
+} // namespace android
diff --git a/media/libstagefright/CallbackDataSource.cpp b/media/libstagefright/CallbackDataSource.cpp
index 0434bab..4309372 100644
--- a/media/libstagefright/CallbackDataSource.cpp
+++ b/media/libstagefright/CallbackDataSource.cpp
@@ -113,6 +113,10 @@
return mIDataSource->DrmInitialization(mime);
}
+sp<IDataSource> CallbackDataSource::getIDataSource() const {
+ return mIDataSource;
+}
+
TinyCacheSource::TinyCacheSource(const sp<DataSource>& source)
: mSource(source), mCachedOffset(0), mCachedSize(0) {
mName = String8::format("TinyCacheSource(%s)", mSource->toString().string());
@@ -190,4 +194,9 @@
mCachedSize = 0;
return mSource->DrmInitialization(mime);
}
+
+sp<IDataSource> TinyCacheSource::getIDataSource() const {
+ return mSource->getIDataSource();
+}
+
} // namespace android
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index 408ad7a..a569f5d 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -58,6 +58,10 @@
virtual void postRecordingFrameHandleTimestamp(nsecs_t timestamp, native_handle_t* handle);
+ virtual void postRecordingFrameHandleTimestampBatch(
+ const std::vector<nsecs_t>& timestamps,
+ const std::vector<native_handle_t*>& handles);
+
protected:
virtual ~CameraSourceListener();
@@ -110,7 +114,26 @@
}
}
+void CameraSourceListener::postRecordingFrameHandleTimestampBatch(
+ const std::vector<nsecs_t>& timestamps,
+ const std::vector<native_handle_t*>& handles) {
+ sp<CameraSource> source = mSource.promote();
+ if (source.get() != nullptr) {
+ int n = timestamps.size();
+ std::vector<nsecs_t> modifiedTimestamps(n);
+ for (int i = 0; i < n; i++) {
+ modifiedTimestamps[i] = timestamps[i] / 1000;
+ }
+ source->recordingFrameHandleCallbackTimestampBatch(modifiedTimestamps, handles);
+ }
+}
+
static int32_t getColorFormat(const char* colorFormat) {
+ if (!colorFormat) {
+ ALOGE("Invalid color format");
+ return -1;
+ }
+
if (!strcmp(colorFormat, CameraParameters::PIXEL_FORMAT_YUV420P)) {
return OMX_COLOR_FormatYUV420Planar;
}
@@ -765,9 +788,7 @@
return mInitCheck;
}
- char value[PROPERTY_VALUE_MAX];
- if (property_get("media.stagefright.record-stats", value, NULL)
- && (!strcmp(value, "1") || !strcasecmp(value, "true"))) {
+ if (property_get_bool("media.stagefright.record-stats", false)) {
mCollectStats = true;
}
@@ -949,10 +970,35 @@
}
if (handle != nullptr) {
- // Frame contains a VideoNativeHandleMetadata. Send the handle back to camera.
- releaseRecordingFrameHandle(handle);
- mMemoryBases.push_back(frame);
- mMemoryBaseAvailableCond.signal();
+ uint32_t batchSize = 0;
+ {
+ Mutex::Autolock autoLock(mBatchLock);
+ if (mInflightBatchSizes.size() > 0) {
+ batchSize = mInflightBatchSizes[0];
+ }
+ }
+ if (batchSize == 0) { // return buffers one by one
+ // Frame contains a VideoNativeHandleMetadata. Send the handle back to camera.
+ releaseRecordingFrameHandle(handle);
+ mMemoryBases.push_back(frame);
+ mMemoryBaseAvailableCond.signal();
+ } else { // Group buffers in batch then return
+ Mutex::Autolock autoLock(mBatchLock);
+ mInflightReturnedHandles.push_back(handle);
+ mInflightReturnedMemorys.push_back(frame);
+ if (mInflightReturnedHandles.size() == batchSize) {
+ releaseRecordingFrameHandleBatch(mInflightReturnedHandles);
+
+ mInflightBatchSizes.pop_front();
+ mInflightReturnedHandles.clear();
+ for (const auto& mem : mInflightReturnedMemorys) {
+ mMemoryBases.push_back(mem);
+ mMemoryBaseAvailableCond.signal();
+ }
+ mInflightReturnedMemorys.clear();
+ }
+ }
+
} else if (mCameraRecordingProxy != nullptr) {
// mCamera is created by application. Return the frame back to camera via camera
// recording proxy.
@@ -1123,6 +1169,21 @@
}
}
+void CameraSource::releaseRecordingFrameHandleBatch(const std::vector<native_handle_t*>& handles) {
+ if (mCameraRecordingProxy != nullptr) {
+ mCameraRecordingProxy->releaseRecordingFrameHandleBatch(handles);
+ } else if (mCamera != nullptr) {
+ int64_t token = IPCThreadState::self()->clearCallingIdentity();
+ mCamera->releaseRecordingFrameHandleBatch(handles);
+ IPCThreadState::self()->restoreCallingIdentity(token);
+ } else {
+ for (auto& handle : handles) {
+ native_handle_close(handle);
+ native_handle_delete(handle);
+ }
+ }
+}
+
void CameraSource::recordingFrameHandleCallbackTimestamp(int64_t timestampUs,
native_handle_t* handle) {
ALOGV("%s: timestamp %lld us", __FUNCTION__, (long long)timestampUs);
@@ -1160,6 +1221,62 @@
mFrameAvailableCondition.signal();
}
+void CameraSource::recordingFrameHandleCallbackTimestampBatch(
+ const std::vector<int64_t>& timestampsUs,
+ const std::vector<native_handle_t*>& handles) {
+ size_t n = timestampsUs.size();
+ if (n != handles.size()) {
+ ALOGE("%s: timestampsUs(%zu) and handles(%zu) size mismatch!",
+ __FUNCTION__, timestampsUs.size(), handles.size());
+ }
+
+ Mutex::Autolock autoLock(mLock);
+ int batchSize = 0;
+ for (size_t i = 0; i < n; i++) {
+ int64_t timestampUs = timestampsUs[i];
+ native_handle_t* handle = handles[i];
+
+ ALOGV("%s: timestamp %lld us", __FUNCTION__, (long long)timestampUs);
+ if (handle == nullptr) continue;
+
+ if (shouldSkipFrameLocked(timestampUs)) {
+ releaseRecordingFrameHandle(handle);
+ continue;
+ }
+
+ while (mMemoryBases.empty()) {
+ if (mMemoryBaseAvailableCond.waitRelative(mLock, kMemoryBaseAvailableTimeoutNs) ==
+ TIMED_OUT) {
+ ALOGW("Waiting on an available memory base timed out. Dropping a recording frame.");
+ releaseRecordingFrameHandle(handle);
+ continue;
+ }
+ }
+ ++batchSize;
+ ++mNumFramesReceived;
+ sp<IMemory> data = *mMemoryBases.begin();
+ mMemoryBases.erase(mMemoryBases.begin());
+
+ // Wrap native handle in sp<IMemory> so it can be pushed to mFramesReceived.
+ VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(data->pointer());
+ metadata->eType = kMetadataBufferTypeNativeHandleSource;
+ metadata->pHandle = handle;
+
+ mFramesReceived.push_back(data);
+ int64_t timeUs = mStartTimeUs + (timestampUs - mFirstFrameTimeUs);
+ mFrameTimes.push_back(timeUs);
+ ALOGV("initial delay: %" PRId64 ", current time stamp: %" PRId64, mStartTimeUs, timeUs);
+
+ }
+ if (batchSize > 0) {
+ Mutex::Autolock autoLock(mBatchLock);
+ mInflightBatchSizes.push_back(batchSize);
+ }
+ for (int i = 0; i < batchSize; i++) {
+ mFrameAvailableCondition.signal();
+ }
+}
+
CameraSource::BufferQueueListener::BufferQueueListener(const sp<BufferItemConsumer>& consumer,
const sp<CameraSource>& cameraSource) {
mConsumer = consumer;
@@ -1276,6 +1393,17 @@
mSource->recordingFrameHandleCallbackTimestamp(timestamp / 1000, handle);
}
+void CameraSource::ProxyListener::recordingFrameHandleCallbackTimestampBatch(
+ const std::vector<int64_t>& timestampsUs,
+ const std::vector<native_handle_t*>& handles) {
+ int n = timestampsUs.size();
+ std::vector<nsecs_t> modifiedTimestamps(n);
+ for (int i = 0; i < n; i++) {
+ modifiedTimestamps[i] = timestampsUs[i] / 1000;
+ }
+ mSource->recordingFrameHandleCallbackTimestampBatch(modifiedTimestamps, handles);
+}
+
void CameraSource::DeathNotifier::binderDied(const wp<IBinder>& who __unused) {
ALOGI("Camera recording proxy died");
}
diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp
index 390c556..970526a 100644
--- a/media/libstagefright/CameraSourceTimeLapse.cpp
+++ b/media/libstagefright/CameraSourceTimeLapse.cpp
@@ -315,6 +315,17 @@
CameraSource::recordingFrameHandleCallbackTimestamp(timestampUs, handle);
}
+void CameraSourceTimeLapse::recordingFrameHandleCallbackTimestampBatch(
+ const std::vector<int64_t>& timestampsUs,
+ const std::vector<native_handle_t*>& handles) {
+ ALOGV("recordingFrameHandleCallbackTimestampBatch");
+ int n = timestampsUs.size();
+ for (int i = 0; i < n; i++) {
+ // Don't do batching for CameraSourceTimeLapse for now
+ recordingFrameHandleCallbackTimestamp(timestampsUs[i], handles[i]);
+ }
+}
+
void CameraSourceTimeLapse::processBufferQueueFrame(BufferItem& buffer) {
ALOGV("processBufferQueueFrame");
int64_t timestampUs = buffer.mTimestamp / 1000;
diff --git a/media/libstagefright/CodecBase.cpp b/media/libstagefright/CodecBase.cpp
deleted file mode 100644
index f729d4d..0000000
--- a/media/libstagefright/CodecBase.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "CodecBase"
-
-#include <inttypes.h>
-
-#include <media/stagefright/CodecBase.h>
-
-namespace android {
-
-CodecBase::CodecBase() {
-}
-
-CodecBase::~CodecBase() {
-}
-
-CodecBase::PortDescription::PortDescription() {
-}
-
-CodecBase::PortDescription::~PortDescription() {
-}
-
-} // namespace android
diff --git a/media/libstagefright/DRMExtractor.cpp b/media/libstagefright/DRMExtractor.cpp
deleted file mode 100644
index d36ac65..0000000
--- a/media/libstagefright/DRMExtractor.cpp
+++ /dev/null
@@ -1,306 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "include/DRMExtractor.h"
-
-#include <arpa/inet.h>
-#include <utils/String8.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/Utils.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaBuffer.h>
-
-#include <drm/drm_framework_common.h>
-#include <utils/Errors.h>
-
-
-namespace android {
-
-class DRMSource : public MediaSource {
-public:
- DRMSource(const sp<IMediaSource> &mediaSource,
- const sp<DecryptHandle> &decryptHandle,
- DrmManagerClient *managerClient,
- int32_t trackId, DrmBuffer *ipmpBox);
-
- virtual status_t start(MetaData *params = NULL);
- virtual status_t stop();
- virtual sp<MetaData> getFormat();
- virtual status_t read(
- MediaBuffer **buffer, const ReadOptions *options = NULL);
-
-protected:
- virtual ~DRMSource();
-
-private:
- sp<IMediaSource> mOriginalMediaSource;
- sp<DecryptHandle> mDecryptHandle;
- DrmManagerClient* mDrmManagerClient;
- size_t mTrackId;
- mutable Mutex mDRMLock;
- size_t mNALLengthSize;
- bool mWantsNALFragments;
-
- DRMSource(const DRMSource &);
- DRMSource &operator=(const DRMSource &);
-};
-
-////////////////////////////////////////////////////////////////////////////////
-
-DRMSource::DRMSource(const sp<IMediaSource> &mediaSource,
- const sp<DecryptHandle> &decryptHandle,
- DrmManagerClient *managerClient,
- int32_t trackId, DrmBuffer *ipmpBox)
- : mOriginalMediaSource(mediaSource),
- mDecryptHandle(decryptHandle),
- mDrmManagerClient(managerClient),
- mTrackId(trackId),
- mNALLengthSize(0),
- mWantsNALFragments(false) {
- CHECK(mDrmManagerClient);
- mDrmManagerClient->initializeDecryptUnit(
- mDecryptHandle, trackId, ipmpBox);
-
- const char *mime;
- bool success = getFormat()->findCString(kKeyMIMEType, &mime);
- CHECK(success);
-
- if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
- uint32_t type;
- const void *data;
- size_t size;
- CHECK(getFormat()->findData(kKeyAVCC, &type, &data, &size));
-
- const uint8_t *ptr = (const uint8_t *)data;
-
- CHECK(size >= 7);
- CHECK_EQ(ptr[0], 1); // configurationVersion == 1
-
- // The number of bytes used to encode the length of a NAL unit.
- mNALLengthSize = 1 + (ptr[4] & 3);
- }
-}
-
-DRMSource::~DRMSource() {
- Mutex::Autolock autoLock(mDRMLock);
- mDrmManagerClient->finalizeDecryptUnit(mDecryptHandle, mTrackId);
-}
-
-status_t DRMSource::start(MetaData *params) {
- int32_t val;
- if (params && params->findInt32(kKeyWantsNALFragments, &val)
- && val != 0) {
- mWantsNALFragments = true;
- } else {
- mWantsNALFragments = false;
- }
-
- return mOriginalMediaSource->start(params);
-}
-
-status_t DRMSource::stop() {
- return mOriginalMediaSource->stop();
-}
-
-sp<MetaData> DRMSource::getFormat() {
- return mOriginalMediaSource->getFormat();
-}
-
-status_t DRMSource::read(MediaBuffer **buffer, const ReadOptions *options) {
- Mutex::Autolock autoLock(mDRMLock);
- status_t err;
- if ((err = mOriginalMediaSource->read(buffer, options)) != OK) {
- return err;
- }
-
- size_t len = (*buffer)->range_length();
-
- char *src = (char *)(*buffer)->data() + (*buffer)->range_offset();
-
- DrmBuffer encryptedDrmBuffer(src, len);
- DrmBuffer decryptedDrmBuffer;
- decryptedDrmBuffer.length = len;
- decryptedDrmBuffer.data = new char[len];
- DrmBuffer *pDecryptedDrmBuffer = &decryptedDrmBuffer;
-
- if ((err = mDrmManagerClient->decrypt(mDecryptHandle, mTrackId,
- &encryptedDrmBuffer, &pDecryptedDrmBuffer)) != NO_ERROR) {
-
- if (decryptedDrmBuffer.data) {
- delete [] decryptedDrmBuffer.data;
- decryptedDrmBuffer.data = NULL;
- }
-
- return err;
- }
- CHECK(pDecryptedDrmBuffer == &decryptedDrmBuffer);
-
- const char *mime;
- CHECK(getFormat()->findCString(kKeyMIMEType, &mime));
-
- if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC) && !mWantsNALFragments) {
- uint8_t *dstData = (uint8_t*)src;
- size_t srcOffset = 0;
- size_t dstOffset = 0;
-
- len = decryptedDrmBuffer.length;
- while (srcOffset < len) {
- CHECK(srcOffset + mNALLengthSize <= len);
- size_t nalLength = 0;
- const uint8_t* data = (const uint8_t*)(&decryptedDrmBuffer.data[srcOffset]);
-
- switch (mNALLengthSize) {
- case 1:
- nalLength = *data;
- break;
- case 2:
- nalLength = U16_AT(data);
- break;
- case 3:
- nalLength = ((size_t)data[0] << 16) | U16_AT(&data[1]);
- break;
- case 4:
- nalLength = U32_AT(data);
- break;
- default:
- CHECK(!"Should not be here.");
- break;
- }
-
- srcOffset += mNALLengthSize;
-
- size_t end = srcOffset + nalLength;
- if (end > len || end < srcOffset) {
- if (decryptedDrmBuffer.data) {
- delete [] decryptedDrmBuffer.data;
- decryptedDrmBuffer.data = NULL;
- }
-
- return ERROR_MALFORMED;
- }
-
- if (nalLength == 0) {
- continue;
- }
-
- if (dstOffset > SIZE_MAX - 4 ||
- dstOffset + 4 > SIZE_MAX - nalLength ||
- dstOffset + 4 + nalLength > (*buffer)->size()) {
- (*buffer)->release();
- (*buffer) = NULL;
- if (decryptedDrmBuffer.data) {
- delete [] decryptedDrmBuffer.data;
- decryptedDrmBuffer.data = NULL;
- }
- return ERROR_MALFORMED;
- }
-
- dstData[dstOffset++] = 0;
- dstData[dstOffset++] = 0;
- dstData[dstOffset++] = 0;
- dstData[dstOffset++] = 1;
- memcpy(&dstData[dstOffset], &decryptedDrmBuffer.data[srcOffset], nalLength);
- srcOffset += nalLength;
- dstOffset += nalLength;
- }
-
- CHECK_EQ(srcOffset, len);
- (*buffer)->set_range((*buffer)->range_offset(), dstOffset);
-
- } else {
- memcpy(src, decryptedDrmBuffer.data, decryptedDrmBuffer.length);
- (*buffer)->set_range((*buffer)->range_offset(), decryptedDrmBuffer.length);
- }
-
- if (decryptedDrmBuffer.data) {
- delete [] decryptedDrmBuffer.data;
- decryptedDrmBuffer.data = NULL;
- }
-
- return OK;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-DRMExtractor::DRMExtractor(const sp<DataSource> &source, const char* mime)
- : mDataSource(source),
- mDecryptHandle(NULL),
- mDrmManagerClient(NULL) {
- mOriginalExtractor = MediaExtractor::Create(source, mime);
- mOriginalExtractor->setDrmFlag(true);
- mOriginalExtractor->getMetaData()->setInt32(kKeyIsDRM, 1);
-
- source->getDrmInfo(mDecryptHandle, &mDrmManagerClient);
-}
-
-DRMExtractor::~DRMExtractor() {
-}
-
-size_t DRMExtractor::countTracks() {
- return mOriginalExtractor->countTracks();
-}
-
-sp<IMediaSource> DRMExtractor::getTrack(size_t index) {
- sp<IMediaSource> originalMediaSource = mOriginalExtractor->getTrack(index);
- originalMediaSource->getFormat()->setInt32(kKeyIsDRM, 1);
-
- int32_t trackID;
- CHECK(getTrackMetaData(index, 0)->findInt32(kKeyTrackID, &trackID));
-
- DrmBuffer ipmpBox;
- ipmpBox.data = mOriginalExtractor->getDrmTrackInfo(trackID, &(ipmpBox.length));
- CHECK(ipmpBox.length > 0);
-
- return interface_cast<IMediaSource>(
- new DRMSource(originalMediaSource, mDecryptHandle, mDrmManagerClient,
- trackID, &ipmpBox));
-}
-
-sp<MetaData> DRMExtractor::getTrackMetaData(size_t index, uint32_t flags) {
- return mOriginalExtractor->getTrackMetaData(index, flags);
-}
-
-sp<MetaData> DRMExtractor::getMetaData() {
- return mOriginalExtractor->getMetaData();
-}
-
-bool SniffDRM(
- const sp<DataSource> &source, String8 *mimeType, float *confidence,
- sp<AMessage> *) {
- sp<DecryptHandle> decryptHandle = source->DrmInitialization();
-
- if (decryptHandle != NULL) {
- if (decryptHandle->decryptApiType == DecryptApiType::CONTAINER_BASED) {
- *mimeType = String8("drm+container_based+") + decryptHandle->mimeType;
- *confidence = 10.0f;
- } else if (decryptHandle->decryptApiType == DecryptApiType::ELEMENTARY_STREAM_BASED) {
- *mimeType = String8("drm+es_based+") + decryptHandle->mimeType;
- *confidence = 10.0f;
- } else {
- return false;
- }
-
- return true;
- }
-
- return false;
-}
-} //namespace android
-
diff --git a/media/libstagefright/DataConverter.cpp b/media/libstagefright/DataConverter.cpp
index aea47f3..52be054 100644
--- a/media/libstagefright/DataConverter.cpp
+++ b/media/libstagefright/DataConverter.cpp
@@ -21,13 +21,13 @@
#include <audio_utils/primitives.h>
-#include <media/stagefright/foundation/ABuffer.h>
+#include <media/MediaCodecBuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AUtils.h>
namespace android {
-status_t DataConverter::convert(const sp<ABuffer> &source, sp<ABuffer> &target) {
+status_t DataConverter::convert(const sp<MediaCodecBuffer> &source, sp<MediaCodecBuffer> &target) {
CHECK(source->base() != target->base());
size_t size = targetSize(source->size());
status_t err = OK;
@@ -43,7 +43,8 @@
return err;
}
-status_t DataConverter::safeConvert(const sp<ABuffer> &source, sp<ABuffer> &target) {
+status_t DataConverter::safeConvert(
+ const sp<MediaCodecBuffer> &source, sp<MediaCodecBuffer> &target) {
memcpy(target->base(), source->data(), source->size());
return OK;
}
@@ -101,7 +102,7 @@
return NULL;
}
-status_t AudioConverter::safeConvert(const sp<ABuffer> &src, sp<ABuffer> &tgt) {
+status_t AudioConverter::safeConvert(const sp<MediaCodecBuffer> &src, sp<MediaCodecBuffer> &tgt) {
if (mTo == kAudioEncodingPcm8bit && mFrom == kAudioEncodingPcm16bit) {
memcpy_to_u8_from_i16((uint8_t*)tgt->base(), (const int16_t*)src->data(), src->size() / 2);
} else if (mTo == kAudioEncodingPcm8bit && mFrom == kAudioEncodingPcmFloat) {
diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp
index 2cfadba..a5760d1 100644
--- a/media/libstagefright/DataSource.cpp
+++ b/media/libstagefright/DataSource.cpp
@@ -16,25 +16,11 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "DataSource"
-#include "include/AMRExtractor.h"
-
-#include "include/AACExtractor.h"
#include "include/CallbackDataSource.h"
-#include "include/DRMExtractor.h"
-#include "include/FLACExtractor.h"
#include "include/HTTPBase.h"
-#include "include/MidiExtractor.h"
-#include "include/MP3Extractor.h"
-#include "include/MPEG2PSExtractor.h"
-#include "include/MPEG2TSExtractor.h"
-#include "include/MPEG4Extractor.h"
#include "include/NuCachedSource2.h"
-#include "include/OggExtractor.h"
-#include "include/WAVExtractor.h"
-#include "include/WVMExtractor.h"
-#include "matroska/MatroskaExtractor.h"
-
+#include <media/IDataSource.h>
#include <media/IMediaHTTPConnection.h>
#include <media/IMediaHTTPService.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -44,6 +30,8 @@
#include <media/stagefright/FileSource.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MediaHTTP.h>
+#include <media/stagefright/RemoteDataSource.h>
+#include <media/stagefright/Utils.h>
#include <utils/String8.h>
#include <cutils/properties.h>
@@ -110,85 +98,12 @@
return ERROR_UNSUPPORTED;
}
+sp<IDataSource> DataSource::getIDataSource() const {
+ return nullptr;
+}
+
////////////////////////////////////////////////////////////////////////////////
-Mutex DataSource::gSnifferMutex;
-List<DataSource::SnifferFunc> DataSource::gSniffers;
-bool DataSource::gSniffersRegistered = false;
-
-bool DataSource::sniff(
- String8 *mimeType, float *confidence, sp<AMessage> *meta) {
- *mimeType = "";
- *confidence = 0.0f;
- meta->clear();
-
- {
- Mutex::Autolock autoLock(gSnifferMutex);
- if (!gSniffersRegistered) {
- return false;
- }
- }
-
- for (List<SnifferFunc>::iterator it = gSniffers.begin();
- it != gSniffers.end(); ++it) {
- String8 newMimeType;
- float newConfidence;
- sp<AMessage> newMeta;
- if ((*it)(this, &newMimeType, &newConfidence, &newMeta)) {
- if (newConfidence > *confidence) {
- *mimeType = newMimeType;
- *confidence = newConfidence;
- *meta = newMeta;
- }
- }
- }
-
- return *confidence > 0.0;
-}
-
-// static
-void DataSource::RegisterSniffer_l(SnifferFunc func) {
- for (List<SnifferFunc>::iterator it = gSniffers.begin();
- it != gSniffers.end(); ++it) {
- if (*it == func) {
- return;
- }
- }
-
- gSniffers.push_back(func);
-}
-
-// static
-void DataSource::RegisterDefaultSniffers() {
- Mutex::Autolock autoLock(gSnifferMutex);
- if (gSniffersRegistered) {
- return;
- }
-
- RegisterSniffer_l(SniffMPEG4);
- RegisterSniffer_l(SniffMatroska);
- RegisterSniffer_l(SniffOgg);
- RegisterSniffer_l(SniffWAV);
- RegisterSniffer_l(SniffFLAC);
- RegisterSniffer_l(SniffAMR);
- RegisterSniffer_l(SniffMPEG2TS);
- RegisterSniffer_l(SniffMP3);
- RegisterSniffer_l(SniffAAC);
- RegisterSniffer_l(SniffMPEG2PS);
- if (getuid() == AID_MEDIA) {
- // WVM only in the media server process
- RegisterSniffer_l(SniffWVM);
- }
- RegisterSniffer_l(SniffMidi);
-
- char value[PROPERTY_VALUE_MAX];
- if (property_get("drm.service.enabled", value, NULL)
- && (!strcmp(value, "1") || !strcasecmp(value, "true"))) {
- RegisterSniffer_l(SniffDRM);
- }
- gSniffersRegistered = true;
-}
-
// static
sp<DataSource> DataSource::CreateFromURI(
const sp<IMediaHTTPService> &httpService,
@@ -200,14 +115,10 @@
*contentType = "";
}
- bool isWidevine = !strncasecmp("widevine://", uri, 11);
-
sp<DataSource> source;
if (!strncasecmp("file://", uri, 7)) {
source = new FileSource(uri + 7);
- } else if (!strncasecmp("http://", uri, 7)
- || !strncasecmp("https://", uri, 8)
- || isWidevine) {
+ } else if (!strncasecmp("http://", uri, 7) || !strncasecmp("https://", uri, 8)) {
if (httpService == NULL) {
ALOGE("Invalid http service!");
return NULL;
@@ -222,14 +133,6 @@
httpSource = new MediaHTTP(conn);
}
- String8 tmp;
- if (isWidevine) {
- tmp = String8("http://");
- tmp.append(uri + 11);
-
- uri = tmp.string();
- }
-
String8 cacheConfig;
bool disconnectAtHighwatermark = false;
KeyedVector<String8, String8> nonCacheSpecificHeaders;
@@ -246,20 +149,14 @@
return NULL;
}
- if (!isWidevine) {
- if (contentType != NULL) {
- *contentType = httpSource->getMIMEType();
- }
-
- source = NuCachedSource2::Create(
- httpSource,
- cacheConfig.isEmpty() ? NULL : cacheConfig.string(),
- disconnectAtHighwatermark);
- } else {
- // We do not want that prefetching, caching, datasource wrapper
- // in the widevine:// case.
- source = httpSource;
+ if (contentType != NULL) {
+ *contentType = httpSource->getMIMEType();
}
+
+ source = NuCachedSource2::Create(
+ httpSource,
+ cacheConfig.isEmpty() ? NULL : cacheConfig.string(),
+ disconnectAtHighwatermark);
} else if (!strncasecmp("data:", uri, 5)) {
source = DataURISource::Create(uri);
} else {
@@ -274,6 +171,11 @@
return source;
}
+sp<DataSource> DataSource::CreateFromFd(int fd, int64_t offset, int64_t length) {
+ sp<FileSource> source = new FileSource(fd, offset, length);
+ return source->initCheck() != OK ? nullptr : source;
+}
+
sp<DataSource> DataSource::CreateMediaHTTP(const sp<IMediaHTTPService> &httpService) {
if (httpService == NULL) {
return NULL;
@@ -295,4 +197,8 @@
return String8("application/octet-stream");
}
+sp<IDataSource> DataSource::asIDataSource() {
+ return RemoteDataSource::wrap(sp<DataSource>(this));
+}
+
} // namespace android
diff --git a/media/libstagefright/FileSource.cpp b/media/libstagefright/FileSource.cpp
index 5b92f91..97d8988 100644
--- a/media/libstagefright/FileSource.cpp
+++ b/media/libstagefright/FileSource.cpp
@@ -21,6 +21,7 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/FileSource.h>
#include <media/stagefright/Utils.h>
+#include <private/android_filesystem_config.h>
#include <sys/types.h>
#include <unistd.h>
#include <sys/types.h>
@@ -171,6 +172,7 @@
}
sp<DecryptHandle> FileSource::DrmInitialization(const char *mime) {
+ if (getuid() == AID_MEDIA_EX) return nullptr; // no DRM in media extractor
if (mDrmManagerClient == NULL) {
mDrmManagerClient = new DrmManagerClient();
}
@@ -227,4 +229,18 @@
return mDrmManagerClient->pread(mDecryptHandle, data, size, offset + mOffset);
}
}
+
+/* static */
+bool FileSource::requiresDrm(int fd, int64_t offset, int64_t length, const char *mime) {
+ std::unique_ptr<DrmManagerClient> drmClient(new DrmManagerClient());
+ sp<DecryptHandle> decryptHandle =
+ drmClient->openDecryptSession(fd, offset, length, mime);
+ bool requiresDrm = false;
+ if (decryptHandle != nullptr) {
+ requiresDrm = decryptHandle->decryptApiType == DecryptApiType::CONTAINER_BASED;
+ drmClient->closeDecryptSession(decryptHandle);
+ }
+ return requiresDrm;
+}
+
} // namespace android
diff --git a/media/libstagefright/HevcUtils.cpp b/media/libstagefright/HevcUtils.cpp
index 718710a..7d463a9 100644
--- a/media/libstagefright/HevcUtils.cpp
+++ b/media/libstagefright/HevcUtils.cpp
@@ -45,16 +45,32 @@
}
status_t HevcParameterSets::addNalUnit(const uint8_t* data, size_t size) {
+ if (size < 1) {
+ ALOGE("empty NAL b/35467107");
+ return ERROR_MALFORMED;
+ }
uint8_t nalUnitType = (data[0] >> 1) & 0x3f;
status_t err = OK;
switch (nalUnitType) {
case 32: // VPS
+ if (size < 2) {
+ ALOGE("invalid NAL/VPS size b/35467107");
+ return ERROR_MALFORMED;
+ }
err = parseVps(data + 2, size - 2);
break;
case 33: // SPS
+ if (size < 2) {
+ ALOGE("invalid NAL/SPS size b/35467107");
+ return ERROR_MALFORMED;
+ }
err = parseSps(data + 2, size - 2);
break;
case 34: // PPS
+ if (size < 2) {
+ ALOGE("invalid NAL/PPS size b/35467107");
+ return ERROR_MALFORMED;
+ }
err = parsePps(data + 2, size - 2);
break;
case 39: // Prefix SEI
diff --git a/media/libstagefright/MP3Extractor.cpp b/media/libstagefright/MP3Extractor.cpp
index 82e7a26..22df522 100644
--- a/media/libstagefright/MP3Extractor.cpp
+++ b/media/libstagefright/MP3Extractor.cpp
@@ -538,7 +538,7 @@
buffer->release();
buffer = NULL;
- return ERROR_END_OF_STREAM;
+ return (n < 0 ? n : ERROR_END_OF_STREAM);
}
uint32_t header = U32_AT((const uint8_t *)buffer->data());
@@ -582,7 +582,7 @@
buffer->release();
buffer = NULL;
- return ERROR_END_OF_STREAM;
+ return (n < 0 ? n : ERROR_END_OF_STREAM);
}
buffer->set_range(0, frame_size);
diff --git a/media/libstagefright/MPEG2TSWriter.cpp b/media/libstagefright/MPEG2TSWriter.cpp
index dc4e32c..03ea959 100644
--- a/media/libstagefright/MPEG2TSWriter.cpp
+++ b/media/libstagefright/MPEG2TSWriter.cpp
@@ -37,7 +37,7 @@
struct MPEG2TSWriter::SourceInfo : public AHandler {
explicit SourceInfo(const sp<IMediaSource> &source);
- void start(const sp<AMessage> ¬ify);
+ void start(const sp<AMessage> ¬ify, const sp<MetaData> ¶ms);
void stop();
unsigned streamType() const;
@@ -75,7 +75,7 @@
sp<ABuffer> mAACCodecSpecificData;
- sp<ABuffer> mAACBuffer;
+ sp<ABuffer> mBuffer;
sp<ABuffer> mLastAccessUnit;
bool mEOSReceived;
@@ -85,10 +85,8 @@
void extractCodecSpecificData();
- bool appendAACFrames(MediaBuffer *buffer);
- bool flushAACFrames();
-
- void postAVCFrame(MediaBuffer *buffer);
+ void appendAACFrames(MediaBuffer *buffer);
+ void appendAVCFrame(MediaBuffer *buffer);
DISALLOW_EVIL_CONSTRUCTORS(SourceInfo);
};
@@ -129,13 +127,14 @@
return mContinuityCounter;
}
-void MPEG2TSWriter::SourceInfo::start(const sp<AMessage> ¬ify) {
+void MPEG2TSWriter::SourceInfo::start(const sp<AMessage> ¬ify, const sp<MetaData> ¶ms) {
mLooper->registerHandler(this);
mLooper->start();
-
mNotify = notify;
- (new AMessage(kWhatStart, this))->post();
+ sp<AMessage> msg = new AMessage(kWhatStart, this);
+ msg->setObject("meta", params);
+ msg->post();
}
void MPEG2TSWriter::SourceInfo::stop() {
@@ -250,56 +249,51 @@
notify->post();
}
-void MPEG2TSWriter::SourceInfo::postAVCFrame(MediaBuffer *buffer) {
+void MPEG2TSWriter::SourceInfo::appendAVCFrame(MediaBuffer *buffer) {
sp<AMessage> notify = mNotify->dup();
notify->setInt32("what", kNotifyBuffer);
- sp<ABuffer> copy =
- new ABuffer(buffer->range_length());
- memcpy(copy->data(),
+ if (mBuffer == NULL || buffer->range_length() > mBuffer->capacity()) {
+ mBuffer = new ABuffer(buffer->range_length());
+ }
+ mBuffer->setRange(0, 0);
+
+ memcpy(mBuffer->data(),
(const uint8_t *)buffer->data()
+ buffer->range_offset(),
buffer->range_length());
int64_t timeUs;
CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
- copy->meta()->setInt64("timeUs", timeUs);
+ mBuffer->meta()->setInt64("timeUs", timeUs);
int32_t isSync;
if (buffer->meta_data()->findInt32(kKeyIsSyncFrame, &isSync)
&& isSync != 0) {
- copy->meta()->setInt32("isSync", true);
+ mBuffer->meta()->setInt32("isSync", true);
}
- notify->setBuffer("buffer", copy);
+ mBuffer->setRange(0, buffer->range_length());
+
+ notify->setBuffer("buffer", mBuffer);
notify->post();
}
-bool MPEG2TSWriter::SourceInfo::appendAACFrames(MediaBuffer *buffer) {
- bool accessUnitPosted = false;
+void MPEG2TSWriter::SourceInfo::appendAACFrames(MediaBuffer *buffer) {
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kNotifyBuffer);
- if (mAACBuffer != NULL
- && mAACBuffer->size() + 7 + buffer->range_length()
- > mAACBuffer->capacity()) {
- accessUnitPosted = flushAACFrames();
+ if (mBuffer == NULL || 7 + buffer->range_length() > mBuffer->capacity()) {
+ mBuffer = new ABuffer(7 + buffer->range_length());
}
- if (mAACBuffer == NULL) {
- size_t alloc = 4096;
- if (buffer->range_length() + 7 > alloc) {
- alloc = 7 + buffer->range_length();
- }
+ int64_t timeUs;
+ CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
- mAACBuffer = new ABuffer(alloc);
+ mBuffer->meta()->setInt64("timeUs", timeUs);
+ mBuffer->meta()->setInt32("isSync", true);
- int64_t timeUs;
- CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
-
- mAACBuffer->meta()->setInt64("timeUs", timeUs);
- mAACBuffer->meta()->setInt32("isSync", true);
-
- mAACBuffer->setRange(0, 0);
- }
+ mBuffer->setRange(0, 0);
const uint8_t *codec_specific_data = mAACCodecSpecificData->data();
@@ -312,7 +306,7 @@
unsigned channel_configuration =
(codec_specific_data[1] >> 3) & 0x0f;
- uint8_t *ptr = mAACBuffer->data() + mAACBuffer->size();
+ uint8_t *ptr = mBuffer->data() + mBuffer->size();
const uint32_t aac_frame_length = buffer->range_length() + 7;
@@ -340,24 +334,10 @@
ptr += buffer->range_length();
- mAACBuffer->setRange(0, ptr - mAACBuffer->data());
+ mBuffer->setRange(0, ptr - mBuffer->data());
- return accessUnitPosted;
-}
-
-bool MPEG2TSWriter::SourceInfo::flushAACFrames() {
- if (mAACBuffer == NULL) {
- return false;
- }
-
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kNotifyBuffer);
- notify->setBuffer("buffer", mAACBuffer);
+ notify->setBuffer("buffer", mBuffer);
notify->post();
-
- mAACBuffer.clear();
-
- return true;
}
void MPEG2TSWriter::SourceInfo::readMore() {
@@ -368,7 +348,10 @@
switch (msg->what()) {
case kWhatStart:
{
- status_t err = mSource->start();
+ sp<RefBase> obj;
+ CHECK(msg->findObject("meta", &obj));
+ MetaData *params = static_cast<MetaData *>(obj.get());
+ status_t err = mSource->start(params);
if (err != OK) {
sp<AMessage> notify = mNotify->dup();
notify->setInt32("what", kNotifyStartFailed);
@@ -376,6 +359,7 @@
break;
}
+ // Extract CSD from config format.
extractCodecSpecificData();
readMore();
@@ -388,10 +372,6 @@
status_t err = mSource->read(&buffer);
if (err != OK && err != INFO_FORMAT_CHANGED) {
- if (mStreamType == 0x0f) {
- flushAACFrames();
- }
-
sp<AMessage> notify = mNotify->dup();
notify->setInt32("what", kNotifyReachedEOS);
notify->setInt32("status", err);
@@ -401,23 +381,20 @@
if (err == OK) {
if (mStreamType == 0x0f && mAACCodecSpecificData == NULL) {
- // The first buffer contains codec specific data.
-
+ // The first audio buffer must contain CSD if not received yet.
CHECK_GE(buffer->range_length(), 2u);
-
mAACCodecSpecificData = new ABuffer(buffer->range_length());
memcpy(mAACCodecSpecificData->data(),
(const uint8_t *)buffer->data()
+ buffer->range_offset(),
buffer->range_length());
+ readMore();
} else if (buffer->range_length() > 0) {
if (mStreamType == 0x0f) {
- if (!appendAACFrames(buffer)) {
- msg->post();
- }
+ appendAACFrames(buffer);
} else {
- postAVCFrame(buffer);
+ appendAVCFrame(buffer);
}
} else {
readMore();
@@ -452,7 +429,6 @@
int64_t timeUs;
CHECK(mLastAccessUnit->meta()->findInt64("timeUs", &timeUs));
-
return timeUs;
}
@@ -542,7 +518,7 @@
return OK;
}
-status_t MPEG2TSWriter::start(MetaData * /* param */) {
+status_t MPEG2TSWriter::start(MetaData *param ) {
CHECK(!mStarted);
mStarted = true;
@@ -556,7 +532,7 @@
notify->setInt32("source-index", i);
- mSources.editItemAt(i)->start(notify);
+ mSources.editItemAt(i)->start(notify, param);
}
return OK;
@@ -594,13 +570,13 @@
{
int32_t sourceIndex;
CHECK(msg->findInt32("source-index", &sourceIndex));
+ sp<SourceInfo> source = mSources.editItemAt(sourceIndex);
int32_t what;
CHECK(msg->findInt32("what", &what));
if (what == SourceInfo::kNotifyReachedEOS
|| what == SourceInfo::kNotifyStartFailed) {
- sp<SourceInfo> source = mSources.editItemAt(sourceIndex);
source->setEOSReceived();
sp<ABuffer> buffer = source->lastAccessUnit();
@@ -615,6 +591,7 @@
} else if (what == SourceInfo::kNotifyBuffer) {
sp<ABuffer> buffer;
CHECK(msg->findBuffer("buffer", &buffer));
+ CHECK(source->lastAccessUnit() == NULL);
int32_t oob;
if (msg->findInt32("oob", &oob) && oob) {
@@ -635,15 +612,10 @@
// Rinse, repeat.
// If we don't have data on any track we don't write
// anything just yet.
-
- sp<SourceInfo> source = mSources.editItemAt(sourceIndex);
-
- CHECK(source->lastAccessUnit() == NULL);
source->setLastAccessUnit(buffer);
ALOGV("lastAccessUnitTimeUs[%d] = %.2f secs",
- sourceIndex, source->lastAccessUnitTimeUs() / 1E6);
-
+ sourceIndex, source->lastAccessUnitTimeUs() / 1E6);
int64_t minTimeUs = -1;
size_t minIndex = 0;
@@ -665,15 +637,14 @@
}
if (minTimeUs < 0) {
- ALOGV("not a all tracks have valid data.");
+ ALOGV("not all tracks have valid data.");
break;
}
ALOGV("writing access unit at time %.2f secs (index %zu)",
- minTimeUs / 1E6, minIndex);
+ minTimeUs / 1E6, minIndex);
source = mSources.editItemAt(minIndex);
-
buffer = source->lastAccessUnit();
source->setLastAccessUnit(NULL);
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 4c0bb39..941c759 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -73,6 +73,7 @@
Vector<SidxEntry> &sidx,
const Trex *trex,
off64_t firstMoofOffset);
+ virtual status_t init();
virtual status_t start(MetaData *params = NULL);
virtual status_t stop();
@@ -469,6 +470,22 @@
const char *mime;
CHECK(track->meta->findCString(kKeyMIMEType, &mime));
if (!strncasecmp("video/", mime, 6)) {
+ // MPEG2 tracks do not provide CSD, so read the stream header
+ if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG2)) {
+ off64_t offset;
+ size_t size;
+ if (track->sampleTable->getMetaDataForSample(
+ 0 /* sampleIndex */, &offset, &size, NULL /* sampleTime */) == OK) {
+ if (size > kMaxTrackHeaderSize) {
+ size = kMaxTrackHeaderSize;
+ }
+ uint8_t header[kMaxTrackHeaderSize];
+ if (mDataSource->readAt(offset, &header, size) == (ssize_t)size) {
+ track->meta->setData(kKeyStreamHeader, 'mdat', header, size);
+ }
+ }
+ }
+
if (mMoofOffset > 0) {
int64_t duration;
if (track->meta->findInt64(kKeyDuration, &duration)) {
@@ -489,22 +506,6 @@
((int64_t)sampleTime * 1000000) / track->timescale);
}
}
-
- // MPEG2 tracks do not provide CSD, so read the stream header
- if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG2)) {
- off64_t offset;
- size_t size;
- if (track->sampleTable->getMetaDataForSample(
- 0 /* sampleIndex */, &offset, &size, NULL /* sampleTime */) == OK) {
- if (size > kMaxTrackHeaderSize) {
- size = kMaxTrackHeaderSize;
- }
- uint8_t header[kMaxTrackHeaderSize];
- if (mDataSource->readAt(offset, &header, size) == (ssize_t)size) {
- track->meta->setData(kKeyStreamHeader, 'mdat', header, size);
- }
- }
- }
}
}
@@ -816,6 +817,10 @@
ALOGE("b/23540914");
return ERROR_MALFORMED;
}
+ if (depth > 100) {
+ ALOGE("b/27456299");
+ return ERROR_MALFORMED;
+ }
uint32_t hdr[2];
if (mDataSource->readAt(*offset, hdr, 8) < 8) {
return ERROR_IO;
@@ -1236,6 +1241,7 @@
ALOGV("allocated pssh @ %p", pssh.data);
ssize_t requested = (ssize_t) pssh.datalen;
if (mDataSource->readAt(data_offset + 24, pssh.data, requested) < requested) {
+ delete[] pssh.data;
return ERROR_IO;
}
mPssh.push_back(pssh);
@@ -1392,6 +1398,28 @@
}
break;
}
+ case FOURCC('m', 'e', 't', 't'):
+ {
+ *offset += chunk_size;
+
+ if (mLastTrack == NULL)
+ return ERROR_MALFORMED;
+
+ sp<ABuffer> buffer = new ABuffer(chunk_data_size);
+ if (buffer->data() == NULL) {
+ return NO_MEMORY;
+ }
+
+ if (mDataSource->readAt(
+ data_offset, buffer->data(), chunk_data_size) < chunk_data_size) {
+ return ERROR_IO;
+ }
+
+ String8 mimeFormat((const char *)(buffer->data()), chunk_data_size);
+ mLastTrack->meta->setCString(kKeyMIMEType, mimeFormat.string());
+
+ break;
+ }
case FOURCC('m', 'p', '4', 'a'):
case FOURCC('e', 'n', 'c', 'a'):
@@ -2164,7 +2192,10 @@
*offset += chunk_size;
if (underQTMetaPath(mPath, 3)) {
- parseQTMetaKey(data_offset, chunk_data_size);
+ status_t err = parseQTMetaKey(data_offset, chunk_data_size);
+ if (err != OK) {
+ return err;
+ }
}
break;
}
@@ -2323,11 +2354,20 @@
case FOURCC('s', 'i', 'd', 'x'):
{
- parseSegmentIndex(data_offset, chunk_data_size);
+ status_t err = parseSegmentIndex(data_offset, chunk_data_size);
+ if (err != OK) {
+ return err;
+ }
*offset += chunk_size;
return UNKNOWN_ERROR; // stop parsing after sidx
}
+ case FOURCC('a', 'c', '-', '3'):
+ {
+ *offset += chunk_size;
+ return parseAC3SampleEntry(data_offset);
+ }
+
case FOURCC('f', 't', 'y', 'p'):
{
if (chunk_data_size < 8 || depth != 0) {
@@ -2365,7 +2405,10 @@
// check if we're parsing 'ilst' for meta keys
// if so, treat type as a number (key-id).
if (underQTMetaPath(mPath, 3)) {
- parseQTMetaVal(chunk_type, data_offset, chunk_data_size);
+ status_t err = parseQTMetaVal(chunk_type, data_offset, chunk_data_size);
+ if (err != OK) {
+ return err;
+ }
}
*offset += chunk_size;
@@ -2376,6 +2419,99 @@
return OK;
}
+status_t MPEG4Extractor::parseAC3SampleEntry(off64_t offset) {
+ // skip 16 bytes:
+ // + 6-byte reserved,
+ // + 2-byte data reference index,
+ // + 8-byte reserved
+ offset += 16;
+ uint16_t channelCount;
+ if (!mDataSource->getUInt16(offset, &channelCount)) {
+ return ERROR_MALFORMED;
+ }
+ // skip 8 bytes:
+ // + 2-byte channelCount,
+ // + 2-byte sample size,
+ // + 4-byte reserved
+ offset += 8;
+ uint16_t sampleRate;
+ if (!mDataSource->getUInt16(offset, &sampleRate)) {
+ ALOGE("MPEG4Extractor: error while reading ac-3 block: cannot read sample rate");
+ return ERROR_MALFORMED;
+ }
+
+ // skip 4 bytes:
+ // + 2-byte sampleRate,
+ // + 2-byte reserved
+ offset += 4;
+ return parseAC3SpecificBox(offset, sampleRate);
+}
+
+status_t MPEG4Extractor::parseAC3SpecificBox(
+ off64_t offset, uint16_t sampleRate) {
+ uint32_t size;
+ // + 4-byte size
+ // + 4-byte type
+ // + 3-byte payload
+ const uint32_t kAC3SpecificBoxSize = 11;
+ if (!mDataSource->getUInt32(offset, &size) || size < kAC3SpecificBoxSize) {
+ ALOGE("MPEG4Extractor: error while reading ac-3 block: cannot read specific box size");
+ return ERROR_MALFORMED;
+ }
+
+ offset += 4;
+ uint32_t type;
+ if (!mDataSource->getUInt32(offset, &type) || type != FOURCC('d', 'a', 'c', '3')) {
+ ALOGE("MPEG4Extractor: error while reading ac-3 specific block: header not dac3");
+ return ERROR_MALFORMED;
+ }
+
+ offset += 4;
+ const uint32_t kAC3SpecificBoxPayloadSize = 3;
+ uint8_t chunk[kAC3SpecificBoxPayloadSize];
+ if (mDataSource->readAt(offset, chunk, sizeof(chunk)) != sizeof(chunk)) {
+ ALOGE("MPEG4Extractor: error while reading ac-3 specific block: bitstream fields");
+ return ERROR_MALFORMED;
+ }
+
+ ABitReader br(chunk, sizeof(chunk));
+ static const unsigned channelCountTable[] = {2, 1, 2, 3, 3, 4, 4, 5};
+ static const unsigned sampleRateTable[] = {48000, 44100, 32000};
+
+ unsigned fscod = br.getBits(2);
+ if (fscod == 3) {
+ ALOGE("Incorrect fscod (3) in AC3 header");
+ return ERROR_MALFORMED;
+ }
+ unsigned boxSampleRate = sampleRateTable[fscod];
+ if (boxSampleRate != sampleRate) {
+ ALOGE("sample rate mismatch: boxSampleRate = %d, sampleRate = %d",
+ boxSampleRate, sampleRate);
+ return ERROR_MALFORMED;
+ }
+
+ unsigned bsid = br.getBits(5);
+ if (bsid > 8) {
+ ALOGW("Incorrect bsid in AC3 header. Possibly E-AC-3?");
+ return ERROR_MALFORMED;
+ }
+
+ // skip
+ unsigned bsmod __unused = br.getBits(3);
+
+ unsigned acmod = br.getBits(3);
+ unsigned lfeon = br.getBits(1);
+ unsigned channelCount = channelCountTable[acmod] + lfeon;
+
+ if (mLastTrack == NULL) {
+ return ERROR_MALFORMED;
+ }
+ mLastTrack->meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AC3);
+ mLastTrack->meta->setInt32(kKeyChannelCount, channelCount);
+ mLastTrack->meta->setInt32(kKeySampleRate, sampleRate);
+ return OK;
+}
+
status_t MPEG4Extractor::parseSegmentIndex(off64_t offset, size_t size) {
ALOGV("MPEG4Extractor::parseSegmentIndex");
@@ -2704,6 +2840,10 @@
}
status_t MPEG4Extractor::parseITunesMetaData(off64_t offset, size_t size) {
+ if (size == 0) {
+ return OK;
+ }
+
if (size < 4 || size == SIZE_MAX) {
return ERROR_MALFORMED;
}
@@ -2842,8 +2982,10 @@
int32_t delay, padding;
if (sscanf(mLastCommentData,
" %*x %x %x %*x", &delay, &padding) == 2) {
- if (mLastTrack == NULL)
+ if (mLastTrack == NULL) {
+ delete[] buffer;
return ERROR_MALFORMED;
+ }
mLastTrack->meta->setInt32(kKeyEncoderDelay, delay);
mLastTrack->meta->setInt32(kKeyEncoderPadding, padding);
@@ -2920,7 +3062,7 @@
int32_t type = U32_AT(&buffer[0]);
if ((type == FOURCC('n', 'c', 'l', 'x') && size >= 11)
- || (type == FOURCC('n', 'c', 'l', 'c' && size >= 10))) {
+ || (type == FOURCC('n', 'c', 'l', 'c') && size >= 10)) {
int32_t primaries = U16_AT(&buffer[4]);
int32_t transfer = U16_AT(&buffer[6]);
int32_t coeffs = U16_AT(&buffer[8]);
@@ -2998,6 +3140,13 @@
}
case FOURCC('y', 'r', 'r', 'c'):
{
+ if (size < 6) {
+ delete[] buffer;
+ buffer = NULL;
+ ALOGE("b/62133227");
+ android_errorWriteLog(0x534e4554, "62133227");
+ return ERROR_MALFORMED;
+ }
char tmp[5];
uint16_t year = U16_AT(&buffer[4]);
@@ -3190,9 +3339,13 @@
}
}
- return new MPEG4Source(this,
+ sp<MPEG4Source> source = new MPEG4Source(this,
track->meta, mDataSource, track->timescale, track->sampleTable,
mSidxEntries, trex, mMoofOffset);
+ if (source->init() != OK) {
+ return NULL;
+ }
+ return source;
}
// static
@@ -3589,6 +3742,7 @@
mTrex(trex),
mFirstMoofOffset(firstMoofOffset),
mCurrentMoofOffset(firstMoofOffset),
+ mNextMoofOffset(-1),
mCurrentTime(0),
mCurrentSampleInfoAllocSize(0),
mCurrentSampleInfoSizes(NULL),
@@ -3653,10 +3807,14 @@
CHECK(format->findInt32(kKeyTrackID, &mTrackId));
+}
+
+status_t MPEG4Source::init() {
if (mFirstMoofOffset != 0) {
off64_t offset = mFirstMoofOffset;
- parseChunk(&offset);
+ return parseChunk(&offset);
}
+ return OK;
}
MPEG4Source::~MPEG4Source() {
@@ -3783,13 +3941,35 @@
while (true) {
if (mDataSource->readAt(*offset, hdr, 8) < 8) {
- return ERROR_END_OF_STREAM;
+ // no more box to the end of file.
+ break;
}
chunk_size = ntohl(hdr[0]);
chunk_type = ntohl(hdr[1]);
+ if (chunk_size == 1) {
+ // ISO/IEC 14496-12:2012, 8.8.4 Movie Fragment Box, moof is a Box
+ // which is defined in 4.2 Object Structure.
+ // When chunk_size==1, 8 bytes follows as "largesize".
+ if (mDataSource->readAt(*offset + 8, &chunk_size, 8) < 8) {
+ return ERROR_IO;
+ }
+ chunk_size = ntoh64(chunk_size);
+ if (chunk_size < 16) {
+ // The smallest valid chunk is 16 bytes long in this case.
+ return ERROR_MALFORMED;
+ }
+ } else if (chunk_size == 0) {
+ // next box extends to end of file.
+ } else if (chunk_size < 8) {
+ // The smallest valid chunk is 8 bytes long in this case.
+ return ERROR_MALFORMED;
+ }
+
if (chunk_type == FOURCC('m', 'o', 'o', 'f')) {
mNextMoofOffset = *offset;
break;
+ } else if (chunk_size == 0) {
+ break;
}
*offset += chunk_size;
}
@@ -4673,17 +4853,25 @@
totalOffset += se->mSize;
}
mCurrentMoofOffset = totalOffset;
+ mNextMoofOffset = -1;
mCurrentSamples.clear();
mCurrentSampleIndex = 0;
- parseChunk(&totalOffset);
+ status_t err = parseChunk(&totalOffset);
+ if (err != OK) {
+ return err;
+ }
mCurrentTime = totalTime * mTimescale / 1000000ll;
} else {
// without sidx boxes, we can only seek to 0
mCurrentMoofOffset = mFirstMoofOffset;
+ mNextMoofOffset = -1;
mCurrentSamples.clear();
mCurrentSampleIndex = 0;
off64_t tmp = mCurrentMoofOffset;
- parseChunk(&tmp);
+ status_t err = parseChunk(&tmp);
+ if (err != OK) {
+ return err;
+ }
mCurrentTime = 0;
}
@@ -4712,7 +4900,10 @@
mCurrentMoofOffset = nextMoof;
mCurrentSamples.clear();
mCurrentSampleIndex = 0;
- parseChunk(&nextMoof);
+ status_t err = parseChunk(&nextMoof);
+ if (err != OK) {
+ return err;
+ }
if (mCurrentSampleIndex >= mCurrentSamples.size()) {
return ERROR_END_OF_STREAM;
}
@@ -4980,6 +5171,10 @@
return NULL;
}
+void MPEG4Extractor::populateMetrics() {
+ ALOGV("MPEG4Extractor::populateMetrics");
+}
+
static bool LegacySniffMPEG4(
const sp<DataSource> &source, String8 *mimeType, float *confidence) {
uint8_t header[8];
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 5cf5863..b48257fc 100755
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -69,15 +69,20 @@
static const uint8_t kNalUnitTypeSeqParamSet = 0x07;
static const uint8_t kNalUnitTypePicParamSet = 0x08;
static const int64_t kInitialDelayTimeUs = 700000LL;
+static const int64_t kMaxMetadataSize = 0x4000000LL; // 64MB max per-frame metadata size
static const char kMetaKey_Version[] = "com.android.version";
-#ifdef SHOW_MODEL_BUILD
+static const char kMetaKey_Manufacturer[] = "com.android.manufacturer";
static const char kMetaKey_Model[] = "com.android.model";
+
+#ifdef SHOW_BUILD
static const char kMetaKey_Build[] = "com.android.build";
#endif
static const char kMetaKey_CaptureFps[] = "com.android.capture.fps";
static const char kMetaKey_TemporalLayerCount[] = "com.android.video.temporal_layers_count";
+static const int kTimestampDebugCount = 10;
+
static const uint8_t kMandatoryHevcNalUnitTypes[3] = {
kHevcNalUnitTypeVps,
kHevcNalUnitTypeSps,
@@ -90,7 +95,7 @@
kHevcNalUnitTypePrefixSei,
kHevcNalUnitTypeSuffixSei,
};
-/* uncomment to include model and build in meta */
+/* uncomment to include build in meta */
//#define SHOW_MODEL_BUILD 1
class MPEG4Writer::Track {
@@ -100,7 +105,7 @@
~Track();
status_t start(MetaData *params);
- status_t stop();
+ status_t stop(bool stopSource = true);
status_t pause();
bool reachedEOS();
@@ -116,6 +121,8 @@
int32_t getTrackId() const { return mTrackId; }
status_t dump(int fd, const Vector<String16>& args) const;
static const char *getFourCCForMime(const char *mime);
+ const char *getTrackType() const;
+ void resetInternal();
private:
enum {
@@ -271,7 +278,9 @@
bool mIsAvc;
bool mIsHevc;
bool mIsAudio;
+ bool mIsVideo;
bool mIsMPEG4;
+ bool mGotStartKeyFrame;
bool mIsMalformed;
int32_t mTrackId;
int64_t mTrackDurationUs;
@@ -300,6 +309,9 @@
int64_t mMinCttsOffsetTimeUs;
int64_t mMaxCttsOffsetTimeUs;
+ // Save the last 10 frames' timestamp for debug.
+ std::list<std::pair<int64_t, int64_t>> mTimestampDebugHelper;
+
// Sequence parameter set or picture parameter set
struct AVCParamSet {
AVCParamSet(uint16_t length, const uint8_t *data)
@@ -329,6 +341,8 @@
// Update the audio track's drift information.
void updateDriftTime(const sp<MetaData>& meta);
+ void dumpTimeStamps();
+
int32_t getStartTimeOffsetScaledTime() const;
static void *ThreadWrapper(void *me);
@@ -393,6 +407,7 @@
void writeMdhdBox(uint32_t now);
void writeSmhdBox();
void writeVmhdBox();
+ void writeNmhdBox();
void writeHdlrBox();
void writeTkhdBox(uint32_t now);
void writeColrBox();
@@ -400,47 +415,15 @@
void writeMp4vEsdsBox();
void writeAudioFourCCBox();
void writeVideoFourCCBox();
+ void writeMetadataFourCCBox();
void writeStblBox(bool use32BitOffset);
Track(const Track &);
Track &operator=(const Track &);
};
-MPEG4Writer::MPEG4Writer(int fd)
- : mFd(dup(fd)),
- mInitCheck(mFd < 0? NO_INIT: OK),
- mIsRealTimeRecording(true),
- mUse4ByteNalLength(true),
- mUse32BitOffset(true),
- mIsFileSizeLimitExplicitlyRequested(false),
- mPaused(false),
- mStarted(false),
- mWriterThreadStarted(false),
- mOffset(0),
- mMdatOffset(0),
- mMoovBoxBuffer(NULL),
- mMoovBoxBufferOffset(0),
- mWriteMoovBoxToMemory(false),
- mFreeBoxOffset(0),
- mStreamableFile(false),
- mEstimatedMoovBoxSize(0),
- mMoovExtraSize(0),
- mInterleaveDurationUs(1000000),
- mTimeScale(-1),
- mStartTimestampUs(-1ll),
- mLatitudex10000(0),
- mLongitudex10000(0),
- mAreGeoTagsAvailable(false),
- mStartTimeOffsetMs(-1),
- mMetaKeys(new AMessage()) {
- addDeviceMeta();
-
- // Verify mFd is seekable
- off64_t off = lseek64(mFd, 0, SEEK_SET);
- if (off < 0) {
- ALOGE("cannot seek mFd: %s (%d)", strerror(errno), errno);
- release();
- }
+MPEG4Writer::MPEG4Writer(int fd) {
+ initInternal(fd);
}
MPEG4Writer::~MPEG4Writer() {
@@ -453,6 +436,54 @@
mTracks.erase(it);
}
mTracks.clear();
+
+ if (mNextFd != -1) {
+ close(mNextFd);
+ }
+}
+
+void MPEG4Writer::initInternal(int fd) {
+ ALOGV("initInternal");
+ mFd = dup(fd);
+ mNextFd = -1;
+ mInitCheck = mFd < 0? NO_INIT: OK;
+ mIsRealTimeRecording = true;
+ mUse4ByteNalLength = true;
+ mUse32BitOffset = true;
+ mIsFileSizeLimitExplicitlyRequested = false;
+ mPaused = false;
+ mStarted = false;
+ mWriterThreadStarted = false;
+ mSendNotify = false;
+ mOffset = 0;
+ mMdatOffset = 0;
+ mMoovBoxBuffer = NULL;
+ mMoovBoxBufferOffset = 0;
+ mWriteMoovBoxToMemory = false;
+ mFreeBoxOffset = 0;
+ mStreamableFile = false;
+ mEstimatedMoovBoxSize = 0;
+ mMoovExtraSize = 0;
+ mInterleaveDurationUs = 1000000;
+ mTimeScale = -1;
+ mStartTimestampUs = -1ll;
+ mLatitudex10000 = 0;
+ mLongitudex10000 = 0;
+ mAreGeoTagsAvailable = false;
+ mStartTimeOffsetMs = -1;
+ mSwitchPending = false;
+ mMetaKeys = new AMessage();
+ addDeviceMeta();
+ // Verify mFd is seekable
+ off64_t off = lseek64(mFd, 0, SEEK_SET);
+ if (off < 0) {
+ ALOGE("cannot seek mFd: %s (%d) %lld", strerror(errno), errno, (long long)mFd);
+ release();
+ }
+ for (List<Track *>::iterator it = mTracks.begin();
+ it != mTracks.end(); ++it) {
+ (*it)->resetInternal();
+ }
}
status_t MPEG4Writer::dump(
@@ -477,7 +508,7 @@
const size_t SIZE = 256;
char buffer[SIZE];
String8 result;
- snprintf(buffer, SIZE, " %s track\n", mIsAudio? "Audio": "Video");
+ snprintf(buffer, SIZE, " %s track\n", getTrackType());
result.append(buffer);
snprintf(buffer, SIZE, " reached EOS: %s\n",
mReachedEOS? "true": "false");
@@ -513,8 +544,10 @@
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime)) {
return "hvc1";
}
+ } else if (!strncasecmp(mime, "application/", 12)) {
+ return "mett";
} else {
- ALOGE("Track (%s) other than video or audio is not supported", mime);
+ ALOGE("Track (%s) other than video/audio/metadata is not supported", mime);
}
return NULL;
}
@@ -526,37 +559,17 @@
return UNKNOWN_ERROR;
}
- // At most 2 tracks can be supported.
- if (mTracks.size() >= 2) {
- ALOGE("Too many tracks (%zu) to add", mTracks.size());
- return ERROR_UNSUPPORTED;
- }
-
CHECK(source.get() != NULL);
const char *mime;
source->getFormat()->findCString(kKeyMIMEType, &mime);
- bool isAudio = !strncasecmp(mime, "audio/", 6);
+
if (Track::getFourCCForMime(mime) == NULL) {
ALOGE("Unsupported mime '%s'", mime);
return ERROR_UNSUPPORTED;
}
- // At this point, we know the track to be added is either
- // video or audio. Thus, we only need to check whether it
- // is an audio track or not (if it is not, then it must be
- // a video track).
-
- // No more than one video or one audio track is supported.
- for (List<Track*>::iterator it = mTracks.begin();
- it != mTracks.end(); ++it) {
- if ((*it)->isAudio() == isAudio) {
- ALOGE("%s track already exists", isAudio? "Audio": "Video");
- return ERROR_UNSUPPORTED;
- }
- }
-
- // This is the first track of either audio or video.
+ // This is a metadata track or the first track of either audio or video
// Go ahead to add the track.
Track *track = new Track(this, source, 1 + mTracks.size());
mTracks.push_back(track);
@@ -600,12 +613,20 @@
mMetaKeys->setString(kMetaKey_Version, val, n + 1);
mMoovExtraSize += sizeof(kMetaKey_Version) + n + 32;
}
-#ifdef SHOW_MODEL_BUILD
- if (property_get("ro.product.model", val, NULL)
- && (n = strlen(val)) > 0) {
- mMetaKeys->setString(kMetaKey_Model, val, n + 1);
- mMoovExtraSize += sizeof(kMetaKey_Model) + n + 32;
+
+ if (property_get_bool("media.recorder.show_manufacturer_and_model", false)) {
+ if (property_get("ro.product.manufacturer", val, NULL)
+ && (n = strlen(val)) > 0) {
+ mMetaKeys->setString(kMetaKey_Manufacturer, val, n + 1);
+ mMoovExtraSize += sizeof(kMetaKey_Manufacturer) + n + 32;
+ }
+ if (property_get("ro.product.model", val, NULL)
+ && (n = strlen(val)) > 0) {
+ mMetaKeys->setString(kMetaKey_Model, val, n + 1);
+ mMoovExtraSize += sizeof(kMetaKey_Model) + n + 32;
+ }
}
+#ifdef SHOW_MODEL_BUILD
if (property_get("ro.build.display.id", val, NULL)
&& (n = strlen(val)) > 0) {
mMetaKeys->setString(kMetaKey_Build, val, n + 1);
@@ -680,6 +701,7 @@
if (mInitCheck != OK) {
return UNKNOWN_ERROR;
}
+ mStartMeta = param;
/*
* Check mMaxFileSizeLimitBytes at the beginning
@@ -937,7 +959,30 @@
mMoovBoxBuffer = NULL;
}
-status_t MPEG4Writer::reset() {
+void MPEG4Writer::finishCurrentSession() {
+ reset(false /* stopSource */);
+}
+
+status_t MPEG4Writer::switchFd() {
+ ALOGV("switchFd");
+ Mutex::Autolock l(mLock);
+ if (mSwitchPending) {
+ return OK;
+ }
+
+ if (mNextFd == -1) {
+ ALOGW("No FileDescripter for next recording");
+ return INVALID_OPERATION;
+ }
+
+ mSwitchPending = true;
+ sp<AMessage> msg = new AMessage(kWhatSwitch, mReflector);
+ status_t err = msg->post();
+
+ return err;
+}
+
+status_t MPEG4Writer::reset(bool stopSource) {
if (mInitCheck != OK) {
return OK;
} else {
@@ -956,7 +1001,7 @@
int64_t minDurationUs = 0x7fffffffffffffffLL;
for (List<Track *>::iterator it = mTracks.begin();
it != mTracks.end(); ++it) {
- status_t status = (*it)->stop();
+ status_t status = (*it)->stop(stopSource);
if (err == OK && status != OK) {
err = status;
}
@@ -1123,9 +1168,7 @@
// Test mode is enabled only if rw.media.record.test system
// property is enabled.
- char value[PROPERTY_VALUE_MAX];
- if (property_get("rw.media.record.test", value, NULL) &&
- (!strcasecmp(value, "true") || !strcasecmp(value, "1"))) {
+ if (property_get_bool("rw.media.record.test", false)) {
return true;
}
return false;
@@ -1448,6 +1491,18 @@
return OK;
}
+void MPEG4Writer::notifyApproachingLimit() {
+ Mutex::Autolock autolock(mLock);
+ // Only notify once.
+ if (mSendNotify) {
+ return;
+ }
+ ALOGW("Recorded file size is approaching limit %" PRId64 "bytes",
+ mMaxFileSizeLimitBytes);
+ notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_MAX_FILESIZE_APPROACHING, 0);
+ mSendNotify = true;
+}
+
void MPEG4Writer::write(const void *data, size_t size) {
write(data, 1, size);
}
@@ -1461,7 +1516,6 @@
if (mMaxFileSizeLimitBytes == 0) {
return false;
}
-
int64_t nTotalBytesEstimate = static_cast<int64_t>(mEstimatedMoovBoxSize);
for (List<Track *>::iterator it = mTracks.begin();
it != mTracks.end(); ++it) {
@@ -1472,12 +1526,33 @@
// Add 1024 bytes as error tolerance
return nTotalBytesEstimate + 1024 >= mMaxFileSizeLimitBytes;
}
+
// Be conservative in the estimate: do not exceed 95% of
// the target file limit. For small target file size limit, though,
// this will not help.
return (nTotalBytesEstimate >= (95 * mMaxFileSizeLimitBytes) / 100);
}
+bool MPEG4Writer::approachingFileSizeLimit() {
+ // No limit
+ if (mMaxFileSizeLimitBytes == 0) {
+ return false;
+ }
+
+ int64_t nTotalBytesEstimate = static_cast<int64_t>(mEstimatedMoovBoxSize);
+ for (List<Track *>::iterator it = mTracks.begin();
+ it != mTracks.end(); ++it) {
+ nTotalBytesEstimate += (*it)->getEstimatedTrackSizeBytes();
+ }
+
+ if (!mStreamableFile) {
+ // Add 1024 bytes as error tolerance
+ return nTotalBytesEstimate + 1024 >= (90 * mMaxFileSizeLimitBytes) / 100;
+ }
+
+ return (nTotalBytesEstimate >= (90 * mMaxFileSizeLimitBytes) / 100);
+}
+
bool MPEG4Writer::exceedsFileDurationLimit() {
// No limit
if (mMaxFileDurationLimitUs == 0) {
@@ -1537,6 +1612,7 @@
mPaused(false),
mResumed(false),
mStarted(false),
+ mGotStartKeyFrame(false),
mIsMalformed(false),
mTrackId(trackId),
mTrackDurationUs(0),
@@ -1561,11 +1637,12 @@
mIsAvc = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC);
mIsHevc = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC);
mIsAudio = !strncasecmp(mime, "audio/", 6);
+ mIsVideo = !strncasecmp(mime, "video/", 6);
mIsMPEG4 = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4) ||
!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC);
// store temporal layer count
- if (!mIsAudio) {
+ if (mIsVideo) {
int32_t count;
if (mMeta->findInt32(kKeyTemporalLayerCount, &count) && count > 1) {
mOwner->setTemporalLayerCount(count);
@@ -1575,6 +1652,50 @@
setTimeScale();
}
+// Clear all the internal states except the CSD data.
+void MPEG4Writer::Track::resetInternal() {
+ mDone = false;
+ mPaused = false;
+ mResumed = false;
+ mStarted = false;
+ mGotStartKeyFrame = false;
+ mIsMalformed = false;
+ mTrackDurationUs = 0;
+ mEstimatedTrackSizeBytes = 0;
+ mSamplesHaveSameSize = 0;
+ if (mStszTableEntries != NULL) {
+ delete mStszTableEntries;
+ mStszTableEntries = new ListTableEntries<uint32_t, 1>(1000);
+ }
+
+ if (mStcoTableEntries != NULL) {
+ delete mStcoTableEntries;
+ mStcoTableEntries = new ListTableEntries<uint32_t, 1>(1000);
+ }
+ if (mCo64TableEntries != NULL) {
+ delete mCo64TableEntries;
+ mCo64TableEntries = new ListTableEntries<off64_t, 1>(1000);
+ }
+
+ if (mStscTableEntries != NULL) {
+ delete mStscTableEntries;
+ mStscTableEntries = new ListTableEntries<uint32_t, 3>(1000);
+ }
+ if (mStssTableEntries != NULL) {
+ delete mStssTableEntries;
+ mStssTableEntries = new ListTableEntries<uint32_t, 1>(1000);
+ }
+ if (mSttsTableEntries != NULL) {
+ delete mSttsTableEntries;
+ mSttsTableEntries = new ListTableEntries<uint32_t, 2>(1000);
+ }
+ if (mCttsTableEntries != NULL) {
+ delete mCttsTableEntries;
+ mCttsTableEntries = new ListTableEntries<uint32_t, 2>(1000);
+ }
+ mReachedEOS = false;
+}
+
void MPEG4Writer::Track::updateTrackSizeEstimate() {
uint32_t stcoBoxCount = (mOwner->use32BitFileOffset()
@@ -1621,13 +1742,31 @@
void MPEG4Writer::Track::addOneCttsTableEntry(
size_t sampleCount, int32_t duration) {
- if (mIsAudio) {
+ if (!mIsVideo) {
return;
}
mCttsTableEntries->add(htonl(sampleCount));
mCttsTableEntries->add(htonl(duration));
}
+status_t MPEG4Writer::setNextFd(int fd) {
+ ALOGV("addNextFd");
+ Mutex::Autolock l(mLock);
+ if (mLooper == NULL) {
+ mReflector = new AHandlerReflector<MPEG4Writer>(this);
+ mLooper = new ALooper;
+ mLooper->registerHandler(mReflector);
+ mLooper->start();
+ }
+
+ if (mNextFd != -1) {
+ // No need to set a new FD yet.
+ return INVALID_OPERATION;
+ }
+ mNextFd = fd;
+ return OK;
+}
+
void MPEG4Writer::Track::addChunkOffset(off64_t offset) {
if (mOwner->use32BitFileOffset()) {
uint32_t value = offset;
@@ -1659,8 +1798,29 @@
CHECK_GT(mTimeScale, 0);
}
+void MPEG4Writer::onMessageReceived(const sp<AMessage> &msg) {
+ switch (msg->what()) {
+ case kWhatSwitch:
+ {
+ finishCurrentSession();
+ mLock.lock();
+ int fd = mNextFd;
+ mNextFd = -1;
+ mLock.unlock();
+ initInternal(fd);
+ start(mStartMeta.get());
+ mSwitchPending = false;
+ notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_NEXT_OUTPUT_FILE_STARTED, 0);
+ break;
+ }
+ default:
+ TRESPASS();
+ }
+}
+
void MPEG4Writer::Track::getCodecSpecificDataFromInputFormatIfPossible() {
const char *mime;
+
CHECK(mMeta->findCString(kKeyMIMEType, &mime));
uint32_t type;
@@ -1753,7 +1913,7 @@
void MPEG4Writer::writeChunkToFile(Chunk* chunk) {
ALOGV("writeChunkToFile: %" PRId64 " from %s track",
- chunk->mTimeStampUs, chunk->mTrack->isAudio()? "audio": "video");
+ chunk->mTimeStampUs, chunk->mTrack->getTrackType());
int32_t isFirstSample = true;
while (!chunk->mSamples.empty()) {
@@ -1906,7 +2066,7 @@
mStartTimeRealUs = startTimeUs;
int32_t rotationDegrees;
- if (!mIsAudio && params && params->findInt32(kKeyRotation, &rotationDegrees)) {
+ if (mIsVideo && params && params->findInt32(kKeyRotation, &rotationDegrees)) {
mRotation = rotationDegrees;
}
@@ -1963,8 +2123,8 @@
return OK;
}
-status_t MPEG4Writer::Track::stop() {
- ALOGD("%s track stopping", mIsAudio? "Audio": "Video");
+status_t MPEG4Writer::Track::stop(bool stopSource) {
+ ALOGD("%s track stopping. %s source", getTrackType(), stopSource ? "Stop" : "Not Stop");
if (!mStarted) {
ALOGE("Stop() called but track is not started");
return ERROR_END_OF_STREAM;
@@ -1974,16 +2134,17 @@
return OK;
}
mDone = true;
-
- ALOGD("%s track source stopping", mIsAudio? "Audio": "Video");
- mSource->stop();
- ALOGD("%s track source stopped", mIsAudio? "Audio": "Video");
+ if (stopSource) {
+ ALOGD("%s track source stopping", getTrackType());
+ mSource->stop();
+ ALOGD("%s track source stopped", getTrackType());
+ }
void *dummy;
pthread_join(mThread, &dummy);
status_t err = static_cast<status_t>(reinterpret_cast<uintptr_t>(dummy));
- ALOGD("%s track stopped", mIsAudio? "Audio": "Video");
+ ALOGD("%s track stopped. %s source", getTrackType(), stopSource ? "Stop" : "Not Stop");
return err;
}
@@ -2357,6 +2518,17 @@
}
}
+void MPEG4Writer::Track::dumpTimeStamps() {
+ ALOGE("Dumping %s track's last 10 frames timestamp ", getTrackType());
+ std::string timeStampString;
+ for (std::list<std::pair<int64_t, int64_t>>::iterator num = mTimestampDebugHelper.begin();
+ num != mTimestampDebugHelper.end(); ++num) {
+ timeStampString += "(" + std::to_string(num->first)+
+ "us, " + std::to_string(num->second) + "us) ";
+ }
+ ALOGE("%s", timeStampString.c_str());
+}
+
status_t MPEG4Writer::Track::threadEntry() {
int32_t count = 0;
const int64_t interleaveDurationUs = mOwner->interleaveDuration();
@@ -2381,8 +2553,10 @@
if (mIsAudio) {
prctl(PR_SET_NAME, (unsigned long)"AudioTrackEncoding", 0, 0, 0);
- } else {
+ } else if (mIsVideo) {
prctl(PR_SET_NAME, (unsigned long)"VideoTrackEncoding", 0, 0, 0);
+ } else {
+ prctl(PR_SET_NAME, (unsigned long)"MetadataTrackEncoding", 0, 0, 0);
}
if (mOwner->isRealTimeRecording()) {
@@ -2393,7 +2567,7 @@
status_t err = OK;
MediaBuffer *buffer;
- const char *trackName = mIsAudio ? "Audio" : "Video";
+ const char *trackName = getTrackType();
while (!mDone && (err = mSource->read(&buffer)) == OK) {
if (buffer->range_length() == 0) {
buffer->release();
@@ -2424,19 +2598,17 @@
ALOGI("ignoring additional CSD for video track after first frame");
} else {
mMeta = mSource->getFormat(); // get output format after format change
-
+ status_t err;
if (mIsAvc) {
- status_t err = makeAVCCodecSpecificData(
+ err = makeAVCCodecSpecificData(
(const uint8_t *)buffer->data()
+ buffer->range_offset(),
buffer->range_length());
- CHECK_EQ((status_t)OK, err);
} else if (mIsHevc) {
- status_t err = makeHEVCCodecSpecificData(
+ err = makeHEVCCodecSpecificData(
(const uint8_t *)buffer->data()
+ buffer->range_offset(),
buffer->range_length());
- CHECK_EQ((status_t)OK, err);
} else if (mIsMPEG4) {
copyCodecSpecificData((const uint8_t *)buffer->data() + buffer->range_offset(),
buffer->range_length());
@@ -2445,11 +2617,27 @@
buffer->release();
buffer = NULL;
+ if (OK != err) {
+ mSource->stop();
+ mOwner->notify(MEDIA_RECORDER_TRACK_EVENT_ERROR,
+ mTrackId | MEDIA_RECORDER_TRACK_ERROR_GENERAL, err);
+ break;
+ }
mGotAllCodecSpecificData = true;
continue;
}
+ // Per-frame metadata sample's size must be smaller than max allowed.
+ if (!mIsVideo && !mIsAudio && buffer->range_length() >= kMaxMetadataSize) {
+ ALOGW("Buffer size is %zu. Maximum metadata buffer size is %lld for %s track",
+ buffer->range_length(), (long long)kMaxMetadataSize, trackName);
+ buffer->release();
+ mSource->stop();
+ mIsMalformed = true;
+ break;
+ }
+
++nActualFrames;
// Make a deep copy of the MediaBuffer and Metadata and release
@@ -2478,13 +2666,20 @@
updateTrackSizeEstimate();
if (mOwner->exceedsFileSizeLimit()) {
- ALOGW("Recorded file size exceeds limit %" PRId64 "bytes",
- mOwner->mMaxFileSizeLimitBytes);
- mOwner->notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_MAX_FILESIZE_REACHED, 0);
+ if (mOwner->switchFd() != OK) {
+ ALOGW("Recorded file size exceeds limit %" PRId64 "bytes",
+ mOwner->mMaxFileSizeLimitBytes);
+ mSource->stop();
+ mOwner->notify(
+ MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_MAX_FILESIZE_REACHED, 0);
+ } else {
+ ALOGV("%s Current recorded file size exceeds limit %" PRId64 "bytes. Switching output",
+ getTrackType(), mOwner->mMaxFileSizeLimitBytes);
+ }
copy->release();
- mSource->stop();
break;
}
+
if (mOwner->exceedsFileDurationLimit()) {
ALOGW("Recorded file duration exceeds limit %" PRId64 "microseconds",
mOwner->mMaxFileDurationLimitUs);
@@ -2494,11 +2689,23 @@
break;
}
+ if (mOwner->approachingFileSizeLimit()) {
+ mOwner->notifyApproachingLimit();
+ }
int32_t isSync = false;
meta_data->findInt32(kKeyIsSyncFrame, &isSync);
CHECK(meta_data->findInt64(kKeyTime, ×tampUs));
+ // For video, skip the first several non-key frames until getting the first key frame.
+ if (mIsVideo && !mGotStartKeyFrame && !isSync) {
+ ALOGD("Video skip non-key frame");
+ copy->release();
+ continue;
+ }
+ if (mIsVideo && isSync) {
+ mGotStartKeyFrame = true;
+ }
////////////////////////////////////////////////////////////////////////////////
if (mStszTableEntries->count() == 0) {
mFirstSampleTimeRealUs = systemTime() / 1000;
@@ -2527,8 +2734,9 @@
previousPausedDurationUs += pausedDurationUs - lastDurationUs;
mResumed = false;
}
-
+ std::pair<int64_t, int64_t> timestampPair;
timestampUs -= previousPausedDurationUs;
+ timestampPair.first = timestampUs;
if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
copy->release();
mSource->stop();
@@ -2536,7 +2744,7 @@
break;
}
- if (!mIsAudio) {
+ if (mIsVideo) {
/*
* Composition time: timestampUs
* Decoding time: decodingTimeUs
@@ -2550,9 +2758,11 @@
if (mLastDecodingTimeUs < 0) {
decodingTimeUs = std::max((int64_t)0, decodingTimeUs);
} else {
- // increase decoding time by at least 1 tick
- decodingTimeUs = std::max(
- mLastDecodingTimeUs + divUp(1000000, mTimeScale), decodingTimeUs);
+ // increase decoding time by at least the larger vaule of 1 tick and
+ // 0.1 milliseconds. This needs to take into account the possible
+ // delta adjustment in DurationTicks in below.
+ decodingTimeUs = std::max(mLastDecodingTimeUs +
+ std::max(100, divUp(1000000, mTimeScale)), decodingTimeUs);
}
mLastDecodingTimeUs = decodingTimeUs;
@@ -2661,7 +2871,6 @@
timestampUs += deltaUs;
}
}
-
mStszTableEntries->add(htonl(sampleSize));
if (mStszTableEntries->count() > 2) {
@@ -2686,6 +2895,12 @@
lastDurationUs = timestampUs - lastTimestampUs;
lastDurationTicks = currDurationTicks;
lastTimestampUs = timestampUs;
+ timestampPair.second = timestampUs;
+ // Insert the timestamp into the mTimestampDebugHelper
+ if (mTimestampDebugHelper.size() >= kTimestampDebugCount) {
+ mTimestampDebugHelper.pop_front();
+ }
+ mTimestampDebugHelper.push_back(timestampPair);
if (isSync != 0) {
addOneStssTableEntry(mStszTableEntries->count());
@@ -2741,6 +2956,7 @@
}
if (isTrackMalFormed()) {
+ dumpTimeStamps();
err = ERROR_MALFORMED;
}
@@ -2808,7 +3024,7 @@
return true;
}
- if (!mIsAudio && mStssTableEntries->count() == 0) { // no sync frames for video
+ if (mIsVideo && mStssTableEntries->count() == 0) { // no sync frames for video
ALOGE("There are no sync frames for video track");
return true;
}
@@ -2831,7 +3047,7 @@
mOwner->notify(MEDIA_RECORDER_TRACK_EVENT_INFO,
trackNum | MEDIA_RECORDER_TRACK_INFO_TYPE,
- mIsAudio? 0: 1);
+ mIsAudio ? 0: 1);
mOwner->notify(MEDIA_RECORDER_TRACK_EVENT_INFO,
trackNum | MEDIA_RECORDER_TRACK_INFO_DURATION_MS,
@@ -2971,11 +3187,11 @@
return OK;
}
+const char *MPEG4Writer::Track::getTrackType() const {
+ return mIsAudio ? "Audio" : (mIsVideo ? "Video" : "Metadata");
+}
+
void MPEG4Writer::Track::writeTrackHeader(bool use32BitOffset) {
-
- ALOGV("%s track time scale: %d",
- mIsAudio? "Audio": "Video", mTimeScale);
-
uint32_t now = getMpeg4Time();
mOwner->beginBox("trak");
writeTkhdBox(now);
@@ -2985,8 +3201,10 @@
mOwner->beginBox("minf");
if (mIsAudio) {
writeSmhdBox();
- } else {
+ } else if (mIsVideo) {
writeVmhdBox();
+ } else {
+ writeNmhdBox();
}
writeDinfBox();
writeStblBox(use32BitOffset);
@@ -3002,13 +3220,15 @@
mOwner->writeInt32(1); // entry count
if (mIsAudio) {
writeAudioFourCCBox();
- } else {
+ } else if (mIsVideo) {
writeVideoFourCCBox();
+ } else {
+ writeMetadataFourCCBox();
}
mOwner->endBox(); // stsd
writeSttsBox();
- writeCttsBox();
- if (!mIsAudio) {
+ if (mIsVideo) {
+ writeCttsBox();
writeStssBox();
}
writeStszBox();
@@ -3017,6 +3237,20 @@
mOwner->endBox(); // stbl
}
+void MPEG4Writer::Track::writeMetadataFourCCBox() {
+ const char *mime;
+ bool success = mMeta->findCString(kKeyMIMEType, &mime);
+ CHECK(success);
+ const char *fourcc = getFourCCForMime(mime);
+ if (fourcc == NULL) {
+ ALOGE("Unknown mime type '%s'.", mime);
+ TRESPASS();
+ }
+ mOwner->beginBox(fourcc); // TextMetaDataSampleEntry
+ mOwner->writeCString(mime); // metadata mime_format
+ mOwner->endBox(); // mett
+}
+
void MPEG4Writer::Track::writeVideoFourCCBox() {
const char *mime;
bool success = mMeta->findCString(kKeyMIMEType, &mime);
@@ -3024,7 +3258,7 @@
const char *fourcc = getFourCCForMime(mime);
if (fourcc == NULL) {
ALOGE("Unknown mime type '%s'.", mime);
- CHECK(!"should not be here, unknown mime type.");
+ TRESPASS();
}
mOwner->beginBox(fourcc); // video format
@@ -3097,7 +3331,7 @@
const char *fourcc = getFourCCForMime(mime);
if (fourcc == NULL) {
ALOGE("Unknown mime type '%s'.", mime);
- CHECK(!"should not be here, unknown mime type.");
+ TRESPASS();
}
mOwner->beginBox(fourcc); // audio format
@@ -3240,13 +3474,19 @@
mOwner->writeCompositionMatrix(mRotation); // matrix
- if (mIsAudio) {
+ if (!mIsVideo) {
mOwner->writeInt32(0);
mOwner->writeInt32(0);
} else {
int32_t width, height;
- bool success = mMeta->findInt32(kKeyWidth, &width);
- success = success && mMeta->findInt32(kKeyHeight, &height);
+ bool success = mMeta->findInt32(kKeyDisplayWidth, &width);
+ success = success && mMeta->findInt32(kKeyDisplayHeight, &height);
+
+ // Use width/height if display width/height are not present.
+ if (!success) {
+ success = mMeta->findInt32(kKeyWidth, &width);
+ success = success && mMeta->findInt32(kKeyHeight, &height);
+ }
CHECK(success);
mOwner->writeInt32(width << 16); // 32-bit fixed-point value
@@ -3273,16 +3513,22 @@
mOwner->endBox();
}
+void MPEG4Writer::Track::writeNmhdBox() {
+ mOwner->beginBox("nmhd");
+ mOwner->writeInt32(0); // version=0, flags=0
+ mOwner->endBox();
+}
+
void MPEG4Writer::Track::writeHdlrBox() {
mOwner->beginBox("hdlr");
mOwner->writeInt32(0); // version=0, flags=0
mOwner->writeInt32(0); // component type: should be mhlr
- mOwner->writeFourcc(mIsAudio ? "soun" : "vide"); // component subtype
+ mOwner->writeFourcc(mIsAudio ? "soun" : (mIsVideo ? "vide" : "meta")); // component subtype
mOwner->writeInt32(0); // reserved
mOwner->writeInt32(0); // reserved
mOwner->writeInt32(0); // reserved
// Removing "r" for the name string just makes the string 4 byte aligned
- mOwner->writeCString(mIsAudio ? "SoundHandle": "VideoHandle"); // name
+ mOwner->writeCString(mIsAudio ? "SoundHandle": (mIsVideo ? "VideoHandle" : "MetadHandle"));
mOwner->endBox();
}
@@ -3309,7 +3555,12 @@
// Each character is packed as the difference between its ASCII value and 0x60.
// For "English", these are 00101, 01110, 00111.
// XXX: Where is the padding bit located: 0x15C7?
- mOwner->writeInt16(0); // language code
+ const char *lang = NULL;
+ int16_t langCode = 0;
+ if (mMeta->findCString(kKeyMediaLanguage, &lang) && lang && strnlen(lang, 3) > 2) {
+ langCode = ((lang[0] & 0x1f) << 10) | ((lang[1] & 0x1f) << 5) | (lang[2] & 0x1f);
+ }
+ mOwner->writeInt16(langCode); // language code
mOwner->writeInt16(0); // predefined
mOwner->endBox();
}
@@ -3413,10 +3664,6 @@
}
void MPEG4Writer::Track::writeCttsBox() {
- if (mIsAudio) { // ctts is not for audio
- return;
- }
-
// There is no B frame at all
if (mMinCttsOffsetTimeUs == mMaxCttsOffsetTimeUs) {
return;
@@ -3432,11 +3679,19 @@
mOwner->beginBox("ctts");
mOwner->writeInt32(0); // version=0, flags=0
- uint32_t delta = mMinCttsOffsetTimeUs - getStartTimeOffsetScaledTime();
+ int64_t delta = mMinCttsOffsetTimeUs - getStartTimeOffsetScaledTime();
mCttsTableEntries->adjustEntries([delta](size_t /* ix */, uint32_t (&value)[2]) {
// entries are <count, ctts> pairs; adjust only ctts
uint32_t duration = htonl(value[1]); // back to host byte order
- value[1] = htonl(duration - delta);
+ // Prevent overflow and underflow
+ if (delta > duration) {
+ duration = 0;
+ } else if (delta < 0 && UINT32_MAX + delta < duration) {
+ duration = UINT32_MAX;
+ } else {
+ duration -= delta;
+ }
+ value[1] = htonl(duration);
});
mCttsTableEntries->write(mOwner);
mOwner->endBox(); // ctts
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index b088775..bd71632 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -19,8 +19,11 @@
#include <inttypes.h>
#include "include/avc_utils.h"
+#include "include/SecureBuffer.h"
+#include "include/SharedMemoryBuffer.h"
#include "include/SoftwareRenderer.h"
+#include <android/media/IDescrambler.h>
#include <binder/IMemory.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
@@ -30,10 +33,13 @@
#include <media/ICrypto.h>
#include <media/IOMX.h>
#include <media/IResourceManagerService.h>
+#include <media/MediaCodecBuffer.h>
+#include <media/MediaAnalyticsItem.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/AUtils.h>
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/ACodec.h>
#include <media/stagefright/BufferProducerWrapper.h>
@@ -53,6 +59,21 @@
namespace android {
+// key for media statistics
+static const char *kCodecKeyName = "codec";
+// attrs for media statistics
+static const char *kCodecCodec = "android.media.mediacodec.codec"; /* e.g. OMX.google.aac.decoder */
+static const char *kCodecMime = "android.media.mediacodec.mime"; /* e.g. audio/mime */
+static const char *kCodecMode = "android.media.mediacodec.mode"; /* audio, video */
+static const char *kCodecSecure = "android.media.mediacodec.secure"; /* 0, 1 */
+static const char *kCodecHeight = "android.media.mediacodec.height"; /* 0..n */
+static const char *kCodecWidth = "android.media.mediacodec.width"; /* 0..n */
+static const char *kCodecRotation = "android.media.mediacodec.rotation-degrees"; /* 0/90/180/270 */
+static const char *kCodecCrypto = "android.media.mediacodec.crypto"; /* 0,1 */
+static const char *kCodecEncoder = "android.media.mediacodec.encoder"; /* 0,1 */
+
+
+
static int64_t getId(const sp<IResourceManagerClient> &client) {
return (int64_t) client.get();
}
@@ -63,6 +84,9 @@
static const int kMaxRetry = 2;
static const int kMaxReclaimWaitTimeInUs = 500000; // 0.5s
+static const int kNumBuffersAlign = 16;
+
+////////////////////////////////////////////////////////////////////////////////
struct ResourceManagerClient : public BnResourceManagerClient {
explicit ResourceManagerClient(MediaCodec* codec) : mMediaCodec(codec) {}
@@ -168,10 +192,221 @@
return mService->reclaimResource(mPid, resources);
}
+////////////////////////////////////////////////////////////////////////////////
+
+MediaCodec::BufferInfo::BufferInfo() : mOwnedByClient(false) {}
+
+////////////////////////////////////////////////////////////////////////////////
+
+namespace {
+
+enum {
+ kWhatFillThisBuffer = 'fill',
+ kWhatDrainThisBuffer = 'drai',
+ kWhatEOS = 'eos ',
+ kWhatStartCompleted = 'Scom',
+ kWhatStopCompleted = 'scom',
+ kWhatReleaseCompleted = 'rcom',
+ kWhatFlushCompleted = 'fcom',
+ kWhatError = 'erro',
+ kWhatComponentAllocated = 'cAll',
+ kWhatComponentConfigured = 'cCon',
+ kWhatInputSurfaceCreated = 'isfc',
+ kWhatInputSurfaceAccepted = 'isfa',
+ kWhatSignaledInputEOS = 'seos',
+ kWhatOutputFramesRendered = 'outR',
+ kWhatOutputBuffersChanged = 'outC',
+};
+
+class BufferCallback : public CodecBase::BufferCallback {
+public:
+ explicit BufferCallback(const sp<AMessage> ¬ify);
+ virtual ~BufferCallback() = default;
+
+ virtual void onInputBufferAvailable(
+ size_t index, const sp<MediaCodecBuffer> &buffer) override;
+ virtual void onOutputBufferAvailable(
+ size_t index, const sp<MediaCodecBuffer> &buffer) override;
+private:
+ const sp<AMessage> mNotify;
+};
+
+BufferCallback::BufferCallback(const sp<AMessage> ¬ify)
+ : mNotify(notify) {}
+
+void BufferCallback::onInputBufferAvailable(
+ size_t index, const sp<MediaCodecBuffer> &buffer) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatFillThisBuffer);
+ notify->setSize("index", index);
+ notify->setObject("buffer", buffer);
+ notify->post();
+}
+
+void BufferCallback::onOutputBufferAvailable(
+ size_t index, const sp<MediaCodecBuffer> &buffer) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatDrainThisBuffer);
+ notify->setSize("index", index);
+ notify->setObject("buffer", buffer);
+ notify->post();
+}
+
+class CodecCallback : public CodecBase::CodecCallback {
+public:
+ explicit CodecCallback(const sp<AMessage> ¬ify);
+ virtual ~CodecCallback() = default;
+
+ virtual void onEos(status_t err) override;
+ virtual void onStartCompleted() override;
+ virtual void onStopCompleted() override;
+ virtual void onReleaseCompleted() override;
+ virtual void onFlushCompleted() override;
+ virtual void onError(status_t err, enum ActionCode actionCode) override;
+ virtual void onComponentAllocated(const char *componentName) override;
+ virtual void onComponentConfigured(
+ const sp<AMessage> &inputFormat, const sp<AMessage> &outputFormat) override;
+ virtual void onInputSurfaceCreated(
+ const sp<AMessage> &inputFormat,
+ const sp<AMessage> &outputFormat,
+ const sp<BufferProducerWrapper> &inputSurface) override;
+ virtual void onInputSurfaceCreationFailed(status_t err) override;
+ virtual void onInputSurfaceAccepted(
+ const sp<AMessage> &inputFormat,
+ const sp<AMessage> &outputFormat) override;
+ virtual void onInputSurfaceDeclined(status_t err) override;
+ virtual void onSignaledInputEOS(status_t err) override;
+ virtual void onOutputFramesRendered(const std::list<FrameRenderTracker::Info> &done) override;
+ virtual void onOutputBuffersChanged() override;
+private:
+ const sp<AMessage> mNotify;
+};
+
+CodecCallback::CodecCallback(const sp<AMessage> ¬ify) : mNotify(notify) {}
+
+void CodecCallback::onEos(status_t err) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatEOS);
+ notify->setInt32("err", err);
+ notify->post();
+}
+
+void CodecCallback::onStartCompleted() {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatStartCompleted);
+ notify->post();
+}
+
+void CodecCallback::onStopCompleted() {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatStopCompleted);
+ notify->post();
+}
+
+void CodecCallback::onReleaseCompleted() {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatReleaseCompleted);
+ notify->post();
+}
+
+void CodecCallback::onFlushCompleted() {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatFlushCompleted);
+ notify->post();
+}
+
+void CodecCallback::onError(status_t err, enum ActionCode actionCode) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatError);
+ notify->setInt32("err", err);
+ notify->setInt32("actionCode", actionCode);
+ notify->post();
+}
+
+void CodecCallback::onComponentAllocated(const char *componentName) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatComponentAllocated);
+ notify->setString("componentName", componentName);
+ notify->post();
+}
+
+void CodecCallback::onComponentConfigured(
+ const sp<AMessage> &inputFormat, const sp<AMessage> &outputFormat) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatComponentConfigured);
+ notify->setMessage("input-format", inputFormat);
+ notify->setMessage("output-format", outputFormat);
+ notify->post();
+}
+
+void CodecCallback::onInputSurfaceCreated(
+ const sp<AMessage> &inputFormat,
+ const sp<AMessage> &outputFormat,
+ const sp<BufferProducerWrapper> &inputSurface) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatInputSurfaceCreated);
+ notify->setMessage("input-format", inputFormat);
+ notify->setMessage("output-format", outputFormat);
+ notify->setObject("input-surface", inputSurface);
+ notify->post();
+}
+
+void CodecCallback::onInputSurfaceCreationFailed(status_t err) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatInputSurfaceCreated);
+ notify->setInt32("err", err);
+ notify->post();
+}
+
+void CodecCallback::onInputSurfaceAccepted(
+ const sp<AMessage> &inputFormat,
+ const sp<AMessage> &outputFormat) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatInputSurfaceAccepted);
+ notify->setMessage("input-format", inputFormat);
+ notify->setMessage("output-format", outputFormat);
+ notify->post();
+}
+
+void CodecCallback::onInputSurfaceDeclined(status_t err) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatInputSurfaceAccepted);
+ notify->setInt32("err", err);
+ notify->post();
+}
+
+void CodecCallback::onSignaledInputEOS(status_t err) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatSignaledInputEOS);
+ if (err != OK) {
+ notify->setInt32("err", err);
+ }
+ notify->post();
+}
+
+void CodecCallback::onOutputFramesRendered(const std::list<FrameRenderTracker::Info> &done) {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatOutputFramesRendered);
+ if (MediaCodec::CreateFramesRenderedMessage(done, notify)) {
+ notify->post();
+ }
+}
+
+void CodecCallback::onOutputBuffersChanged() {
+ sp<AMessage> notify(mNotify->dup());
+ notify->setInt32("what", kWhatOutputBuffersChanged);
+ notify->post();
+}
+
+} // namespace
+
+////////////////////////////////////////////////////////////////////////////////
+
// static
sp<MediaCodec> MediaCodec::CreateByType(
- const sp<ALooper> &looper, const AString &mime, bool encoder, status_t *err, pid_t pid) {
- sp<MediaCodec> codec = new MediaCodec(looper, pid);
+ const sp<ALooper> &looper, const AString &mime, bool encoder, status_t *err, pid_t pid,
+ uid_t uid) {
+ sp<MediaCodec> codec = new MediaCodec(looper, pid, uid);
const status_t ret = codec->init(mime, true /* nameIsType */, encoder);
if (err != NULL) {
@@ -182,8 +417,8 @@
// static
sp<MediaCodec> MediaCodec::CreateByComponentName(
- const sp<ALooper> &looper, const AString &name, status_t *err, pid_t pid) {
- sp<MediaCodec> codec = new MediaCodec(looper, pid);
+ const sp<ALooper> &looper, const AString &name, status_t *err, pid_t pid, uid_t uid) {
+ sp<MediaCodec> codec = new MediaCodec(looper, pid, uid);
const status_t ret = codec->init(name, false /* nameIsType */, false /* encoder */);
if (err != NULL) {
@@ -211,53 +446,27 @@
// static
sp<PersistentSurface> MediaCodec::CreatePersistentInputSurface() {
OMXClient client;
- CHECK_EQ(client.connect(), (status_t)OK);
+ if (client.connect() != OK) {
+ ALOGE("Failed to connect to OMX to create persistent input surface.");
+ return NULL;
+ }
+
sp<IOMX> omx = client.interface();
- const sp<IMediaCodecList> mediaCodecList = MediaCodecList::getInstance();
- if (mediaCodecList == NULL) {
- ALOGE("Failed to obtain MediaCodecList!");
- return NULL; // if called from Java should raise IOException
- }
-
- AString tmp;
- sp<AMessage> globalSettings = mediaCodecList->getGlobalSettings();
- if (globalSettings == NULL || !globalSettings->findString(
- kMaxEncoderInputBuffers, &tmp)) {
- ALOGE("Failed to get encoder input buffer count!");
- return NULL;
- }
-
- int32_t bufferCount = strtol(tmp.c_str(), NULL, 10);
- if (bufferCount <= 0
- || bufferCount > BufferQueue::MAX_MAX_ACQUIRED_BUFFERS) {
- ALOGE("Encoder input buffer count is invalid!");
- return NULL;
- }
-
sp<IGraphicBufferProducer> bufferProducer;
- sp<IGraphicBufferConsumer> bufferConsumer;
+ sp<IGraphicBufferSource> bufferSource;
- status_t err = omx->createPersistentInputSurface(
- &bufferProducer, &bufferConsumer);
+ status_t err = omx->createInputSurface(&bufferProducer, &bufferSource);
if (err != OK) {
ALOGE("Failed to create persistent input surface.");
return NULL;
}
- err = bufferConsumer->setMaxAcquiredBufferCount(bufferCount);
-
- if (err != NO_ERROR) {
- ALOGE("Unable to set BQ max acquired buffer count to %u: %d",
- bufferCount, err);
- return NULL;
- }
-
- return new PersistentSurface(bufferProducer, bufferConsumer);
+ return new PersistentSurface(bufferProducer, bufferSource);
}
-MediaCodec::MediaCodec(const sp<ALooper> &looper, pid_t pid)
+MediaCodec::MediaCodec(const sp<ALooper> &looper, pid_t pid, uid_t uid)
: mState(UNINITIALIZED),
mReleasedByResourceManager(false),
mLooper(looper),
@@ -279,11 +488,32 @@
mDequeueOutputReplyID(0),
mHaveInputSurface(false),
mHavePendingInputBuffers(false) {
+ if (uid == kNoUid) {
+ mUid = IPCThreadState::self()->getCallingUid();
+ } else {
+ mUid = uid;
+ }
+ // set up our new record, get a sessionID, put it into the in-progress list
+ mAnalyticsItem = new MediaAnalyticsItem(kCodecKeyName);
+ if (mAnalyticsItem != NULL) {
+ (void) mAnalyticsItem->generateSessionID();
+ // don't record it yet; only at the end, when we have decided that we have
+ // data worth writing (e.g. .count() > 0)
+ }
}
MediaCodec::~MediaCodec() {
CHECK_EQ(mState, UNINITIALIZED);
mResourceManagerService->removeResource(getId(mResourceManagerClient));
+
+ if (mAnalyticsItem != NULL ) {
+ if (mAnalyticsItem->count() > 0) {
+ mAnalyticsItem->setFinalized(true);
+ mAnalyticsItem->selfrecord();
+ }
+ delete mAnalyticsItem;
+ mAnalyticsItem = NULL;
+ }
}
// static
@@ -387,7 +617,13 @@
mLooper->registerHandler(this);
- mCodec->setNotificationMessage(new AMessage(kWhatCodecNotify, this));
+ mCodec->setCallback(
+ std::unique_ptr<CodecBase::CodecCallback>(
+ new CodecCallback(new AMessage(kWhatCodecNotify, this))));
+ mBufferChannel = mCodec->getBufferChannel();
+ mBufferChannel->setCallback(
+ std::unique_ptr<CodecBase::BufferCallback>(
+ new BufferCallback(new AMessage(kWhatCodecNotify, this))));
sp<AMessage> msg = new AMessage(kWhatInit, this);
msg->setString("name", name);
@@ -397,6 +633,18 @@
msg->setInt32("encoder", encoder);
}
+ if (mAnalyticsItem != NULL) {
+ if (nameIsType) {
+ // name is the mime type
+ mAnalyticsItem->setCString(kCodecMime, name.c_str());
+ } else {
+ mAnalyticsItem->setCString(kCodecCodec, name.c_str());
+ }
+ mAnalyticsItem->setCString(kCodecMode, mIsVideo ? "video" : "audio");
+ if (nameIsType)
+ mAnalyticsItem->setInt32(kCodecEncoder, encoder);
+ }
+
status_t err;
Vector<MediaResource> resources;
MediaResource::Type type =
@@ -437,18 +685,33 @@
status_t MediaCodec::configure(
const sp<AMessage> &format,
+ const sp<Surface> &nativeWindow,
+ const sp<ICrypto> &crypto,
+ uint32_t flags) {
+ return configure(format, nativeWindow, crypto, NULL, flags);
+}
+
+status_t MediaCodec::configure(
+ const sp<AMessage> &format,
const sp<Surface> &surface,
const sp<ICrypto> &crypto,
+ const sp<IDescrambler> &descrambler,
uint32_t flags) {
sp<AMessage> msg = new AMessage(kWhatConfigure, this);
if (mIsVideo) {
format->findInt32("width", &mVideoWidth);
format->findInt32("height", &mVideoHeight);
- if (!format->findInt32("rotation-degrees", &mRotationDegrees)) {
+ if (!format->findInt32(kCodecRotation, &mRotationDegrees)) {
mRotationDegrees = 0;
}
+ if (mAnalyticsItem != NULL) {
+ mAnalyticsItem->setInt32(kCodecWidth, mVideoWidth);
+ mAnalyticsItem->setInt32(kCodecHeight, mVideoHeight);
+ mAnalyticsItem->setInt32(kCodecRotation, mRotationDegrees);
+ }
+
// Prevent possible integer overflow in downstream code.
if (mInitIsEncoder
&& (uint64_t)mVideoWidth * mVideoHeight > (uint64_t)INT32_MAX / 4) {
@@ -461,8 +724,18 @@
msg->setInt32("flags", flags);
msg->setObject("surface", surface);
- if (crypto != NULL) {
- msg->setPointer("crypto", crypto.get());
+ if (crypto != NULL || descrambler != NULL) {
+ if (crypto != NULL) {
+ msg->setPointer("crypto", crypto.get());
+ } else {
+ msg->setPointer("descrambler", descrambler.get());
+ }
+ if (mAnalyticsItem != NULL) {
+ // XXX: save indication that it's crypto in some way...
+ mAnalyticsItem->setInt32(kCodecCrypto, 1);
+ }
+ } else if (mFlags & kFlagIsSecure) {
+ ALOGW("Crypto or descrambler should be given for secure codec");
}
// save msg for reset
@@ -505,6 +778,51 @@
return err;
}
+status_t MediaCodec::releaseCrypto()
+{
+ ALOGV("releaseCrypto");
+
+ sp<AMessage> msg = new AMessage(kWhatDrmReleaseCrypto, this);
+
+ sp<AMessage> response;
+ status_t status = msg->postAndAwaitResponse(&response);
+
+ if (status == OK && response != NULL) {
+ CHECK(response->findInt32("status", &status));
+ ALOGV("releaseCrypto ret: %d ", status);
+ }
+ else {
+ ALOGE("releaseCrypto err: %d", status);
+ }
+
+ return status;
+}
+
+void MediaCodec::onReleaseCrypto(const sp<AMessage>& msg)
+{
+ status_t status = INVALID_OPERATION;
+ if (mCrypto != NULL) {
+ ALOGV("onReleaseCrypto: mCrypto: %p (%d)", mCrypto.get(), mCrypto->getStrongCount());
+ mBufferChannel->setCrypto(NULL);
+ // TODO change to ALOGV
+ ALOGD("onReleaseCrypto: [before clear] mCrypto: %p (%d)",
+ mCrypto.get(), mCrypto->getStrongCount());
+ mCrypto.clear();
+
+ status = OK;
+ }
+ else {
+ ALOGW("onReleaseCrypto: No mCrypto. err: %d", status);
+ }
+
+ sp<AMessage> response = new AMessage;
+ response->setInt32("status", status);
+
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ response->postReply(replyID);
+}
+
status_t MediaCodec::setInputSurface(
const sp<PersistentSurface> &surface) {
sp<AMessage> msg = new AMessage(kWhatSetInputSurface, this);
@@ -614,14 +932,9 @@
}
bool MediaCodec::hasPendingBuffer(int portIndex) {
- const Vector<BufferInfo> &buffers = mPortBuffers[portIndex];
- for (size_t i = 0; i < buffers.size(); ++i) {
- const BufferInfo &info = buffers.itemAt(i);
- if (info.mOwnedByClient) {
- return true;
- }
- }
- return false;
+ return std::any_of(
+ mPortBuffers[portIndex].begin(), mPortBuffers[portIndex].end(),
+ [](const BufferInfo &info) { return info.mOwnedByClient; });
}
bool MediaCodec::hasPendingBuffer() {
@@ -860,17 +1173,24 @@
return OK;
}
-status_t MediaCodec::getWidevineLegacyBuffers(Vector<sp<ABuffer> > *buffers) const {
- sp<AMessage> msg = new AMessage(kWhatGetBuffers, this);
- msg->setInt32("portIndex", kPortIndexInput);
- msg->setPointer("buffers", buffers);
- msg->setInt32("widevine", true);
+status_t MediaCodec::getMetrics(MediaAnalyticsItem * &reply) {
- sp<AMessage> response;
- return PostAndAwaitResponse(msg, &response);
+ reply = NULL;
+
+ // shouldn't happen, but be safe
+ if (mAnalyticsItem == NULL) {
+ return UNKNOWN_ERROR;
+ }
+
+ // XXX: go get current values for whatever in-flight data we want
+
+ // send it back to the caller.
+ reply = mAnalyticsItem->dup();
+
+ return OK;
}
-status_t MediaCodec::getInputBuffers(Vector<sp<ABuffer> > *buffers) const {
+status_t MediaCodec::getInputBuffers(Vector<sp<MediaCodecBuffer> > *buffers) const {
sp<AMessage> msg = new AMessage(kWhatGetBuffers, this);
msg->setInt32("portIndex", kPortIndexInput);
msg->setPointer("buffers", buffers);
@@ -879,7 +1199,7 @@
return PostAndAwaitResponse(msg, &response);
}
-status_t MediaCodec::getOutputBuffers(Vector<sp<ABuffer> > *buffers) const {
+status_t MediaCodec::getOutputBuffers(Vector<sp<MediaCodecBuffer> > *buffers) const {
sp<AMessage> msg = new AMessage(kWhatGetBuffers, this);
msg->setInt32("portIndex", kPortIndexOutput);
msg->setPointer("buffers", buffers);
@@ -888,17 +1208,17 @@
return PostAndAwaitResponse(msg, &response);
}
-status_t MediaCodec::getOutputBuffer(size_t index, sp<ABuffer> *buffer) {
+status_t MediaCodec::getOutputBuffer(size_t index, sp<MediaCodecBuffer> *buffer) {
sp<AMessage> format;
return getBufferAndFormat(kPortIndexOutput, index, buffer, &format);
}
status_t MediaCodec::getOutputFormat(size_t index, sp<AMessage> *format) {
- sp<ABuffer> buffer;
+ sp<MediaCodecBuffer> buffer;
return getBufferAndFormat(kPortIndexOutput, index, &buffer, format);
}
-status_t MediaCodec::getInputBuffer(size_t index, sp<ABuffer> *buffer) {
+status_t MediaCodec::getInputBuffer(size_t index, sp<MediaCodecBuffer> *buffer) {
sp<AMessage> format;
return getBufferAndFormat(kPortIndexInput, index, buffer, &format);
}
@@ -909,7 +1229,7 @@
status_t MediaCodec::getBufferAndFormat(
size_t portIndex, size_t index,
- sp<ABuffer> *buffer, sp<AMessage> *format) {
+ sp<MediaCodecBuffer> *buffer, sp<AMessage> *format) {
// use mutex instead of a context switch
if (mReleasedByResourceManager) {
ALOGE("getBufferAndFormat - resource already released");
@@ -917,7 +1237,7 @@
}
if (buffer == NULL) {
- ALOGE("getBufferAndFormat - null ABuffer");
+ ALOGE("getBufferAndFormat - null MediaCodecBuffer");
return INVALID_OPERATION;
}
@@ -938,26 +1258,22 @@
// we also don't want mOwnedByClient to change during this
Mutex::Autolock al(mBufferLock);
- Vector<BufferInfo> *buffers = &mPortBuffers[portIndex];
- if (index >= buffers->size()) {
+ std::vector<BufferInfo> &buffers = mPortBuffers[portIndex];
+ if (index >= buffers.size()) {
ALOGE("getBufferAndFormat - trying to get buffer with "
- "bad index (index=%zu buffer_size=%zu)", index, buffers->size());
+ "bad index (index=%zu buffer_size=%zu)", index, buffers.size());
return INVALID_OPERATION;
}
- const BufferInfo &info = buffers->itemAt(index);
+ const BufferInfo &info = buffers[index];
if (!info.mOwnedByClient) {
ALOGE("getBufferAndFormat - invalid operation "
"(the index %zu is not owned by client)", index);
return INVALID_OPERATION;
}
- // by the time buffers array is initialized, crypto is set
- *buffer = (portIndex == kPortIndexInput && mCrypto != NULL) ?
- info.mEncryptedData :
- info.mData;
-
- *format = info.mFormat;
+ *buffer = info.mData;
+ *format = info.mData->format();
return OK;
}
@@ -1046,8 +1362,8 @@
return false;
}
- const sp<ABuffer> &buffer =
- mPortBuffers[kPortIndexOutput].itemAt(index).mData;
+ const sp<MediaCodecBuffer> &buffer =
+ mPortBuffers[kPortIndexOutput][index].mData;
response->setSize("index", index);
response->setSize("offset", buffer->offset());
@@ -1058,19 +1374,8 @@
response->setInt64("timeUs", timeUs);
- int32_t omxFlags;
- CHECK(buffer->meta()->findInt32("omxFlags", &omxFlags));
-
- uint32_t flags = 0;
- if (omxFlags & OMX_BUFFERFLAG_SYNCFRAME) {
- flags |= BUFFER_FLAG_SYNCFRAME;
- }
- if (omxFlags & OMX_BUFFERFLAG_CODECCONFIG) {
- flags |= BUFFER_FLAG_CODECCONFIG;
- }
- if (omxFlags & OMX_BUFFERFLAG_EOS) {
- flags |= BUFFER_FLAG_EOS;
- }
+ int32_t flags;
+ CHECK(buffer->meta()->findInt32("flags", &flags));
response->setInt32("flags", flags);
response->postReply(replyID);
@@ -1087,7 +1392,7 @@
CHECK(msg->findInt32("what", &what));
switch (what) {
- case CodecBase::kWhatError:
+ case kWhatError:
{
int32_t err, actionCode;
CHECK(msg->findInt32("err", &err));
@@ -1123,14 +1428,16 @@
break;
}
- case STOPPING:
case RELEASING:
{
// Ignore the error, assuming we'll still get
- // the shutdown complete notification.
-
+ // the shutdown complete notification. If we
+ // don't, we'll timeout and force release.
sendErrorResponse = false;
-
+ }
+ // fall-thru
+ case STOPPING:
+ {
if (mFlags & kFlagSawMediaServerDie) {
// MediaServer died, there definitely won't
// be a shutdown complete notification after
@@ -1144,6 +1451,7 @@
mComponentName.clear();
}
(new AMessage)->postReply(mReplyID);
+ sendErrorResponse = false;
}
break;
}
@@ -1219,7 +1527,7 @@
break;
}
- case CodecBase::kWhatComponentAllocated:
+ case kWhatComponentAllocated:
{
CHECK_EQ(mState, INITIALIZING);
setState(INITIALIZED);
@@ -1227,6 +1535,10 @@
CHECK(msg->findString("componentName", &mComponentName));
+ if (mComponentName.c_str()) {
+ mAnalyticsItem->setCString(kCodecCodec, mComponentName.c_str());
+ }
+
if (mComponentName.startsWith("OMX.google.")) {
mFlags |= kFlagUsesSoftwareRenderer;
} else {
@@ -1237,9 +1549,11 @@
if (mComponentName.endsWith(".secure")) {
mFlags |= kFlagIsSecure;
resourceType = MediaResource::kSecureCodec;
+ mAnalyticsItem->setInt32(kCodecSecure, 1);
} else {
mFlags &= ~kFlagIsSecure;
resourceType = MediaResource::kNonSecureCodec;
+ mAnalyticsItem->setInt32(kCodecSecure, 0);
}
if (mIsVideo) {
@@ -1251,7 +1565,7 @@
break;
}
- case CodecBase::kWhatComponentConfigured:
+ case kWhatComponentConfigured:
{
if (mState == UNINITIALIZED || mState == INITIALIZED) {
// In case a kWhatError message came in and replied with error,
@@ -1280,7 +1594,7 @@
break;
}
- case CodecBase::kWhatInputSurfaceCreated:
+ case kWhatInputSurfaceCreated:
{
// response to initiateCreateInputSurface()
status_t err = NO_ERROR;
@@ -1304,12 +1618,14 @@
break;
}
- case CodecBase::kWhatInputSurfaceAccepted:
+ case kWhatInputSurfaceAccepted:
{
// response to initiateSetInputSurface()
status_t err = NO_ERROR;
sp<AMessage> response = new AMessage();
if (!msg->findInt32("err", &err)) {
+ CHECK(msg->findMessage("input-format", &mInputFormat));
+ CHECK(msg->findMessage("output-format", &mOutputFormat));
mHaveInputSurface = true;
} else {
response->setInt32("err", err);
@@ -1318,7 +1634,7 @@
break;
}
- case CodecBase::kWhatSignaledInputEOS:
+ case kWhatSignaledInputEOS:
{
// response to signalEndOfInputStream()
sp<AMessage> response = new AMessage;
@@ -1330,138 +1646,28 @@
break;
}
-
- case CodecBase::kWhatBuffersAllocated:
+ case kWhatStartCompleted:
{
- Mutex::Autolock al(mBufferLock);
- int32_t portIndex;
- CHECK(msg->findInt32("portIndex", &portIndex));
-
- ALOGV("%s buffers allocated",
- portIndex == kPortIndexInput ? "input" : "output");
-
- CHECK(portIndex == kPortIndexInput
- || portIndex == kPortIndexOutput);
-
- mPortBuffers[portIndex].clear();
-
- Vector<BufferInfo> *buffers = &mPortBuffers[portIndex];
-
- sp<RefBase> obj;
- CHECK(msg->findObject("portDesc", &obj));
-
- sp<CodecBase::PortDescription> portDesc =
- static_cast<CodecBase::PortDescription *>(obj.get());
-
- size_t numBuffers = portDesc->countBuffers();
-
- size_t totalSize = 0;
- for (size_t i = 0; i < numBuffers; ++i) {
- if (portIndex == kPortIndexInput && mCrypto != NULL) {
- totalSize += portDesc->bufferAt(i)->capacity();
- }
+ CHECK_EQ(mState, STARTING);
+ if (mIsVideo) {
+ addResource(
+ MediaResource::kGraphicMemory,
+ MediaResource::kUnspecifiedSubType,
+ getGraphicBufferSize());
}
-
- if (totalSize) {
- mDealer = new MemoryDealer(totalSize, "MediaCodec");
- }
-
- for (size_t i = 0; i < numBuffers; ++i) {
- BufferInfo info;
- info.mBufferID = portDesc->bufferIDAt(i);
- info.mOwnedByClient = false;
- info.mData = portDesc->bufferAt(i);
- info.mNativeHandle = portDesc->handleAt(i);
- info.mMemRef = portDesc->memRefAt(i);
-
- if (portIndex == kPortIndexInput && mCrypto != NULL) {
- sp<IMemory> mem = mDealer->allocate(info.mData->capacity());
- info.mEncryptedData =
- new ABuffer(mem->pointer(), info.mData->capacity());
- info.mSharedEncryptedBuffer = mem;
- }
-
- buffers->push_back(info);
- }
-
- if (portIndex == kPortIndexOutput) {
- if (mState == STARTING) {
- // We're always allocating output buffers after
- // allocating input buffers, so this is a good
- // indication that now all buffers are allocated.
- if (mIsVideo) {
- addResource(
- MediaResource::kGraphicMemory,
- MediaResource::kUnspecifiedSubType,
- getGraphicBufferSize());
- }
- setState(STARTED);
- (new AMessage)->postReply(mReplyID);
- } else {
- mFlags |= kFlagOutputBuffersChanged;
- postActivityNotificationIfPossible();
- }
- }
+ setState(STARTED);
+ (new AMessage)->postReply(mReplyID);
break;
}
- case CodecBase::kWhatOutputFormatChanged:
+ case kWhatOutputBuffersChanged:
{
- CHECK(msg->findMessage("format", &mOutputFormat));
-
- ALOGV("[%s] output format changed to: %s",
- mComponentName.c_str(), mOutputFormat->debugString(4).c_str());
-
- if (mSoftRenderer == NULL &&
- mSurface != NULL &&
- (mFlags & kFlagUsesSoftwareRenderer)) {
- AString mime;
- CHECK(mOutputFormat->findString("mime", &mime));
-
- // TODO: propagate color aspects to software renderer to allow better
- // color conversion to RGB. For now, just mark dataspace for YUV
- // rendering.
- int32_t dataSpace;
- if (mOutputFormat->findInt32("android._dataspace", &dataSpace)) {
- ALOGD("[%s] setting dataspace on output surface to #%x",
- mComponentName.c_str(), dataSpace);
- int err = native_window_set_buffers_data_space(
- mSurface.get(), (android_dataspace)dataSpace);
- ALOGW_IF(err != 0, "failed to set dataspace on surface (%d)", err);
- }
-
- if (mime.startsWithIgnoreCase("video/")) {
- mSoftRenderer = new SoftwareRenderer(mSurface, mRotationDegrees);
- }
- }
-
- if (mFlags & kFlagIsEncoder) {
- // Before we announce the format change we should
- // collect codec specific data and amend the output
- // format as necessary.
- mFlags |= kFlagGatherCodecSpecificData;
- } else if (mFlags & kFlagIsAsync) {
- onOutputFormatChanged();
- } else {
- mFlags |= kFlagOutputFormatChanged;
- postActivityNotificationIfPossible();
- }
-
- // Notify mCrypto of video resolution changes
- if (mCrypto != NULL) {
- int32_t left, top, right, bottom, width, height;
- if (mOutputFormat->findRect("crop", &left, &top, &right, &bottom)) {
- mCrypto->notifyResolution(right - left + 1, bottom - top + 1);
- } else if (mOutputFormat->findInt32("width", &width)
- && mOutputFormat->findInt32("height", &height)) {
- mCrypto->notifyResolution(width, height);
- }
- }
-
+ mFlags |= kFlagOutputBuffersChanged;
+ postActivityNotificationIfPossible();
break;
}
- case CodecBase::kWhatOutputFramesRendered:
+ case kWhatOutputFramesRendered:
{
// ignore these in all states except running, and check that we have a
// notification set
@@ -1473,7 +1679,7 @@
break;
}
- case CodecBase::kWhatFillThisBuffer:
+ case kWhatFillThisBuffer:
{
/* size_t index = */updateBuffers(kPortIndexInput, msg);
@@ -1528,7 +1734,7 @@
break;
}
- case CodecBase::kWhatDrainThisBuffer:
+ case kWhatDrainThisBuffer:
{
/* size_t index = */updateBuffers(kPortIndexOutput, msg);
@@ -1539,34 +1745,71 @@
break;
}
- sp<ABuffer> buffer;
- CHECK(msg->findBuffer("buffer", &buffer));
+ sp<RefBase> obj;
+ CHECK(msg->findObject("buffer", &obj));
+ sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());
- int32_t omxFlags;
- CHECK(msg->findInt32("flags", &omxFlags));
+ if (mOutputFormat != buffer->format()) {
+ mOutputFormat = buffer->format();
+ ALOGV("[%s] output format changed to: %s",
+ mComponentName.c_str(), mOutputFormat->debugString(4).c_str());
- buffer->meta()->setInt32("omxFlags", omxFlags);
+ if (mSoftRenderer == NULL &&
+ mSurface != NULL &&
+ (mFlags & kFlagUsesSoftwareRenderer)) {
+ AString mime;
+ CHECK(mOutputFormat->findString("mime", &mime));
- if (mFlags & kFlagGatherCodecSpecificData) {
- // This is the very first output buffer after a
- // format change was signalled, it'll either contain
- // the one piece of codec specific data we can expect
- // or there won't be codec specific data.
- if (omxFlags & OMX_BUFFERFLAG_CODECCONFIG) {
- status_t err =
- amendOutputFormatWithCodecSpecificData(buffer);
+ // TODO: propagate color aspects to software renderer to allow better
+ // color conversion to RGB. For now, just mark dataspace for YUV
+ // rendering.
+ int32_t dataSpace;
+ if (mOutputFormat->findInt32("android._dataspace", &dataSpace)) {
+ ALOGD("[%s] setting dataspace on output surface to #%x",
+ mComponentName.c_str(), dataSpace);
+ int err = native_window_set_buffers_data_space(
+ mSurface.get(), (android_dataspace)dataSpace);
+ ALOGW_IF(err != 0, "failed to set dataspace on surface (%d)", err);
+ }
- if (err != OK) {
- ALOGE("Codec spit out malformed codec "
- "specific data!");
+ if (mime.startsWithIgnoreCase("video/")) {
+ mSoftRenderer = new SoftwareRenderer(mSurface, mRotationDegrees);
}
}
- mFlags &= ~kFlagGatherCodecSpecificData;
+ if (mFlags & kFlagIsEncoder) {
+ // Before we announce the format change we should
+ // collect codec specific data and amend the output
+ // format as necessary.
+ int32_t flags = 0;
+ (void) buffer->meta()->findInt32("flags", &flags);
+ if (flags & BUFFER_FLAG_CODECCONFIG) {
+ status_t err =
+ amendOutputFormatWithCodecSpecificData(buffer);
+
+ if (err != OK) {
+ ALOGE("Codec spit out malformed codec "
+ "specific data!");
+ }
+ }
+ }
+
if (mFlags & kFlagIsAsync) {
onOutputFormatChanged();
} else {
mFlags |= kFlagOutputFormatChanged;
+ postActivityNotificationIfPossible();
+ }
+
+ // Notify mCrypto of video resolution changes
+ if (mCrypto != NULL) {
+ int32_t left, top, right, bottom, width, height;
+ if (mOutputFormat->findRect("crop", &left, &top, &right, &bottom)) {
+ mCrypto->notifyResolution(right - left + 1, bottom - top + 1);
+ } else if (mOutputFormat->findInt32("width", &width)
+ && mOutputFormat->findInt32("height", &height)) {
+ mCrypto->notifyResolution(width, height);
+ }
}
}
@@ -1585,22 +1828,33 @@
break;
}
- case CodecBase::kWhatEOS:
+ case kWhatEOS:
{
// We already notify the client of this by using the
// corresponding flag in "onOutputBufferReady".
break;
}
- case CodecBase::kWhatShutdownCompleted:
+ case kWhatStopCompleted:
{
- if (mState == STOPPING) {
- setState(INITIALIZED);
- } else {
- CHECK_EQ(mState, RELEASING);
- setState(UNINITIALIZED);
- mComponentName.clear();
+ if (mState != STOPPING) {
+ ALOGW("Received kWhatStopCompleted in state %d", mState);
+ break;
}
+ setState(INITIALIZED);
+ (new AMessage)->postReply(mReplyID);
+ break;
+ }
+
+ case kWhatReleaseCompleted:
+ {
+ if (mState != RELEASING) {
+ ALOGW("Received kWhatReleaseCompleted in state %d", mState);
+ break;
+ }
+ setState(UNINITIALIZED);
+ mComponentName.clear();
+
mFlags &= ~kFlagIsComponentAllocated;
mResourceManagerService->removeResource(getId(mResourceManagerClient));
@@ -1609,7 +1863,7 @@
break;
}
- case CodecBase::kWhatFlushCompleted:
+ case kWhatFlushCompleted:
{
if (mState != FLUSHING) {
ALOGW("received FlushCompleted message in state %d",
@@ -1751,7 +2005,22 @@
crypto = NULL;
}
+ ALOGV("kWhatConfigure: Old mCrypto: %p (%d)",
+ mCrypto.get(), (mCrypto != NULL ? mCrypto->getStrongCount() : 0));
+
mCrypto = static_cast<ICrypto *>(crypto);
+ mBufferChannel->setCrypto(mCrypto);
+
+ ALOGV("kWhatConfigure: New mCrypto: %p (%d)",
+ mCrypto.get(), (mCrypto != NULL ? mCrypto->getStrongCount() : 0));
+
+ void *descrambler;
+ if (!msg->findPointer("descrambler", &descrambler)) {
+ descrambler = NULL;
+ }
+
+ mDescrambler = static_cast<IDescrambler *>(descrambler);
+ mBufferChannel->setDescrambler(mDescrambler);
uint32_t flags;
CHECK(msg->findInt32("flags", (int32_t *)&flags));
@@ -1773,7 +2042,6 @@
CHECK(msg->senderAwaitsResponse(&replyID));
status_t err = OK;
- sp<Surface> surface;
switch (mState) {
case CONFIGURED:
@@ -1915,7 +2183,9 @@
}
}
- if (!((mFlags & kFlagIsComponentAllocated) && targetState == UNINITIALIZED) // See 1
+ bool isReleasingAllocatedComponent =
+ (mFlags & kFlagIsComponentAllocated) && targetState == UNINITIALIZED;
+ if (!isReleasingAllocatedComponent // See 1
&& mState != INITIALIZED
&& mState != CONFIGURED && !isExecuting()) {
// 1) Permit release to shut down the component if allocated.
@@ -1939,6 +2209,14 @@
break;
}
+ // If we're flushing, or we're stopping but received a release
+ // request, post the reply for the pending call first, and consider
+ // it done. The reply token will be replaced after this, and we'll
+ // no longer be able to reply.
+ if (mState == FLUSHING || mState == STOPPING) {
+ (new AMessage)->postReply(mReplyID);
+ }
+
if (mFlags & kFlagSawMediaServerDie) {
// It's dead, Jim. Don't expect initiateShutdown to yield
// any useful results now...
@@ -1950,6 +2228,15 @@
break;
}
+ // If we already have an error, component may not be able to
+ // complete the shutdown properly. If we're stopping, post the
+ // reply now with an error to unblock the client, client can
+ // release after the failure (instead of ANR).
+ if (msg->what() == kWhatStop && (mFlags & kFlagStickyError)) {
+ PostReplyWithError(replyID, getStickyError());
+ break;
+ }
+
mReplyID = replyID;
setState(msg->what() == kWhatStop ? STOPPING : RELEASING);
@@ -1961,6 +2248,7 @@
if (mSoftRenderer != NULL && (mFlags & kFlagPushBlankBuffersOnShutdown)) {
pushBlankBuffersToNativeWindow(mSurface.get());
}
+
break;
}
@@ -2123,7 +2411,7 @@
sp<AReplyToken> replyID;
CHECK(msg->senderAwaitsResponse(&replyID));
- if (!isExecuting()) {
+ if (!isExecuting() || !mHaveInputSurface) {
PostReplyWithError(replyID, INVALID_OPERATION);
break;
} else if (mFlags & kFlagStickyError) {
@@ -2140,12 +2428,7 @@
{
sp<AReplyToken> replyID;
CHECK(msg->senderAwaitsResponse(&replyID));
- // Unfortunately widevine legacy source requires knowing all of the
- // codec input buffers, so we have to provide them even in async mode.
- int32_t widevine = 0;
- msg->findInt32("widevine", &widevine);
-
- if (!isExecuting() || ((mFlags & kFlagIsAsync) && !widevine)) {
+ if (!isExecuting() || (mFlags & kFlagIsAsync)) {
PostReplyWithError(replyID, INVALID_OPERATION);
break;
} else if (mFlags & kFlagStickyError) {
@@ -2156,7 +2439,7 @@
int32_t portIndex;
CHECK(msg->findInt32("portIndex", &portIndex));
- Vector<sp<ABuffer> > *dstBuffers;
+ Vector<sp<MediaCodecBuffer> > *dstBuffers;
CHECK(msg->findPointer("buffers", (void **)&dstBuffers));
dstBuffers->clear();
@@ -2164,14 +2447,10 @@
// createInputSurface(), or persistent set by setInputSurface()),
// give the client an empty input buffers array.
if (portIndex != kPortIndexInput || !mHaveInputSurface) {
- const Vector<BufferInfo> &srcBuffers = mPortBuffers[portIndex];
-
- for (size_t i = 0; i < srcBuffers.size(); ++i) {
- const BufferInfo &info = srcBuffers.itemAt(i);
-
- dstBuffers->push_back(
- (portIndex == kPortIndexInput && mCrypto != NULL)
- ? info.mEncryptedData : info.mData);
+ if (portIndex == kPortIndexInput) {
+ mBufferChannel->getInputBufferArray(dstBuffers);
+ } else {
+ mBufferChannel->getOutputBufferArray(dstBuffers);
}
}
@@ -2272,6 +2551,12 @@
break;
}
+ case kWhatDrmReleaseCrypto:
+ {
+ onReleaseCrypto(msg);
+ break;
+ }
+
default:
TRESPASS();
}
@@ -2300,18 +2585,20 @@
status_t MediaCodec::queueCSDInputBuffer(size_t bufferIndex) {
CHECK(!mCSD.empty());
- const BufferInfo *info =
- &mPortBuffers[kPortIndexInput].itemAt(bufferIndex);
+ const BufferInfo &info = mPortBuffers[kPortIndexInput][bufferIndex];
sp<ABuffer> csd = *mCSD.begin();
mCSD.erase(mCSD.begin());
- const sp<ABuffer> &codecInputData =
- (mCrypto != NULL) ? info->mEncryptedData : info->mData;
+ const sp<MediaCodecBuffer> &codecInputData = info.mData;
if (csd->size() > codecInputData->capacity()) {
return -EINVAL;
}
+ if (codecInputData->data() == NULL) {
+ ALOGV("Input buffer %zu is not properly allocated", bufferIndex);
+ return -EINVAL;
+ }
memcpy(codecInputData->data(), csd->data(), csd->size());
@@ -2333,7 +2620,12 @@
delete mSoftRenderer;
mSoftRenderer = NULL;
+ if ( mCrypto != NULL ) {
+ ALOGV("setState: ~mCrypto: %p (%d)",
+ mCrypto.get(), (mCrypto != NULL ? mCrypto->getStrongCount() : 0));
+ }
mCrypto.clear();
+ mDescrambler.clear();
handleSetSurface(NULL);
mInputFormat.clear();
@@ -2342,7 +2634,6 @@
mFlags &= ~kFlagOutputBuffersChanged;
mFlags &= ~kFlagStickyError;
mFlags &= ~kFlagIsEncoder;
- mFlags &= ~kFlagGatherCodecSpecificData;
mFlags &= ~kFlagIsAsync;
mStickyError = OK;
@@ -2376,27 +2667,19 @@
CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
Mutex::Autolock al(mBufferLock);
- Vector<BufferInfo> *buffers = &mPortBuffers[portIndex];
+ for (size_t i = 0; i < mPortBuffers[portIndex].size(); ++i) {
+ BufferInfo *info = &mPortBuffers[portIndex][i];
- for (size_t i = 0; i < buffers->size(); ++i) {
- BufferInfo *info = &buffers->editItemAt(i);
-
- if (info->mNotify != NULL) {
- sp<AMessage> msg = info->mNotify;
- info->mNotify = NULL;
+ if (info->mData != nullptr) {
+ sp<MediaCodecBuffer> buffer = info->mData;
if (isReclaim && info->mOwnedByClient) {
ALOGD("port %d buffer %zu still owned by client when codec is reclaimed",
portIndex, i);
} else {
- info->mMemRef = NULL;
info->mOwnedByClient = false;
+ info->mData.clear();
}
-
- if (portIndex == kPortIndexInput) {
- /* no error, just returning buffers */
- msg->setInt32("err", OK);
- }
- msg->post();
+ mBufferChannel->discardBuffer(buffer);
}
}
@@ -2406,30 +2689,22 @@
size_t MediaCodec::updateBuffers(
int32_t portIndex, const sp<AMessage> &msg) {
CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
+ size_t index;
+ CHECK(msg->findSize("index", &index));
+ sp<RefBase> obj;
+ CHECK(msg->findObject("buffer", &obj));
+ sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());
- uint32_t bufferID;
- CHECK(msg->findInt32("buffer-id", (int32_t*)&bufferID));
-
- Vector<BufferInfo> *buffers = &mPortBuffers[portIndex];
-
- for (size_t i = 0; i < buffers->size(); ++i) {
- BufferInfo *info = &buffers->editItemAt(i);
-
- if (info->mBufferID == bufferID) {
- CHECK(info->mNotify == NULL);
- CHECK(msg->findMessage("reply", &info->mNotify));
-
- info->mFormat =
- (portIndex == kPortIndexInput) ? mInputFormat : mOutputFormat;
- mAvailPortBuffers[portIndex].push_back(i);
-
- return i;
+ {
+ Mutex::Autolock al(mBufferLock);
+ if (mPortBuffers[portIndex].size() <= index) {
+ mPortBuffers[portIndex].resize(align(index + 1, kNumBuffersAlign));
}
+ mPortBuffers[portIndex][index].mData = buffer;
}
+ mAvailPortBuffers[portIndex].push_back(index);
- TRESPASS();
-
- return 0;
+ return index;
}
status_t MediaCodec::onQueueInputBuffer(const sp<AMessage> &msg) {
@@ -2455,7 +2730,7 @@
CryptoPlugin::Pattern pattern;
if (msg->findSize("size", &size)) {
- if (mCrypto != NULL) {
+ if (hasCryptoOrDescrambler()) {
ss.mNumBytesOfClearData = size;
ss.mNumBytesOfEncryptedData = 0;
@@ -2467,7 +2742,9 @@
pattern.mSkipBlocks = 0;
}
} else {
- if (mCrypto == NULL) {
+ if (!hasCryptoOrDescrambler()) {
+ ALOGE("[%s] queuing secure buffer without mCrypto or mDescrambler!",
+ mComponentName.c_str());
return -EINVAL;
}
@@ -2494,9 +2771,9 @@
return -ERANGE;
}
- BufferInfo *info = &mPortBuffers[kPortIndexInput].editItemAt(index);
+ BufferInfo *info = &mPortBuffers[kPortIndexInput][index];
- if (info->mNotify == NULL || !info->mOwnedByClient) {
+ if (info->mData == nullptr || !info->mOwnedByClient) {
return -EACCES;
}
@@ -2504,10 +2781,8 @@
return -EINVAL;
}
- sp<AMessage> reply = info->mNotify;
info->mData->setRange(offset, size);
info->mData->meta()->setInt64("timeUs", timeUs);
-
if (flags & BUFFER_FLAG_EOS) {
info->mData->meta()->setInt32("eos", true);
}
@@ -2516,55 +2791,34 @@
info->mData->meta()->setInt32("csd", true);
}
- if (mCrypto != NULL) {
- if (size > info->mEncryptedData->capacity()) {
- return -ERANGE;
- }
-
+ sp<MediaCodecBuffer> buffer = info->mData;
+ status_t err = OK;
+ if (hasCryptoOrDescrambler()) {
AString *errorDetailMsg;
CHECK(msg->findPointer("errorDetailMsg", (void **)&errorDetailMsg));
- void *dst_pointer = info->mData->base();
- ICrypto::DestinationType dst_type = ICrypto::kDestinationTypeOpaqueHandle;
-
- if (info->mNativeHandle != NULL) {
- dst_pointer = (void *)info->mNativeHandle->handle();
- dst_type = ICrypto::kDestinationTypeNativeHandle;
- } else if ((mFlags & kFlagIsSecure) == 0) {
- dst_type = ICrypto::kDestinationTypeVmPointer;
- }
-
- ssize_t result = mCrypto->decrypt(
- dst_type,
+ err = mBufferChannel->queueSecureInputBuffer(
+ buffer,
+ (mFlags & kFlagIsSecure),
key,
iv,
mode,
pattern,
- info->mSharedEncryptedBuffer,
- offset,
subSamples,
numSubSamples,
- dst_pointer,
errorDetailMsg);
-
- if (result < 0) {
- return result;
- }
-
- info->mData->setRange(0, result);
+ } else {
+ err = mBufferChannel->queueInputBuffer(buffer);
}
- // synchronization boundary for getBufferAndFormat
- {
+ if (err == OK) {
+ // synchronization boundary for getBufferAndFormat
Mutex::Autolock al(mBufferLock);
info->mOwnedByClient = false;
+ info->mData.clear();
}
- reply->setBuffer("buffer", info->mData);
- reply->post();
- info->mNotify = NULL;
-
- return OK;
+ return err;
}
//static
@@ -2601,23 +2855,24 @@
return -ERANGE;
}
- BufferInfo *info = &mPortBuffers[kPortIndexOutput].editItemAt(index);
+ BufferInfo *info = &mPortBuffers[kPortIndexOutput][index];
- if (info->mNotify == NULL || !info->mOwnedByClient) {
+ if (info->mData == nullptr || !info->mOwnedByClient) {
return -EACCES;
}
// synchronization boundary for getBufferAndFormat
+ sp<MediaCodecBuffer> buffer;
{
Mutex::Autolock al(mBufferLock);
info->mOwnedByClient = false;
+ buffer = info->mData;
+ info->mData.clear();
}
- if (render && info->mData != NULL && info->mData->size() != 0) {
- info->mNotify->setInt32("render", true);
-
+ if (render && buffer->size() != 0) {
int64_t mediaTimeUs = -1;
- info->mData->meta()->findInt64("timeUs", &mediaTimeUs);
+ buffer->meta()->findInt64("timeUs", &mediaTimeUs);
int64_t renderTimeNs = 0;
if (!msg->findInt64("timestampNs", &renderTimeNs)) {
@@ -2625,12 +2880,11 @@
ALOGV("using buffer PTS of %lld", (long long)mediaTimeUs);
renderTimeNs = mediaTimeUs * 1000;
}
- info->mNotify->setInt64("timestampNs", renderTimeNs);
if (mSoftRenderer != NULL) {
std::list<FrameRenderTracker::Info> doneFrames = mSoftRenderer->render(
- info->mData->data(), info->mData->size(),
- mediaTimeUs, renderTimeNs, NULL, info->mFormat);
+ buffer->data(), buffer->size(),
+ mediaTimeUs, renderTimeNs, NULL, buffer->format());
// if we are running, notify rendered frames
if (!doneFrames.empty() && mState == STARTED && mOnFrameRenderedNotification != NULL) {
@@ -2642,11 +2896,11 @@
}
}
}
+ mBufferChannel->renderOutputBuffer(buffer, renderTimeNs);
+ } else {
+ mBufferChannel->discardBuffer(buffer);
}
- info->mNotify->post();
- info->mNotify = NULL;
-
return OK;
}
@@ -2662,20 +2916,20 @@
size_t index = *availBuffers->begin();
availBuffers->erase(availBuffers->begin());
- BufferInfo *info = &mPortBuffers[portIndex].editItemAt(index);
+ BufferInfo *info = &mPortBuffers[portIndex][index];
CHECK(!info->mOwnedByClient);
{
Mutex::Autolock al(mBufferLock);
info->mOwnedByClient = true;
// set image-data
- if (info->mFormat != NULL) {
+ if (info->mData->format() != NULL) {
sp<ABuffer> imageData;
- if (info->mFormat->findBuffer("image-data", &imageData)) {
+ if (info->mData->format()->findBuffer("image-data", &imageData)) {
info->mData->meta()->setBuffer("image-data", imageData);
}
int32_t left, top, right, bottom;
- if (info->mFormat->findRect("crop", &left, &top, &right, &bottom)) {
+ if (info->mData->format()->findRect("crop", &left, &top, &right, &bottom)) {
info->mData->meta()->setRect("crop-rect", left, top, right, bottom);
}
}
@@ -2696,7 +2950,7 @@
return ALREADY_EXISTS;
}
- err = native_window_api_connect(surface.get(), NATIVE_WINDOW_API_MEDIA);
+ err = nativeWindowConnect(surface.get(), "connectToSurface");
if (err == OK) {
// Require a fresh set of buffers after each connect by using a unique generation
// number. Rely on the fact that max supported process id by Linux is 2^22.
@@ -2711,12 +2965,12 @@
// This is needed as the consumer may be holding onto stale frames that it can reattach
// to this surface after disconnect/connect, and those free frames would inherit the new
// generation number. Disconnecting after setting a unique generation prevents this.
- native_window_api_disconnect(surface.get(), NATIVE_WINDOW_API_MEDIA);
- err = native_window_api_connect(surface.get(), NATIVE_WINDOW_API_MEDIA);
+ nativeWindowDisconnect(surface.get(), "connectToSurface(reconnect)");
+ err = nativeWindowConnect(surface.get(), "connectToSurface(reconnect)");
}
if (err != OK) {
- ALOGE("native_window_api_connect returned an error: %s (%d)", strerror(-err), err);
+ ALOGE("nativeWindowConnect returned an error: %s (%d)", strerror(-err), err);
}
}
// do not return ALREADY_EXISTS unless surfaces are the same
@@ -2728,9 +2982,9 @@
if (mSurface != NULL) {
// Resetting generation is not technically needed, but there is no need to keep it either
mSurface->setGenerationNumber(0);
- err = native_window_api_disconnect(mSurface.get(), NATIVE_WINDOW_API_MEDIA);
+ err = nativeWindowDisconnect(mSurface.get(), "disconnectFromSurface");
if (err != OK) {
- ALOGW("native_window_api_disconnect returned an error: %s (%d)", strerror(-err), err);
+ ALOGW("nativeWindowDisconnect returned an error: %s (%d)", strerror(-err), err);
}
// assume disconnected even on error
mSurface.clear();
@@ -2765,8 +3019,8 @@
void MediaCodec::onOutputBufferAvailable() {
int32_t index;
while ((index = dequeuePortBuffer(kPortIndexOutput)) >= 0) {
- const sp<ABuffer> &buffer =
- mPortBuffers[kPortIndexOutput].itemAt(index).mData;
+ const sp<MediaCodecBuffer> &buffer =
+ mPortBuffers[kPortIndexOutput][index].mData;
sp<AMessage> msg = mCallback->dup();
msg->setInt32("callbackID", CB_OUTPUT_AVAILABLE);
msg->setInt32("index", index);
@@ -2778,19 +3032,8 @@
msg->setInt64("timeUs", timeUs);
- int32_t omxFlags;
- CHECK(buffer->meta()->findInt32("omxFlags", &omxFlags));
-
- uint32_t flags = 0;
- if (omxFlags & OMX_BUFFERFLAG_SYNCFRAME) {
- flags |= BUFFER_FLAG_SYNCFRAME;
- }
- if (omxFlags & OMX_BUFFERFLAG_CODECCONFIG) {
- flags |= BUFFER_FLAG_CODECCONFIG;
- }
- if (omxFlags & OMX_BUFFERFLAG_EOS) {
- flags |= BUFFER_FLAG_EOS;
- }
+ int32_t flags;
+ CHECK(buffer->meta()->findInt32("flags", &flags));
msg->setInt32("flags", flags);
@@ -2822,7 +3065,6 @@
}
}
-
void MediaCodec::postActivityNotificationIfPossible() {
if (mActivityNotify == NULL) {
return;
@@ -2866,7 +3108,7 @@
}
status_t MediaCodec::amendOutputFormatWithCodecSpecificData(
- const sp<ABuffer> &buffer) {
+ const sp<MediaCodecBuffer> &buffer) {
AString mime;
CHECK(mOutputFormat->findString("mime", &mime));
@@ -2900,7 +3142,10 @@
} else {
// For everything else we just stash the codec specific data into
// the output format as a single piece of csd under "csd-0".
- mOutputFormat->setBuffer("csd-0", buffer);
+ sp<ABuffer> csd = new ABuffer(buffer->size());
+ memcpy(csd->data(), buffer->data(), buffer->size());
+ csd->setRange(0, buffer->size());
+ mOutputFormat->setBuffer("csd-0", csd);
}
return OK;
@@ -2912,10 +3157,10 @@
}
if (mState == CONFIGURED && !mBatteryStatNotified) {
- BatteryNotifier::getInstance().noteStartVideo();
+ BatteryNotifier::getInstance().noteStartVideo(mUid);
mBatteryStatNotified = true;
} else if (mState == UNINITIALIZED && mBatteryStatNotified) {
- BatteryNotifier::getInstance().noteStopVideo();
+ BatteryNotifier::getInstance().noteStopVideo(mUid);
mBatteryStatNotified = false;
}
}
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index 23d49f0..1dcba29 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -44,8 +44,6 @@
namespace android {
-const char *kMaxEncoderInputBuffers = "max-video-encoder-input-buffers";
-
static Mutex sInitMutex;
static bool parseBoolean(const char *s) {
@@ -171,12 +169,41 @@
return sRemoteList;
}
+// Treblized media codec list will be located in /odm/etc or /vendor/etc.
+static const char *kConfigLocationList[] =
+ {"/odm/etc", "/vendor/etc", "/etc"};
+static const int kConfigLocationListSize =
+ (sizeof(kConfigLocationList) / sizeof(kConfigLocationList[0]));
+
+#define MEDIA_CODECS_CONFIG_FILE_PATH_MAX_LENGTH 128
+
+static bool findMediaCodecListFileFullPath(const char *file_name, char *out_path) {
+ for (int i = 0; i < kConfigLocationListSize; i++) {
+ snprintf(out_path,
+ MEDIA_CODECS_CONFIG_FILE_PATH_MAX_LENGTH,
+ "%s/%s",
+ kConfigLocationList[i],
+ file_name);
+ struct stat file_stat;
+ if (stat(out_path, &file_stat) == 0 && S_ISREG(file_stat.st_mode)) {
+ return true;
+ }
+ }
+ return false;
+}
+
MediaCodecList::MediaCodecList()
: mInitCheck(NO_INIT),
mUpdate(false),
mGlobalSettings(new AMessage()) {
- parseTopLevelXMLFile("/etc/media_codecs.xml");
- parseTopLevelXMLFile("/etc/media_codecs_performance.xml", true/* ignore_errors */);
+ char config_file_path[MEDIA_CODECS_CONFIG_FILE_PATH_MAX_LENGTH];
+ if (findMediaCodecListFileFullPath("media_codecs.xml", config_file_path)) {
+ parseTopLevelXMLFile(config_file_path);
+ }
+ if (findMediaCodecListFileFullPath("media_codecs_performance.xml",
+ config_file_path)) {
+ parseTopLevelXMLFile(config_file_path, true/* ignore_errors */);
+ }
parseTopLevelXMLFile(kProfilingResults, true/* ignore_errors */);
}
@@ -196,9 +223,7 @@
if (mInitCheck != OK) {
return; // this may fail if IMediaPlayerService is not available.
}
- mOMX = client.interface();
parseXMLFile(codecs_xml);
- mOMX.clear();
if (mInitCheck != OK) {
if (ignore_errors) {
diff --git a/media/libstagefright/MediaCodecListOverrides.cpp b/media/libstagefright/MediaCodecListOverrides.cpp
index 33795f3..095fc6a 100644
--- a/media/libstagefright/MediaCodecListOverrides.cpp
+++ b/media/libstagefright/MediaCodecListOverrides.cpp
@@ -127,61 +127,6 @@
return format;
}
-static size_t doProfileEncoderInputBuffers(
- const AString &name, const AString &mime, const sp<MediaCodecInfo::Capabilities> &caps) {
- ALOGV("doProfileEncoderInputBuffers: name %s, mime %s", name.c_str(), mime.c_str());
-
- sp<AMessage> format = getMeasureFormat(true /* isEncoder */, mime, caps);
- if (format == NULL) {
- return 0;
- }
-
- format->setInt32("color-format", OMX_COLOR_FormatAndroidOpaque);
- ALOGV("doProfileEncoderInputBuffers: format %s", format->debugString().c_str());
-
- status_t err = OK;
- sp<ALooper> looper = new ALooper;
- looper->setName("MediaCodec_looper");
- looper->start(
- false /* runOnCallingThread */, false /* canCallJava */, ANDROID_PRIORITY_AUDIO);
-
- sp<MediaCodec> codec = MediaCodec::CreateByComponentName(looper, name.c_str(), &err);
- if (err != OK) {
- ALOGE("Failed to create codec: %s", name.c_str());
- return 0;
- }
-
- err = codec->configure(format, NULL, NULL, MediaCodec::CONFIGURE_FLAG_ENCODE);
- if (err != OK) {
- ALOGE("Failed to configure codec: %s with mime: %s", name.c_str(), mime.c_str());
- codec->release();
- return 0;
- }
-
- sp<IGraphicBufferProducer> bufferProducer;
- err = codec->createInputSurface(&bufferProducer);
- if (err != OK) {
- ALOGE("Failed to create surface: %s with mime: %s", name.c_str(), mime.c_str());
- codec->release();
- return 0;
- }
-
- int minUndequeued = 0;
- err = bufferProducer->query(
- NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeued);
- if (err != OK) {
- ALOGE("Failed to query NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS");
- minUndequeued = 0;
- }
-
- err = codec->release();
- if (err != OK) {
- ALOGW("Failed to release codec: %s with mime: %s", name.c_str(), mime.c_str());
- }
-
- return minUndequeued;
-}
-
static size_t doProfileCodecs(
bool isEncoder, const AString &name, const AString &mime, const sp<MediaCodecInfo::Capabilities> &caps) {
sp<AMessage> format = getMeasureFormat(isEncoder, mime, caps);
@@ -276,7 +221,6 @@
bool forceToMeasure) {
KeyedVector<AString, sp<MediaCodecInfo::Capabilities>> codecsNeedMeasure;
AString supportMultipleSecureCodecs = "true";
- size_t maxEncoderInputBuffers = 0;
for (size_t i = 0; i < infos.size(); ++i) {
const sp<MediaCodecInfo> info = infos[i];
AString name = info->getCodecName();
@@ -319,21 +263,9 @@
supportMultipleSecureCodecs = "false";
}
}
- if (info->isEncoder() && mimes[i].startsWith("video/")) {
- size_t encoderInputBuffers =
- doProfileEncoderInputBuffers(name, mimes[i], caps);
- if (encoderInputBuffers > maxEncoderInputBuffers) {
- maxEncoderInputBuffers = encoderInputBuffers;
- }
- }
}
}
}
- if (maxEncoderInputBuffers > 0) {
- char tmp[32];
- sprintf(tmp, "%zu", maxEncoderInputBuffers);
- global_results->add(kMaxEncoderInputBuffers, tmp);
- }
global_results->add(kPolicySupportsMultipleSecureCodecs, supportMultipleSecureCodecs);
}
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index 35c07ca..bb20850 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -20,10 +20,10 @@
#include <inttypes.h>
-#include <gui/IGraphicBufferConsumer.h>
#include <gui/IGraphicBufferProducer.h>
#include <gui/Surface.h>
#include <media/ICrypto.h>
+#include <media/MediaCodecBuffer.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
@@ -35,7 +35,6 @@
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>
-#include <media/stagefright/PersistentSurface.h>
#include <media/stagefright/Utils.h>
namespace android {
@@ -325,10 +324,10 @@
const sp<ALooper> &looper,
const sp<AMessage> &format,
const sp<MediaSource> &source,
- const sp<IGraphicBufferConsumer> &consumer,
+ const sp<PersistentSurface> &persistentSurface,
uint32_t flags) {
- sp<MediaCodecSource> mediaSource =
- new MediaCodecSource(looper, format, source, consumer, flags);
+ sp<MediaCodecSource> mediaSource = new MediaCodecSource(
+ looper, format, source, persistentSurface, flags);
if (mediaSource->init() == OK) {
return mediaSource;
@@ -364,8 +363,20 @@
return postSynchronouslyAndReturnError(msg);
}
-status_t MediaCodecSource::pause() {
- (new AMessage(kWhatPause, mReflector))->post();
+
+status_t MediaCodecSource::setStopStimeUs(int64_t stopTimeUs) {
+ if (!(mFlags & FLAG_USE_SURFACE_INPUT)) {
+ return OK;
+ }
+ sp<AMessage> msg = new AMessage(kWhatSetStopTimeOffset, mReflector);
+ msg->setInt64("stop-time-us", stopTimeUs);
+ return postSynchronouslyAndReturnError(msg);
+}
+
+status_t MediaCodecSource::pause(MetaData* params) {
+ sp<AMessage> msg = new AMessage(kWhatPause, mReflector);
+ msg->setObject("meta", params);
+ msg->post();
return OK;
}
@@ -404,7 +415,7 @@
const sp<ALooper> &looper,
const sp<AMessage> &outputFormat,
const sp<MediaSource> &source,
- const sp<IGraphicBufferConsumer> &consumer,
+ const sp<PersistentSurface> &persistentSurface,
uint32_t flags)
: mLooper(looper),
mOutputFormat(outputFormat),
@@ -417,7 +428,7 @@
mSetEncoderFormat(false),
mEncoderFormat(0),
mEncoderDataSpace(0),
- mGraphicBufferConsumer(consumer),
+ mPersistentSurface(persistentSurface),
mInputBufferTimeOffsetUs(0),
mFirstSampleSystemTimeUs(-1ll),
mPausePending(false),
@@ -514,12 +525,11 @@
if (mFlags & FLAG_USE_SURFACE_INPUT) {
CHECK(mIsVideo);
- if (mGraphicBufferConsumer != NULL) {
+ if (mPersistentSurface != NULL) {
// When using persistent surface, we are only interested in the
// consumer, but have to use PersistentSurface as a wrapper to
// pass consumer over messages (similar to BufferProducerWrapper)
- err = mEncoder->setInputSurface(
- new PersistentSurface(NULL, mGraphicBufferConsumer));
+ err = mEncoder->setInputSurface(mPersistentSurface);
} else {
err = mEncoder->createInputSurface(&mGraphicBufferProducer);
}
@@ -626,22 +636,13 @@
}
}
-void MediaCodecSource::suspend() {
- CHECK(mFlags & FLAG_USE_SURFACE_INPUT);
- if (mEncoder != NULL) {
- sp<AMessage> params = new AMessage;
- params->setInt32("drop-input-frames", true);
- mEncoder->setParameters(params);
- }
-}
-
-void MediaCodecSource::resume(int64_t skipFramesBeforeUs) {
+void MediaCodecSource::resume(int64_t resumeStartTimeUs) {
CHECK(mFlags & FLAG_USE_SURFACE_INPUT);
if (mEncoder != NULL) {
sp<AMessage> params = new AMessage;
params->setInt32("drop-input-frames", false);
- if (skipFramesBeforeUs > 0) {
- params->setInt64("skip-frames-before", skipFramesBeforeUs);
+ if (resumeStartTimeUs > 0) {
+ params->setInt64("drop-start-time-us", resumeStartTimeUs);
}
mEncoder->setParameters(params);
}
@@ -663,7 +664,7 @@
mFirstSampleSystemTimeUs = systemTime() / 1000;
if (mPausePending) {
mPausePending = false;
- onPause();
+ onPause(mFirstSampleSystemTimeUs);
mbuf->release();
mAvailEncoderInputIndices.push_back(bufferIndex);
return OK;
@@ -689,9 +690,11 @@
#endif // DEBUG_DRIFT_TIME
}
- sp<ABuffer> inbuf;
+ sp<MediaCodecBuffer> inbuf;
status_t err = mEncoder->getInputBuffer(bufferIndex, &inbuf);
- if (err != OK || inbuf == NULL) {
+
+ if (err != OK || inbuf == NULL || inbuf->data() == NULL
+ || mbuf->data() == NULL || mbuf->size() == 0) {
mbuf->release();
signalEOS();
break;
@@ -728,6 +731,10 @@
ALOGE("Failed to start while we're stopping");
return INVALID_OPERATION;
}
+ int64_t startTimeUs;
+ if (params == NULL || !params->findInt64(kKeyTime, &startTimeUs)) {
+ startTimeUs = -1ll;
+ }
if (mStarted) {
ALOGI("MediaCodecSource (%s) resuming", mIsVideo ? "video" : "audio");
@@ -739,7 +746,7 @@
mEncoder->requestIDRFrame();
}
if (mFlags & FLAG_USE_SURFACE_INPUT) {
- resume();
+ resume(startTimeUs);
} else {
CHECK(mPuller != NULL);
mPuller->resume();
@@ -752,11 +759,14 @@
status_t err = OK;
if (mFlags & FLAG_USE_SURFACE_INPUT) {
- int64_t startTimeUs;
- if (!params || !params->findInt64(kKeyTime, &startTimeUs)) {
- startTimeUs = -1ll;
+ if (mEncoder != NULL) {
+ sp<AMessage> params = new AMessage;
+ params->setInt32("drop-input-frames", false);
+ if (startTimeUs >= 0) {
+ params->setInt64("skip-frames-before", startTimeUs);
+ }
+ mEncoder->setParameters(params);
}
- resume(startTimeUs);
} else {
CHECK(mPuller != NULL);
sp<MetaData> meta = params;
@@ -781,9 +791,12 @@
return OK;
}
-void MediaCodecSource::onPause() {
- if (mFlags & FLAG_USE_SURFACE_INPUT) {
- suspend();
+void MediaCodecSource::onPause(int64_t pauseStartTimeUs) {
+ if ((mFlags & FLAG_USE_SURFACE_INPUT) && (mEncoder != NULL)) {
+ sp<AMessage> params = new AMessage;
+ params->setInt32("drop-input-frames", true);
+ params->setInt64("drop-start-time-us", pauseStartTimeUs);
+ mEncoder->setParameters(params);
} else {
CHECK(mPuller != NULL);
mPuller->pause();
@@ -851,9 +864,10 @@
break;
}
- sp<ABuffer> outbuf;
+ sp<MediaCodecBuffer> outbuf;
status_t err = mEncoder->getOutputBuffer(index, &outbuf);
- if (err != OK || outbuf == NULL) {
+ if (err != OK || outbuf == NULL || outbuf->data() == NULL
+ || outbuf->size() == 0) {
signalEOS();
break;
}
@@ -870,7 +884,7 @@
mFirstSampleSystemTimeUs = systemTime() / 1000;
if (mPausePending) {
mPausePending = false;
- onPause();
+ onPause(mFirstSampleSystemTimeUs);
mbuf->release();
break;
}
@@ -906,6 +920,7 @@
}
mbuf->meta_data()->setInt64(kKeyTime, timeUs);
} else {
+ mbuf->meta_data()->setInt64(kKeyTime, 0ll);
mbuf->meta_data()->setInt32(kKeyIsCodecConfig, true);
}
if (flags & MediaCodec::BUFFER_FLAG_SYNCFRAME) {
@@ -925,6 +940,10 @@
CHECK(msg->findInt32("err", &err));
ALOGE("Encoder (%s) reported error : 0x%x",
mIsVideo ? "video" : "audio", err);
+ if (!(mFlags & FLAG_USE_SURFACE_INPUT)) {
+ mStopping = true;
+ mPuller->stop();
+ }
signalEOS();
}
break;
@@ -998,6 +1017,7 @@
ALOGV("source (%s) stopped", mIsVideo ? "video" : "audio");
}
signalEOS();
+ break;
}
case kWhatPause:
@@ -1005,7 +1025,14 @@
if (mFirstSampleSystemTimeUs < 0) {
mPausePending = true;
} else {
- onPause();
+ sp<RefBase> obj;
+ CHECK(msg->findObject("meta", &obj));
+ MetaData *params = static_cast<MetaData *>(obj.get());
+ int64_t pauseStartTimeUs = -1;
+ if (params == NULL || !params->findInt64(kKeyTime, &pauseStartTimeUs)) {
+ pauseStartTimeUs = -1ll;
+ }
+ onPause(pauseStartTimeUs);
}
break;
}
@@ -1017,7 +1044,7 @@
CHECK(msg->findInt64("time-offset-us", &mInputBufferTimeOffsetUs));
// Propagate the timestamp offset to GraphicBufferSource.
- if (mIsVideo) {
+ if (mFlags & FLAG_USE_SURFACE_INPUT) {
sp<AMessage> params = new AMessage;
params->setInt64("time-offset-us", mInputBufferTimeOffsetUs);
err = mEncoder->setParameters(params);
@@ -1028,6 +1055,26 @@
response->postReply(replyID);
break;
}
+ case kWhatSetStopTimeOffset:
+ {
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ status_t err = OK;
+ int64_t stopTimeUs;
+ CHECK(msg->findInt64("stop-time-us", &stopTimeUs));
+
+ // Propagate the stop time to GraphicBufferSource.
+ if (mFlags & FLAG_USE_SURFACE_INPUT) {
+ sp<AMessage> params = new AMessage;
+ params->setInt64("stop-time-us", stopTimeUs);
+ err = mEncoder->setParameters(params);
+ }
+
+ sp<AMessage> response = new AMessage;
+ response->setInt32("err", err);
+ response->postReply(replyID);
+ break;
+ }
case kWhatGetFirstSampleSystemTimeUs:
{
sp<AReplyToken> replyID;
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
index 92ce88c..c91c82b 100644
--- a/media/libstagefright/MediaExtractor.cpp
+++ b/media/libstagefright/MediaExtractor.cpp
@@ -27,8 +27,6 @@
#include "include/OggExtractor.h"
#include "include/MPEG2PSExtractor.h"
#include "include/MPEG2TSExtractor.h"
-#include "include/DRMExtractor.h"
-#include "include/WVMExtractor.h"
#include "include/FLACExtractor.h"
#include "include/AACExtractor.h"
#include "include/MidiExtractor.h"
@@ -38,6 +36,7 @@
#include <binder/IServiceManager.h>
#include <binder/MemoryDealer.h>
+#include <media/MediaAnalyticsItem.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/DataSource.h>
@@ -49,136 +48,92 @@
#include <utils/String8.h>
#include <private/android_filesystem_config.h>
+// still doing some on/off toggling here.
+#define MEDIA_LOG 1
+
namespace android {
-MediaExtractor::MediaExtractor():
- mIsDrm(false) {
+// key for media statistics
+static const char *kKeyExtractor = "extractor";
+// attrs for media statistics
+static const char *kExtractorMime = "android.media.mediaextractor.mime";
+static const char *kExtractorTracks = "android.media.mediaextractor.ntrk";
+static const char *kExtractorFormat = "android.media.mediaextractor.fmt";
+
+MediaExtractor::MediaExtractor() {
if (!LOG_NDEBUG) {
uid_t uid = getuid();
struct passwd *pw = getpwuid(uid);
ALOGI("extractor created in uid: %d (%s)", getuid(), pw->pw_name);
}
+ mAnalyticsItem = NULL;
+ if (MEDIA_LOG) {
+ mAnalyticsItem = new MediaAnalyticsItem(kKeyExtractor);
+ (void) mAnalyticsItem->generateSessionID();
+ }
}
+MediaExtractor::~MediaExtractor() {
+
+ // log the current record, provided it has some information worth recording
+ if (MEDIA_LOG) {
+ if (mAnalyticsItem != NULL) {
+ if (mAnalyticsItem->count() > 0) {
+ mAnalyticsItem->setFinalized(true);
+ mAnalyticsItem->selfrecord();
+ }
+ }
+ }
+ if (mAnalyticsItem != NULL) {
+ delete mAnalyticsItem;
+ mAnalyticsItem = NULL;
+ }
+}
sp<MetaData> MediaExtractor::getMetaData() {
return new MetaData;
}
+status_t MediaExtractor::getMetrics(Parcel *reply) {
+
+ if (mAnalyticsItem == NULL || reply == NULL) {
+ return UNKNOWN_ERROR;
+ }
+
+ populateMetrics();
+ mAnalyticsItem->writeToParcel(reply);
+
+ return OK;
+}
+
+void MediaExtractor::populateMetrics() {
+ ALOGV("MediaExtractor::populateMetrics");
+ // normally overridden in subclasses
+}
+
uint32_t MediaExtractor::flags() const {
return CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_PAUSE | CAN_SEEK;
}
-
-
-class RemoteDataSource : public BnDataSource {
-public:
- enum {
- kBufferSize = 64 * 1024,
- };
-
- static sp<IDataSource> wrap(const sp<DataSource> &source);
- virtual ~RemoteDataSource();
-
- virtual sp<IMemory> getIMemory();
- virtual ssize_t readAt(off64_t offset, size_t size);
- virtual status_t getSize(off64_t* size);
- virtual void close();
- virtual uint32_t getFlags();
- virtual String8 toString();
- virtual sp<DecryptHandle> DrmInitialization(const char *mime);
-
-private:
- sp<IMemory> mMemory;
- sp<DataSource> mSource;
- String8 mName;
- RemoteDataSource(const sp<DataSource> &source);
- DISALLOW_EVIL_CONSTRUCTORS(RemoteDataSource);
-};
-
-
-sp<IDataSource> RemoteDataSource::wrap(const sp<DataSource> &source) {
- return new RemoteDataSource(source);
-}
-RemoteDataSource::RemoteDataSource(const sp<DataSource> &source) {
- mSource = source;
- sp<MemoryDealer> memoryDealer = new MemoryDealer(kBufferSize, "RemoteDataSource");
- mMemory = memoryDealer->allocate(kBufferSize);
- if (mMemory == NULL) {
- ALOGE("Failed to allocate memory!");
- }
- mName = String8::format("RemoteDataSource(%s)", mSource->toString().string());
-}
-RemoteDataSource::~RemoteDataSource() {
- close();
-}
-sp<IMemory> RemoteDataSource::getIMemory() {
- return mMemory;
-}
-ssize_t RemoteDataSource::readAt(off64_t offset, size_t size) {
- ALOGV("readAt(%" PRId64 ", %zu)", offset, size);
- return mSource->readAt(offset, mMemory->pointer(), size);
-}
-status_t RemoteDataSource::getSize(off64_t* size) {
- return mSource->getSize(size);
-}
-void RemoteDataSource::close() {
- mSource = NULL;
-}
-uint32_t RemoteDataSource::getFlags() {
- return mSource->flags();
-}
-
-String8 RemoteDataSource::toString() {
- return mName;
-}
-
-sp<DecryptHandle> RemoteDataSource::DrmInitialization(const char *mime) {
- return mSource->DrmInitialization(mime);
-}
-
// static
sp<IMediaExtractor> MediaExtractor::Create(
const sp<DataSource> &source, const char *mime) {
ALOGV("MediaExtractor::Create %s", mime);
- char value[PROPERTY_VALUE_MAX];
- if (property_get("media.stagefright.extractremote", value, NULL)
- && (!strcmp("0", value) || !strcasecmp("false", value))) {
+ if (!property_get_bool("media.stagefright.extractremote", true)) {
// local extractor
ALOGW("creating media extractor in calling process");
return CreateFromService(source, mime);
} else {
- // Check if it's WVM, since WVMExtractor needs to be created in the media server process,
- // not the extractor process.
- String8 mime8;
- float confidence;
- sp<AMessage> meta;
- if (SniffWVM(source, &mime8, &confidence, &meta) &&
- !strcasecmp(mime8, MEDIA_MIMETYPE_CONTAINER_WVM)) {
- return new WVMExtractor(source);
- }
-
- // Check if it's es-based DRM, since DRMExtractor needs to be created in the media server
- // process, not the extractor process.
- if (SniffDRM(source, &mime8, &confidence, &meta)) {
- const char *drmMime = mime8.string();
- ALOGV("Detected media content as '%s' with confidence %.2f", drmMime, confidence);
- if (!strncmp(drmMime, "drm+es_based+", 13)) {
- // DRMExtractor sets container metadata kKeyIsDRM to 1
- return new DRMExtractor(source, drmMime + 14);
- }
- }
-
// remote extractor
ALOGV("get service manager");
sp<IBinder> binder = defaultServiceManager()->getService(String16("media.extractor"));
if (binder != 0) {
sp<IMediaExtractorService> mediaExService(interface_cast<IMediaExtractorService>(binder));
- sp<IMediaExtractor> ex = mediaExService->makeExtractor(RemoteDataSource::wrap(source), mime);
+ sp<IMediaExtractor> ex = mediaExService->makeExtractor(source->asIDataSource(), mime);
return ex;
} else {
ALOGE("extractor service not running");
@@ -192,15 +147,18 @@
const sp<DataSource> &source, const char *mime) {
ALOGV("MediaExtractor::CreateFromService %s", mime);
- DataSource::RegisterDefaultSniffers();
+ RegisterDefaultSniffers();
+
+ // initialize source decryption if needed
+ source->DrmInitialization(nullptr /* mime */);
sp<AMessage> meta;
String8 tmp;
if (mime == NULL) {
float confidence;
- if (!source->sniff(&tmp, &confidence, &meta)) {
- ALOGV("FAILED to autodetect media content.");
+ if (!sniff(source, &tmp, &confidence, &meta)) {
+ ALOGW("FAILED to autodetect media content.");
return NULL;
}
@@ -210,28 +168,6 @@
mime, confidence);
}
- bool isDrm = false;
- // DRM MIME type syntax is "drm+type+original" where
- // type is "es_based" or "container_based" and
- // original is the content's cleartext MIME type
- if (!strncmp(mime, "drm+", 4)) {
- const char *originalMime = strchr(mime+4, '+');
- if (originalMime == NULL) {
- // second + not found
- return NULL;
- }
- ++originalMime;
- if (!strncmp(mime, "drm+es_based+", 13)) {
- // DRMExtractor sets container metadata kKeyIsDRM to 1
- return new DRMExtractor(source, originalMime);
- } else if (!strncmp(mime, "drm+container_based+", 20)) {
- mime = originalMime;
- isDrm = true;
- } else {
- return NULL;
- }
- }
-
MediaExtractor *ret = NULL;
if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG4)
|| !strcasecmp(mime, "audio/mp4")) {
@@ -251,9 +187,6 @@
ret = new MatroskaExtractor(source);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG2TS)) {
ret = new MPEG2TSExtractor(source);
- } else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_WVM) && getuid() == AID_MEDIA) {
- // Return now. WVExtractor should not have the DrmFlag set in the block below.
- return new WVMExtractor(source);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC_ADTS)) {
ret = new AACExtractor(source, meta);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG2PS)) {
@@ -263,14 +196,100 @@
}
if (ret != NULL) {
- if (isDrm) {
- ret->setDrmFlag(true);
- } else {
- ret->setDrmFlag(false);
+ // track the container format (mpeg, aac, wvm, etc)
+ if (MEDIA_LOG) {
+ if (ret->mAnalyticsItem != NULL) {
+ size_t ntracks = ret->countTracks();
+ ret->mAnalyticsItem->setCString(kExtractorFormat, ret->name());
+ // tracks (size_t)
+ ret->mAnalyticsItem->setInt32(kExtractorTracks, ntracks);
+ // metadata
+ sp<MetaData> pMetaData = ret->getMetaData();
+ if (pMetaData != NULL) {
+ String8 xx = pMetaData->toString();
+ // 'titl' -- but this verges into PII
+ // 'mime'
+ const char *mime = NULL;
+ if (pMetaData->findCString(kKeyMIMEType, &mime)) {
+ ret->mAnalyticsItem->setCString(kExtractorMime, mime);
+ }
+ // what else is interesting and not already available?
+ }
+ }
}
}
return ret;
}
+Mutex MediaExtractor::gSnifferMutex;
+List<MediaExtractor::SnifferFunc> MediaExtractor::gSniffers;
+bool MediaExtractor::gSniffersRegistered = false;
+
+// static
+bool MediaExtractor::sniff(
+ const sp<DataSource> &source, String8 *mimeType, float *confidence, sp<AMessage> *meta) {
+ *mimeType = "";
+ *confidence = 0.0f;
+ meta->clear();
+
+ {
+ Mutex::Autolock autoLock(gSnifferMutex);
+ if (!gSniffersRegistered) {
+ return false;
+ }
+ }
+
+ for (List<SnifferFunc>::iterator it = gSniffers.begin();
+ it != gSniffers.end(); ++it) {
+ String8 newMimeType;
+ float newConfidence;
+ sp<AMessage> newMeta;
+ if ((*it)(source, &newMimeType, &newConfidence, &newMeta)) {
+ if (newConfidence > *confidence) {
+ *mimeType = newMimeType;
+ *confidence = newConfidence;
+ *meta = newMeta;
+ }
+ }
+ }
+
+ return *confidence > 0.0;
+}
+
+// static
+void MediaExtractor::RegisterSniffer_l(SnifferFunc func) {
+ for (List<SnifferFunc>::iterator it = gSniffers.begin();
+ it != gSniffers.end(); ++it) {
+ if (*it == func) {
+ return;
+ }
+ }
+
+ gSniffers.push_back(func);
+}
+
+// static
+void MediaExtractor::RegisterDefaultSniffers() {
+ Mutex::Autolock autoLock(gSnifferMutex);
+ if (gSniffersRegistered) {
+ return;
+ }
+
+ RegisterSniffer_l(SniffMPEG4);
+ RegisterSniffer_l(SniffMatroska);
+ RegisterSniffer_l(SniffOgg);
+ RegisterSniffer_l(SniffWAV);
+ RegisterSniffer_l(SniffFLAC);
+ RegisterSniffer_l(SniffAMR);
+ RegisterSniffer_l(SniffMPEG2TS);
+ RegisterSniffer_l(SniffMP3);
+ RegisterSniffer_l(SniffAAC);
+ RegisterSniffer_l(SniffMPEG2PS);
+ RegisterSniffer_l(SniffMidi);
+
+ gSniffersRegistered = true;
+}
+
+
} // namespace android
diff --git a/media/libstagefright/MediaMuxer.cpp b/media/libstagefright/MediaMuxer.cpp
index b13877d..c7b8888 100644
--- a/media/libstagefright/MediaMuxer.cpp
+++ b/media/libstagefright/MediaMuxer.cpp
@@ -41,7 +41,7 @@
MediaMuxer::MediaMuxer(int fd, OutputFormat format)
: mFormat(format),
mState(UNINITIALIZED) {
- if (format == OUTPUT_FORMAT_MPEG_4) {
+ if (format == OUTPUT_FORMAT_MPEG_4 || format == OUTPUT_FORMAT_THREE_GPP) {
mWriter = new MPEG4Writer(fd);
} else if (format == OUTPUT_FORMAT_WEBM) {
mWriter = new WebmWriter(fd);
@@ -108,8 +108,8 @@
ALOGE("setLocation() must be called before start().");
return INVALID_OPERATION;
}
- if (mFormat != OUTPUT_FORMAT_MPEG_4) {
- ALOGE("setLocation() is only supported for .mp4 output.");
+ if (mFormat != OUTPUT_FORMAT_MPEG_4 && mFormat != OUTPUT_FORMAT_THREE_GPP) {
+ ALOGE("setLocation() is only supported for .mp4 or .3gp output.");
return INVALID_OPERATION;
}
diff --git a/media/libstagefright/MediaSync.cpp b/media/libstagefright/MediaSync.cpp
index 6f2d868..0cf6fbf 100644
--- a/media/libstagefright/MediaSync.cpp
+++ b/media/libstagefright/MediaSync.cpp
@@ -478,59 +478,43 @@
CHECK(mAudioTrack != NULL);
uint32_t numFramesPlayed;
- int64_t numFramesPlayedAt;
+ int64_t numFramesPlayedAtUs;
AudioTimestamp ts;
- static const int64_t kStaleTimestamp100ms = 100000;
status_t res = mAudioTrack->getTimestamp(ts);
if (res == OK) {
// case 1: mixing audio tracks.
numFramesPlayed = ts.mPosition;
- numFramesPlayedAt =
- ts.mTime.tv_sec * 1000000LL + ts.mTime.tv_nsec / 1000;
- const int64_t timestampAge = nowUs - numFramesPlayedAt;
- if (timestampAge > kStaleTimestamp100ms) {
- // This is an audio FIXME.
- // getTimestamp returns a timestamp which may come from audio
- // mixing threads. After pausing, the MixerThread may go idle,
- // thus the mTime estimate may become stale. Assuming that the
- // MixerThread runs 20ms, with FastMixer at 5ms, the max latency
- // should be about 25ms with an average around 12ms (to be
- // verified). For safety we use 100ms.
- ALOGV("getTimestamp: returned stale timestamp nowUs(%lld) "
- "numFramesPlayedAt(%lld)",
- (long long)nowUs, (long long)numFramesPlayedAt);
- numFramesPlayedAt = nowUs - kStaleTimestamp100ms;
- }
+ numFramesPlayedAtUs = ts.mTime.tv_sec * 1000000LL + ts.mTime.tv_nsec / 1000;
//ALOGD("getTimestamp: OK %d %lld",
- // numFramesPlayed, (long long)numFramesPlayedAt);
+ // numFramesPlayed, (long long)numFramesPlayedAtUs);
} else if (res == WOULD_BLOCK) {
// case 2: transitory state on start of a new track
numFramesPlayed = 0;
- numFramesPlayedAt = nowUs;
+ numFramesPlayedAtUs = nowUs;
//ALOGD("getTimestamp: WOULD_BLOCK %d %lld",
- // numFramesPlayed, (long long)numFramesPlayedAt);
+ // numFramesPlayed, (long long)numFramesPlayedAtUs);
} else {
// case 3: transitory at new track or audio fast tracks.
res = mAudioTrack->getPosition(&numFramesPlayed);
CHECK_EQ(res, (status_t)OK);
- numFramesPlayedAt = nowUs;
- numFramesPlayedAt += 1000LL * mAudioTrack->latency() / 2; /* XXX */
- //ALOGD("getPosition: %d %lld", numFramesPlayed, (long long)numFramesPlayedAt);
+ numFramesPlayedAtUs = nowUs;
+ numFramesPlayedAtUs += 1000LL * mAudioTrack->latency() / 2; /* XXX */
+ //ALOGD("getPosition: %d %lld", numFramesPlayed, (long long)numFramesPlayedAtUs);
}
//can't be negative until 12.4 hrs, test.
//CHECK_EQ(numFramesPlayed & (1 << 31), 0);
int64_t durationUs =
getDurationIfPlayedAtNativeSampleRate_l(numFramesPlayed)
- + nowUs - numFramesPlayedAt;
+ + nowUs - numFramesPlayedAtUs;
if (durationUs < 0) {
// Occurs when numFramesPlayed position is very small and the following:
// (1) In case 1, the time nowUs is computed before getTimestamp() is
- // called and numFramesPlayedAt is greater than nowUs by time more
+ // called and numFramesPlayedAtUs is greater than nowUs by time more
// than numFramesPlayed.
// (2) In case 3, using getPosition and adding mAudioTrack->latency()
- // to numFramesPlayedAt, by a time amount greater than
+ // to numFramesPlayedAtUs, by a time amount greater than
// numFramesPlayed.
//
// Both of these are transitory conditions.
@@ -541,7 +525,7 @@
ALOGV("getPlayedOutAudioDurationMedia_l(%lld) nowUs(%lld) frames(%u) "
"framesAt(%lld)",
(long long)durationUs, (long long)nowUs, numFramesPlayed,
- (long long)numFramesPlayedAt);
+ (long long)numFramesPlayedAtUs);
return durationUs;
}
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index 276d731..51f1ba3 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -22,7 +22,6 @@
#include "include/ESDS.h"
#include "include/NuCachedSource2.h"
-#include "include/WVMExtractor.h"
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -36,12 +35,12 @@
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
+#include <android/media/ICas.h>
namespace android {
NuMediaExtractor::NuMediaExtractor()
- : mIsWidevineExtractor(false),
- mTotalBitrate(-1ll),
+ : mTotalBitrate(-1ll),
mDurationUs(-1ll) {
}
@@ -51,7 +50,8 @@
for (size_t i = 0; i < mSelectedTracks.size(); ++i) {
TrackInfo *info = &mSelectedTracks.editItemAt(i);
- CHECK_EQ((status_t)OK, info->mSource->stop());
+ status_t err = info->mSource->stop();
+ ALOGE_IF(err != OK, "error %d stopping track %zu", err, i);
}
mSelectedTracks.clear();
@@ -66,7 +66,7 @@
const KeyedVector<String8, String8> *headers) {
Mutex::Autolock autoLock(mLock);
- if (mImpl != NULL) {
+ if (mImpl != NULL || path == NULL) {
return -EINVAL;
}
@@ -77,51 +77,14 @@
return -ENOENT;
}
- mIsWidevineExtractor = false;
- if (!strncasecmp("widevine://", path, 11)) {
- String8 mimeType;
- float confidence;
- sp<AMessage> dummy;
- bool success = SniffWVM(dataSource, &mimeType, &confidence, &dummy);
-
- if (!success
- || strcasecmp(
- mimeType.string(), MEDIA_MIMETYPE_CONTAINER_WVM)) {
- return ERROR_UNSUPPORTED;
- }
-
- sp<WVMExtractor> extractor = new WVMExtractor(dataSource);
- extractor->setAdaptiveStreamingMode(true);
-
- mImpl = extractor;
- mIsWidevineExtractor = true;
- } else {
- mImpl = MediaExtractor::Create(dataSource);
- }
+ mImpl = MediaExtractor::Create(dataSource);
if (mImpl == NULL) {
return ERROR_UNSUPPORTED;
}
- sp<MetaData> fileMeta = mImpl->getMetaData();
- const char *containerMime;
- if (fileMeta != NULL
- && fileMeta->findCString(kKeyMIMEType, &containerMime)
- && !strcasecmp(containerMime, "video/wvm")) {
- // We always want to use "cryptoPluginMode" when using the wvm
- // extractor. We can tell that it is this extractor by looking
- // at the container mime type.
- // The cryptoPluginMode ensures that the extractor will actually
- // give us data in a call to MediaSource::read(), unlike its
- // default mode that we used in AwesomePlayer.
- // TODO: change default mode
- static_cast<WVMExtractor *>(mImpl.get())->setCryptoPluginMode(true);
- } else if (mImpl->getDrmFlag()) {
- // For all other drm content, we don't want to expose decrypted
- // content to Java application.
- mImpl.clear();
- mImpl = NULL;
- return ERROR_UNSUPPORTED;
+ if (mCas != NULL) {
+ mImpl->setMediaCas(mCas);
}
status_t err = updateDurationAndBitrate();
@@ -156,6 +119,10 @@
return ERROR_UNSUPPORTED;
}
+ if (mCas != NULL) {
+ mImpl->setMediaCas(mCas);
+ }
+
err = updateDurationAndBitrate();
if (err == OK) {
mDataSource = fileSource;
@@ -182,6 +149,10 @@
return ERROR_UNSUPPORTED;
}
+ if (mCas != NULL) {
+ mImpl->setMediaCas(mCas);
+ }
+
err = updateDurationAndBitrate();
if (err == OK) {
mDataSource = source;
@@ -190,6 +161,27 @@
return err;
}
+status_t NuMediaExtractor::setMediaCas(const sp<ICas> &cas) {
+ ALOGV("setMediaCas: cas=%p", cas.get());
+
+ Mutex::Autolock autoLock(mLock);
+
+ if (cas == NULL) {
+ return BAD_VALUE;
+ }
+
+ if (mImpl != NULL) {
+ mImpl->setMediaCas(cas);
+ status_t err = updateDurationAndBitrate();
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ mCas = cas;
+ return OK;
+}
+
status_t NuMediaExtractor::updateDurationAndBitrate() {
if (mImpl->countTracks() > kMaxTrackCount) {
return ERROR_UNSUPPORTED;
@@ -305,6 +297,10 @@
sp<IMediaSource> source = mImpl->getTrack(index);
+ if (source == nullptr) {
+ return ERROR_MALFORMED;
+ }
+
status_t ret = source->start();
if (ret != OK) {
return ret;
@@ -611,6 +607,11 @@
return OK;
}
+status_t NuMediaExtractor::getMetrics(Parcel *reply) {
+ status_t status = mImpl->getMetrics(reply);
+ return status;
+}
+
bool NuMediaExtractor::getTotalBitrate(int64_t *bitrate) const {
if (mTotalBitrate >= 0) {
*bitrate = mTotalBitrate;
@@ -618,7 +619,7 @@
}
off64_t size;
- if (mDurationUs >= 0 && mDataSource->getSize(&size) == OK) {
+ if (mDurationUs > 0 && mDataSource->getSize(&size) == OK) {
*bitrate = size * 8000000ll / mDurationUs; // in bits/sec
return true;
}
@@ -632,15 +633,7 @@
Mutex::Autolock autoLock(mLock);
int64_t bitrate;
- if (mIsWidevineExtractor) {
- sp<WVMExtractor> wvmExtractor =
- static_cast<WVMExtractor *>(mImpl.get());
-
- status_t finalStatus;
- *durationUs = wvmExtractor->getCachedDurationUs(&finalStatus);
- *eos = (finalStatus != OK);
- return true;
- } else if ((mDataSource->flags() & DataSource::kIsCachingDataSource)
+ if ((mDataSource->flags() & DataSource::kIsCachingDataSource)
&& getTotalBitrate(&bitrate)) {
sp<NuCachedSource2> cachedSource =
static_cast<NuCachedSource2 *>(mDataSource.get());
diff --git a/media/libstagefright/OMXClient.cpp b/media/libstagefright/OMXClient.cpp
index e40dbcf..5f9aa01 100644
--- a/media/libstagefright/OMXClient.cpp
+++ b/media/libstagefright/OMXClient.cpp
@@ -22,508 +22,40 @@
#endif
#include <utils/Log.h>
+#include <cutils/properties.h>
#include <binder/IServiceManager.h>
-#include <media/IMediaPlayerService.h>
#include <media/IMediaCodecService.h>
-#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/OMXClient.h>
-#include <cutils/properties.h>
-#include <utils/KeyedVector.h>
-#include "include/OMX.h"
+#include <media/IOMX.h>
+
+#include <media/omx/1.0/WOmx.h>
namespace android {
-static bool sCodecProcessEnabled = true;
-
-struct MuxOMX : public IOMX {
- MuxOMX(const sp<IOMX> &mediaServerOMX, const sp<IOMX> &mediaCodecOMX);
- virtual ~MuxOMX();
-
- // Nobody should be calling this. In case someone does anyway, just
- // return the media server IOMX.
- // TODO: return NULL
- virtual IBinder *onAsBinder() {
- ALOGE("MuxOMX::onAsBinder should not be called");
- return IInterface::asBinder(mMediaServerOMX).get();
- }
-
- virtual bool livesLocally(node_id node, pid_t pid);
-
- virtual status_t listNodes(List<ComponentInfo> *list);
-
- virtual status_t allocateNode(
- const char *name, const sp<IOMXObserver> &observer,
- sp<IBinder> *nodeBinder,
- node_id *node);
-
- virtual status_t freeNode(node_id node);
-
- virtual status_t sendCommand(
- node_id node, OMX_COMMANDTYPE cmd, OMX_S32 param);
-
- virtual status_t getParameter(
- node_id node, OMX_INDEXTYPE index,
- void *params, size_t size);
-
- virtual status_t setParameter(
- node_id node, OMX_INDEXTYPE index,
- const void *params, size_t size);
-
- virtual status_t getConfig(
- node_id node, OMX_INDEXTYPE index,
- void *params, size_t size);
-
- virtual status_t setConfig(
- node_id node, OMX_INDEXTYPE index,
- const void *params, size_t size);
-
- virtual status_t getState(
- node_id node, OMX_STATETYPE* state);
-
- virtual status_t storeMetaDataInBuffers(
- node_id node, OMX_U32 port_index, OMX_BOOL enable, MetadataBufferType *type);
-
- virtual status_t prepareForAdaptivePlayback(
- node_id node, OMX_U32 port_index, OMX_BOOL enable,
- OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight);
-
- virtual status_t configureVideoTunnelMode(
- node_id node, OMX_U32 portIndex, OMX_BOOL tunneled,
- OMX_U32 audioHwSync, native_handle_t **sidebandHandle);
-
- virtual status_t enableNativeBuffers(
- node_id node, OMX_U32 port_index, OMX_BOOL graphic, OMX_BOOL enable);
-
- virtual status_t getGraphicBufferUsage(
- node_id node, OMX_U32 port_index, OMX_U32* usage);
-
- virtual status_t useBuffer(
- node_id node, OMX_U32 port_index, const sp<IMemory> ¶ms,
- buffer_id *buffer, OMX_U32 allottedSize);
-
- virtual status_t useGraphicBuffer(
- node_id node, OMX_U32 port_index,
- const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer);
-
- virtual status_t updateGraphicBufferInMeta(
- node_id node, OMX_U32 port_index,
- const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer);
-
- virtual status_t updateNativeHandleInMeta(
- node_id node, OMX_U32 port_index,
- const sp<NativeHandle> &nativeHandle, buffer_id buffer);
-
- virtual status_t createInputSurface(
- node_id node, OMX_U32 port_index, android_dataspace dataSpace,
- sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type);
-
- virtual status_t createPersistentInputSurface(
- sp<IGraphicBufferProducer> *bufferProducer,
- sp<IGraphicBufferConsumer> *bufferConsumer);
-
- virtual status_t setInputSurface(
- node_id node, OMX_U32 port_index,
- const sp<IGraphicBufferConsumer> &bufferConsumer, MetadataBufferType *type);
-
- virtual status_t signalEndOfInputStream(node_id node);
-
- virtual status_t allocateSecureBuffer(
- node_id node, OMX_U32 port_index, size_t size,
- buffer_id *buffer, void **buffer_data, sp<NativeHandle> *native_handle);
-
- virtual status_t allocateBufferWithBackup(
- node_id node, OMX_U32 port_index, const sp<IMemory> ¶ms,
- buffer_id *buffer, OMX_U32 allottedSize);
-
- virtual status_t freeBuffer(
- node_id node, OMX_U32 port_index, buffer_id buffer);
-
- virtual status_t fillBuffer(node_id node, buffer_id buffer, int fenceFd);
-
- virtual status_t emptyBuffer(
- node_id node,
- buffer_id buffer,
- OMX_U32 range_offset, OMX_U32 range_length,
- OMX_U32 flags, OMX_TICKS timestamp, int fenceFd);
-
- virtual status_t getExtensionIndex(
- node_id node,
- const char *parameter_name,
- OMX_INDEXTYPE *index);
-
- virtual status_t setInternalOption(
- node_id node,
- OMX_U32 port_index,
- InternalOptionType type,
- const void *data,
- size_t size);
-
-private:
- mutable Mutex mLock;
-
- sp<IOMX> mMediaServerOMX;
- sp<IOMX> mMediaCodecOMX;
- sp<IOMX> mLocalOMX;
-
- typedef enum {
- LOCAL,
- MEDIAPROCESS,
- CODECPROCESS
- } node_location;
-
- KeyedVector<node_id, node_location> mNodeLocation;
-
- bool isLocalNode(node_id node) const;
- bool isLocalNode_l(node_id node) const;
- const sp<IOMX> &getOMX(node_id node) const;
- const sp<IOMX> &getOMX_l(node_id node) const;
-
- static node_location getPreferredCodecLocation(const char *name);
-
- DISALLOW_EVIL_CONSTRUCTORS(MuxOMX);
-};
-
-MuxOMX::MuxOMX(const sp<IOMX> &mediaServerOMX, const sp<IOMX> &mediaCodecOMX)
- : mMediaServerOMX(mediaServerOMX),
- mMediaCodecOMX(mediaCodecOMX) {
- ALOGI("MuxOMX ctor");
-}
-
-MuxOMX::~MuxOMX() {
-}
-
-bool MuxOMX::isLocalNode(node_id node) const {
- Mutex::Autolock autoLock(mLock);
-
- return isLocalNode_l(node);
-}
-
-bool MuxOMX::isLocalNode_l(node_id node) const {
- return mNodeLocation.valueFor(node) == LOCAL;
-}
-
-// static
-MuxOMX::node_location MuxOMX::getPreferredCodecLocation(const char *name) {
- if (sCodecProcessEnabled) {
- // all codecs go to codec process unless excluded using system property, in which case
- // all non-secure decoders, OMX.google.* codecs and encoders can go in the codec process
- // (non-OMX.google.* encoders can be excluded using system property.)
- if ((strcasestr(name, "decoder")
- && strcasestr(name, ".secure") != name + strlen(name) - 7)
- || (strcasestr(name, "encoder")
- && !property_get_bool("media.stagefright.legacyencoder", false))
- || !property_get_bool("media.stagefright.less-secure", false)
- || !strncasecmp(name, "OMX.google.", 11)) {
- return CODECPROCESS;
- }
- // everything else runs in the media server
- return MEDIAPROCESS;
- } else {
-#ifdef __LP64__
- // 64 bit processes always run OMX remote on MediaServer
- return MEDIAPROCESS;
-#else
- // 32 bit processes run only OMX.google.* components locally
- if (!strncasecmp(name, "OMX.google.", 11)) {
- return LOCAL;
- }
- return MEDIAPROCESS;
-#endif
- }
-}
-
-const sp<IOMX> &MuxOMX::getOMX(node_id node) const {
- Mutex::Autolock autoLock(mLock);
- return getOMX_l(node);
-}
-
-const sp<IOMX> &MuxOMX::getOMX_l(node_id node) const {
- node_location loc = mNodeLocation.valueFor(node);
- if (loc == LOCAL) {
- return mLocalOMX;
- } else if (loc == MEDIAPROCESS) {
- return mMediaServerOMX;
- } else if (loc == CODECPROCESS) {
- return mMediaCodecOMX;
- }
- ALOGE("Couldn't determine node location for node %d: %d, using local", node, loc);
- return mLocalOMX;
-}
-
-bool MuxOMX::livesLocally(node_id node, pid_t pid) {
- return getOMX(node)->livesLocally(node, pid);
-}
-
-status_t MuxOMX::listNodes(List<ComponentInfo> *list) {
- Mutex::Autolock autoLock(mLock);
-
- if (mLocalOMX == NULL) {
- mLocalOMX = new OMX;
- }
-
- return mLocalOMX->listNodes(list);
-}
-
-status_t MuxOMX::allocateNode(
- const char *name, const sp<IOMXObserver> &observer,
- sp<IBinder> *nodeBinder,
- node_id *node) {
- Mutex::Autolock autoLock(mLock);
-
- sp<IOMX> omx;
-
- node_location loc = getPreferredCodecLocation(name);
- if (loc == CODECPROCESS) {
- omx = mMediaCodecOMX;
- } else if (loc == MEDIAPROCESS) {
- omx = mMediaServerOMX;
- } else {
- if (mLocalOMX == NULL) {
- mLocalOMX = new OMX;
- }
- omx = mLocalOMX;
- }
-
- status_t err = omx->allocateNode(name, observer, nodeBinder, node);
- ALOGV("allocated node_id %x on %s OMX", *node, omx == mMediaCodecOMX ? "codecprocess" :
- omx == mMediaServerOMX ? "mediaserver" : "local");
-
-
- if (err != OK) {
- return err;
- }
-
- mNodeLocation.add(*node, loc);
-
- return OK;
-}
-
-status_t MuxOMX::freeNode(node_id node) {
- Mutex::Autolock autoLock(mLock);
-
- // exit if we have already freed the node
- if (mNodeLocation.indexOfKey(node) < 0) {
- ALOGD("MuxOMX::freeNode: node %d seems to be released already --- ignoring.", node);
- return OK;
- }
-
- status_t err = getOMX_l(node)->freeNode(node);
-
- if (err != OK) {
- return err;
- }
-
- mNodeLocation.removeItem(node);
-
- return OK;
-}
-
-status_t MuxOMX::sendCommand(
- node_id node, OMX_COMMANDTYPE cmd, OMX_S32 param) {
- return getOMX(node)->sendCommand(node, cmd, param);
-}
-
-status_t MuxOMX::getParameter(
- node_id node, OMX_INDEXTYPE index,
- void *params, size_t size) {
- return getOMX(node)->getParameter(node, index, params, size);
-}
-
-status_t MuxOMX::setParameter(
- node_id node, OMX_INDEXTYPE index,
- const void *params, size_t size) {
- return getOMX(node)->setParameter(node, index, params, size);
-}
-
-status_t MuxOMX::getConfig(
- node_id node, OMX_INDEXTYPE index,
- void *params, size_t size) {
- return getOMX(node)->getConfig(node, index, params, size);
-}
-
-status_t MuxOMX::setConfig(
- node_id node, OMX_INDEXTYPE index,
- const void *params, size_t size) {
- return getOMX(node)->setConfig(node, index, params, size);
-}
-
-status_t MuxOMX::getState(
- node_id node, OMX_STATETYPE* state) {
- return getOMX(node)->getState(node, state);
-}
-
-status_t MuxOMX::storeMetaDataInBuffers(
- node_id node, OMX_U32 port_index, OMX_BOOL enable, MetadataBufferType *type) {
- return getOMX(node)->storeMetaDataInBuffers(node, port_index, enable, type);
-}
-
-status_t MuxOMX::prepareForAdaptivePlayback(
- node_id node, OMX_U32 port_index, OMX_BOOL enable,
- OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight) {
- return getOMX(node)->prepareForAdaptivePlayback(
- node, port_index, enable, maxFrameWidth, maxFrameHeight);
-}
-
-status_t MuxOMX::configureVideoTunnelMode(
- node_id node, OMX_U32 portIndex, OMX_BOOL enable,
- OMX_U32 audioHwSync, native_handle_t **sidebandHandle) {
- return getOMX(node)->configureVideoTunnelMode(
- node, portIndex, enable, audioHwSync, sidebandHandle);
-}
-
-status_t MuxOMX::enableNativeBuffers(
- node_id node, OMX_U32 port_index, OMX_BOOL graphic, OMX_BOOL enable) {
- return getOMX(node)->enableNativeBuffers(node, port_index, graphic, enable);
-}
-
-status_t MuxOMX::getGraphicBufferUsage(
- node_id node, OMX_U32 port_index, OMX_U32* usage) {
- return getOMX(node)->getGraphicBufferUsage(node, port_index, usage);
-}
-
-status_t MuxOMX::useBuffer(
- node_id node, OMX_U32 port_index, const sp<IMemory> ¶ms,
- buffer_id *buffer, OMX_U32 allottedSize) {
- return getOMX(node)->useBuffer(node, port_index, params, buffer, allottedSize);
-}
-
-status_t MuxOMX::useGraphicBuffer(
- node_id node, OMX_U32 port_index,
- const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer) {
- return getOMX(node)->useGraphicBuffer(
- node, port_index, graphicBuffer, buffer);
-}
-
-status_t MuxOMX::updateGraphicBufferInMeta(
- node_id node, OMX_U32 port_index,
- const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer) {
- return getOMX(node)->updateGraphicBufferInMeta(
- node, port_index, graphicBuffer, buffer);
-}
-
-status_t MuxOMX::updateNativeHandleInMeta(
- node_id node, OMX_U32 port_index,
- const sp<NativeHandle> &nativeHandle, buffer_id buffer) {
- return getOMX(node)->updateNativeHandleInMeta(
- node, port_index, nativeHandle, buffer);
-}
-
-status_t MuxOMX::createInputSurface(
- node_id node, OMX_U32 port_index, android_dataspace dataSpace,
- sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
- status_t err = getOMX(node)->createInputSurface(
- node, port_index, dataSpace, bufferProducer, type);
- return err;
-}
-
-status_t MuxOMX::createPersistentInputSurface(
- sp<IGraphicBufferProducer> *bufferProducer,
- sp<IGraphicBufferConsumer> *bufferConsumer) {
- sp<IOMX> omx;
- {
- Mutex::Autolock autoLock(mLock);
- if (property_get_bool("media.stagefright.legacyencoder", false)) {
- omx = mMediaServerOMX;
- } else {
- omx = mMediaCodecOMX;
- }
- }
- return omx->createPersistentInputSurface(
- bufferProducer, bufferConsumer);
-}
-
-status_t MuxOMX::setInputSurface(
- node_id node, OMX_U32 port_index,
- const sp<IGraphicBufferConsumer> &bufferConsumer, MetadataBufferType *type) {
- return getOMX(node)->setInputSurface(node, port_index, bufferConsumer, type);
-}
-
-status_t MuxOMX::signalEndOfInputStream(node_id node) {
- return getOMX(node)->signalEndOfInputStream(node);
-}
-
-status_t MuxOMX::allocateSecureBuffer(
- node_id node, OMX_U32 port_index, size_t size,
- buffer_id *buffer, void **buffer_data, sp<NativeHandle> *native_handle) {
- return getOMX(node)->allocateSecureBuffer(
- node, port_index, size, buffer, buffer_data, native_handle);
-}
-
-status_t MuxOMX::allocateBufferWithBackup(
- node_id node, OMX_U32 port_index, const sp<IMemory> ¶ms,
- buffer_id *buffer, OMX_U32 allottedSize) {
- return getOMX(node)->allocateBufferWithBackup(
- node, port_index, params, buffer, allottedSize);
-}
-
-status_t MuxOMX::freeBuffer(
- node_id node, OMX_U32 port_index, buffer_id buffer) {
- return getOMX(node)->freeBuffer(node, port_index, buffer);
-}
-
-status_t MuxOMX::fillBuffer(node_id node, buffer_id buffer, int fenceFd) {
- return getOMX(node)->fillBuffer(node, buffer, fenceFd);
-}
-
-status_t MuxOMX::emptyBuffer(
- node_id node,
- buffer_id buffer,
- OMX_U32 range_offset, OMX_U32 range_length,
- OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
- return getOMX(node)->emptyBuffer(
- node, buffer, range_offset, range_length, flags, timestamp, fenceFd);
-}
-
-status_t MuxOMX::getExtensionIndex(
- node_id node,
- const char *parameter_name,
- OMX_INDEXTYPE *index) {
- return getOMX(node)->getExtensionIndex(node, parameter_name, index);
-}
-
-status_t MuxOMX::setInternalOption(
- node_id node,
- OMX_U32 port_index,
- InternalOptionType type,
- const void *data,
- size_t size) {
- return getOMX(node)->setInternalOption(node, port_index, type, data, size);
-}
-
OMXClient::OMXClient() {
- char value[PROPERTY_VALUE_MAX];
- if (property_get("media.stagefright.codecremote", value, NULL)
- && (!strcmp("0", value) || !strcasecmp("false", value))) {
- sCodecProcessEnabled = false;
- }
}
status_t OMXClient::connect() {
+ return connect(nullptr);
+}
+
+status_t OMXClient::connect(bool* trebleFlag) {
+ if (property_get_bool("persist.media.treble_omx", true)) {
+ if (trebleFlag != nullptr) {
+ *trebleFlag = true;
+ }
+ return connectTreble();
+ }
+ if (trebleFlag != nullptr) {
+ *trebleFlag = false;
+ }
+ return connectLegacy();
+}
+
+status_t OMXClient::connectLegacy() {
sp<IServiceManager> sm = defaultServiceManager();
- sp<IBinder> playerbinder = sm->getService(String16("media.player"));
- sp<IMediaPlayerService> mediaservice = interface_cast<IMediaPlayerService>(playerbinder);
-
- if (mediaservice.get() == NULL) {
- ALOGE("Cannot obtain IMediaPlayerService");
- return NO_INIT;
- }
-
- sp<IOMX> mediaServerOMX = mediaservice->getOMX();
- if (mediaServerOMX.get() == NULL) {
- ALOGE("Cannot obtain mediaserver IOMX");
- return NO_INIT;
- }
-
- // If we don't want to use the codec process, and the media server OMX
- // is local, use it directly instead of going through MuxOMX
- if (!sCodecProcessEnabled &&
- mediaServerOMX->livesLocally(0 /* node */, getpid())) {
- mOMX = mediaServerOMX;
- return OK;
- }
-
sp<IBinder> codecbinder = sm->getService(String16("media.codec"));
sp<IMediaCodecService> codecservice = interface_cast<IMediaCodecService>(codecbinder);
@@ -532,22 +64,33 @@
return NO_INIT;
}
- sp<IOMX> mediaCodecOMX = codecservice->getOMX();
- if (mediaCodecOMX.get() == NULL) {
+ mOMX = codecservice->getOMX();
+ if (mOMX.get() == NULL) {
ALOGE("Cannot obtain mediacodec IOMX");
return NO_INIT;
}
- mOMX = new MuxOMX(mediaServerOMX, mediaCodecOMX);
+ return OK;
+}
+status_t OMXClient::connectTreble() {
+ using namespace ::android::hardware::media::omx::V1_0;
+ sp<IOmx> tOmx = IOmx::getService("default");
+ if (tOmx.get() == nullptr) {
+ ALOGE("Cannot obtain Treble IOmx.");
+ return NO_INIT;
+ }
+ if (!tOmx->isRemote()) {
+ ALOGE("Treble IOmx is in passthrough mode.");
+ return NO_INIT;
+ }
+ mOMX = new utils::LWOmx(tOmx);
+ ALOGI("Treble IOmx obtained");
return OK;
}
void OMXClient::disconnect() {
- if (mOMX.get() != NULL) {
- mOMX.clear();
- mOMX = NULL;
- }
+ mOMX.clear();
}
} // namespace android
diff --git a/media/libstagefright/OggExtractor.cpp b/media/libstagefright/OggExtractor.cpp
index 0343786..e31c37c 100644
--- a/media/libstagefright/OggExtractor.cpp
+++ b/media/libstagefright/OggExtractor.cpp
@@ -697,7 +697,21 @@
if (buffer != NULL) {
fullSize += buffer->range_length();
}
- MediaBuffer *tmp = new MediaBuffer(fullSize);
+ if (fullSize > 16 * 1024 * 1024) { // arbitrary limit of 16 MB packet size
+ if (buffer != NULL) {
+ buffer->release();
+ }
+ ALOGE("b/36592202");
+ return ERROR_MALFORMED;
+ }
+ MediaBuffer *tmp = new (std::nothrow) MediaBuffer(fullSize);
+ if (tmp == NULL) {
+ if (buffer != NULL) {
+ buffer->release();
+ }
+ ALOGE("b/36592202");
+ return ERROR_MALFORMED;
+ }
if (buffer != NULL) {
memcpy(tmp->data(), buffer->data(), buffer->range_length());
tmp->set_range(0, buffer->range_length());
diff --git a/media/libstagefright/SampleTable.cpp b/media/libstagefright/SampleTable.cpp
index 93cd275..1d2a931 100644
--- a/media/libstagefright/SampleTable.cpp
+++ b/media/libstagefright/SampleTable.cpp
@@ -566,7 +566,7 @@
if (mDataSource->readAt(data_offset + 8, mSyncSamples,
(size_t)allocSize) != (ssize_t)allocSize) {
- delete mSyncSamples;
+ delete[] mSyncSamples;
mSyncSamples = NULL;
return ERROR_IO;
}
@@ -998,4 +998,3 @@
}
} // namespace android
-
diff --git a/media/libstagefright/SimpleDecodingSource.cpp b/media/libstagefright/SimpleDecodingSource.cpp
index 2503a32..ea7d5af 100644
--- a/media/libstagefright/SimpleDecodingSource.cpp
+++ b/media/libstagefright/SimpleDecodingSource.cpp
@@ -17,7 +17,7 @@
#include <gui/Surface.h>
#include <media/ICrypto.h>
-#include <media/stagefright/foundation/ABuffer.h>
+#include <media/MediaCodecBuffer.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -230,7 +230,7 @@
break;
}
- sp<ABuffer> in_buffer;
+ sp<MediaCodecBuffer> in_buffer;
if (res == OK) {
res = mCodec->getInputBuffer(in_ix, &in_buffer);
}
@@ -344,7 +344,7 @@
return res;
}
- sp<ABuffer> out_buffer;
+ sp<MediaCodecBuffer> out_buffer;
res = mCodec->getOutputBuffer(out_ix, &out_buffer);
if (res != OK) {
ALOGW("[%s] could not get output buffer #%zu",
diff --git a/media/libstagefright/SkipCutBuffer.cpp b/media/libstagefright/SkipCutBuffer.cpp
index d30be88..ee9016d 100644
--- a/media/libstagefright/SkipCutBuffer.cpp
+++ b/media/libstagefright/SkipCutBuffer.cpp
@@ -88,7 +88,8 @@
buffer->set_range(0, copied);
}
-void SkipCutBuffer::submit(const sp<ABuffer>& buffer) {
+template <typename T>
+void SkipCutBuffer::submitInternal(const sp<T>& buffer) {
if (mCutBuffer == NULL) {
// passthrough mode
return;
@@ -120,6 +121,14 @@
buffer->setRange(0, copied);
}
+void SkipCutBuffer::submit(const sp<ABuffer>& buffer) {
+ submitInternal(buffer);
+}
+
+void SkipCutBuffer::submit(const sp<MediaCodecBuffer>& buffer) {
+ submitInternal(buffer);
+}
+
void SkipCutBuffer::clear() {
mWriteHead = mReadHead = 0;
mFrontPadding = mSkip;
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index 96e506e..03dc9df 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -27,8 +27,8 @@
#include <media/ICrypto.h>
#include <media/IMediaHTTPService.h>
+#include <media/MediaCodecBuffer.h>
-#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/ColorConverter.h>
@@ -55,8 +55,6 @@
: mParsedMetaData(false),
mAlbumArt(NULL) {
ALOGV("StagefrightMetadataRetriever()");
-
- DataSource::RegisterDefaultSniffers();
}
StagefrightMetadataRetriever::~StagefrightMetadataRetriever() {
@@ -223,7 +221,7 @@
return NULL;
}
- Vector<sp<ABuffer> > inputBuffers;
+ Vector<sp<MediaCodecBuffer> > inputBuffers;
err = decoder->getInputBuffers(&inputBuffers);
if (err != OK) {
ALOGW("failed to get input buffers: %d (%s)", err, asString(err));
@@ -232,7 +230,7 @@
return NULL;
}
- Vector<sp<ABuffer> > outputBuffers;
+ Vector<sp<MediaCodecBuffer> > outputBuffers;
err = decoder->getOutputBuffers(&outputBuffers);
if (err != OK) {
ALOGW("failed to get output buffers: %d (%s)", err, asString(err));
@@ -264,7 +262,7 @@
size_t inputIndex = -1;
int64_t ptsUs = 0ll;
uint32_t flags = 0;
- sp<ABuffer> codecBuffer = NULL;
+ sp<MediaCodecBuffer> codecBuffer = NULL;
while (haveMoreInputs) {
err = decoder->dequeueInputBuffer(&inputIndex, kBufferTimeOutUs);
@@ -376,7 +374,7 @@
}
ALOGV("successfully decoded video frame.");
- sp<ABuffer> videoFrameBuffer = outputBuffers.itemAt(index);
+ sp<MediaCodecBuffer> videoFrameBuffer = outputBuffers.itemAt(index);
if (thumbNailTime >= 0) {
if (timeUs != thumbNailTime) {
@@ -418,6 +416,22 @@
&& trackMeta->findInt32(kKeySARHeight, &sarHeight)
&& sarHeight != 0) {
frame->mDisplayWidth = (frame->mDisplayWidth * sarWidth) / sarHeight;
+ } else {
+ int32_t width, height;
+ if (trackMeta->findInt32(kKeyDisplayWidth, &width)
+ && trackMeta->findInt32(kKeyDisplayHeight, &height)
+ && frame->mDisplayWidth > 0 && frame->mDisplayHeight > 0
+ && width > 0 && height > 0) {
+ if (frame->mDisplayHeight * (int64_t)width / height > (int64_t)frame->mDisplayWidth) {
+ frame->mDisplayHeight =
+ (int32_t)(height * (int64_t)frame->mDisplayWidth / width);
+ } else {
+ frame->mDisplayWidth =
+ (int32_t)(frame->mDisplayHeight * (int64_t)width / height);
+ }
+ ALOGV("thumbNail width and height are overridden to %d x %d",
+ frame->mDisplayWidth, frame->mDisplayHeight);
+ }
}
int32_t srcFormat;
@@ -754,9 +768,9 @@
if (numTracks == 1) {
const char *fileMIME;
- CHECK(meta->findCString(kKeyMIMEType, &fileMIME));
- if (!strcasecmp(fileMIME, "video/x-matroska")) {
+ if (meta->findCString(kKeyMIMEType, &fileMIME) &&
+ !strcasecmp(fileMIME, "video/x-matroska")) {
sp<MetaData> trackMeta = mExtractor->getTrackMetaData(0);
const char *trackMIME;
CHECK(trackMeta->findCString(kKeyMIMEType, &trackMIME));
@@ -769,11 +783,6 @@
}
}
}
-
- // To check whether the media file is drm-protected
- if (mExtractor->getDrmFlag()) {
- mMetaData.add(METADATA_KEY_IS_DRM, String8("1"));
- }
}
void StagefrightMetadataRetriever::clearMetadata() {
diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp
index eadab86..d14e86b 100644
--- a/media/libstagefright/SurfaceMediaSource.cpp
+++ b/media/libstagefright/SurfaceMediaSource.cpp
@@ -29,7 +29,6 @@
#include <ui/GraphicBuffer.h>
#include <gui/BufferItem.h>
#include <gui/ISurfaceComposer.h>
-#include <gui/IGraphicBufferAlloc.h>
#include <OMX_Component.h>
#include <utils/Log.h>
diff --git a/media/libstagefright/SurfaceUtils.cpp b/media/libstagefright/SurfaceUtils.cpp
index 568837a..b7c1598 100644
--- a/media/libstagefright/SurfaceUtils.cpp
+++ b/media/libstagefright/SurfaceUtils.cpp
@@ -31,15 +31,15 @@
// In some cases we need to reconnect so that we can dequeue all buffers
if (reconnect) {
- err = native_window_api_disconnect(nativeWindow, NATIVE_WINDOW_API_MEDIA);
+ err = nativeWindowDisconnect(nativeWindow, "setNativeWindowSizeFormatAndUsage");
if (err != NO_ERROR) {
- ALOGE("native_window_api_disconnect failed: %s (%d)", strerror(-err), -err);
+ ALOGE("nativeWindowDisconnect failed: %s (%d)", strerror(-err), -err);
return err;
}
- err = native_window_api_connect(nativeWindow, NATIVE_WINDOW_API_MEDIA);
+ err = nativeWindowConnect(nativeWindow, "setNativeWindowSizeFormatAndUsage");
if (err != NO_ERROR) {
- ALOGE("native_window_api_connect failed: %s (%d)", strerror(-err), -err);
+ ALOGE("nativeWindowConnect failed: %s (%d)", strerror(-err), -err);
return err;
}
}
@@ -91,9 +91,19 @@
return err;
}
- // Check if the ANativeWindow uses hardware protected buffers.
- if (queuesToNativeWindow != 1 && !(consumerUsage & GRALLOC_USAGE_PROTECTED)) {
- ALOGE("native window could not be authenticated");
+ // Check if the consumer end of the ANativeWindow can handle protected content.
+ int isConsumerProtected = 0;
+ err = nativeWindow->query(
+ nativeWindow, NATIVE_WINDOW_CONSUMER_IS_PROTECTED, &isConsumerProtected);
+ if (err != NO_ERROR) {
+ ALOGE("error query native window: %s (%d)", strerror(-err), -err);
+ return err;
+ }
+
+ // Deny queuing into native window if neither condition is satisfied.
+ if (queuesToNativeWindow != 1 && isConsumerProtected != 1) {
+ ALOGE("native window cannot handle protected buffers: the consumer should either be "
+ "a hardware composer or support hardware protection");
return PERMISSION_DENIED;
}
}
@@ -127,7 +137,7 @@
// We need to reconnect to the ANativeWindow as a CPU client to ensure that
// no frames get dropped by SurfaceFlinger assuming that these are video
// frames.
- err = native_window_api_disconnect(nativeWindow, NATIVE_WINDOW_API_MEDIA);
+ err = nativeWindowDisconnect(nativeWindow, "pushBlankBuffersToNativeWindow");
if (err != NO_ERROR) {
ALOGE("error pushing blank frames: api_disconnect failed: %s (%d)", strerror(-err), -err);
return err;
@@ -136,7 +146,7 @@
err = native_window_api_connect(nativeWindow, NATIVE_WINDOW_API_CPU);
if (err != NO_ERROR) {
ALOGE("error pushing blank frames: api_connect failed: %s (%d)", strerror(-err), -err);
- (void)native_window_api_connect(nativeWindow, NATIVE_WINDOW_API_MEDIA);
+ (void)nativeWindowConnect(nativeWindow, "pushBlankBuffersToNativeWindow(err)");
return err;
}
@@ -176,7 +186,7 @@
break;
}
- sp<GraphicBuffer> buf(new GraphicBuffer(anb, false));
+ sp<GraphicBuffer> buf(GraphicBuffer::from(anb));
// Fill the buffer with the a 1x1 checkerboard pattern ;)
uint32_t *img = NULL;
@@ -219,7 +229,7 @@
}
}
- err2 = native_window_api_connect(nativeWindow, NATIVE_WINDOW_API_MEDIA);
+ err2 = nativeWindowConnect(nativeWindow, "pushBlankBuffersToNativeWindow(err2)");
if (err2 != NO_ERROR) {
ALOGE("error pushing blank frames: api_connect failed: %s (%d)", strerror(-err), -err);
if (err == NO_ERROR) {
@@ -230,5 +240,22 @@
return err;
}
+status_t nativeWindowConnect(ANativeWindow *surface, const char *reason) {
+ ALOGD("connecting to surface %p, reason %s", surface, reason);
+
+ status_t err = native_window_api_connect(surface, NATIVE_WINDOW_API_MEDIA);
+ ALOGE_IF(err != OK, "Failed to connect to surface %p, err %d", surface, err);
+
+ return err;
+}
+
+status_t nativeWindowDisconnect(ANativeWindow *surface, const char *reason) {
+ ALOGD("disconnecting from surface %p, reason %s", surface, reason);
+
+ status_t err = native_window_api_disconnect(surface, NATIVE_WINDOW_API_MEDIA);
+ ALOGE_IF(err != OK, "Failed to disconnect from surface %p, err %d", surface, err);
+
+ return err;
+}
} // namespace android
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 36be7a0..621c2ce 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -41,9 +41,9 @@
#include <media/stagefright/MediaDefs.h>
#include <media/AudioSystem.h>
#include <media/MediaPlayerInterface.h>
-#include <hardware/audio.h>
#include <media/stagefright/Utils.h>
#include <media/AudioParameter.h>
+#include <system/audio.h>
namespace android {
@@ -610,6 +610,31 @@
sp<AMessage> msg = new AMessage;
msg->setString("mime", mime);
+ uint32_t type;
+ const void *data;
+ size_t size;
+ if (meta->findData(kKeyCASessionID, &type, &data, &size)) {
+ sp<ABuffer> buffer = new (std::nothrow) ABuffer(size);
+ if (buffer.get() == NULL || buffer->base() == NULL) {
+ return NO_MEMORY;
+ }
+
+ msg->setBuffer("ca-session-id", buffer);
+ memcpy(buffer->data(), data, size);
+ }
+
+ int32_t systemId;
+ if (meta->findInt32(kKeyCASystemID, &systemId)) {
+ msg->setInt32("ca-system-id", systemId);
+ }
+
+ if (!strncasecmp("video/scrambled", mime, 15)
+ || !strncasecmp("audio/scrambled", mime, 15)) {
+
+ *format = msg;
+ return OK;
+ }
+
int64_t durationUs;
if (meta->findInt64(kKeyDuration, &durationUs)) {
msg->setInt64("durationUs", durationUs);
@@ -637,6 +662,11 @@
msg->setInt32("track-id", trackID);
}
+ const char *lang;
+ if (meta->findCString(kKeyMediaLanguage, &lang)) {
+ msg->setString("language", lang);
+ }
+
if (!strncasecmp("video/", mime, 6)) {
int32_t width, height;
if (!meta->findInt32(kKeyWidth, &width)
@@ -647,6 +677,13 @@
msg->setInt32("width", width);
msg->setInt32("height", height);
+ int32_t displayWidth, displayHeight;
+ if (meta->findInt32(kKeyDisplayWidth, &displayWidth)
+ && meta->findInt32(kKeyDisplayHeight, &displayHeight)) {
+ msg->setInt32("display-width", displayWidth);
+ msg->setInt32("display-height", displayHeight);
+ }
+
int32_t sarWidth, sarHeight;
if (meta->findInt32(kKeySARWidth, &sarWidth)
&& meta->findInt32(kKeySARHeight, &sarHeight)) {
@@ -747,9 +784,6 @@
msg->setInt32("frame-rate", fps);
}
- uint32_t type;
- const void *data;
- size_t size;
if (meta->findData(kKeyAVCC, &type, &data, &size)) {
// Parse the AVCDecoderConfigurationRecord
@@ -1273,6 +1307,11 @@
meta->setInt32(kKeyMaxBitRate, maxBitrate);
}
+ AString lang;
+ if (msg->findString("language", &lang)) {
+ meta->setCString(kKeyMediaLanguage, lang.c_str());
+ }
+
if (mime.startsWith("video/")) {
int32_t width;
int32_t height;
@@ -1290,6 +1329,13 @@
meta->setInt32(kKeySARHeight, sarHeight);
}
+ int32_t displayWidth, displayHeight;
+ if (msg->findInt32("display-width", &displayWidth)
+ && msg->findInt32("display-height", &displayHeight)) {
+ meta->setInt32(kKeyDisplayWidth, displayWidth);
+ meta->setInt32(kKeyDisplayHeight, displayHeight);
+ }
+
int32_t colorFormat;
if (msg->findInt32("color-format", &colorFormat)) {
meta->setInt32(kKeyColorFormat, colorFormat);
@@ -1505,6 +1551,7 @@
{ MEDIA_MIMETYPE_AUDIO_AAC, AUDIO_FORMAT_AAC },
{ MEDIA_MIMETYPE_AUDIO_VORBIS, AUDIO_FORMAT_VORBIS },
{ MEDIA_MIMETYPE_AUDIO_OPUS, AUDIO_FORMAT_OPUS},
+ { MEDIA_MIMETYPE_AUDIO_AC3, AUDIO_FORMAT_AC3},
{ 0, AUDIO_FORMAT_INVALID }
};
@@ -1634,9 +1681,7 @@
return AString("<URI suppressed>");
}
- char prop[PROPERTY_VALUE_MAX];
- if (property_get("media.stagefright.log-uri", prop, "false") &&
- (!strcmp(prop, "1") || !strcmp(prop, "true"))) {
+ if (property_get_bool("media.stagefright.log-uri", false)) {
return uri;
}
@@ -1747,6 +1792,45 @@
*sync = settings;
}
+void writeToAMessage(const sp<AMessage> &msg, const BufferingSettings &buffering) {
+ msg->setInt32("init-mode", buffering.mInitialBufferingMode);
+ msg->setInt32("rebuffer-mode", buffering.mRebufferingMode);
+ msg->setInt32("init-ms", buffering.mInitialWatermarkMs);
+ msg->setInt32("init-kb", buffering.mInitialWatermarkKB);
+ msg->setInt32("rebuffer-low-ms", buffering.mRebufferingWatermarkLowMs);
+ msg->setInt32("rebuffer-high-ms", buffering.mRebufferingWatermarkHighMs);
+ msg->setInt32("rebuffer-low-kb", buffering.mRebufferingWatermarkLowKB);
+ msg->setInt32("rebuffer-high-kb", buffering.mRebufferingWatermarkHighKB);
+}
+
+void readFromAMessage(const sp<AMessage> &msg, BufferingSettings *buffering /* nonnull */) {
+ int32_t value;
+ if (msg->findInt32("init-mode", &value)) {
+ buffering->mInitialBufferingMode = (BufferingMode)value;
+ }
+ if (msg->findInt32("rebuffer-mode", &value)) {
+ buffering->mRebufferingMode = (BufferingMode)value;
+ }
+ if (msg->findInt32("init-ms", &value)) {
+ buffering->mInitialWatermarkMs = value;
+ }
+ if (msg->findInt32("init-kb", &value)) {
+ buffering->mInitialWatermarkKB = value;
+ }
+ if (msg->findInt32("rebuffer-low-ms", &value)) {
+ buffering->mRebufferingWatermarkLowMs = value;
+ }
+ if (msg->findInt32("rebuffer-high-ms", &value)) {
+ buffering->mRebufferingWatermarkHighMs = value;
+ }
+ if (msg->findInt32("rebuffer-low-kb", &value)) {
+ buffering->mRebufferingWatermarkLowKB = value;
+ }
+ if (msg->findInt32("rebuffer-high-kb", &value)) {
+ buffering->mRebufferingWatermarkHighKB = value;
+ }
+}
+
AString nameForFd(int fd) {
const size_t SIZE = 256;
char buffer[SIZE];
diff --git a/media/libstagefright/WVMExtractor.cpp b/media/libstagefright/WVMExtractor.cpp
deleted file mode 100644
index 1c170b8..0000000
--- a/media/libstagefright/WVMExtractor.cpp
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "WVMExtractor"
-#include <utils/Log.h>
-
-#include "include/WVMExtractor.h"
-
-#include <arpa/inet.h>
-#include <utils/String8.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/Utils.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <dlfcn.h>
-
-#include <utils/Errors.h>
-
-/* The extractor lifetime is short - just long enough to get
- * the media sources constructed - so the shared lib needs to remain open
- * beyond the lifetime of the extractor. So keep the handle as a global
- * rather than a member of the extractor
- */
-void *gVendorLibHandle = NULL;
-
-namespace android {
-
-WVMExtractor::WVMExtractor(const sp<DataSource> &source)
- : mDataSource(source)
-{
- if (!getVendorLibHandle()) {
- return;
- }
-
- typedef WVMLoadableExtractor *(*GetInstanceFunc)(sp<DataSource>);
- GetInstanceFunc getInstanceFunc =
- (GetInstanceFunc) dlsym(gVendorLibHandle,
- "_ZN7android11GetInstanceENS_2spINS_10DataSourceEEE");
-
- if (getInstanceFunc) {
- if (source->DrmInitialization(
- MEDIA_MIMETYPE_CONTAINER_WVM) != NULL) {
- mImpl = (*getInstanceFunc)(source);
- CHECK(mImpl != NULL);
- setDrmFlag(true);
- } else {
- ALOGE("Drm manager failed to initialize.");
- }
- } else {
- ALOGE("Failed to locate GetInstance in libwvm.so");
- }
-}
-
-static void init_routine()
-{
- gVendorLibHandle = dlopen("libwvm.so", RTLD_NOW);
- if (gVendorLibHandle == NULL) {
- ALOGE("Failed to open libwvm.so: %s", dlerror());
- }
-}
-
-bool WVMExtractor::getVendorLibHandle()
-{
- static pthread_once_t sOnceControl = PTHREAD_ONCE_INIT;
- pthread_once(&sOnceControl, init_routine);
-
- return gVendorLibHandle != NULL;
-}
-
-WVMExtractor::~WVMExtractor() {
-}
-
-size_t WVMExtractor::countTracks() {
- return (mImpl != NULL) ? mImpl->countTracks() : 0;
-}
-
-sp<IMediaSource> WVMExtractor::getTrack(size_t index) {
- if (mImpl == NULL) {
- return NULL;
- }
- return mImpl->getTrack(index);
-}
-
-sp<MetaData> WVMExtractor::getTrackMetaData(size_t index, uint32_t flags) {
- if (mImpl == NULL) {
- return NULL;
- }
- return mImpl->getTrackMetaData(index, flags);
-}
-
-sp<MetaData> WVMExtractor::getMetaData() {
- if (mImpl == NULL) {
- return NULL;
- }
- return mImpl->getMetaData();
-}
-
-int64_t WVMExtractor::getCachedDurationUs(status_t *finalStatus) {
- if (mImpl == NULL) {
- return 0;
- }
-
- return mImpl->getCachedDurationUs(finalStatus);
-}
-
-status_t WVMExtractor::getEstimatedBandwidthKbps(int32_t *kbps) {
- if (mImpl == NULL) {
- return UNKNOWN_ERROR;
- }
-
- return mImpl->getEstimatedBandwidthKbps(kbps);
-}
-
-
-void WVMExtractor::setAdaptiveStreamingMode(bool adaptive) {
- if (mImpl != NULL) {
- mImpl->setAdaptiveStreamingMode(adaptive);
- }
-}
-
-void WVMExtractor::setCryptoPluginMode(bool cryptoPluginMode) {
- if (mImpl != NULL) {
- mImpl->setCryptoPluginMode(cryptoPluginMode);
- }
-}
-
-void WVMExtractor::setUID(uid_t uid) {
- if (mImpl != NULL) {
- mImpl->setUID(uid);
- }
-}
-
-status_t WVMExtractor::getError() {
- if (mImpl == NULL) {
- return UNKNOWN_ERROR;
- }
-
- return mImpl->getError();
-}
-
-void WVMExtractor::setError(status_t err) {
- if (mImpl != NULL) {
- mImpl->setError(err);
- }
-}
-
-bool SniffWVM(
- const sp<DataSource> &source, String8 *mimeType, float *confidence,
- sp<AMessage> *) {
-
- if (!WVMExtractor::getVendorLibHandle()) {
- return false;
- }
-
- typedef WVMLoadableExtractor *(*SnifferFunc)(const sp<DataSource>&);
- SnifferFunc snifferFunc =
- (SnifferFunc) dlsym(gVendorLibHandle,
- "_ZN7android15IsWidevineMediaERKNS_2spINS_10DataSourceEEE");
-
- if (snifferFunc) {
- if ((*snifferFunc)(source)) {
- *mimeType = MEDIA_MIMETYPE_CONTAINER_WVM;
- *confidence = 10.0f;
- return true;
- }
- } else {
- ALOGE("IsWidevineMedia not found in libwvm.so");
- }
-
- return false;
-}
-
-} //namespace android
-
diff --git a/media/libstagefright/avc_utils.cpp b/media/libstagefright/avc_utils.cpp
index 596f386..ce6d50f 100644
--- a/media/libstagefright/avc_utils.cpp
+++ b/media/libstagefright/avc_utils.cpp
@@ -432,8 +432,8 @@
meta->setInt32(kKeyWidth, width);
meta->setInt32(kKeyHeight, height);
- if (sarWidth > 1 || sarHeight > 1) {
- // We treat 0:0 (unspecified) as 1:1.
+ if ((sarWidth > 0 && sarHeight > 0) && (sarWidth != 1 || sarHeight != 1)) {
+ // We treat *:0 and 0:* (unspecified) as 1:1.
meta->setInt32(kKeySARWidth, sarWidth);
meta->setInt32(kKeySARHeight, sarHeight);
@@ -459,7 +459,8 @@
return meta;
}
-bool IsIDR(const sp<ABuffer> &buffer) {
+template <typename T>
+bool IsIDRInternal(const sp<T> &buffer) {
const uint8_t *data = buffer->data();
size_t size = buffer->size();
@@ -484,14 +485,29 @@
return foundIDR;
}
+bool IsIDR(const sp<ABuffer> &buffer) {
+ return IsIDRInternal(buffer);
+}
+
+bool IsIDR(const sp<MediaCodecBuffer> &buffer) {
+ return IsIDRInternal(buffer);
+}
+
bool IsAVCReferenceFrame(const sp<ABuffer> &accessUnit) {
const uint8_t *data = accessUnit->data();
size_t size = accessUnit->size();
+ if (data == NULL) {
+ ALOGE("IsAVCReferenceFrame: called on NULL data (%p, %zu)", accessUnit.get(), size);
+ return false;
+ }
const uint8_t *nalStart;
size_t nalSize;
while (getNextNALUnit(&data, &size, &nalStart, &nalSize, true) == OK) {
- CHECK_GT(nalSize, 0u);
+ if (nalSize == 0) {
+ ALOGE("IsAVCReferenceFrame: invalid nalSize: 0 (%p, %zu)", accessUnit.get(), size);
+ return false;
+ }
unsigned nalType = nalStart[0] & 0x1f;
diff --git a/media/libstagefright/codec2/Android.mk b/media/libstagefright/codec2/Android.mk
new file mode 100644
index 0000000..ef06ed7
--- /dev/null
+++ b/media/libstagefright/codec2/Android.mk
@@ -0,0 +1,21 @@
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+ C2.cpp \
+
+LOCAL_C_INCLUDES += \
+ $(TOP)/frameworks/av/media/libstagefright/codec2/include \
+ $(TOP)/frameworks/native/include/media/hardware \
+
+LOCAL_MODULE:= libstagefright_codec2
+LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CLANG := true
+LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow cfi
+LOCAL_SANITIZE_DIAG := cfi
+
+include $(BUILD_SHARED_LIBRARY)
+
+################################################################################
+
+include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/media/libstagefright/codec2/C2.cpp b/media/libstagefright/codec2/C2.cpp
new file mode 100644
index 0000000..a51b073
--- /dev/null
+++ b/media/libstagefright/codec2/C2.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <C2.h>
+#include <C2Buffer.h>
+#include <C2Component.h>
+#include <C2Config.h>
+#include <C2Param.h>
+#include <C2ParamDef.h>
+#include <C2Work.h>
+
+namespace android {
+
+/**
+ * There is nothing here yet. This library is built to see what symbols and methods get
+ * defined as part of the API include files.
+ *
+ * Going forward, the Codec2 library will contain utility methods that are useful for
+ * Codec2 clients.
+ */
+
+} // namespace android
+
diff --git a/media/libstagefright/codec2/include/C2.h b/media/libstagefright/codec2/include/C2.h
new file mode 100644
index 0000000..7d00a03
--- /dev/null
+++ b/media/libstagefright/codec2/include/C2.h
@@ -0,0 +1,287 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef C2_H_
+#define C2_H_
+
+#include <string>
+#include <vector>
+#include <list>
+
+#ifdef __ANDROID__
+
+#include <utils/Errors.h> // for status_t
+#include <utils/Timers.h> // for nsecs_t
+
+namespace android {
+
+#else
+
+#include <errno.h>
+typedef int64_t nsecs_t;
+
+enum {
+ GRALLOC_USAGE_SW_READ_OFTEN,
+ GRALLOC_USAGE_RENDERSCRIPT,
+ GRALLOC_USAGE_HW_TEXTURE,
+ GRALLOC_USAGE_HW_COMPOSER,
+ GRALLOC_USAGE_HW_VIDEO_ENCODER,
+ GRALLOC_USAGE_PROTECTED,
+ GRALLOC_USAGE_SW_WRITE_OFTEN,
+ GRALLOC_USAGE_HW_RENDER,
+};
+
+#endif
+
+/** \mainpage Codec2
+ *
+ * Codec2 is a frame-based data processing API used by android.
+ *
+ * The framework accesses components via the \ref API.
+ */
+
+/** \ingroup API
+ *
+ * The Codec2 API defines the operation of data processing components and their interaction with
+ * the rest of the system.
+ *
+ * Coding Conventions
+ *
+ * Mitigating Binary Compatibility.
+ *
+ * While full binary compatibility is not a goal of the API (due to our use of STL), we try to
+ * mitigate binary breaks by adhering to the following conventions:
+ *
+ * - at most one vtable with placeholder virtual methods
+ * - all optional/placeholder virtual methods returning a status_t, with C2_NOT_IMPLEMENTED not
+ * requiring any update to input/output arguments.
+ * - limiting symbol export of inline methods
+ * - use of pimpl (or shared-pimpl)
+ *
+ * Naming
+ *
+ * - all classes and types prefix with C2
+ * - classes for internal use prefix with _C2
+ * - enum values in global namespace prefix with C2_ all caps
+ * - enum values inside classes have no C2_ prefix as class already has it
+ * - supporting two kinds of enum naming: all-caps and kCamelCase
+ * \todo revisit kCamelCase for param-type
+ *
+ * Aspects
+ *
+ * Aspects define certain common behavior across a group of objects.
+ * - classes whose name matches _C2.*Aspect
+ * - only protected constructors
+ * - no desctructor and copiable
+ * - all methods are inline or static (this is opposite of the interface paradigm where all methods
+ * are virtual, which would not work due to the at most one vtable rule.)
+ * - only private variables (this prevents subclasses interfering with the aspects.)
+ */
+
+/// \defgroup types Common Types
+/// @{
+
+/**
+ * C2String: basic string implementation
+ */
+typedef std::string C2String;
+typedef const char *C2StringLiteral;
+
+/**
+ * C2Error: status codes used.
+ */
+typedef int32_t C2Error;
+enum {
+#ifndef __ANDROID__
+ OK = 0,
+ BAD_VALUE = -EINVAL,
+ BAD_INDEX = -EOVERFLOW,
+ UNKNOWN_TRANSACTION = -EBADMSG,
+ ALREADY_EXISTS = -EEXIST,
+ NAME_NOT_FOUND = -ENOENT,
+ INVALID_OPERATION = -ENOSYS,
+ NO_MEMORY = -ENOMEM,
+ PERMISSION_DENIED = -EPERM,
+ TIMED_OUT = -ETIMEDOUT,
+ UNKNOWN_ERROR = -EINVAL,
+#endif
+
+ C2_OK = OK, ///< operation completed successfully
+
+ // bad input
+ C2_BAD_VALUE = BAD_VALUE, ///< argument has invalid value (user error)
+ C2_BAD_INDEX = BAD_INDEX, ///< argument uses invalid index (user error)
+ C2_UNSUPPORTED = UNKNOWN_TRANSACTION, ///< argument/index is value but not supported \todo is this really BAD_INDEX/VALUE?
+
+ // bad sequencing of events
+ C2_DUPLICATE = ALREADY_EXISTS, ///< object already exists
+ C2_NOT_FOUND = NAME_NOT_FOUND, ///< object not found
+ C2_BAD_STATE = INVALID_OPERATION, ///< operation is not permitted in the current state
+
+ // bad environment
+ C2_NO_MEMORY = NO_MEMORY, ///< not enough memory to complete operation
+ C2_NO_PERMISSION = PERMISSION_DENIED, ///< missing permission to complete operation
+ C2_TIMED_OUT = TIMED_OUT, ///< operation did not complete within timeout
+
+ // bad versioning
+ C2_NOT_IMPLEMENTED = UNKNOWN_TRANSACTION, ///< operation is not implemented (optional only) \todo for now reuse error code
+
+ // unknown fatal
+ C2_CORRUPTED = UNKNOWN_ERROR, ///< some unexpected error prevented the operation
+};
+
+/// @}
+
+/// \defgroup utils Utilities
+/// @{
+
+#define C2_DO_NOT_COPY(type, args...) \
+ type args& operator=(const type args&) = delete; \
+ type(const type args&) = delete; \
+
+#define C2_PURE __attribute__((pure))
+#define C2_CONST __attribute__((const))
+#define C2_HIDE __attribute__((visibility("hidden")))
+#define C2_INTERNAL __attribute__((internal_linkage))
+
+#define DEFINE_OTHER_COMPARISON_OPERATORS(type) \
+ inline bool operator!=(const type &other) { return !(*this == other); } \
+ inline bool operator<=(const type &other) { return (*this == other) || (*this < other); } \
+ inline bool operator>=(const type &other) { return !(*this < other); } \
+ inline bool operator>(const type &other) { return !(*this < other) && !(*this == other); }
+
+#define DEFINE_FIELD_BASED_COMPARISON_OPERATORS(type, field) \
+ inline bool operator<(const type &other) const { return field < other.field; } \
+ inline bool operator==(const type &other) const { return field == other.field; } \
+ DEFINE_OTHER_COMPARISON_OPERATORS(type)
+
+/// \cond INTERNAL
+
+/// \defgroup utils_internal
+/// @{
+
+template<typename... T> struct c2_types;
+
+/** specialization for a single type */
+template<typename T>
+struct c2_types<T> {
+ typedef typename std::decay<T>::type wide_type;
+ typedef wide_type narrow_type;
+ typedef wide_type mintype;
+};
+
+/** specialization for two types */
+template<typename T, typename U>
+struct c2_types<T, U> {
+ static_assert(std::is_floating_point<T>::value == std::is_floating_point<U>::value,
+ "mixing floating point and non-floating point types is disallowed");
+ static_assert(std::is_signed<T>::value == std::is_signed<U>::value,
+ "mixing signed and unsigned types is disallowed");
+
+ typedef typename std::decay<
+ decltype(true ? std::declval<T>() : std::declval<U>())>::type wide_type;
+ typedef typename std::decay<
+ typename std::conditional<sizeof(T) < sizeof(U), T, U>::type>::type narrow_type;
+ typedef typename std::conditional<
+ std::is_signed<T>::value, wide_type, narrow_type>::type mintype;
+};
+
+/// @}
+
+/// \endcond
+
+/**
+ * Type support utility class. Only supports similar classes, such as:
+ * - all floating point
+ * - all unsigned/all signed
+ * - all pointer
+ */
+template<typename T, typename U, typename... V>
+struct c2_types<T, U, V...> {
+ /** Common type that accommodates all template parameter types. */
+ typedef typename c2_types<typename c2_types<T, U>::wide_type, V...>::wide_type wide_type;
+ /** Narrowest type of the template parameter types. */
+ typedef typename c2_types<typename c2_types<T, U>::narrow_type, V...>::narrow_type narrow_type;
+ /** Type that accommodates the minimum value for any input for the template parameter types. */
+ typedef typename c2_types<typename c2_types<T, U>::mintype, V...>::mintype mintype;
+};
+
+/**
+ * \ingroup utils_internal
+ * specialization for two values */
+template<typename T, typename U>
+inline constexpr typename c2_types<T, U>::wide_type c2_max(const T a, const U b) {
+ typedef typename c2_types<T, U>::wide_type wide_type;
+ return ({ wide_type a_(a), b_(b); a_ > b_ ? a_ : b_; });
+}
+
+/**
+ * Finds the maximum value of a list of "similarly typed" values.
+ *
+ * This is an extension to std::max where the types do not have to be identical, and the smallest
+ * resulting type is used that accommodates the argument types.
+ *
+ * \note Value types must be similar, e.g. all floating point, all pointers, all signed, or all
+ * unsigned.
+ *
+ * @return the largest of the input arguments.
+ */
+template<typename T, typename U, typename... V>
+constexpr typename c2_types<T, U, V...>::wide_type c2_max(const T a, const U b, const V ... c) {
+ typedef typename c2_types<T, U, V...>::wide_type wide_type;
+ return ({ wide_type a_(a), b_(c2_max(b, c...)); a_ > b_ ? a_ : b_; });
+}
+
+/**
+ * \ingroup utils_internal
+ * specialization for two values */
+template<typename T, typename U>
+inline constexpr typename c2_types<T, U>::mintype c2_min(const T a, const U b) {
+ typedef typename c2_types<T, U>::wide_type wide_type;
+ return ({
+ wide_type a_(a), b_(b);
+ static_cast<typename c2_types<T, U>::mintype>(a_ < b_ ? a_ : b_);
+ });
+}
+
+/**
+ * Finds the minimum value of a list of "similarly typed" values.
+ *
+ * This is an extension to std::min where the types do not have to be identical, and the smallest
+ * resulting type is used that accommodates the argument types.
+ *
+ * \note Value types must be similar, e.g. all floating point, all pointers, all signed, or all
+ * unsigned.
+ *
+ * @return the smallest of the input arguments.
+ */
+template<typename T, typename U, typename... V>
+constexpr typename c2_types<T, U, V...>::mintype c2_min(const T a, const U b, const V ... c) {
+ typedef typename c2_types<U, V...>::mintype rest_type;
+ typedef typename c2_types<T, rest_type>::wide_type wide_type;
+ return ({
+ wide_type a_(a), b_(c2_min(b, c...));
+ static_cast<typename c2_types<T, rest_type>::mintype>(a_ < b_ ? a_ : b_);
+ });
+}
+
+/// @}
+
+#ifdef __ANDROID__
+} // namespace android
+#endif
+
+#endif // C2_H_
diff --git a/media/libstagefright/codec2/include/C2Buffer.h b/media/libstagefright/codec2/include/C2Buffer.h
new file mode 100644
index 0000000..9f6b487
--- /dev/null
+++ b/media/libstagefright/codec2/include/C2Buffer.h
@@ -0,0 +1,1777 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef C2BUFFER_H_
+#define C2BUFFER_H_
+
+#include <C2.h>
+#include <C2Param.h> // for C2Info
+
+#include <list>
+#include <memory>
+
+typedef int C2Fence;
+
+#ifdef __ANDROID__
+
+// #include <system/window.h>
+#include <cutils/native_handle.h>
+#include <hardware/gralloc.h> // TODO: remove
+
+typedef native_handle_t C2Handle;
+
+#else
+
+typedef void* C2Handle;
+
+#endif
+
+namespace android {
+
+/// \defgroup buffer Buffers
+/// @{
+
+/// \defgroup buffer_sync Synchronization
+/// @{
+
+/**
+ * Synchronization is accomplished using event and fence objects.
+ *
+ * These are cross-process extensions of promise/future infrastructure.
+ * Events are analogous to std::promise<void>, whereas fences are to std::shared_future<void>.
+ *
+ * Fences and events are shareable/copyable.
+ *
+ * Fences are used in two scenarios, and all copied instances refer to the same event.
+ * \todo do events need to be copyable or should they be unique?
+ *
+ * acquire sync fence object: signaled when it is safe for the component or client to access
+ * (the contents of) an object.
+ *
+ * release sync fence object: \todo
+ *
+ * Fences can be backed by hardware. Hardware fences are guaranteed to signal NO MATTER WHAT within
+ * a short (platform specific) amount of time; this guarantee is usually less than 15 msecs.
+ */
+
+/**
+ * Fence object used by components and the framework.
+ *
+ * Implements the waiting for an event, analogous to a 'future'.
+ *
+ * To be implemented by vendors if using HW fences.
+ */
+class C2Fence {
+public:
+ /**
+ * Waits for a fence to be signaled with a timeout.
+ *
+ * \todo a mechanism to cancel a wait - for now the only way to do this is to abandon the
+ * event, but fences are shared so canceling a wait will cancel all waits.
+ *
+ * \param timeoutNs the maximum time to wait in nsecs
+ *
+ * \retval C2_OK the fence has been signaled
+ * \retval C2_TIMED_OUT the fence has not been signaled within the timeout
+ * \retval C2_BAD_STATE the fence has been abandoned without being signaled (it will never
+ * be signaled)
+ * \retval C2_NO_PERMISSION no permission to wait for the fence (unexpected - system)
+ * \retval C2_CORRUPTED some unknown error prevented waiting for the fence (unexpected)
+ */
+ C2Error wait(nsecs_t timeoutNs);
+
+ /**
+ * Used to check if this fence is valid (if there is a chance for it to be signaled.)
+ * A fence becomes invalid if the controling event is destroyed without it signaling the fence.
+ *
+ * \return whether this fence is valid
+ */
+ bool valid() const;
+
+ /**
+ * Used to check if this fence has been signaled (is ready).
+ *
+ * \return whether this fence has been signaled
+ */
+ bool ready() const;
+
+ /**
+ * Returns a file descriptor that can be used to wait for this fence in a select system call.
+ * \note The returned file descriptor, if valid, must be closed by the caller.
+ *
+ * This can be used in e.g. poll() system calls. This file becomes readable (POLLIN) when the
+ * fence is signaled, and bad (POLLERR) if the fence is abandoned.
+ *
+ * \return a file descriptor representing this fence (with ownership), or -1 if the fence
+ * has already been signaled (\todo or abandoned).
+ *
+ * \todo this must be compatible with fences used by gralloc
+ */
+ int fd() const;
+
+ /**
+ * Returns whether this fence is a hardware-backed fence.
+ * \return whether this is a hardware fence
+ */
+ bool isHW() const;
+
+private:
+ class Impl;
+ std::shared_ptr<Impl> mImpl;
+};
+
+/**
+ * Event object used by components and the framework.
+ *
+ * Implements the signaling of an event, analogous to a 'promise'.
+ *
+ * Hardware backed events do not go through this object, and must be exposed directly as fences
+ * by vendors.
+ */
+class C2Event {
+public:
+ /**
+ * Returns a fence for this event.
+ */
+ C2Fence fence() const;
+
+ /**
+ * Signals (all) associated fence(s).
+ * This has no effect no effect if the event was already signaled or abandoned.
+ *
+ * \retval C2_OK the fence(s) were successfully signaled
+ * \retval C2_BAD_STATE the fence(s) have already been abandoned or merged (caller error)
+ * \retval C2_ALREADY_EXISTS the fence(s) have already been signaled (caller error)
+ * \retval C2_NO_PERMISSION no permission to signal the fence (unexpected - system)
+ * \retval C2_CORRUPTED some unknown error prevented signaling the fence(s) (unexpected)
+ */
+ C2Error fire();
+
+ /**
+ * Trigger this event from the merging of the supplied fences. This means that it will be
+ * abandoned if any of these fences have been abandoned, and it will be fired if all of these
+ * fences have been signaled.
+ *
+ * \retval C2_OK the merging was successfully done
+ * \retval C2_NO_MEMORY not enough memory to perform the merging
+ * \retval C2_ALREADY_EXISTS the fence have already been merged (caller error)
+ * \retval C2_BAD_STATE the fence have already been signaled or abandoned (caller error)
+ * \retval C2_NO_PERMISSION no permission to merge the fence (unexpected - system)
+ * \retval C2_CORRUPTED some unknown error prevented merging the fence(s) (unexpected)
+ */
+ C2Error merge(std::vector<C2Fence> fences);
+
+ /**
+ * Abandons the event and any associated fence(s).
+ * \note Call this to explicitly abandon an event before it is destructed to avoid a warning.
+ *
+ * This has no effect no effect if the event was already signaled or abandoned.
+ *
+ * \retval C2_OK the fence(s) were successfully signaled
+ * \retval C2_BAD_STATE the fence(s) have already been signaled or merged (caller error)
+ * \retval C2_ALREADY_EXISTS the fence(s) have already been abandoned (caller error)
+ * \retval C2_NO_PERMISSION no permission to abandon the fence (unexpected - system)
+ * \retval C2_CORRUPTED some unknown error prevented signaling the fence(s) (unexpected)
+ */
+ C2Error abandon();
+
+private:
+ class Impl;
+ std::shared_ptr<Impl> mImpl;
+};
+
+/// \addtogroup buf_internal Internal
+/// @{
+
+/**
+ * Interface for objects that encapsulate an updatable error value.
+ */
+struct _C2InnateError {
+ inline C2Error error() const { return mError; }
+
+protected:
+ _C2InnateError(C2Error error) : mError(error) { }
+
+ C2Error mError; // this error is updatable by the object
+};
+
+/// @}
+
+/**
+ * This is a utility template for objects protected by an acquire fence, so that errors during
+ * acquiring the object are propagated to the object itself.
+ */
+template<typename T>
+class C2Acquirable : public C2Fence {
+public:
+ /**
+ * Acquires the object protected by an acquire fence. Any errors during the mapping will be
+ * passed to the object.
+ *
+ * \return acquired object potentially invalidated if waiting for the fence failed.
+ */
+ T get();
+
+protected:
+ C2Acquirable(C2Error error, C2Fence fence, T t) : C2Fence(fence), mInitialError(error), mT(t) { }
+
+private:
+ C2Error mInitialError;
+ T mT; // TODO: move instead of copy
+};
+
+/// @}
+
+/// \defgroup linear Linear Data Blocks
+/// @{
+
+/**************************************************************************************************
+ LINEAR ASPECTS, BLOCKS AND VIEWS
+**************************************************************************************************/
+
+/**
+ * Common aspect for all objects that have a linear capacity.
+ */
+class _C2LinearCapacityAspect {
+/// \name Linear capacity interface
+/// @{
+public:
+ inline uint32_t capacity() const { return mCapacity; }
+
+protected:
+
+#if UINTPTR_MAX == 0xffffffff
+ static_assert(sizeof(size_t) == sizeof(uint32_t), "size_t is too big");
+#else
+ static_assert(sizeof(size_t) > sizeof(uint32_t), "size_t is too small");
+ // explicitly disable construction from size_t
+ inline explicit _C2LinearCapacityAspect(size_t capacity) = delete;
+#endif
+
+ inline explicit _C2LinearCapacityAspect(uint32_t capacity)
+ : mCapacity(capacity) { }
+
+ inline explicit _C2LinearCapacityAspect(const _C2LinearCapacityAspect *parent)
+ : mCapacity(parent == nullptr ? 0 : parent->capacity()) { }
+
+private:
+ const uint32_t mCapacity;
+/// @}
+};
+
+/**
+ * Aspect for objects that have a linear range.
+ *
+ * This class is copiable.
+ */
+class _C2LinearRangeAspect : public _C2LinearCapacityAspect {
+/// \name Linear range interface
+/// @{
+public:
+ inline uint32_t offset() const { return mOffset; }
+ inline uint32_t size() const { return mSize; }
+
+protected:
+ inline explicit _C2LinearRangeAspect(const _C2LinearCapacityAspect *parent)
+ : _C2LinearCapacityAspect(parent),
+ mOffset(0),
+ mSize(capacity()) { }
+
+ inline _C2LinearRangeAspect(const _C2LinearCapacityAspect *parent, size_t offset, size_t size)
+ : _C2LinearCapacityAspect(parent),
+ mOffset(c2_min(offset, capacity())),
+ mSize(c2_min(size, capacity() - mOffset)) { }
+
+ // subsection of the two [offset, offset + size] ranges
+ inline _C2LinearRangeAspect(const _C2LinearRangeAspect *parent, size_t offset, size_t size)
+ : _C2LinearCapacityAspect(parent == nullptr ? 0 : parent->capacity()),
+ mOffset(c2_min(c2_max(offset, parent == nullptr ? 0 : parent->offset()), capacity())),
+ mSize(c2_min(c2_min(size, parent == nullptr ? 0 : parent->size()), capacity() - mOffset)) { }
+
+private:
+ friend class _C2EditableLinearRange;
+ // invariants 0 <= mOffset <= mOffset + mSize <= capacity()
+ uint32_t mOffset;
+ uint32_t mSize;
+/// @}
+};
+
+/**
+ * Aspect for objects that have an editable linear range.
+ *
+ * This class is copiable.
+ */
+class _C2EditableLinearRange : public _C2LinearRangeAspect {
+protected:
+ inline explicit _C2EditableLinearRange(const _C2LinearCapacityAspect *parent)
+ : _C2LinearRangeAspect(parent) { }
+
+ inline _C2EditableLinearRange(const _C2LinearCapacityAspect *parent, size_t offset, size_t size)
+ : _C2LinearRangeAspect(parent, offset, size) { }
+
+ // subsection of the two [offset, offset + size] ranges
+ inline _C2EditableLinearRange(const _C2LinearRangeAspect *parent, size_t offset, size_t size)
+ : _C2LinearRangeAspect(parent, offset, size) { }
+
+/// \name Editable linear range interface
+/// @{
+
+ /**
+ * Sets the offset to |offset|, while trying to keep the end of the buffer unchanged (e.g.
+ * size will grow if offset is decreased, and may shrink if offset is increased.) Returns
+ * true if successful, which is equivalent to if 0 <= |offset| <= capacity().
+ *
+ * Note: setting offset and size will yield different result depending on the order of the
+ * operations. Always set offset first to ensure proper size.
+ */
+ inline bool setOffset(uint32_t offset) {
+ if (offset > capacity()) {
+ return false;
+ }
+
+ if (offset > mOffset + mSize) {
+ mSize = 0;
+ } else {
+ mSize = mOffset + mSize - offset;
+ }
+ mOffset = offset;
+ return true;
+ }
+ /**
+ * Sets the size to |size|. Returns true if successful, which is equivalent to
+ * if 0 <= |size| <= capacity() - offset().
+ *
+ * Note: setting offset and size will yield different result depending on the order of the
+ * operations. Always set offset first to ensure proper size.
+ */
+ inline bool setSize(uint32_t size) {
+ if (size > capacity() - mOffset) {
+ return false;
+ } else {
+ mSize = size;
+ return true;
+ }
+ }
+ /**
+ * Sets the offset to |offset| with best effort. Same as setOffset() except that offset will
+ * be clamped to the buffer capacity.
+ *
+ * Note: setting offset and size (even using best effort) will yield different result depending
+ * on the order of the operations. Always set offset first to ensure proper size.
+ */
+ inline void setOffset_be(uint32_t offset) {
+ if (offset > capacity()) {
+ offset = capacity();
+ }
+ if (offset > mOffset + mSize) {
+ mSize = 0;
+ } else {
+ mSize = mOffset + mSize - offset;
+ }
+ mOffset = offset;
+ }
+ /**
+ * Sets the size to |size| with best effort. Same as setSize() except that the selected region
+ * will be clamped to the buffer capacity (e.g. size is clamped to [0, capacity() - offset()]).
+ *
+ * Note: setting offset and size (even using best effort) will yield different result depending
+ * on the order of the operations. Always set offset first to ensure proper size.
+ */
+ inline void setSize_be(uint32_t size) {
+ mSize = std::min(size, capacity() - mOffset);
+ }
+/// @}
+};
+
+// ================================================================================================
+// BLOCKS
+// ================================================================================================
+
+/**
+ * Blocks are sections of allocations. They can be either 1D or 2D.
+ */
+
+class C2LinearAllocation;
+
+class C2Block1D : public _C2LinearRangeAspect {
+public:
+ const C2Handle *handle() const;
+
+protected:
+ C2Block1D(std::shared_ptr<C2LinearAllocation> alloc);
+ C2Block1D(std::shared_ptr<C2LinearAllocation> alloc, size_t offset, size_t size);
+
+private:
+ class Impl;
+ std::shared_ptr<Impl> mImpl;
+};
+
+/**
+ * Read view provides read-only access for a linear memory segment.
+ *
+ * This class is copiable.
+ */
+class C2ReadView : public _C2LinearCapacityAspect {
+public:
+ /**
+ * \return pointer to the start of the block or nullptr on error.
+ */
+ const uint8_t *data();
+
+ /**
+ * Returns a portion of this view.
+ *
+ * \param offset the start offset of the portion. \note This is clamped to the capacity of this
+ * view.
+ * \param size the size of the portion. \note This is clamped to the remaining data from offset.
+ *
+ * \return a read view containing a portion of this view
+ */
+ C2ReadView subView(size_t offset, size_t size) const;
+
+ /**
+ * \return error during the creation/mapping of this view.
+ */
+ C2Error error();
+
+private:
+ class Impl;
+ std::shared_ptr<Impl> mImpl;
+};
+
+/**
+ * Write view provides read/write access for a linear memory segment.
+ *
+ * This class is copiable. \todo movable only?
+ */
+class C2WriteView : public _C2EditableLinearRange {
+public:
+ /**
+ * Start of the block.
+ *
+ * \return pointer to the start of the block or nullptr on error.
+ */
+ uint8_t *base();
+
+ /**
+ * \return pointer to the block at the current offset or nullptr on error.
+ */
+ uint8_t *data();
+
+ /**
+ * \return error during the creation/mapping of this view.
+ */
+ C2Error error();
+
+private:
+ class Impl;
+ /// \todo should this be unique_ptr to make this movable only - to avoid inconsistent regions
+ /// between copies.
+ std::shared_ptr<Impl> mImpl;
+};
+
+/**
+ * A constant (read-only) linear block (portion of an allocation) with an acquire fence.
+ * Blocks are unmapped when created, and can be mapped into a read view on demand.
+ *
+ * This class is copiable and contains a reference to the allocation that it is based on.
+ */
+class C2ConstLinearBlock : public C2Block1D {
+public:
+ /**
+ * Maps this block into memory and returns a read view for it.
+ *
+ * \return a read view for this block.
+ */
+ C2Acquirable<C2ReadView> map() const;
+
+ /**
+ * Returns a portion of this block.
+ *
+ * \param offset the start offset of the portion. \note This is clamped to the capacity of this
+ * block.
+ * \param size the size of the portion. \note This is clamped to the remaining data from offset.
+ *
+ * \return a constant linear block containing a portion of this block
+ */
+ C2ConstLinearBlock subBlock(size_t offset, size_t size) const;
+
+ /**
+ * Returns the acquire fence for this block.
+ *
+ * \return a fence that must be waited on before reading the block.
+ */
+ C2Fence fence() const { return mFence; }
+
+private:
+ C2Fence mFence;
+};
+
+/**
+ * Linear block is a writeable 1D block. Once written, it can be shared in whole or in parts with
+ * consumers/readers as read-only const linear block(s).
+ */
+class C2LinearBlock : public C2Block1D {
+public:
+ /**
+ * Maps this block into memory and returns a write view for it.
+ *
+ * \return a write view for this block.
+ */
+ C2Acquirable<C2WriteView> map();
+
+ /**
+ * Creates a read-only const linear block for a portion of this block; optionally protected
+ * by an acquire fence. There are two ways to use this:
+ *
+ * 1) share ready block after writing data into the block. In this case no fence shall be
+ * supplied, and the block shall not be modified after calling this method.
+ * 2) share block metadata before actually (finishing) writing the data into the block. In
+ * this case a fence must be supplied that will be triggered when the data is written.
+ * The block shall be modified only until firing the event for the fence.
+ */
+ C2ConstLinearBlock share(size_t offset, size_t size, C2Fence fence);
+};
+
+/// @}
+
+/**************************************************************************************************
+ CIRCULAR BLOCKS AND VIEWS
+**************************************************************************************************/
+
+/// \defgroup circular Circular buffer support
+/// @{
+
+/**
+ * Circular blocks can be used to share data between a writer and a reader (and/or other consumers)-
+ * in a memory-efficient way by reusing a section of memory. Circular blocks are a bit more complex
+ * than single reader/single writer schemes to facilitate block-based consuming of data.
+ *
+ * They can operate in two modes:
+ *
+ * 1) one writer that creates blocks to be consumed (this model can be used by components)
+ *
+ * 2) one writer that writes continuously, and one reader that can creates blocks to be consumed
+ * by further recipients (this model is used by the framework, and cannot be used by components.)
+ *
+ * Circular blocks have four segments with running pointers:
+ * - reserved: data reserved and available for the writer
+ * - committed: data committed by the writer and available to the reader (if present)
+ * - used: data used by consumers (if present)
+ * - available: unused data available to be reserved
+ */
+class C2CircularBlock : public C2Block1D {
+ // TODO: add methods
+
+private:
+ size_t mReserved __unused; // end of reserved section
+ size_t mCommitted __unused; // end of committed section
+ size_t mUsed __unused; // end of used section
+ size_t mFree __unused; // end of free section
+};
+
/**
 * One segment of a circular block, exposing a running [begin, end) range of the block.
 * Space can be reserved from it, abandoned back, or shared with consumers as blocks.
 */
class _C2CircularBlockSegment : public _C2LinearCapacityAspect {
public:
    /**
     * Returns the available size for this segment.
     *
     * \return currently available size for this segment
     */
    size_t available() const;

    /**
     * Reserve some space for this segment from its current start.
     *
     * \param size    desired space in bytes
     * \param fence   a pointer to an acquire fence. If non-null, the reservation is asynchronous and
     *                a fence will be stored here that will be signaled when the reservation is
     *                complete. If null, the reservation is synchronous.
     *
     * \retval C2_OK            the space was successfully reserved
     * \retval C2_NO_MEMORY     the space requested cannot be reserved
     * \retval C2_TIMED_OUT     the reservation timed out \todo when?
     * \retval C2_CORRUPTED     some unknown error prevented reserving space. (unexpected)
     */
    C2Error reserve(size_t size, C2Fence *fence /* nullable */);

    /**
     * Abandons a portion of this segment. This will move to the beginning of this segment.
     *
     * \note This methods is only allowed if this segment is producing blocks.
     *
     * \param size    number of bytes to abandon
     *
     * \retval C2_OK            the data was successfully abandoned
     * \retval C2_TIMED_OUT     the operation timed out (unexpected)
     * \retval C2_CORRUPTED     some unknown error prevented abandoning the data (unexpected)
     */
    C2Error abandon(size_t size);

    /**
     * Share a portion as block(s) with consumers (these are moved to the used section).
     *
     * \note This methods is only allowed if this segment is producing blocks.
     * \note Share does not move the beginning of the segment. (\todo add abandon/offset?)
     *
     * \param size    number of bytes to share
     * \param fence   fence to be used for the section
     * \param blocks  list where the blocks of the section are appended to
     *
     * \retval C2_OK            the portion was successfully shared
     * \retval C2_NO_MEMORY     not enough memory to share the portion
     * \retval C2_TIMED_OUT     the operation timed out (unexpected)
     * \retval C2_CORRUPTED     some unknown error prevented sharing the data (unexpected)
     */
    C2Error share(size_t size, C2Fence fence, std::list<C2ConstLinearBlock> &blocks);

    /**
     * Returns the beginning offset of this segment from the start of this circular block.
     *
     * @return beginning offset
     */
    size_t begin();

    /**
     * Returns the end offset of this segment from the start of this circular block.
     *
     * @return end offset
     */
    size_t end();
};
+
+/**
+ * A circular write-view is a dynamic mapped view for a segment of a circular block. Care must be
+ * taken when using this view so that only the section owned by the segment is modified.
+ */
+class C2CircularWriteView : public _C2LinearCapacityAspect {
+public:
+ /**
+ * Start of the circular block.
+ * \note the segment does not own this pointer.
+ *
+ * \return pointer to the start of the circular block or nullptr on error.
+ */
+ uint8_t *base();
+
+ /**
+ * \return error during the creation/mapping of this view.
+ */
+ C2Error error();
+};
+
+/**
+ * The writer of a circular buffer.
+ *
+ * Can commit data to a reader (not supported for components) OR share data blocks directly with a
+ * consumer.
+ *
+ * If a component supports outputting data into circular buffers, it must allocate a circular
+ * block and use a circular writer.
+ */
+class C2CircularWriter : public _C2CircularBlockSegment {
+public:
+ /**
+ * Commits a portion of this segment to the next segment. This moves the beginning of the
+ * segment.
+ *
+ * \param size number of bytes to commit to the next segment
+ * \param fence fence used for the commit (the fence must signal before the data is committed)
+ */
+ C2Error commit(size_t size, C2Fence fence);
+
+ /**
+ * Maps this block into memory and returns a write view for it.
+ *
+ * \return a write view for this block.
+ */
+ C2Acquirable<C2CircularWriteView> map();
+};
+
+/// @}
+
+/// \defgroup graphic Graphic Data Blocks
+/// @{
+
+/**
+ * Interface for objects that have a width and height (planar capacity).
+ */
+class _C2PlanarCapacityAspect {
+/// \name Planar capacity interface
+/// @{
+public:
+ inline uint32_t width() const { return mWidth; }
+ inline uint32_t height() const { return mHeight; }
+
+protected:
+ inline _C2PlanarCapacityAspect(uint32_t width, uint32_t height)
+ : mWidth(width), mHeight(height) { }
+
+ inline _C2PlanarCapacityAspect(const _C2PlanarCapacityAspect *parent)
+ : mWidth(parent == nullptr ? 0 : parent->width()),
+ mHeight(parent == nullptr ? 0 : parent->height()) { }
+
+private:
+ const uint32_t mWidth;
+ const uint32_t mHeight;
+/// @}
+};
+
+/**
+ * C2Rect: rectangle type with non-negative coordinates.
+ *
+ * \note This struct has public fields without getters/setters. All methods are inline.
+ */
+struct C2Rect {
+// public:
+ uint32_t mLeft;
+ uint32_t mTop;
+ uint32_t mWidth;
+ uint32_t mHeight;
+
+ inline C2Rect(uint32_t width, uint32_t height)
+ : C2Rect(width, height, 0, 0) { }
+
+ inline C2Rect(uint32_t width, uint32_t height, uint32_t left, uint32_t top)
+ : mLeft(left), mTop(top), mWidth(width), mHeight(height) { }
+
+ // utility methods
+
+ inline bool isEmpty() const {
+ return mWidth == 0 || mHeight == 0;
+ }
+
+ inline bool isValid() const {
+ return mLeft <= ~mWidth && mTop <= ~mHeight;
+ }
+
+ inline operator bool() const {
+ return isValid() && !isEmpty();
+ }
+
+ inline bool operator!() const {
+ return !bool(*this);
+ }
+
+ inline bool contains(const C2Rect &other) const {
+ if (!isValid() || !other.isValid()) {
+ return false;
+ } else if (other.isEmpty()) {
+ return true;
+ } else {
+ return mLeft <= other.mLeft && mTop <= other.mTop
+ && mLeft + mWidth >= other.mLeft + other.mWidth
+ && mTop + mHeight >= other.mTop + other.mHeight;
+ }
+ }
+
+ inline bool operator==(const C2Rect &other) const {
+ if (!isValid()) {
+ return !other.isValid();
+ } else if (isEmpty()) {
+ return other.isEmpty();
+ } else {
+ return mLeft == other.mLeft && mTop == other.mTop
+ && mWidth == other.mWidth && mHeight == other.mHeight;
+ }
+ }
+
+ inline bool operator!=(const C2Rect &other) const {
+ return !operator==(other);
+ }
+
+ inline bool operator>=(const C2Rect &other) const {
+ return contains(other);
+ }
+
+ inline bool operator>(const C2Rect &other) const {
+ return contains(other) && !operator==(other);
+ }
+
+ inline bool operator<=(const C2Rect &other) const {
+ return other.contains(*this);
+ }
+
+ inline bool operator<(const C2Rect &other) const {
+ return other.contains(*this) && !operator==(other);
+ }
+};
+
+/**
+ * C2PlaneInfo: information on the layout of flexible planes.
+ *
+ * Public fields without getters/setters.
+ */
+struct C2PlaneInfo {
+// public:
+ enum Channel : uint32_t {
+ Y,
+ R,
+ G,
+ B,
+ A,
+ Cr,
+ Cb,
+ } mChannel;
+
+ int32_t mColInc; // column increment in bytes. may be negative
+ int32_t mRowInc; // row increment in bytes. may be negative
+ uint32_t mHorizSubsampling; // subsampling compared to width
+ uint32_t mVertSubsampling; // subsampling compared to height
+
+ uint32_t mBitDepth;
+ uint32_t mAllocatedDepth;
+
+ inline ssize_t minOffset(uint32_t width, uint32_t height) {
+ ssize_t offs = 0;
+ if (width > 0 && mColInc < 0) {
+ offs += mColInc * (ssize_t)(width - 1);
+ }
+ if (height > 0 && mRowInc < 0) {
+ offs += mRowInc * (ssize_t)(height - 1);
+ }
+ return offs;
+ }
+
+ inline ssize_t maxOffset(uint32_t width, uint32_t height, uint32_t allocatedDepth) {
+ ssize_t offs = (allocatedDepth + 7) >> 3;
+ if (width > 0 && mColInc > 0) {
+ offs += mColInc * (ssize_t)(width - 1);
+ }
+ if (height > 0 && mRowInc > 0) {
+ offs += mRowInc * (ssize_t)(height - 1);
+ }
+ return offs;
+ }
+};
+
/**
 * C2PlaneLayout: describes the plane arrangement of a mapped 2D (graphic) buffer:
 * image type, number of planes used, and per-plane layout information.
 */
struct C2PlaneLayout {
public:
    enum Type : uint32_t {
        MEDIA_IMAGE_TYPE_UNKNOWN = 0,
        MEDIA_IMAGE_TYPE_YUV = 0x100,
        MEDIA_IMAGE_TYPE_YUVA,
        MEDIA_IMAGE_TYPE_RGB,
        MEDIA_IMAGE_TYPE_RGBA,
    };

    Type mType;          // image type of this layout
    uint32_t mNumPlanes; // number of planes

    // Indices into mPlanes. YUV(A) and RGB(A) images alias the same slots
    // (Y/R = 0, U/G = 1, V/B = 2, A = 3).
    enum PlaneIndex : uint32_t {
        Y = 0,
        U = 1,
        V = 2,
        R = 0,
        G = 1,
        B = 2,
        A = 3,
        MAX_NUM_PLANES = 4,
    };

    C2PlaneInfo mPlanes[MAX_NUM_PLANES];  // per-plane layout; only mNumPlanes entries are used
};
+
+/**
+ * Aspect for objects that have a planar section (crop rectangle).
+ *
+ * This class is copiable.
+ */
+class _C2PlanarSection : public _C2PlanarCapacityAspect {
+/// \name Planar section interface
+/// @{
+public:
+ // crop can be an empty rect, does not have to line up with subsampling
+ // NOTE: we do not support floating-point crop
+ inline const C2Rect crop() { return mCrop; }
+
+ /**
+ * Sets crop to crop intersected with [(0,0) .. (width, height)]
+ */
+ inline void setCrop_be(const C2Rect &crop);
+
+ /**
+ * If crop is within the dimensions of this object, it sets crop to it.
+ *
+ * \return true iff crop is within the dimensions of this object
+ */
+ inline bool setCrop(const C2Rect &crop);
+
+private:
+ C2Rect mCrop;
+/// @}
+};
+
/**
 * Base class of 2D (graphic) blocks: a planar section of a graphic allocation.
 */
class C2Block2D : public _C2PlanarSection {
public:
    /// Returns the native handle of the underlying allocation.
    const C2Handle *handle() const;

private:
    class Impl;                   // hidden implementation (pimpl)
    std::shared_ptr<Impl> mImpl;  // shared: copies of a block refer to the same impl
};
+
+/**
+ * Graphic view provides read or read-write access for a graphic block.
+ *
+ * This class is copiable.
+ *
+ * \note Due to the subsampling of graphic buffers, a read view must still contain a crop rectangle
+ * to ensure subsampling is followed. This results in nearly identical interface between read and
+ * write views, so C2GraphicView can encompass both of them.
+ */
+class C2GraphicView : public _C2PlanarSection {
+public:
+ /**
+ * \return pointer to the start of the block or nullptr on error.
+ */
+ const uint8_t *data() const;
+
+ /**
+ * \return pointer to the start of the block or nullptr on error.
+ */
+ uint8_t *data();
+
+ /**
+ * Returns a section of this view.
+ *
+ * \param rect the dimension of the section. \note This is clamped to the crop of this view.
+ *
+ * \return a read view containing the requested section of this view
+ */
+ const C2GraphicView subView(const C2Rect &rect) const;
+ C2GraphicView subView(const C2Rect &rect);
+
+ /**
+ * \return error during the creation/mapping of this view.
+ */
+ C2Error error() const;
+
+private:
+ class Impl;
+ std::shared_ptr<Impl> mImpl;
+};
+
+/**
+ * A constant (read-only) graphic block (portion of an allocation) with an acquire fence.
+ * Blocks are unmapped when created, and can be mapped into a read view on demand.
+ *
+ * This class is copiable and contains a reference to the allocation that it is based on.
+ */
+class C2ConstGraphicBlock : public C2Block2D {
+public:
+ /**
+ * Maps this block into memory and returns a read view for it.
+ *
+ * \return a read view for this block.
+ */
+ C2Acquirable<const C2GraphicView> map() const;
+
+ /**
+ * Returns a section of this block.
+ *
+ * \param rect the coordinates of the section. \note This is clamped to the crop rectangle of
+ * this block.
+ *
+ * \return a constant graphic block containing a portion of this block
+ */
+ C2ConstGraphicBlock subBlock(const C2Rect &rect) const;
+
+ /**
+ * Returns the acquire fence for this block.
+ *
+ * \return a fence that must be waited on before reading the block.
+ */
+ C2Fence fence() const { return mFence; }
+
+private:
+ C2Fence mFence;
+};
+
+/**
+ * Graphic block is a writeable 2D block. Once written, it can be shared in whole or in part with
+ * consumers/readers as read-only const graphic block.
+ */
+class C2GraphicBlock : public C2Block2D {
+public:
+ /**
+ * Maps this block into memory and returns a write view for it.
+ *
+ * \return a write view for this block.
+ */
+ C2Acquirable<C2GraphicView> map();
+
+ /**
+ * Creates a read-only const linear block for a portion of this block; optionally protected
+ * by an acquire fence. There are two ways to use this:
+ *
+ * 1) share ready block after writing data into the block. In this case no fence shall be
+ * supplied, and the block shall not be modified after calling this method.
+ * 2) share block metadata before actually (finishing) writing the data into the block. In
+ * this case a fence must be supplied that will be triggered when the data is written.
+ * The block shall be modified only until firing the event for the fence.
+ */
+ C2ConstGraphicBlock share(const C2Rect &crop, C2Fence fence);
+};
+
+/// @}
+
/// \defgroup buffer_obj Buffer objects
+/// @{
+
+// ================================================================================================
+// BUFFERS
+// ================================================================================================
+
+/// \todo: Do we still need this?
+///
+// There are 2 kinds of buffers: linear or graphic. Linear buffers can contain a single block, or
+// a list of blocks (LINEAR_CHUNKS). Support for list of blocks is optional, and can allow consuming
+// data from circular buffers or scattered data sources without extra memcpy. Currently, list of
+// graphic blocks is not supported.
+
+class C2LinearBuffer; // read-write buffer
+class C2GraphicBuffer; // read-write buffer
+class C2LinearChunksBuffer;
+
+/**
+ * C2BufferData: the main, non-meta data of a buffer. A buffer can contain either linear blocks
+ * or graphic blocks, and can contain either a single block or multiple blocks. This is determined
+ * by its type.
+ */
+class C2BufferData {
+public:
+ /**
+ * The type of buffer data.
+ */
+ enum Type : uint32_t {
+ LINEAR, ///< the buffer contains a single linear block
+ LINEAR_CHUNKS, ///< the buffer contains one or more linear blocks
+ GRAPHIC, ///< the buffer contains a single graphic block
+ GRAPHIC_CHUNKS, ///< the buffer contains one of more graphic blocks
+ };
+
+ /**
+ * Gets the type of this buffer (data).
+ * \return the type of this buffer data.
+ */
+ Type type() const;
+
+ /**
+ * Gets the linear blocks of this buffer.
+ * \return a constant list of const linear blocks of this buffer.
+ * \retval empty list if this buffer does not contain linear block(s).
+ */
+ const std::list<C2ConstLinearBlock> linearBlocks() const;
+
+ /**
+ * Gets the graphic blocks of this buffer.
+ * \return a constant list of const graphic blocks of this buffer.
+ * \retval empty list if this buffer does not contain graphic block(s).
+ */
+ const std::list<C2ConstGraphicBlock> graphicBlocks() const;
+
+private:
+ class Impl;
+ std::shared_ptr<Impl> mImpl;
+
+protected:
+ // no public constructor
+ // C2BufferData(const std::shared_ptr<const Impl> &impl) : mImpl(impl) {}
+};
+
+/**
+ * C2Buffer: buffer base class. These are always used as shared_ptrs. Though the underlying buffer
+ * objects (native buffers, ion buffers, or dmabufs) are reference-counted by the system,
+ * C2Buffers hold only a single reference.
+ *
+ * These objects cannot be used on the stack.
+ */
+class C2Buffer {
+public:
+ /**
+ * Gets the buffer's data.
+ *
+ * \return the buffer's data.
+ */
+ const C2BufferData data() const;
+
+ /**
+ * These will still work if used in onDeathNotify.
+ */
+#if 0
+ inline std::shared_ptr<C2LinearBuffer> asLinearBuffer() const {
+ return mType == LINEAR ? std::shared_ptr::reinterpret_cast<C2LinearBuffer>(this) : nullptr;
+ }
+
+ inline std::shared_ptr<C2GraphicBuffer> asGraphicBuffer() const {
+ return mType == GRAPHIC ? std::shared_ptr::reinterpret_cast<C2GraphicBuffer>(this) : nullptr;
+ }
+
+ inline std::shared_ptr<C2CircularBuffer> asCircularBuffer() const {
+ return mType == CIRCULAR ? std::shared_ptr::reinterpret_cast<C2CircularBuffer>(this) : nullptr;
+ }
+#endif
+
+ ///@name Pre-destroy notification handling
+ ///@{
+
+ /**
+ * Register for notification just prior to the destruction of this object.
+ */
+ typedef void (*OnDestroyNotify) (const C2Buffer *buf, void *arg);
+
+ /**
+ * Registers for a pre-destroy notification. This is called just prior to the destruction of
+ * this buffer (when this buffer is no longer valid.)
+ *
+ * \param onDestroyNotify the notification callback
+ * \param arg an arbitrary parameter passed to the callback
+ *
+ * \retval C2_OK the registration was successful.
+ * \retval C2_DUPLICATE a notification was already registered for this callback and argument
+ * \retval C2_NO_MEMORY not enough memory to register for this callback
+ * \retval C2_CORRUPTED an unknown error prevented the registration (unexpected)
+ */
+ C2Error registerOnDestroyNotify(OnDestroyNotify *onDestroyNotify, void *arg = nullptr);
+
+ /**
+ * Unregisters a previously registered pre-destroy notification.
+ *
+ * \param onDestroyNotify the notification callback
+ * \param arg an arbitrary parameter passed to the callback
+ *
+ * \retval C2_OK the unregistration was successful.
+ * \retval C2_NOT_FOUND the notification was not found
+ * \retval C2_CORRUPTED an unknown error prevented the registration (unexpected)
+ */
+ C2Error unregisterOnDestroyNotify(OnDestroyNotify *onDestroyNotify, void *arg = nullptr);
+
+ ///@}
+
+ virtual ~C2Buffer() = default;
+
+ ///@name Buffer-specific arbitrary metadata handling
+ ///@{
+
+ /**
+ * Gets the list of metadata associated with this buffer.
+ *
+ * \return a constant list of info objects associated with this buffer.
+ */
+ const std::list<std::shared_ptr<const C2Info>> infos() const;
+
+ /**
+ * Attaches (or updates) an (existing) metadata for this buffer.
+ * If the metadata is stream specific, the stream information will be reset.
+ *
+ * \param info Metadata to update
+ *
+ * \retval C2_OK the metadata was successfully attached/updated.
+ * \retval C2_NO_MEMORY not enough memory to attach the metadata (this return value is not
+ * used if the same kind of metadata is already attached to the buffer).
+ */
+ C2Error setInfo(const std::shared_ptr<C2Info> &info);
+
+ /**
+ * Checks if there is a certain type of metadata attached to this buffer.
+ *
+ * \param index the parameter type of the metadata
+ *
+ * \return true iff there is a metadata with the parameter type attached to this buffer.
+ */
+ bool hasInfo(C2Param::Type index) const;
+ std::shared_ptr<C2Info> removeInfo(C2Param::Type index) const;
+ ///@}
+
+protected:
+ // no public constructor
+ inline C2Buffer() = default;
+
+private:
+// Type _mType;
+};
+
+/**
+ * An extension of C2Info objects that can contain arbitrary buffer data.
+ *
+ * \note This object is not describable and contains opaque data.
+ */
+class C2InfoBuffer {
+public:
+ /**
+ * Gets the index of this info object.
+ *
+ * \return the parameter index.
+ */
+ const C2Param::Index index() const;
+
+ /**
+ * Gets the buffer's data.
+ *
+ * \return the buffer's data.
+ */
+ const C2BufferData data() const;
+};
+
+/// @}
+
+/**************************************************************************************************
+ ALLOCATIONS
+**************************************************************************************************/
+
+/// \defgroup allocator Allocation and memory placement
+/// @{
+
+/**
+ * Buffer/memory usage bits. These are used by the allocators to select optimal memory type/pool and
+ * buffer layout.
+ *
+ * \note This struct has public fields without getters/setters. All methods are inline.
+ */
+struct C2MemoryUsage {
+// public:
+ // TODO: match these to gralloc1.h
+ enum Consumer : uint64_t {
+ kSoftwareRead = GRALLOC_USAGE_SW_READ_OFTEN,
+ kRenderScriptRead = GRALLOC_USAGE_RENDERSCRIPT,
+ kTextureRead = GRALLOC_USAGE_HW_TEXTURE,
+ kHardwareComposer = GRALLOC_USAGE_HW_COMPOSER,
+ kHardwareEncoder = GRALLOC_USAGE_HW_VIDEO_ENCODER,
+ kProtectedRead = GRALLOC_USAGE_PROTECTED,
+ };
+
+ enum Producer : uint64_t {
+ kSoftwareWrite = GRALLOC_USAGE_SW_WRITE_OFTEN,
+ kRenderScriptWrite = GRALLOC_USAGE_RENDERSCRIPT,
+ kTextureWrite = GRALLOC_USAGE_HW_RENDER,
+ kCompositionTarget = GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_RENDER,
+ kHardwareDecoder = GRALLOC_USAGE_HW_VIDEO_ENCODER,
+ kProtectedWrite = GRALLOC_USAGE_PROTECTED,
+ };
+
+ uint64_t mConsumer; // e.g. input
+ uint64_t mProducer; // e.g. output
+};
+
+/**
+ * \ingroup linear allocator
+ * 1D allocation interface.
+ */
+class C2LinearAllocation : public _C2LinearCapacityAspect {
+public:
+ /**
+ * Maps a portion of an allocation starting from |offset| with |size| into local process memory.
+ * Stores the starting address into |addr|, or NULL if the operation was unsuccessful.
+ * |fenceFd| is a file descriptor referring to an acquire sync fence object. If it is already
+ * safe to access the buffer contents, then -1.
+ *
+ * \param offset starting position of the portion to be mapped (this does not have to
+ * be page aligned)
+ * \param size size of the portion to be mapped (this does not have to be page
+ * aligned)
+ * \param usage the desired usage. \todo this must be kSoftwareRead and/or
+ * kSoftwareWrite.
+ * \param fenceFd a pointer to a file descriptor if an async mapping is requested. If
+ * not-null, and acquire fence FD will be stored here on success, or -1
+ * on failure. If null, the mapping will be synchronous.
+ * \param addr a pointer to where the starting address of the mapped portion will be
+ * stored. On failure, nullptr will be stored here.
+ *
+ * \todo Only one portion can be mapped at the same time - this is true for gralloc, but there
+ * is no need for this for 1D buffers.
+ * \todo Do we need to support sync operation as we could just wait for the fence?
+ *
+ * \retval C2_OK the operation was successful
+ * \retval C2_NO_PERMISSION no permission to map the portion
+ * \retval C2_TIMED_OUT the operation timed out
+ * \retval C2_NO_MEMORY not enough memory to complete the operation
+ * \retval C2_BAD_VALUE the parameters (offset/size) are invalid or outside the allocation, or
+ * the usage flags are invalid (caller error)
+ * \retval C2_CORRUPTED some unknown error prevented the operation from completing (unexpected)
+ */
+ virtual C2Error map(
+ size_t offset, size_t size, C2MemoryUsage usage, int *fenceFd /* nullable */,
+ void **addr /* nonnull */) = 0;
+
+ /**
+ * Unmaps a portion of an allocation at |addr| with |size|. These must be parameters previously
+ * passed to |map|; otherwise, this operation is a no-op.
+ *
+ * \param addr starting address of the mapped region
+ * \param size size of the mapped region
+ * \param fenceFd a pointer to a file descriptor if an async unmapping is requested. If
+ * not-null, a release fence FD will be stored here on success, or -1
+ * on failure. This fence signals when the original allocation contains
+ * any changes that happened to the mapped region. If null, the unmapping
+ * will be synchronous.
+ *
+ * \retval C2_OK the operation was successful
+ * \retval C2_TIMED_OUT the operation timed out
+ * \retval C2_BAD_VALUE the parameters (addr/size) do not correspond to previously mapped
+ * regions (caller error)
+ * \retval C2_CORRUPTED some unknown error prevented the operation from completing (unexpected)
+ * \retval C2_NO_PERMISSION no permission to unmap the portion (unexpected - system)
+ */
+ virtual C2Error unmap(void *addr, size_t size, int *fenceFd /* nullable */) = 0;
+
+ /**
+ * Returns true if this is a valid allocation.
+ *
+ * \todo remove?
+ */
+ virtual bool isValid() const = 0;
+
+ /**
+ * Returns a pointer to the allocation handle.
+ */
+ virtual const C2Handle *handle() const = 0;
+
+ /**
+ * Returns true if this is the same allocation as |other|.
+ */
+ virtual bool equals(const std::shared_ptr<C2LinearAllocation> &other) const = 0;
+
+protected:
+ // \todo should we limit allocation directly?
+ C2LinearAllocation(size_t capacity) : _C2LinearCapacityAspect(c2_min(capacity, UINT32_MAX)) {}
+ virtual ~C2LinearAllocation() = default;
+};
+
+/**
+ * \ingroup graphic allocator
+ * 2D allocation interface.
+ */
+class C2GraphicAllocation : public _C2PlanarCapacityAspect {
+public:
+ /**
+ * Maps a rectangular section (as defined by |rect|) of a 2D allocation into local process
+ * memory for flexible access. On success, it fills out |layout| with the plane specifications
+ * and fills the |addr| array with pointers to the first byte of the top-left pixel of each
+ * plane used. Otherwise, it leaves |layout| and |addr| untouched. |fenceFd| is a file
+ * descriptor referring to an acquire sync fence object. If it is already safe to access the
+ * buffer contents, then -1.
+ *
+ * \note Only one portion of the graphic allocation can be mapped at the same time. (This is
+ * from gralloc1 limitation.)
+ *
+ * \param rect section to be mapped (this does not have to be aligned)
+ * \param usage the desired usage. \todo this must be kSoftwareRead and/or
+ * kSoftwareWrite.
+ * \param fenceFd a pointer to a file descriptor if an async mapping is requested. If
+ * not-null, and acquire fence FD will be stored here on success, or -1
+ * on failure. If null, the mapping will be synchronous.
+ * \param layout a pointer to where the mapped planes' descriptors will be
+ * stored. On failure, nullptr will be stored here.
+ *
+ * \todo Do we need to support sync operation as we could just wait for the fence?
+ *
+ * \retval C2_OK the operation was successful
+ * \retval C2_NO_PERMISSION no permission to map the section
+ * \retval C2_ALREADY_EXISTS there is already a mapped region (caller error)
+ * \retval C2_TIMED_OUT the operation timed out
+ * \retval C2_NO_MEMORY not enough memory to complete the operation
+ * \retval C2_BAD_VALUE the parameters (rect) are invalid or outside the allocation, or the
+ * usage flags are invalid (caller error)
+ * \retval C2_CORRUPTED some unknown error prevented the operation from completing (unexpected)
+
+ */
+ virtual C2Error map(
+ C2Rect rect, C2MemoryUsage usage, int *fenceFd,
+ // TODO: return <addr, size> buffers with plane sizes
+ C2PlaneLayout *layout /* nonnull */, uint8_t **addr /* nonnull */) = 0;
+
+ /**
+ * Unmaps the last mapped rectangular section.
+ *
+ * \param fenceFd a pointer to a file descriptor if an async unmapping is requested. If
+ * not-null, a release fence FD will be stored here on success, or -1
+ * on failure. This fence signals when the original allocation contains
+ * any changes that happened to the mapped section. If null, the unmapping
+ * will be synchronous.
+ *
+ * \retval C2_OK the operation was successful
+ * \retval C2_TIMED_OUT the operation timed out
+ * \retval C2_NOT_FOUND there is no mapped region (caller error)
+ * \retval C2_CORRUPTED some unknown error prevented the operation from completing (unexpected)
+ * \retval C2_NO_PERMISSION no permission to unmap the section (unexpected - system)
+ */
+ virtual C2Error unmap(C2Fence *fenceFd /* nullable */) = 0;
+
+ /**
+ * Returns true if this is a valid allocation.
+ *
+ * \todo remove?
+ */
+ virtual bool isValid() const = 0;
+
+ /**
+ * Returns a pointer to the allocation handle.
+ */
+ virtual const C2Handle *handle() const = 0;
+
+ /**
+ * Returns true if this is the same allocation as |other|.
+ */
+ virtual bool equals(const std::shared_ptr<const C2GraphicAllocation> &other) = 0;
+
+protected:
+ virtual ~C2GraphicAllocation();
+};
+
+/**
+ * Allocators are used by the framework to allocate memory (allocations) for buffers. They can
+ * support either 1D or 2D allocations.
+ *
+ * \note In theory they could support both, but in practice, we will use only one or the other.
+ *
+ * Never constructed on stack.
+ *
+ * Allocators are provided by vendors.
+ */
+class C2Allocator {
+public:
+ /**
+ * Allocates a 1D allocation of given |capacity| and |usage|. If successful, the allocation is
+ * stored in |allocation|. Otherwise, |allocation| is set to 'nullptr'.
+ *
+ * \param capacity the size of requested allocation (the allocation could be slightly
+ * larger, e.g. to account for any system-required alignment)
+ * \param usage the memory usage info for the requested allocation. \note that the
+ * returned allocation may be later used/mapped with different usage.
+ *                    The allocator should lay out the buffer to be optimized for this usage,
+ * but must support any usage. One exception: protected buffers can
+ * only be used in a protected scenario.
+ * \param allocation pointer to where the allocation shall be stored on success. nullptr
+ * will be stored here on failure
+ *
+ * \retval C2_OK the allocation was successful
+ * \retval C2_NO_MEMORY not enough memory to complete the allocation
+ * \retval C2_TIMED_OUT the allocation timed out
+ * \retval C2_NO_PERMISSION no permission to complete the allocation
+ * \retval C2_BAD_VALUE capacity or usage are not supported (invalid) (caller error)
+ * \retval C2_UNSUPPORTED this allocator does not support 1D allocations
+ * \retval C2_CORRUPTED      some unknown, unrecoverable error occurred during allocation (unexpected)
+ */
+ virtual C2Error allocateLinearBuffer(
+ uint32_t capacity __unused, C2MemoryUsage usage __unused,
+ std::shared_ptr<C2LinearAllocation> *allocation /* nonnull */) {
+ *allocation = nullptr;
+ return C2_UNSUPPORTED;
+ }
+
+ /**
+ * (Re)creates a 1D allocation from a native |handle|. If successful, the allocation is stored
+ * in |allocation|. Otherwise, |allocation| is set to 'nullptr'.
+ *
+ * \param handle the handle for the existing allocation
+ * \param allocation pointer to where the allocation shall be stored on success. nullptr
+ * will be stored here on failure
+ *
+ * \retval C2_OK the allocation was recreated successfully
+ * \retval C2_NO_MEMORY not enough memory to recreate the allocation
+ * \retval C2_TIMED_OUT the recreation timed out (unexpected)
+ * \retval C2_NO_PERMISSION no permission to recreate the allocation
+ * \retval C2_BAD_VALUE invalid handle (caller error)
+ * \retval C2_UNSUPPORTED this allocator does not support 1D allocations
+ * \retval C2_CORRUPTED      some unknown, unrecoverable error occurred during allocation (unexpected)
+ */
+ virtual C2Error recreateLinearBuffer(
+ const C2Handle *handle __unused,
+ std::shared_ptr<C2LinearAllocation> *allocation /* nonnull */) {
+ *allocation = nullptr;
+ return C2_UNSUPPORTED;
+ }
+
+ /**
+ * Allocates a 2D allocation of given |width|, |height|, |format| and |usage|. If successful,
+ * the allocation is stored in |allocation|. Otherwise, |allocation| is set to 'nullptr'.
+ *
+ * \param width the width of requested allocation (the allocation could be slightly
+ * larger, e.g. to account for any system-required alignment)
+ * \param height the height of requested allocation (the allocation could be slightly
+ * larger, e.g. to account for any system-required alignment)
+ * \param format the pixel format of requested allocation. This could be a vendor
+ * specific format.
+ * \param usage the memory usage info for the requested allocation. \note that the
+ * returned allocation may be later used/mapped with different usage.
+ *                    The allocator should lay out the buffer to be optimized for this usage,
+ * but must support any usage. One exception: protected buffers can
+ * only be used in a protected scenario.
+ * \param allocation pointer to where the allocation shall be stored on success. nullptr
+ * will be stored here on failure
+ *
+ * \retval C2_OK the allocation was successful
+ * \retval C2_NO_MEMORY not enough memory to complete the allocation
+ * \retval C2_TIMED_OUT the allocation timed out
+ * \retval C2_NO_PERMISSION no permission to complete the allocation
+ * \retval C2_BAD_VALUE width, height, format or usage are not supported (invalid) (caller error)
+ * \retval C2_UNSUPPORTED this allocator does not support 2D allocations
+ * \retval C2_CORRUPTED      some unknown, unrecoverable error occurred during allocation (unexpected)
+ */
+ virtual C2Error allocateGraphicBuffer(
+ uint32_t width __unused, uint32_t height __unused, uint32_t format __unused,
+ C2MemoryUsage usage __unused,
+ std::shared_ptr<C2GraphicAllocation> *allocation /* nonnull */) {
+ *allocation = nullptr;
+ return C2_UNSUPPORTED;
+ }
+
+ /**
+ * (Re)creates a 2D allocation from a native handle. If successful, the allocation is stored
+ * in |allocation|. Otherwise, |allocation| is set to 'nullptr'.
+ *
+ * \param handle the handle for the existing allocation
+ * \param allocation pointer to where the allocation shall be stored on success. nullptr
+ * will be stored here on failure
+ *
+ * \retval C2_OK the allocation was recreated successfully
+ * \retval C2_NO_MEMORY not enough memory to recreate the allocation
+ * \retval C2_TIMED_OUT the recreation timed out (unexpected)
+ * \retval C2_NO_PERMISSION no permission to recreate the allocation
+ * \retval C2_BAD_VALUE invalid handle (caller error)
+ * \retval C2_UNSUPPORTED this allocator does not support 2D allocations
+ * \retval C2_CORRUPTED      some unknown, unrecoverable error occurred during recreation (unexpected)
+ */
+ virtual C2Error recreateGraphicBuffer(
+ const C2Handle *handle __unused,
+ std::shared_ptr<C2GraphicAllocation> *allocation /* nonnull */) {
+ *allocation = nullptr;
+ return C2_UNSUPPORTED;
+ }
+
+protected:
+ C2Allocator() = default;
+
+ virtual ~C2Allocator() = default;
+};
+
+/**
+ * Block allocators are used by components to allocate memory for output buffers. They can
+ * support either linear (1D), circular (1D) or graphic (2D) allocations.
+ *
+ * Never constructed on stack.
+ *
+ * Block allocators are provided by the framework.
+ */
+class C2BlockAllocator {
+public:
+ /**
+ * Allocates a linear writeable block of given |capacity| and |usage|. If successful, the
+ * block is stored in |block|. Otherwise, |block| is set to 'nullptr'.
+ *
+ * \param capacity the size of requested block.
+ * \param usage the memory usage info for the requested allocation. \note that the
+ * returned allocation may be later used/mapped with different usage.
+ * The allocator shall lay out the buffer to be optimized for this usage,
+ * but must support any usage. One exception: protected buffers can
+ * only be used in a protected scenario.
+ * \param block pointer to where the allocated block shall be stored on success. nullptr
+ * will be stored here on failure
+ *
+ * \retval C2_OK the allocation was successful
+ * \retval C2_NO_MEMORY not enough memory to complete the allocation
+ * \retval C2_TIMED_OUT the allocation timed out
+ * \retval C2_NO_PERMISSION no permission to complete the allocation
+ * \retval C2_BAD_VALUE capacity or usage are not supported (invalid) (caller error)
+ * \retval C2_UNSUPPORTED this allocator does not support linear allocations
+ * \retval C2_CORRUPTED      some unknown, unrecoverable error occurred during allocation (unexpected)
+ */
+ virtual C2Error allocateLinearBlock(
+ uint32_t capacity __unused, C2MemoryUsage usage __unused,
+ std::shared_ptr<C2LinearBlock> *block /* nonnull */) {
+ *block = nullptr;
+ return C2_UNSUPPORTED;
+ }
+
+ /**
+ * Allocates a circular writeable block of given |capacity| and |usage|. If successful, the
+ * block is stored in |block|. Otherwise, |block| is set to 'nullptr'.
+ *
+ * \param capacity the size of requested circular block. (the allocation could be slightly
+ * larger, e.g. to account for any system-required alignment)
+ * \param usage the memory usage info for the requested allocation. \note that the
+ * returned allocation may be later used/mapped with different usage.
+ * The allocator shall lay out the buffer to be optimized for this usage,
+ * but must support any usage. One exception: protected buffers can
+ * only be used in a protected scenario.
+ * \param block pointer to where the allocated block shall be stored on success. nullptr
+ * will be stored here on failure
+ *
+ * \retval C2_OK the allocation was successful
+ * \retval C2_NO_MEMORY not enough memory to complete the allocation
+ * \retval C2_TIMED_OUT the allocation timed out
+ * \retval C2_NO_PERMISSION no permission to complete the allocation
+ * \retval C2_BAD_VALUE capacity or usage are not supported (invalid) (caller error)
+ * \retval C2_UNSUPPORTED this allocator does not support circular allocations
+ * \retval C2_CORRUPTED      some unknown, unrecoverable error occurred during allocation (unexpected)
+ */
+ virtual C2Error allocateCircularBlock(
+ uint32_t capacity __unused, C2MemoryUsage usage __unused,
+ std::shared_ptr<C2CircularBlock> *block /* nonnull */) {
+ *block = nullptr;
+ return C2_UNSUPPORTED;
+ }
+
+ /**
+ * Allocates a 2D graphic block of given |width|, |height|, |format| and |usage|. If successful,
+ * the allocation is stored in |block|. Otherwise, |block| is set to 'nullptr'.
+ *
+ * \param width the width of requested allocation (the allocation could be slightly
+ * larger, e.g. to account for any system-required alignment)
+ * \param height the height of requested allocation (the allocation could be slightly
+ * larger, e.g. to account for any system-required alignment)
+ * \param format the pixel format of requested allocation. This could be a vendor
+ * specific format.
+ * \param usage the memory usage info for the requested allocation. \note that the
+ * returned allocation may be later used/mapped with different usage.
+ *                    The allocator should lay out the buffer to be optimized for this usage,
+ * but must support any usage. One exception: protected buffers can
+ * only be used in a protected scenario.
+ * \param block pointer to where the allocation shall be stored on success. nullptr
+ * will be stored here on failure
+ *
+ * \retval C2_OK the allocation was successful
+ * \retval C2_NO_MEMORY not enough memory to complete the allocation
+ * \retval C2_TIMED_OUT the allocation timed out
+ * \retval C2_NO_PERMISSION no permission to complete the allocation
+ * \retval C2_BAD_VALUE width, height, format or usage are not supported (invalid) (caller error)
+ * \retval C2_UNSUPPORTED this allocator does not support 2D allocations
+ * \retval C2_CORRUPTED      some unknown, unrecoverable error occurred during allocation (unexpected)
+ */
+ virtual C2Error allocateGraphicBlock(
+ uint32_t width __unused, uint32_t height __unused, uint32_t format __unused,
+ C2MemoryUsage usage __unused,
+ std::shared_ptr<C2GraphicBlock> *block /* nonnull */) {
+ *block = nullptr;
+ return C2_UNSUPPORTED;
+ }
+
+protected:
+ C2BlockAllocator() = default;
+
+ virtual ~C2BlockAllocator() = default;
+};
+
+/// @}
+
+/// \cond INTERNAL
+
+/// \todo These are no longer used
+
+/// \addtogroup linear
+/// @{
+
+/** \deprecated */
+class C2LinearBuffer
+ : public C2Buffer, public _C2LinearRangeAspect,
+ public std::enable_shared_from_this<C2LinearBuffer> {
+public:
+ /** \todo what is this? */
+ const C2Handle *handle() const;
+
+protected:
+ inline C2LinearBuffer(const C2ConstLinearBlock &block);
+
+private:
+ class Impl;
+ Impl *mImpl;
+};
+
+class C2ReadCursor;
+
+class C2WriteCursor {
+public:
+ uint32_t remaining() const; // remaining data to be read
+ void commit(); // commits the current position. discard data before current position
+ void reset() const; // resets position to the last committed position
+ // slices off at most |size| bytes, and moves cursor ahead by the number of bytes
+ // sliced off.
+ C2ReadCursor slice(uint32_t size) const;
+ // slices off at most |size| bytes, and moves cursor ahead by the number of bytes
+ // sliced off.
+ C2WriteCursor reserve(uint32_t size);
+ // bool read(T&);
+ // bool write(T&);
+ C2Fence waitForSpace(uint32_t size);
+};
+
+/// @}
+
+/// \addtogroup graphic
+/// @{
+
+struct C2ColorSpace {
+//public:
+ enum Standard {
+ BT601,
+ BT709,
+ BT2020,
+ // TODO
+ };
+
+ enum Range {
+ LIMITED,
+ FULL,
+ // TODO
+ };
+
+ enum TransferFunction {
+ BT709Transfer,
+ BT2020Transfer,
+ HybridLogGamma2,
+ HybridLogGamma4,
+ // TODO
+ };
+};
+
+/** \deprecated */
+class C2GraphicBuffer : public C2Buffer {
+public:
+ // constant attributes
+ inline uint32_t width() const { return mWidth; }
+ inline uint32_t height() const { return mHeight; }
+ inline uint32_t format() const { return mFormat; }
+ inline const C2MemoryUsage usage() const { return mUsage; }
+
+ // modifiable attributes
+
+
+ virtual const C2ColorSpace colorSpace() const = 0;
+ // best effort
+ virtual void setColorSpace_be(const C2ColorSpace &colorSpace) = 0;
+ virtual bool setColorSpace(const C2ColorSpace &colorSpace) = 0;
+
+ const C2Handle *handle() const;
+
+protected:
+ uint32_t mWidth;
+ uint32_t mHeight;
+ uint32_t mFormat;
+ C2MemoryUsage mUsage;
+
+ class Impl;
+ Impl *mImpl;
+};
+
+/// @}
+
+/// \endcond
+
+/// @}
+
+} // namespace android
+
+#endif // C2BUFFER_H_
diff --git a/media/libstagefright/codec2/include/C2Component.h b/media/libstagefright/codec2/include/C2Component.h
new file mode 100644
index 0000000..1ee9302
--- /dev/null
+++ b/media/libstagefright/codec2/include/C2Component.h
@@ -0,0 +1,685 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef C2COMPONENT_H_
+
+#define C2COMPONENT_H_
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <list>
+#include <memory>
+#include <vector>
+#include <functional>
+
+#include <C2Param.h>
+#include <C2Work.h>
+
+namespace android {
+
+/// \defgroup components Components
+/// @{
+
+class C2Component;
+
+class C2ComponentListener {
+public:
+ virtual void onWorkDone(std::weak_ptr<C2Component> component,
+ std::vector<std::unique_ptr<C2Work>> workItems) = 0;
+
+ virtual void onTripped(std::weak_ptr<C2Component> component,
+ std::vector<std::shared_ptr<C2SettingResult>> settingResult) = 0;
+
+ virtual void onError(std::weak_ptr<C2Component> component,
+ uint32_t errorCode) = 0;
+
+ // virtual void onTunnelReleased(<from>, <to>) = 0;
+
+ // virtual void onComponentReleased(<id>) = 0;
+
+protected:
+ virtual ~C2ComponentListener();
+};
+
+/**
+ * Component interface object. This object contains all of the configuration of a potential or
+ * actual component. It can be created and used independently of an actual C2Component instance to
+ * query support and parameters for various component settings and configurations for a potential
+ * component. Actual components also expose this interface.
+ */
+
+class C2ComponentInterface {
+public:
+ // ALWAYS AVAILABLE METHODS
+ // =============================================================================================
+
+ /**
+ * Returns the name of this component or component interface object.
+ * This is a unique name for this component or component interface 'class'; however, multiple
+ * instances of this component SHALL have the same name.
+ *
+ * This method MUST be supported in any state. This call does not change the state nor the
+ * internal states of the component.
+ *
+ * This method MUST be "non-blocking" and return within 1ms.
+ *
+ * \return the name of this component or component interface object.
+ * \retval an empty string if there was not enough memory to allocate the actual name.
+ */
+ virtual C2String getName() const = 0;
+
+ /**
+ * Returns a unique ID for this component or interface object.
+ * This ID is used as work targets, unique work IDs, and when configuring tunneling.
+ *
+ * This method MUST be supported in any state. This call does not change the state nor the
+ * internal states of the component.
+ *
+ * This method MUST be "non-blocking" and return within 1ms.
+ *
+ * \return a unique node ID for this component or component interface instance.
+ */
+ virtual node_id getId() const = 0;
+
+ /**
+ * Queries a set of parameters from the component or interface object.
+ * Querying is performed at best effort: the component SHALL query all supported parameters and
+ * skip unsupported ones, or heap allocated parameters that could not be allocated. Any errors
+ * are communicated in the return value. Additionally, preallocated (e.g. stack) parameters that
+ * could not be queried are invalidated. Parameters to be allocated on the heap are omitted from
+ * the result.
+ *
+ * \note Parameter values do not depend on the order of query.
+ *
+ * \todo This method cannot be used to query info-buffers. Is that a problem?
+ *
+ * This method MUST be supported in any state. This call does not change the state nor the
+ * internal states of the component.
+ *
+ * This method MUST be "non-blocking" and return within 1ms.
+ *
+ * \param[in,out] stackParams a list of params queried. These are initialized specific to each
+ * setting; e.g. size and index are set and rest of the members are
+ * cleared.
+ * \note Flexible settings that are of incorrect size will be invalidated.
+ * \param[in] heapParamIndices a vector of param indices for params to be queried and returned on the
+ * heap. These parameters will be returned in heapParams. Unsupported param
+ * indices will be ignored.
+ * \param[out] heapParams a list of params where to which the supported heap parameters will be
+ * appended in the order they appear in heapParamIndices.
+ *
+ * \retval C2_OK all parameters could be queried
+ * \retval C2_BAD_INDEX all supported parameters could be queried, but some parameters were not
+ * supported
+ * \retval C2_NO_MEMORY could not allocate memory for a supported parameter
+ * \retval C2_CORRUPTED some unknown error prevented the querying of the parameters
+ * (unexpected)
+ */
+ virtual status_t query_nb(
+ const std::vector<C2Param* const> &stackParams,
+ const std::vector<C2Param::Index> &heapParamIndices,
+ std::vector<std::unique_ptr<C2Param>>* const heapParams) const = 0;
+
+ /**
+ * Sets a set of parameters for the component or interface object.
+ * Tuning is performed at best effort: the component SHALL update all supported configuration at
+ * best effort (unless configured otherwise) and skip unsupported ones. Any errors are
+ * communicated in the return value and in |failures|.
+ *
+ * \note Parameter tuning DOES depend on the order of the tuning parameters. E.g. some parameter
+ * update may allow some subsequent parameter update.
+ *
+ * This method MUST be supported in any state.
+ *
+ * This method MUST be "non-blocking" and return within 1ms.
+ *
+ * \param[in,out] params a list of parameter updates. These will be updated to the actual
+ * parameter values after the updates (this is because tuning is performed
+ * at best effort).
+ * \todo params that could not be updated are not marked here, so are
+ * confusing - are they "existing" values or intended to be configured
+ * values?
+ * \param[out] failures a list of parameter failures
+ *
+ * \retval C2_OK all parameters could be updated successfully
+ * \retval C2_BAD_INDEX all supported parameters could be updated successfully, but some
+ * parameters were not supported
+ * \retval C2_BAD_VALUE some supported parameters could not be updated successfully because
+ * they contained unsupported values. These are returned in |failures|.
+ * \retval C2_NO_MEMORY some supported parameters could not be updated successfully because
+ * they contained unsupported values, but could not allocate a failure
+ * object for them.
+ * \retval C2_CORRUPTED some unknown error prevented the update of the parameters
+ * (unexpected)
+ */
+ virtual status_t config_nb(
+ const std::vector<C2Param* const> ¶ms,
+ std::vector<std::unique_ptr<C2SettingResult>>* const failures) = 0;
+
+ /**
+ * Atomically sets a set of parameters for the component or interface object.
+ *
+ * \note This method is used mainly for reserving resources for a component.
+ *
+ * The component SHALL update all supported configuration at
+ * best effort (TBD) (unless configured otherwise) and skip unsupported ones. Any errors are
+ * communicated in the return value and in |failures|.
+ *
+ * \note Parameter tuning DOES depend on the order of the tuning parameters. E.g. some parameter
+ * update may allow some subsequent parameter update.
+ *
+ * This method MUST be supported in any state.
+ *
+ * This method may be momentarily blocking, but MUST return within 5ms.
+ *
+ * \param params[in,out] a list of parameter updates. These will be updated to the actual
+ * parameter values after the updates (this is because tuning is performed
+ * at best effort).
+ * \todo params that could not be updated are not marked here, so are
+ * confusing - are they "existing" values or intended to be configured
+ * values?
+ * \param failures[out] a list of parameter failures
+ *
+ * \retval C2_OK all parameters could be updated successfully
+ * \retval C2_BAD_INDEX all supported parameters could be updated successfully, but some
+ * parameters were not supported
+ * \retval C2_BAD_VALUE some supported parameters could not be updated successfully because
+ * they contained unsupported values. These are returned in |failures|.
+ * \retval C2_NO_MEMORY some supported parameters could not be updated successfully because
+ * they contained unsupported values, but could not allocate a failure
+ * object for them.
+ * \retval C2_CORRUPTED some unknown error prevented the update of the parameters
+ * (unexpected)
+ */
+ virtual status_t commit_sm(
+ const std::vector<C2Param* const> ¶ms,
+ std::vector<std::unique_ptr<C2SettingResult>>* const failures) = 0;
+
+ // TUNNELING
+ // =============================================================================================
+
+ /**
+ * Creates a tunnel from this component to the target component.
+ *
+ * If the component is successfully created, subsequent work items queued may include a
+ * tunneled path between these components.
+ *
+ * This method MUST be supported in any state.
+ *
+ * This method may be momentarily blocking, but MUST return within 5ms.
+ *
+ * \retval C2_OK the tunnel was successfully created
+ * \retval C2_BAD_INDEX the target component does not exist
+ * \retval C2_ALREADY_EXIST the tunnel already exists
+ * \retval C2_UNSUPPORTED the tunnel is not supported
+ *
+ * \retval C2_TIMED_OUT could not create the tunnel within the time limit (unexpected)
+ * \retval C2_CORRUPTED some unknown error prevented the creation of the tunnel (unexpected)
+ */
+ virtual status_t createTunnel_sm(node_id targetComponent) = 0;
+
+ /**
+ * Releases a tunnel from this component to the target component.
+ *
+ * The release of a tunnel is delayed while there are pending work items for the tunnel.
+ * After releasing a tunnel, subsequent work items queued MUST NOT include a tunneled
+ * path between these components.
+ *
+ * This method MUST be supported in any state.
+ *
+ * This method may be momentarily blocking, but MUST return within 5ms.
+ *
+ * \retval C2_OK the tunnel was marked for release successfully
+ * \retval C2_BAD_INDEX the target component does not exist
+ * \retval C2_NOT_FOUND the tunnel does not exist
+ *
+ * \retval C2_TIMED_OUT could not mark the tunnel for release within the time limit (unexpected)
+ * \retval C2_CORRUPTED some unknown error prevented the release of the tunnel (unexpected)
+ */
+ virtual status_t releaseTunnel_sm(node_id targetComponent) = 0;
+
+
+ // REFLECTION MECHANISM (USED FOR EXTENSION)
+ // =============================================================================================
+
+ /**
+ * Returns the parameter reflector.
+ *
+ * This is used to describe parameter fields.
+ *
+ * \return a shared parameter reflector object.
+ */
+ virtual std::shared_ptr<C2ParamReflector> getParamReflector() const = 0;
+
+ /**
+ * Returns the set of supported parameters.
+ *
+ * \param[out] params a vector of supported parameters will be appended to this vector.
+ *
+ * \retval C2_OK the operation completed successfully.
+ * \retval C2_NO_MEMORY not enough memory to complete this method.
+ */
+ virtual status_t getSupportedParams(
+ std::vector<std::shared_ptr<C2ParamDescriptor>> * const params) const = 0;
+
+ /**
+ *
+ * \todo should this take a list considering that setting some fields may further limit other
+ * fields in the same list?
+ */
+ virtual status_t getSupportedValues(
+ const std::vector<const C2ParamField> fields,
+ std::vector<C2FieldSupportedValues>* const values) const = 0;
+
+ virtual ~C2ComponentInterface() = default;
+};
+
+class C2Component {
+public:
+ // METHODS AVAILABLE WHEN RUNNING
+ // =============================================================================================
+
+ /**
+ * Queues up work for the component.
+ *
+ * This method MUST be supported in running (including tripped) states.
+ *
+ * This method MUST be "non-blocking" and return within 1ms
+ *
+ * It is acceptable for this method to return OK and return an error value using the
+ * onWorkDone() callback.
+ *
+ * \retval C2_OK the work was successfully queued
+ * \retval C2_BAD_INDEX some component(s) in the work do(es) not exist
+ * \retval C2_UNSUPPORTED the components are not tunneled
+ *
+ * \retval C2_NO_MEMORY not enough memory to queue the work
+ * \retval C2_CORRUPTED some unknown error prevented queuing the work (unexpected)
+ */
+ virtual status_t queue_nb(std::list<std::unique_ptr<C2Work>>* const items) = 0;
+
+ /**
+ * Announces a work to be queued later for the component. This reserves a slot for the queue
+ * to ensure correct work ordering even if the work is queued later.
+ *
+ * This method MUST be supported in running (including tripped) states.
+ *
+ * This method MUST be "non-blocking" and return within 1 ms
+ *
+ * \retval C2_OK the work announcement has been successfully recorded
+ * \retval C2_BAD_INDEX some component(s) in the work outline do(es) not exist
+ * \retval C2_UNSUPPORTED the components are not tunneled
+ *
+ * \retval C2_NO_MEMORY not enough memory to record the work announcement
+ * \retval C2_CORRUPTED some unknown error prevented recording the announcement (unexpected)
+ *
+ * \todo Can this be rolled into queue_nb?
+ */
+ virtual status_t announce_nb(const std::vector<C2WorkOutline> &items) = 0;
+
+ /**
+ * Discards and abandons any pending work for the component, and optionally any component
+ * downstream.
+ *
+ * \todo define this: we could flush all work before last item queued for component across all
+ * components linked to this; flush only work items that are queued to this
+ * component
+ * \todo return work # of last flushed item; or all flushed (but not returned items)
+ * \todo we could make flush take a work item and flush all work before/after that item to allow
+ * TBD (slicing/seek?)
+ * \todo we could simply take a list of numbers and flush those... this is bad for decoders
+ *       also, what would happen to fine-grained references?
+ *
+ * This method MUST be supported in running (including tripped) states.
+ *
+ * This method may be momentarily blocking, but must return within 5ms.
+ *
+ * Work that could be immediately abandoned/discarded SHALL be returned in |flushedWork|; this
+ * can be done in an arbitrary order.
+ *
+ * Work that could not be abandoned or discarded immediately SHALL be marked to be
+ * discarded at the earliest opportunity, and SHALL be returned via the onWorkDone() callback.
+ *
+ * \param flushThrough flush work from this component and all components connected downstream
+ * from it via tunneling.
+ *
+ * \retval C2_OK the work announcement has been successfully recorded
+ * \retval C2_TIMED_OUT the flush could not be completed within the time limit (unexpected)
+ * \retval C2_CORRUPTED some unknown error prevented flushing from completion (unexpected)
+ */
+ virtual status_t flush_sm(bool flushThrough, std::list<std::unique_ptr<C2Work>>* const flushedWork) = 0;
+
+ /**
+ * Drains the component, and optionally downstream components
+ *
+ * \todo define this; we could place EOS to all upstream components, just this component, or
+ * all upstream and downstream component.
+ * \todo should EOS carry over to downstream components?
+ *
+ * Marks last work item as "end-of-stream", so component is notified not to wait for further
+ * work before it processes work already queued. This method is called to set the end-of-stream
+ * flag after work has been queued. Client can continue to queue further work immediately after
+ * this method returns.
+ *
+ * This method MUST be supported in running (including tripped) states.
+ *
+ * This method MUST be "non-blocking" and return within 1ms.
+ *
+ * Work that is completed SHALL be returned via the onWorkDone() callback.
+ *
+ * \param drainThrough marks the last work item with a persistent "end-of-stream" marker that
+ * will drain downstream components.
+ *
+ * \todo this may confuse work-ordering downstream; could be an mode enum
+ *
+ * \retval C2_OK the work announcement has been successfully recorded
+ * \retval C2_TIMED_OUT the flush could not be completed within the time limit (unexpected)
+ * \retval C2_CORRUPTED some unknown error prevented flushing from completion (unexpected)
+ */
+ virtual status_t drain_nb(bool drainThrough) = 0;
+
+ // STATE CHANGE METHODS
+ // =============================================================================================
+
+ /**
+ * Starts the component.
+ *
+ * This method MUST be supported in stopped state.
+ *
+ * \todo This method MUST return within 500ms. Seems this should be able to return quickly, as
+ * there are no immediate guarantees. Though there are guarantees for responsiveness immediately
+ * after start returns.
+ *
+ * \todo Could we just start a ComponentInterface to get a Component?
+ *
+ * \retval C2_OK the work announcement has been successfully recorded
+ * \retval C2_NO_MEMORY not enough memory to start the component
+ * \retval C2_TIMED_OUT the component could not be started within the time limit (unexpected)
+ * \retval C2_CORRUPTED some unknown error prevented starting the component (unexpected)
+ */
+ virtual status_t start() = 0;
+
+ /**
+ * Stops the component.
+ *
+ * This method MUST be supported in running (including tripped) state.
+ *
+ * This method MUST return within 500ms.
+ *
+ * Upon this call, all pending work SHALL be abandoned.
+ *
+ * \todo should this return completed work, since client will just free it? Perhaps just to
+ * verify accounting.
+ *
+ * This does not alter any settings and tunings that may have resulted in a tripped state.
+ * (Is this material given the definition? Perhaps in case we want to start again.)
+ */
+ virtual status_t stop() = 0;
+
+ /**
+ * Resets the component.
+ *
+ * This method MUST be supported in running (including tripped) state.
+ *
+ * This method MUST be supported during any other call (\todo or just blocking ones?)
+ *
+ * This method MUST return within 500ms.
+ *
+ * After this call returns all work is/must be abandoned, all references should be released.
+ *
+ * \todo should this return completed work, since client will just free it? Also, if it unblocks
+ * a stop, where should completed work be returned?
+ *
+ * This brings settings back to their default - "guaranteeing" no tripped space.
+ *
+ * \todo reclaim support - it seems that since ownership is passed, this will allow reclaiming stuff.
+ */
+ virtual void reset() = 0;
+
+ /**
+ * Releases the component.
+ *
+ * This method MUST be supported in any state. (\todo Or shall we force reset() first to bring
+ * to a known state?)
+ *
+ * This method MUST return withing 500ms.
+ *
+ * \todo should this return completed work, since client will just free it? Also, if it unblocks
+ * a stop, where should completed work be returned?
+ *
+ * TODO: does it matter if this call has a short time limit? Yes, as upon return all references
+ * shall be abandoned.
+ */
+ virtual void release() = 0;
+
+ /**
+ * Returns the interface for this component.
+ *
+ * \return the component interface
+ */
+ virtual std::shared_ptr<C2ComponentInterface> intf() = 0;
+
+protected:
+ virtual ~C2Component() = default;
+};
+
+class C2FrameInfoParser {
+public:
+ /**
+ * \return the content type supported by this info parser.
+ *
+ * \todo this may be redundant
+ */
+ virtual C2StringLiteral getType() const = 0;
+
+ /**
+ * \return a vector of supported parameter indices parsed by this info parser.
+ *
+ * \todo sticky vs. non-sticky params? this may be communicated by param-reflector.
+ */
+ virtual const std::vector<C2Param::Index> getParsedParams() const = 0;
+
+ /**
+ * Resets this info parser. This brings this parser to its initial state after creation.
+ *
+ * This method SHALL return within 5ms.
+ *
+ * \retval C2_OK the info parser was reset
+ * \retval C2_TIMED_OUT could not reset the parser within the time limit (unexpected)
+ * \retval C2_CORRUPTED some unknown error prevented the resetting of the parser (unexpected)
+ */
+ virtual status_t reset() { return C2_OK; }
+
+ virtual status_t parseFrame(C2BufferPack &frame);
+
+ virtual ~C2FrameInfoParser() = default;
+};
+
+struct C2ComponentInfo {
+ // TBD
+
+};
+
+class C2AllocatorStore {
+public:
+ // TBD
+
+ enum Type {
+ LINEAR, ///< basic linear allocator type
+ GRALLOC, ///< basic gralloc allocator type
+ };
+
+ /**
+ * Creates an allocator.
+ *
+ * \param type the type of allocator to create
+ * \param allocator shared pointer where the created allocator is stored. Cleared on failure
+ * and updated on success.
+ *
+ * \retval C2_OK the allocator was created successfully
+ * \retval C2_TIMED_OUT could not create the allocator within the time limit (unexpected)
+ * \retval C2_CORRUPTED some unknown error prevented the creation of the allocator (unexpected)
+ *
+ * \retval C2_NOT_FOUND no such allocator
+ * \retval C2_NO_MEMORY not enough memory to create the allocator
+ */
+ virtual status_t createAllocator(Type type, std::shared_ptr<C2Allocator>* const allocator) = 0;
+
+ virtual ~C2AllocatorStore() = default;
+};
+
+class C2ComponentStore {
+ /**
+ * Creates a component.
+ *
+ * This method SHALL return within 100ms.
+ *
+ * \param name name of the component to create
+ * \param component shared pointer where the created component is stored. Cleared on
+ * failure and updated on success.
+ *
+ * \retval C2_OK the component was created successfully
+ * \retval C2_TIMED_OUT could not create the component within the time limit (unexpected)
+ * \retval C2_CORRUPTED some unknown error prevented the creation of the component (unexpected)
+ *
+ * \retval C2_NOT_FOUND no such component
+ * \retval C2_NO_MEMORY not enough memory to create the component
+ */
+ virtual status_t createComponent(C2String name, std::shared_ptr<C2Component>* const component);
+
+ /**
+ * Creates a component interface.
+ *
+ * This method SHALL return within 100ms.
+ *
+ * \param name name of the component interface to create
+ * \param interface shared pointer where the created interface is stored
+ *
+ * \retval C2_OK the component interface was created successfully
+ * \retval C2_TIMED_OUT could not create the component interface within the time limit
+ * (unexpected)
+ * \retval C2_CORRUPTED some unknown error prevented the creation of the component interface
+ * (unexpected)
+ *
+ * \retval C2_NOT_FOUND no such component interface
+ * \retval C2_NO_MEMORY not enough memory to create the component interface
+ *
+ * \todo Do we need an interface, or could this just be a component that is never started?
+ */
+ virtual status_t createInterface(C2String name, std::shared_ptr<C2ComponentInterface>* const interface);
+
+ /**
+ * Returns the list of components supported by this component store.
+ *
+ * This method SHALL return within 1ms.
+ *
+ * \retval vector of component information.
+ */
+ virtual std::vector<std::unique_ptr<const C2ComponentInfo>> getComponents();
+
+ // -------------------------------------- UTILITY METHODS --------------------------------------
+
+ // on-demand buffer layout conversion (swizzling)
+ virtual status_t copyBuffer(std::shared_ptr<C2GraphicBuffer> src, std::shared_ptr<C2GraphicBuffer> dst);
+
+ // status_t selectPreferredColor(formats<A>, formats<B>);
+
+ // GLOBAL SETTINGS
+ // system-wide stride & slice-height (???)
+
+ /**
+ * Queries a set of system-wide parameters.
+ * Querying is performed at best effort: the store SHALL query all supported parameters and
+ * skip unsupported ones, or heap allocated parameters that could not be allocated. Any errors
+ * are communicated in the return value. Additionally, preallocated (e.g. stack) parameters that
+ * could not be queried are invalidated. Parameters to be allocated on the heap are omitted from
+ * the result.
+ *
+ * \note Parameter values do not depend on the order of query.
+ *
+ * This method MUST be "non-blocking" and return within 1ms.
+ *
+ * \param stackParams a list of params queried. These are initialized specific to each
+ * setting; e.g. size and index are set and rest of the members are
+ * cleared.
+ * NOTE: Flexible settings that are of incorrect size will be invalidated.
+ * \param heapParamIndices a vector of param indices for params to be queried and returned on the
+ * heap. These parameters will be returned in heapParams. Unsupported param
+ * indices will be ignored.
+ * \param heapParams a list of params where to which the supported heap parameters will be
+ * appended in the order they appear in heapParamIndices.
+ *
+ * \retval C2_OK all parameters could be queried
+ * \retval C2_BAD_INDEX all supported parameters could be queried, but some parameters were not
+ * supported
+ * \retval C2_NO_MEMORY could not allocate memory for a supported parameter
+ * \retval C2_CORRUPTED some unknown error prevented the querying of the parameters
+ * (unexpected)
+ */
+ virtual status_t query_nb(
+ const std::vector<C2Param* const> &stackParams,
+ const std::vector<C2Param::Index> &heapParamIndices,
+ std::vector<std::unique_ptr<C2Param>>* const heapParams) = 0;
+
+ /**
+ * Sets a set of system-wide parameters.
+ *
+ * \note There are no settable system-wide parameters defined thus far, but may be added in the
+ * future.
+ *
+ * Tuning is performed at best effort: the store SHALL update all supported configuration at
+ * best effort (unless configured otherwise) and skip unsupported ones. Any errors are
+ * communicated in the return value and in |failures|.
+ *
+ * \note Parameter tuning DOES depend on the order of the tuning parameters. E.g. some parameter
+ * update may allow some subsequent parameter update.
+ *
+ * This method MUST be "non-blocking" and return within 1ms.
+ *
+ * \param params a list of parameter updates. These will be updated to the actual
+ * parameter values after the updates (this is because tuning is performed
+ * at best effort).
+ * \todo params that could not be updated are not marked here, so are
+ * confusing - are they "existing" values or intended to be configured
+ * values?
+ * \param failures a list of parameter failures
+ *
+ * \retval C2_OK all parameters could be updated successfully
+ * \retval C2_BAD_INDEX all supported parameters could be updated successfully, but some
+ * parameters were not supported
+ * \retval C2_BAD_VALUE some supported parameters could not be updated successfully because
+ * they contained unsupported values. These are returned in |failures|.
+ * \retval C2_NO_MEMORY some supported parameters could not be updated successfully because
+ * they contained unsupported values, but could not allocate a failure
+ * object for them.
+ * \retval C2_CORRUPTED some unknown error prevented the update of the parameters
+ * (unexpected)
+ */
+ virtual status_t config_nb(
+ const std::vector<C2Param* const> ¶ms,
+ std::list<std::unique_ptr<C2SettingResult>>* const failures) = 0;
+
+ virtual ~C2ComponentStore() = default;
+};
+
+// ================================================================================================
+
+/// @}
+
+} // namespace android
+
+#endif // C2COMPONENT_H_
diff --git a/media/libstagefright/codec2/include/C2Config.h b/media/libstagefright/codec2/include/C2Config.h
new file mode 100644
index 0000000..30e9193
--- /dev/null
+++ b/media/libstagefright/codec2/include/C2Config.h
@@ -0,0 +1,251 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef C2CONFIG_H_
+#define C2CONFIG_H_
+
+#include <C2ParamDef.h>
+
+namespace android {
+
+/// \defgroup config Component configuration
+/// @{
+
+#ifndef DEFINE_C2_ENUM_VALUE_AUTO_HELPER
+#define DEFINE_C2_ENUM_VALUE_AUTO_HELPER(name, type, prefix, ...)
+#define DEFINE_C2_ENUM_VALUE_CUSTOM_HELPER(name, type, names, ...)
+#endif
+
+#define C2ENUM(name, type, ...) \
+enum name : type { __VA_ARGS__ }; \
+DEFINE_C2_ENUM_VALUE_AUTO_HELPER(name, type, NULL, __VA_ARGS__)
+
+#define C2ENUM_CUSTOM_PREFIX(name, type, prefix, ...) \
+enum name : type { __VA_ARGS__ }; \
+DEFINE_C2_ENUM_VALUE_AUTO_HELPER(name, type, prefix, __VA_ARGS__)
+
+#define C2ENUM_CUSTOM_NAMES(name, type, names, ...) \
+enum name : type { __VA_ARGS__ }; \
+DEFINE_C2_ENUM_VALUE_CUSTOM_HELPER(name, type, names, __VA_ARGS__)
+
+enum C2ParamIndexKind : uint32_t {
+ /// domain
+ kParamIndexDomain,
+
+ /// configuration descriptors
+ kParamIndexSupportedParams,
+ kParamIndexRequiredParams,
+ kParamIndexReadOnlyParams,
+ kParamIndexRequestedInfos,
+
+ /// latency
+ kParamIndexLatency,
+
+ // generic time behavior
+ kParamIndexTemporal,
+
+ /// port configuration
+ kParamIndexMime,
+ kParamIndexStreamCount,
+ kParamIndexFormat,
+
+ // video info
+
+ kParamIndexStructStart = 0x1,
+ kParamIndexVideoSize,
+ kParamIndexMaxVideoSizeHint,
+
+ kParamIndexParamStart = 0x800,
+};
+
+C2ENUM(C2DomainKind, int32_t,
+ C2DomainVideo,
+ C2DomainAudio,
+ C2DomainOther = C2DomainAudio + 1
+);
+
+// read-only
+
+typedef C2GlobalParam<C2Info, C2SimpleValueStruct<C2DomainKind>, kParamIndexDomain> C2ComponentDomainInfo;
+// typedef C2GlobalParam<C2Info, C2Uint32Value, kParamIndexDomain> C2ComponentDomainInfo;
+//DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleValueStruct<C2DomainKind>, { C2FIELD(mValue, "value") });
+
+// read-only
+typedef C2GlobalParam<C2Info, C2Uint32Array, kParamIndexSupportedParams> C2SupportedParamsInfo;
+
+/// \todo do we define it as a param?
+// read-only
+typedef C2GlobalParam<C2Info, C2Uint32Array, kParamIndexRequiredParams> C2RequiredParamsInfo;
+
+// read-only
+typedef C2GlobalParam<C2Info, C2Uint32Array, kParamIndexReadOnlyParams> C2ReadOnlyParamsInfo;
+
+// read-only
+typedef C2GlobalParam<C2Info, C2Uint32Array, kParamIndexRequestedInfos> C2RequestedInfosInfo;
+
+// read-only
+//typedef C2GlobalParam<C2Info, C2Uint32Value, kParamIndexRequestedInfos> C2RequestedInfosInfo;
+
+/// latency
+
+typedef C2PortParam<C2Info, C2Uint32Value, kParamIndexLatency> C2PortLatencyInfo;
+
+typedef C2GlobalParam<C2Info, C2Uint32Value, kParamIndexLatency> C2ComponentLatencyInfo;
+
+/// \todo
+typedef C2GlobalParam<C2Info, C2Uint32Value, kParamIndexTemporal> C2ComponentTemporalInfo;
+
+/// port configuration
+
+typedef C2PortParam<C2Tuning, C2StringValue, kParamIndexMime> C2PortMimeConfig;
+
+typedef C2PortParam<C2Tuning, C2Uint32Value, kParamIndexStreamCount> C2PortStreamCountConfig;
+
+typedef C2StreamParam<C2Tuning, C2StringValue, kParamIndexMime> C2StreamMimeConfig;
+
+C2ENUM(C2FormatKind, uint32_t,
+ C2FormatCompressed,
+ C2FormatAudio = 1,
+ C2FormatVideo = 4,
+)
+
+typedef C2StreamParam<C2Tuning, C2Uint32Value, kParamIndexFormat> C2StreamFormatConfig;
+
+/*
+ Component description fields:
+
+// format (video/compressed/audio/other-do we need other?) per stream
+
+// likely some of these are exposed as separate settings:
+
+struct C2BaseTuning {
+ // latency characteristics
+ uint32_t latency;
+ bool temporal; // seems this only makes sense if latency is 1..., so this could be captured as latency = 0
+ uint32_t delay;
+
+ uint32_t numInputStreams; // RW? - or suggestion only: RO
+ uint32_t numOutputStreams; // RW
+ //
+ // refs characteristics (per stream?)
+ uint32_t maxInputRefs; // RO
+ uint32_t maxOutputRefs; // RO
+ uint32_t maxInputMemory; // RO - max time refs are held for
+ uint32_t maxOutputMemory; // RO
+
+ // per stream
+ bool compressed;
+ // format... video/compressed/audio/other?
+ // actual "audio/video" format type
+ uint32_t width/height? is this needed, or just queue...
+ // mime...
+};
+*/
+
+
+
+
+
+
+// overall component
+// => C: domain: audio or video
+// => C: kind: decoder, encoder or filter
+// => "mime" class
+
+// => C: temporal (bool) => does this depend on ordering?
+// => I: latency
+// => I: history max duration...
+// => I: history max frames kept...
+// => I: reordering depth
+// => I: frc (bool) (perhaps ratio?)
+// => I: current frc
+
+// - pause
+// => last frame 'number' processed
+// => current frame 'number' processed
+// => invalid settings =>[]
+
+// video decoder configuration: // audio
+// - encoding // -encoding
+// - hint: max width/height // -hint: sample rate, channels
+// - hint: profile/level // -hint: tools used
+// - hint: framerate (bitrate?) // -hint: bitrate
+// - default: color space (from container)
+// - hint: color format // -hint: pcm-encoding
+// - hint: # of views (e.g. MVC) // -hint?: channel groups
+// - default: HDR static info (from container) // -hint?: channel mappings
+// - hint: rotation (e.g. for allocator)
+
+// => # of streams required and their formats? (setting?)
+// => # of streams produced and their formats? (tuning)
+
+// => output
+// - # of views // -channel groups && channel mappings
+// - width/height/crop/color format/color space/HDR static info (from buffers)
+// (as required by the allocator & framework)
+// - SEI (or equivalent) <= [port]
+// - CC
+// - reference info
+
+// video encoder configurations
+// - encoding // - encoding
+// - hint: width/height // - hint: sample rate, channels
+// - hint: frame rate
+// - hint: max width/height (? does this differ from width/height?)
+// - # of input (e.g. MVC) // - hint: # groups and mappings
+// - # of output (e.g. SVC) => bitrates/width/height/framerates? per stream
+// - hint: profile/level // - hint: profile/level
+// - HDR static info + (info: HDR)
+// - color space
+// - hint: color format? // - hint: pcm encoding
+// - SEI
+// - CC
+// - reference directive
+// - hint: bitrate (or quality) // - hint: bitrate/quality
+// - optional: codec-specific parameters // - optional: csd
+
+// => output // => output
+// - layers per stream? // E-AC3?... DTS?...Dolby-Vision?
+// - reference info
+
+
+// RM:
+// - need SPS for full knowledge => component should return max. (component can use less)
+// - critical parameters? (interlaced? profile? level?)
+
+struct C2VideoSizeStruct {
+ int32_t mWidth; ///< video width
+ int32_t mHeight; ///< video height
+
+ DEFINE_AND_DESCRIBE_C2STRUCT(VideoSize)
+ C2FIELD(mWidth, "width")
+ C2FIELD(mHeight, "height")
+};
+
+// video size for video decoder [OUT]
+typedef C2StreamParam<C2Info, C2VideoSizeStruct> C2VideoSizeStreamInfo;
+
+// max video size for video decoder [IN]
+typedef C2PortParam<C2Setting, C2VideoSizeStruct, kParamIndexMaxVideoSizeHint> C2MaxVideoSizeHintPortSetting;
+
+// video encoder size [IN]
+typedef C2StreamParam<C2Tuning, C2VideoSizeStruct> C2VideoSizeStreamTuning;
+
+/// @}
+
+} // namespace android
+
+#endif
diff --git a/media/libstagefright/codec2/include/C2Param.h b/media/libstagefright/codec2/include/C2Param.h
new file mode 100644
index 0000000..fd43061
--- /dev/null
+++ b/media/libstagefright/codec2/include/C2Param.h
@@ -0,0 +1,1171 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef C2PARAM_H_
+#define C2PARAM_H_
+
+#include <C2.h>
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <algorithm>
+#include <list>
+#include <string>
+#include <type_traits>
+
+#define C2_PACK __attribute__((packed))
+
+namespace android {
+
+/// \addtogroup Parameters
+/// @{
+
+/// \defgroup internal Internal helpers.
+
+/*!
+ * \file
+ * PARAMETERS: SETTINGs, TUNINGs, and INFOs
+ * ===
+ *
+ * These represent miscellaneous control and metadata information and are likely copied into
+ * kernel space. Therefore, these are C-like structures designed to carry just a small amount of
+ * information. We are using C++ to be able to add constructors, as well as non-virtual and class
+ * methods.
+ *
+ * ==Specification details:
+ *
+ * Restrictions:
+ * - must be POD struct, e.g. no vtable (no virtual destructor)
+ * - must have the same size in 64-bit and 32-bit mode (no size_t)
+ * - as such, no pointer members
+ *
+ * Behavior:
+ * - Params can be global (not related to input or output), related to input or output,
+ * or related to an input/output stream.
+ * - All params are queried/set using a unique param index, which incorporates a potential stream
+ * index and/or port.
+ * - Querying (supported) params MUST never fail.
+ * - All params MUST have default values.
+ * - If some fields have "unsupported" or "invalid" values during setting, this SHOULD be
+ * communicated to the app.
+ * a) Ideally, this should be avoided. When setting parameters, in general, component should do
+ * "best effort" to apply all settings. It should change "invalid/unsupported" values to the
+ * nearest supported values.
+ * - This is communicated to the client by changing the source values in tune()/
+ * configure().
+ * b) If falling back to a supported value is absolutely impossible, the component SHALL return
+ * an error for the specific setting, but should continue to apply other settings.
+ * TODO: this currently may result in unintended results.
+ *
+ * **NOTE:** unlike OMX, params are not versioned. Instead, a new struct with new base index
+ * SHALL be added as new versions are required.
+ *
+ * The proper subtype (Setting, Info or Param) is incorporated into the class type. Define structs
+ * to define multiple subtyped versions of related parameters.
+ *
+ * ==Implementation details:
+ *
+ * - Use macros to define parameters
+ * - All parameters must have a default constructor
+ * - This is only used for instantiating the class in source (e.g. will not be used
+ * when building a parameter by the framework from key/value pairs.)
+ */
+
+/// \ingroup internal
+struct _C2ParamManipulator;
+
+/**
+ * Parameter base class.
+ */
+struct C2Param {
+ // param index encompasses the following:
+ //
+ // - type (setting, tuning, info, struct)
+ // - vendor extension flag
+ // - flexible parameter flag
+ // - direction (global, input, output)
+ // - stream flag
+ // - stream ID (usually 0)
+ //
+ // layout:
+ //
+ // +------+-----+---+------+--------+----|------+--------------+
+ // | kind | dir | - |stream|streamID|flex|vendor| base index |
+ // +------+-----+---+------+--------+----+------+--------------+
+ // bit: 31..30 29.28 25 24 .. 17 16 15 14 .. 0
+ //
+public:
+ /**
+ * C2Param kinds, usable as bitmaps.
+ */
+ enum Kind : uint32_t {
+ NONE = 0,
+ STRUCT = (1 << 0),
+ INFO = (1 << 1),
+ SETTING = (1 << 2),
+ TUNING = (1 << 3) | SETTING, // tunings are settings
+ };
+
+ /**
+ * base index (including the vendor extension bit) is a global index for
+ * C2 parameter structs. (e.g. the same indices cannot be reused for different
+ * structs for different components).
+ */
+ struct BaseIndex {
+ protected:
+ enum : uint32_t {
+ kTypeMask = 0xC0000000,
+ kTypeStruct = 0x00000000,
+ kTypeTuning = 0x40000000,
+ kTypeSetting = 0x80000000,
+ kTypeInfo = 0xC0000000,
+
+ kDirMask = 0x30000000,
+ kDirGlobal = 0x20000000,
+ kDirUndefined = 0x30000000, // MUST have all bits set
+ kDirInput = 0x00000000,
+ kDirOutput = 0x10000000,
+
+ kStreamFlag = 0x02000000,
+ kStreamIdMask = 0x01FE0000,
+ kStreamIdShift = 17,
+ kStreamIdMax = kStreamIdMask >> kStreamIdShift,
+ kStreamMask = kStreamFlag | kStreamIdMask,
+
+ kFlexibleFlag = 0x00010000,
+ kVendorFlag = 0x00008000,
+ kParamMask = 0x0000FFFF,
+ kBaseMask = kParamMask | kFlexibleFlag,
+ };
+
+ public:
+ enum : uint32_t {
+ kVendorStart = kVendorFlag, ///< vendor structs SHALL start after this
+ _kFlexibleFlag = kFlexibleFlag, // TODO: this is only needed for testing
+ };
+
+ /// constructor/conversion from uint32_t
+ inline BaseIndex(uint32_t index) : mIndex(index) { }
+
+ // no conversion from uint64_t
+ inline BaseIndex(uint64_t index) = delete;
+
+ /// returns true iff this is a vendor extension parameter
+ inline bool isVendor() const { return mIndex & kVendorFlag; }
+
+ /// returns true iff this is a flexible parameter (with variable size)
+ inline bool isFlexible() const { return mIndex & kFlexibleFlag; }
+
+ /// returns the base type: the index for the underlying struct
+ inline unsigned int baseIndex() const { return mIndex & kBaseMask; }
+
+ /// returns the param index for the underlying struct
+ inline unsigned int paramIndex() const { return mIndex & kParamMask; }
+
+ DEFINE_FIELD_BASED_COMPARISON_OPERATORS(BaseIndex, mIndex)
+
+ protected:
+ uint32_t mIndex;
+ };
+
+ /**
+ * type encompasses the parameter kind (tuning, setting, info), whether the
+ * parameter is global, input or output, and whether it is for a stream.
+ */
+ struct Type : public BaseIndex {
+ /// returns true iff this is a global parameter (not for input nor output)
+ inline bool isGlobal() const { return (mIndex & kDirMask) == kDirGlobal; }
+ /// returns true iff this is an input or input stream parameter
+ inline bool forInput() const { return (mIndex & kDirMask) == kDirInput; }
+ /// returns true iff this is an output or output stream parameter
+ inline bool forOutput() const { return (mIndex & kDirMask) == kDirOutput; }
+
+ /// returns true iff this is a stream parameter
+ inline bool forStream() const { return mIndex & kStreamFlag; }
+ /// returns true iff this is a port (input or output) parameter
+ inline bool forPort() const { return !forStream() && !isGlobal(); }
+
+ /// returns the parameter type: the parameter index without the stream ID
+ inline uint32_t type() const { return mIndex & (~kStreamIdMask); }
+
+ /// return the kind of this param
+ inline Kind kind() const {
+ switch (mIndex & kTypeMask) {
+ case kTypeStruct: return STRUCT;
+ case kTypeInfo: return INFO;
+ case kTypeSetting: return SETTING;
+ case kTypeTuning: return TUNING;
+ default: return NONE; // should not happen
+ }
+ }
+
+ /// constructor/conversion from uint32_t
+ inline Type(uint32_t index) : BaseIndex(index) { }
+
+ // no conversion from uint64_t
+ inline Type(uint64_t index) = delete;
+
+ private:
+ friend struct C2Param; // for setPort()
+ friend struct C2Tuning; // for kTypeTuning
+ friend struct C2Setting; // for kTypeSetting
+ friend struct C2Info; // for kTypeInfo
+ // for kDirGlobal
+ template<typename T, typename S, int I, class F> friend struct C2GlobalParam;
+ template<typename T, typename S, int I, class F> friend struct C2PortParam; // for kDir*
+ template<typename T, typename S, int I, class F> friend struct C2StreamParam; // for kDir*
+ friend struct _C2ParamInspector; // for testing
+
+ /**
+ * Sets the port/stream direction.
+ * @return true on success, false if could not set direction (e.g. it is global param).
+ */
+ inline bool setPort(bool output) {
+ if (isGlobal()) {
+ return false;
+ } else {
+ mIndex = (mIndex & ~kDirMask) | (output ? kDirOutput : kDirInput);
+ return true;
+ }
+ }
+ };
+
+ /**
+ * index encompasses all remaining information: basically the stream ID.
+ */
+ struct Index : public Type {
+ /// returns the index as uint32_t
+ inline operator uint32_t() const { return mIndex; }
+
+ /// constructor/conversion from uint32_t
+ inline Index(uint32_t index) : Type(index) { }
+
+ // no conversion from uint64_t
+ inline Index(uint64_t index) = delete;
+
+ /// returns the stream ID or ~0 if not a stream
+ inline unsigned stream() const {
+ return forStream() ? rawStream() : ~0U;
+ }
+
+ private:
+ friend struct C2Param; // for setStream, makeStreamId, isValid
+ friend struct _C2ParamInspector; // for testing
+
+ /**
+ * @return true if the type is valid, e.g. direction is not undefined AND
+ * stream is 0 if not a stream param.
+ */
+ inline bool isValid() const {
+ // there is no Type::isValid (even though some of this check could be
+ // performed on types) as this is only used on index...
+ return (forStream() ? rawStream() < kStreamIdMax : rawStream() == 0)
+ && (mIndex & kDirMask) != kDirUndefined;
+ }
+
+ /// returns the raw stream ID field
+ inline unsigned rawStream() const {
+ return (mIndex & kStreamIdMask) >> kStreamIdShift;
+ }
+
+ /// returns the streamId bitfield for a given |stream|. If stream is invalid,
+ /// returns an invalid bitfield.
+ inline static uint32_t makeStreamId(unsigned stream) {
+ // saturate stream ID (max value is invalid)
+ if (stream > kStreamIdMax) {
+ stream = kStreamIdMax;
+ }
+ return (stream << kStreamIdShift) & kStreamIdMask;
+ }
+
+ /**
+ * Sets the stream index.
+ * \return true on success, false if could not set index (e.g. not a stream param).
+ */
+ inline bool setStream(unsigned stream) {
+ if (forStream()) {
+ mIndex = (mIndex & ~kStreamIdMask) | makeStreamId(stream);
+ return this->stream() < kStreamIdMax;
+ }
+ return false;
+ }
+ };
+
+public:
+ // public getters for Index methods
+
+ /// returns true iff this is a vendor extension parameter
+ inline bool isVendor() const { return _mIndex.isVendor(); }
+ /// returns true iff this is a flexible parameter
+ inline bool isFlexible() const { return _mIndex.isFlexible(); }
+ /// returns true iff this is a global parameter (not for input nor output)
+ inline bool isGlobal() const { return _mIndex.isGlobal(); }
+ /// returns true iff this is an input or input stream parameter
+ inline bool forInput() const { return _mIndex.forInput(); }
+ /// returns true iff this is an output or output stream parameter
+ inline bool forOutput() const { return _mIndex.forOutput(); }
+
+ /// returns true iff this is a stream parameter
+ inline bool forStream() const { return _mIndex.forStream(); }
+ /// returns true iff this is a port (input or output) parameter
+ inline bool forPort() const { return _mIndex.forPort(); }
+
+ /// returns the stream ID or ~0 if not a stream
+ inline unsigned stream() const { return _mIndex.stream(); }
+
+ /// returns the parameter type: the parameter index without the stream ID
+ inline uint32_t type() const { return _mIndex.type(); }
+
+ /// returns the kind of this parameter
+ inline Kind kind() const { return _mIndex.kind(); }
+
+ /// returns the size of the parameter or 0 if the parameter is invalid
+ inline size_t size() const { return _mSize; }
+
+ /// returns true iff the parameter is valid
+ inline operator bool() const { return _mIndex.isValid() && _mSize > 0; }
+
+ /// returns true iff the parameter is invalid
+ inline bool operator!() const { return !operator bool(); }
+
+ // equality is done by memcmp (use equals() to prevent any overread)
+ inline bool operator==(const C2Param &o) const {
+ return equals(o) && memcmp(this, &o, _mSize) == 0;
+ }
+ inline bool operator!=(const C2Param &o) const { return !operator==(o); }
+
+ /// safe(r) type cast from pointer and size
+ inline static C2Param* From(void *addr, size_t len) {
+ // _mSize must fit into size
+ if (len < sizeof(_mSize) + offsetof(C2Param, _mSize)) {
+ return nullptr;
+ }
+ // _mSize must match length
+ C2Param *param = (C2Param*)addr;
+ if (param->_mSize != len) {
+ return nullptr;
+ }
+ return param;
+ }
+
+#if 0
+ template<typename P, class=decltype(C2Param(P()))>
+ P *As() { return P::From(this); }
+ template<typename P>
+ const P *As() const { return const_cast<const P*>(P::From(const_cast<C2Param*>(this))); }
+#endif
+
+protected:
+ /// sets the stream field. Returns true iff successful.
+ inline bool setStream(unsigned stream) {
+ return _mIndex.setStream(stream);
+ }
+
+ /// sets the port (direction). Returns true iff successful.
+ inline bool setPort(bool output) {
+ return _mIndex.setPort(output);
+ }
+
+public:
+ /// invalidate this parameter. There is no recovery from this call; e.g. parameter
+ /// cannot be 'corrected' to be valid.
+ inline void invalidate() { _mSize = 0; }
+
+ // if other is the same kind of (valid) param as this, copy it into this and return true.
+ // otherwise, do not copy anything, and return false.
+ inline bool updateFrom(const C2Param &other) {
+ if (other._mSize == _mSize && other._mIndex == _mIndex && _mSize > 0) {
+ memcpy(this, &other, _mSize);
+ return true;
+ }
+ return false;
+ }
+
+protected:
+ // returns |o| if it is a null ptr, or if can suitably be a param of given |type| (e.g. has
+ // same type (ignoring stream ID), and size). Otherwise, returns null. If |checkDir| is false,
+ // allow undefined or different direction (e.g. as constructed from C2PortParam() vs.
+ // C2PortParam::input), but still require equivalent type (stream, port or global); otherwise,
+ // return null.
+ inline static const C2Param* ifSuitable(
+ const C2Param* o, size_t size, Type type, size_t flexSize = 0, bool checkDir = true) {
+ if (o == nullptr || o->_mSize < size || (flexSize && ((o->_mSize - size) % flexSize))) {
+ return nullptr;
+ } else if (checkDir) {
+ return o->_mIndex.type() == type.mIndex ? o : nullptr;
+ } else if (o->_mIndex.isGlobal()) {
+ return nullptr;
+ } else {
+ return ((o->_mIndex.type() ^ type.mIndex) & ~Type::kDirMask) ? nullptr : o;
+ }
+ }
+
+ /// base constructor
+ inline C2Param(uint32_t paramSize, Index paramIndex)
+ : _mSize(paramSize),
+ _mIndex(paramIndex) {
+ if (paramSize > sizeof(C2Param)) {
+ memset(this + 1, 0, paramSize - sizeof(C2Param));
+ }
+ }
+
+ /// base constructor with stream set
+ inline C2Param(uint32_t paramSize, Index paramIndex, unsigned stream)
+ : _mSize(paramSize),
+ _mIndex(paramIndex | Index::makeStreamId(stream)) {
+ if (paramSize > sizeof(C2Param)) {
+ memset(this + 1, 0, paramSize - sizeof(C2Param));
+ }
+ if (!forStream()) {
+ invalidate();
+ }
+ }
+
+private:
+ friend struct _C2ParamInspector; // for testing
+
+ /// returns the base type: the index for the underlying struct (for testing
+ /// as this can be gotten by the baseIndex enum)
+ inline uint32_t _baseIndex() const { return _mIndex.baseIndex(); }
+
+ /// returns true iff |o| has the same size and index as this. This performs the
+ /// basic check for equality.
+ inline bool equals(const C2Param &o) const {
+ return _mSize == o._mSize && _mIndex == o._mIndex;
+ }
+
+ uint32_t _mSize;
+ Index _mIndex;
+};
+
+/// \ingroup internal
+/// allow C2Params access to private methods, e.g. constructors
+#define C2PARAM_MAKE_FRIENDS \
+ template<typename U, typename S, int I, class F> friend struct C2GlobalParam; \
+ template<typename U, typename S, int I, class F> friend struct C2PortParam; \
+ template<typename U, typename S, int I, class F> friend struct C2StreamParam; \
+
+/**
+ * Setting base structure for component method signatures. Wrap constructors.
+ */
+struct C2Setting : public C2Param {
+protected:
+ template<typename ...Args>
+ inline C2Setting(const Args(&... args)) : C2Param(args...) { }
+public: // TODO
+ enum : uint32_t { indexFlags = Type::kTypeSetting };
+};
+
+/**
+ * Tuning base structure for component method signatures. Wrap constructors.
+ */
+struct C2Tuning : public C2Setting {
+protected:
+ template<typename ...Args>
+ inline C2Tuning(const Args(&... args)) : C2Setting(args...) { }
+public: // TODO
+ enum : uint32_t { indexFlags = Type::kTypeTuning };
+};
+
+/**
+ * Info base structure for component method signatures. Wrap constructors.
+ */
+struct C2Info : public C2Param {
+protected:
+ template<typename ...Args>
+ inline C2Info(const Args(&... args)) : C2Param(args...) { }
+public: // TODO
+ enum : uint32_t { indexFlags = Type::kTypeInfo };
+};
+
+/**
+ * Structure uniquely specifying a field in an arbitrary structure.
+ *
+ * \note This structure is used differently in C2FieldDescriptor to
+ * identify array fields, such that _mSize is the size of each element. This is
+ * because the field descriptor contains the array-length, and we want to keep
+ * a relevant element size for variable length arrays.
+ */
+struct _C2FieldId {
+//public:
+ /**
+ * Constructor used for C2FieldDescriptor that removes the array extent.
+ *
+ * \param[in] offset pointer to the field in an object at address 0.
+ */
+ template<typename T, class B=typename std::remove_extent<T>::type>
+ inline _C2FieldId(T* offset)
+ : // offset is from "0" so will fit on 32-bits
+ _mOffset((uint32_t)(uintptr_t)(offset)),
+ _mSize(sizeof(B)) { }
+
+ /**
+ * Direct constructor from offset and size.
+ *
+ * \param[in] offset offset of the field.
+ * \param[in] size size of the field.
+ */
+ inline _C2FieldId(size_t offset, size_t size)
+ : _mOffset(offset), _mSize(size) {}
+
+ /**
+ * Constructor used to identify a field in an object.
+ *
+     * \tparam U type of the object that contains this field. This is needed in case the
+     *        field is in an (inherited) base class, in which case T will be that base class.
+     * \param[in] pm member pointer to the field
+ */
+ template<typename R, typename T, typename U, typename B=typename std::remove_extent<R>::type>
+ inline _C2FieldId(U *, R T::* pm)
+ : _mOffset((uint32_t)(uintptr_t)(&(((U*)256)->*pm)) - 256u),
+ _mSize(sizeof(B)) { }
+
+ /**
+ * Constructor used to identify a field in an object.
+ *
+     * \tparam T type of the object that contains this field
+     * \param[in] pm member pointer to the field
+ */
+ template<typename R, typename T, typename B=typename std::remove_extent<R>::type>
+ inline _C2FieldId(R T::* pm)
+ : _mOffset((uint32_t)(uintptr_t)(&(((T*)0)->*pm))),
+ _mSize(sizeof(B)) { }
+
+ inline bool operator==(const _C2FieldId &other) const {
+ return _mOffset == other._mOffset && _mSize == other._mSize;
+ }
+
+ inline bool operator<(const _C2FieldId &other) const {
+ return _mOffset < other._mOffset ||
+ // NOTE: order parent structure before sub field
+ (_mOffset == other._mOffset && _mSize > other._mSize);
+ }
+
+ DEFINE_OTHER_COMPARISON_OPERATORS(_C2FieldId)
+
+#if 0
+ inline uint32_t offset() const { return _mOffset; }
+ inline uint32_t size() const { return _mSize; }
+#endif
+
+#if defined(FRIEND_TEST)
+ friend void PrintTo(const _C2FieldId &d, ::std::ostream*);
+#endif
+
+private:
+ uint32_t _mOffset; // offset of field
+ uint32_t _mSize; // size of field
+};
+
+/**
+ * Structure uniquely specifying a field in a configuration
+ */
+struct C2ParamField {
+//public:
+ // TODO: fix what this is for T[] (for now size becomes T[1])
+ template<typename S, typename T>
+ inline C2ParamField(S* param, T* offset)
+ : _mIndex(param->index()),
+ _mFieldId(offset) {}
+
+ template<typename R, typename T, typename U>
+ inline C2ParamField(U *p, R T::* pm) : _mIndex(p->type()), _mFieldId(p, pm) { }
+
+ inline bool operator==(const C2ParamField &other) const {
+ return _mIndex == other._mIndex && _mFieldId == other._mFieldId;
+ }
+
+ inline bool operator<(const C2ParamField &other) const {
+ return _mIndex < other._mIndex ||
+ (_mIndex == other._mIndex && _mFieldId < other._mFieldId);
+ }
+
+ DEFINE_OTHER_COMPARISON_OPERATORS(C2ParamField)
+
+private:
+ C2Param::Index _mIndex;
+ _C2FieldId _mFieldId;
+};
+
+/**
+ * A shared (union) representation of numeric values
+ */
+class C2Value {
+public:
+ /// A union of supported primitive types.
+ union Primitive {
+ int32_t i32; ///< int32_t value
+ uint32_t u32; ///< uint32_t value
+ int64_t i64; ///< int64_t value
+ uint64_t u64; ///< uint64_t value
+ float fp; ///< float value
+
+ // constructors - implicit
+ Primitive(int32_t value) : i32(value) { }
+ Primitive(uint32_t value) : u32(value) { }
+ Primitive(int64_t value) : i64(value) { }
+ Primitive(uint64_t value) : u64(value) { }
+ Primitive(float value) : fp(value) { }
+
+ Primitive() : u64(0) { }
+
+ private:
+ friend class C2Value;
+ template<typename T> const T &ref() const;
+ };
+
+ enum Type {
+ NO_INIT,
+ INT32,
+ UINT32,
+ INT64,
+ UINT64,
+ FLOAT,
+ };
+
+ template<typename T> static constexpr Type typeFor();
+
+ // constructors - implicit
+ template<typename T>
+ C2Value(T value) : mType(typeFor<T>()), mValue(value) { }
+
+ C2Value() : mType(NO_INIT) { }
+
+ inline Type type() const { return mType; }
+
+ template<typename T>
+ inline bool get(T *value) const {
+ if (mType == typeFor<T>()) {
+ *value = mValue.ref<T>();
+ return true;
+ }
+ return false;
+ }
+
+private:
+ Type mType;
+ Primitive mValue;
+};
+
+template<> const int32_t &C2Value::Primitive::ref<int32_t>() const { return i32; }
+template<> const int64_t &C2Value::Primitive::ref<int64_t>() const { return i64; }
+template<> const uint32_t &C2Value::Primitive::ref<uint32_t>() const { return u32; }
+template<> const uint64_t &C2Value::Primitive::ref<uint64_t>() const { return u64; }
+template<> const float &C2Value::Primitive::ref<float>() const { return fp; }
+
+template<> constexpr C2Value::Type C2Value::typeFor<int32_t>() { return INT32; }
+template<> constexpr C2Value::Type C2Value::typeFor<int64_t>() { return INT64; }
+template<> constexpr C2Value::Type C2Value::typeFor<uint32_t>() { return UINT32; }
+template<> constexpr C2Value::Type C2Value::typeFor<uint64_t>() { return UINT64; }
+template<> constexpr C2Value::Type C2Value::typeFor<float>() { return FLOAT; }
+
+/**
+ * field descriptor. A field is uniquely defined by an index into a parameter.
+ * (Note: Stream-id is not captured as a field.)
+ *
+ * Ordering of fields is by offset. In case of structures, it is depth first,
+ * with a structure taking an index just before and in addition to its members.
+ */
+struct C2FieldDescriptor {
+//public:
+ /** field types and flags
+ * \note: only 32-bit and 64-bit fields are supported (e.g. no boolean, as that
+ * is represented using INT32).
+ */
+ enum Type : uint32_t {
+ // primitive types
+ INT32 = C2Value::INT32, ///< 32-bit signed integer
+ UINT32 = C2Value::UINT32, ///< 32-bit unsigned integer
+ INT64 = C2Value::INT64, ///< 64-bit signed integer
+        UINT64  = C2Value::UINT64,  ///< 64-bit unsigned integer
+ FLOAT = C2Value::FLOAT, ///< 32-bit floating point
+
+ // array types
+ STRING = 0x100, ///< fixed-size string (POD)
+ BLOB, ///< blob. Blobs have no sub-elements and can be thought of as byte arrays;
+ ///< however, bytes cannot be individually addressed by clients.
+
+ // complex types
+ STRUCT_FLAG = 0x10000, ///< structs. Marked with this flag in addition to their baseIndex.
+ };
+
+ typedef std::pair<C2String, C2Value::Primitive> named_value_type;
+ typedef std::vector<const named_value_type> named_values_type;
+ //typedef std::pair<std::vector<C2String>, std::vector<C2Value::Primitive>> named_values_type;
+
+ /**
+ * Template specialization that returns the named values for a type.
+ *
+ * \todo hide from client.
+ *
+ * \return a vector of name-value pairs.
+ */
+ template<typename B>
+ static named_values_type namedValuesFor(const B &);
+
+ inline C2FieldDescriptor(uint32_t type, uint32_t length, C2StringLiteral name, size_t offset, size_t size)
+ : _mType((Type)type), _mLength(length), _mName(name), _mFieldId(offset, size) { }
+
+ template<typename T, class B=typename std::remove_extent<T>::type>
+ inline C2FieldDescriptor(const T* offset, const char *name)
+ : _mType(this->getType((B*)nullptr)),
+ _mLength(std::is_array<T>::value ? std::extent<T>::value : 1),
+ _mName(name),
+ _mNamedValues(namedValuesFor(*(B*)0)),
+ _mFieldId(offset) {}
+
+/*
+ template<typename T, typename B=typename std::remove_extent<T>::type>
+ inline C2FieldDescriptor<T, B, false>(T* offset, const char *name)
+ : _mType(this->getType((B*)nullptr)),
+ _mLength(std::is_array<T>::value ? std::extent<T>::value : 1),
+ _mName(name),
+ _mFieldId(offset) {}
+*/
+
+ /// \deprecated
+ template<typename T, typename S, class B=typename std::remove_extent<T>::type>
+ constexpr inline C2FieldDescriptor(S*, T S::* field, const char *name)
+ : _mType(this->getType((B*)nullptr)),
+ _mLength(std::is_array<T>::value ? std::extent<T>::value : 1),
+ _mName(name),
+ _mFieldId(&(((S*)0)->*field)) {}
+
+ /// returns the type of this field
+ inline Type type() const { return _mType; }
+ /// returns the length of the field in case it is an array. Returns 0 for
+ /// T[] arrays, returns 1 for T[1] arrays as well as if the field is not an array.
+ inline size_t length() const { return _mLength; }
+ /// returns the name of the field
+ inline C2StringLiteral name() const { return _mName; }
+
+ const named_values_type &namedValues() const { return _mNamedValues; }
+
+#if defined(FRIEND_TEST)
+ friend void PrintTo(const C2FieldDescriptor &, ::std::ostream*);
+ friend bool operator==(const C2FieldDescriptor &, const C2FieldDescriptor &);
+ FRIEND_TEST(C2ParamTest_ParamFieldList, VerifyStruct);
+#endif
+
+private:
+ const Type _mType;
+ const uint32_t _mLength; // the last member can be arbitrary length if it is T[] array,
+ // extending to the end of the parameter (this is marked with
+ // 0). T[0]-s are not fields.
+ const C2StringLiteral _mName;
+ const named_values_type _mNamedValues;
+
+ const _C2FieldId _mFieldId; // field identifier (offset and size)
+
+ // NOTE: We do not capture default value(s) here as that may depend on the component.
+ // NOTE: We also do not capture bestEffort, as 1) this should be true for most fields,
+ // 2) this is at parameter granularity.
+
+ // type resolution
+ inline static Type getType(int32_t*) { return INT32; }
+ inline static Type getType(uint32_t*) { return UINT32; }
+ inline static Type getType(int64_t*) { return INT64; }
+ inline static Type getType(uint64_t*) { return UINT64; }
+ inline static Type getType(float*) { return FLOAT; }
+ inline static Type getType(char*) { return STRING; }
+ inline static Type getType(uint8_t*) { return BLOB; }
+
+ template<typename T,
+ class=typename std::enable_if<std::is_enum<T>::value>::type>
+ inline static Type getType(T*) {
+ typename std::underlying_type<T>::type underlying(0);
+ return getType(&underlying);
+ }
+
+ // verify C2Struct by having a fieldList and a baseIndex.
+ template<typename T,
+ class=decltype(T::baseIndex + 1), class=decltype(T::fieldList)>
+ inline static Type getType(T*) {
+ static_assert(!std::is_base_of<C2Param, T>::value, "cannot use C2Params as fields");
+ return (Type)(T::baseIndex | STRUCT_FLAG);
+ }
+};
+
+#define DEFINE_NO_NAMED_VALUES_FOR(type) \
+template<> inline C2FieldDescriptor::named_values_type C2FieldDescriptor::namedValuesFor(const type &) { \
+ return named_values_type(); \
+}
+
+// We cannot subtype constructor for enumerated types so instead define no named values for
+// non-enumerated integral types.
+DEFINE_NO_NAMED_VALUES_FOR(int32_t)
+DEFINE_NO_NAMED_VALUES_FOR(uint32_t)
+DEFINE_NO_NAMED_VALUES_FOR(int64_t)
+DEFINE_NO_NAMED_VALUES_FOR(uint64_t)
+DEFINE_NO_NAMED_VALUES_FOR(uint8_t)
+DEFINE_NO_NAMED_VALUES_FOR(char)
+DEFINE_NO_NAMED_VALUES_FOR(float)
+
+/**
+ * Describes the fields of a structure.
+ */
+struct C2StructDescriptor {
+public:
+ /// Returns the parameter type
+ inline C2Param::BaseIndex baseIndex() const { return _mType.baseIndex(); }
+
+ // Returns the number of fields in this param (not counting any recursive fields).
+ // Must be at least 1 for valid params.
+ inline size_t numFields() const { return _mFields.size(); }
+
+ // Returns the list of immediate fields (not counting any recursive fields).
+ typedef std::vector<const C2FieldDescriptor>::const_iterator field_iterator;
+ inline field_iterator cbegin() const { return _mFields.cbegin(); }
+ inline field_iterator cend() const { return _mFields.cend(); }
+
+ // only supplying const iterator - but these are needed for range based loops
+ inline field_iterator begin() const { return _mFields.cbegin(); }
+ inline field_iterator end() const { return _mFields.cend(); }
+
+ template<typename T>
+ inline C2StructDescriptor(T*)
+ : C2StructDescriptor(T::baseIndex, T::fieldList) { }
+
+ inline C2StructDescriptor(
+ C2Param::BaseIndex type,
+ std::initializer_list<const C2FieldDescriptor> fields)
+ : _mType(type), _mFields(fields) { }
+
+private:
+ const C2Param::BaseIndex _mType;
+ const std::vector<const C2FieldDescriptor> _mFields;
+};
+
+/**
+ * Describes parameters for a component.
+ */
+struct C2ParamDescriptor {
+public:
+ /**
+ * Returns whether setting this param is required to configure this component.
+ * This can only be true for builtin params for platform-defined components (e.g. video and
+ * audio encoders/decoders, video/audio filters).
+ * For vendor-defined components, it can be true even for vendor-defined params,
+ * but it is not recommended, in case the component becomes platform-defined.
+ */
+ inline bool isRequired() const { return _mIsRequired; }
+
+ /**
+ * Returns whether this parameter is persistent. This is always true for C2Tuning and C2Setting,
+ * but may be false for C2Info. If true, this parameter persists across frames and applies to
+ * the current and subsequent frames. If false, this C2Info parameter only applies to the
+ * current frame and is not assumed to have the same value (or even be present) on subsequent
+ * frames, unless it is specified for those frames.
+ */
+ inline bool isPersistent() const { return _mIsPersistent; }
+
+ /// Returns the name of this param.
+ /// This defaults to the underlying C2Struct's name, but could be altered for a component.
+ inline C2String name() const { return _mName; }
+
+ /// Returns the parameter type
+ /// \todo fix this
+ inline C2Param::Type type() const { return _mType; }
+
+ template<typename T>
+ inline C2ParamDescriptor(bool isRequired, C2StringLiteral name, const T*)
+ : _mIsRequired(isRequired),
+ _mIsPersistent(true),
+ _mName(name),
+ _mType(T::typeIndex) { }
+
+ inline C2ParamDescriptor(
+ bool isRequired, C2StringLiteral name, C2Param::Type type)
+ : _mIsRequired(isRequired),
+ _mIsPersistent(true),
+ _mName(name),
+ _mType(type) { }
+
+private:
+ const bool _mIsRequired;
+ const bool _mIsPersistent;
+ const C2String _mName;
+ const C2Param::Type _mType;
+};
+
+/// \ingroup internal
+/// Define a structure without baseIndex.
+#define DEFINE_C2STRUCT_NO_BASE(name) \
+public: \
+ typedef C2##name##Struct _type; /**< type name shorthand */ \
+ const static std::initializer_list<const C2FieldDescriptor> fieldList; /**< structure fields */
+
+/// Define a structure with matching baseIndex.
+#define DEFINE_C2STRUCT(name) \
+public: \
+ enum : uint32_t { baseIndex = kParamIndex##name }; \
+ DEFINE_C2STRUCT_NO_BASE(name)
+
+/// Define a flexible structure with matching baseIndex.
+#define DEFINE_FLEX_C2STRUCT(name, flexMember) \
+public: \
+ FLEX(C2##name##Struct, flexMember) \
+ enum : uint32_t { baseIndex = kParamIndex##name | C2Param::BaseIndex::_kFlexibleFlag }; \
+ DEFINE_C2STRUCT_NO_BASE(name)
+
+/// \ingroup internal
+/// Describe a structure of a templated structure.
+#define DESCRIBE_TEMPLATED_C2STRUCT(strukt, list) \
+ template<> \
+ const std::initializer_list<const C2FieldDescriptor> strukt::fieldList = list;
+
+/// \deprecated
+/// Describe the fields of a structure using an initializer list.
+#define DESCRIBE_C2STRUCT(name, list) \
+ const std::initializer_list<const C2FieldDescriptor> C2##name##Struct::fieldList = list;
+
+/**
+ * Describe a field of a structure.
+ * These must be in order.
+ *
+ * There are two ways to use this macro:
+ *
+ * ~~~~~~~~~~~~~ (.cpp)
+ * struct C2VideoWidthStruct {
+ * int32_t mWidth;
+ * C2VideoWidthStruct() {} // optional default constructor
+ * C2VideoWidthStruct(int32_t _width) : mWidth(_width) {}
+ *
+ * DEFINE_AND_DESCRIBE_C2STRUCT(VideoWidth)
+ * C2FIELD(mWidth, "width")
+ * };
+ * ~~~~~~~~~~~~~
+ *
+ * ~~~~~~~~~~~~~ (.cpp)
+ * struct C2VideoWidthStruct {
+ * int32_t mWidth;
+ * C2VideoWidthStruct() = default; // optional default constructor
+ * C2VideoWidthStruct(int32_t _width) : mWidth(_width) {}
+ *
+ * DEFINE_C2STRUCT(VideoWidth)
+ * } C2_PACK;
+ *
+ * DESCRIBE_C2STRUCT(VideoWidth, {
+ * C2FIELD(mWidth, "width")
+ * })
+ * ~~~~~~~~~~~~~
+ *
+ * For flexible structures (those ending in T[]), use the flexible macros:
+ *
+ * ~~~~~~~~~~~~~ (.cpp)
+ * struct C2VideoFlexWidthsStruct {
+ * int32_t mWidths[];
+ * C2VideoFlexWidthsStruct(); // must have a default constructor
+ *
+ * private:
+ * // may have private constructors taking number of widths as the first argument
+ * // This is used by the C2Param factory methods, e.g.
+ * // C2VideoFlexWidthsGlobalParam::alloc_unique(size_t, int32_t);
+ * C2VideoFlexWidthsStruct(size_t flexCount, int32_t value) {
+ * for (size_t i = 0; i < flexCount; ++i) {
+ * mWidths[i] = value;
+ * }
+ * }
+ *
+ * // If the last argument is T[N] or std::initializer_list<T>, the flexCount will
+ * // be automatically calculated and passed by the C2Param factory methods, e.g.
+ * // int widths[] = { 1, 2, 3 };
+ * // C2VideoFlexWidthsGlobalParam::alloc_unique(widths);
+ * template<unsigned N>
+ * C2VideoFlexWidthsStruct(size_t flexCount, const int32_t(&init)[N]) {
+ * for (size_t i = 0; i < flexCount; ++i) {
+ * mWidths[i] = init[i];
+ * }
+ * }
+ *
+ * DEFINE_AND_DESCRIBE_FLEX_C2STRUCT(VideoFlexWidths, mWidths)
+ * C2FIELD(mWidths, "widths")
+ * };
+ * ~~~~~~~~~~~~~
+ *
+ * ~~~~~~~~~~~~~ (.cpp)
+ * struct C2VideoFlexWidthsStruct {
+ * int32_t mWidths[];
+ * C2VideoFlexWidthsStruct(); // must have a default constructor
+ *
+ * DEFINE_FLEX_C2STRUCT(VideoFlexWidths, mWidths)
+ * } C2_PACK;
+ *
+ * DESCRIBE_C2STRUCT(VideoFlexWidths, {
+ * C2FIELD(mWidths, "widths")
+ * })
+ * ~~~~~~~~~~~~~
+ *
+ */
+#define C2FIELD(member, name) \
+ C2FieldDescriptor(&((_type*)(nullptr))->member, name),
+
+/// \deprecated
+#define C2SOLE_FIELD(member, name) \
+ C2FieldDescriptor(&_type::member, name, 0)
+
+/// Define a structure with matching baseIndex and start describing its fields.
+/// This must be at the end of the structure definition.
+#define DEFINE_AND_DESCRIBE_C2STRUCT(name) \
+ DEFINE_C2STRUCT(name) } C2_PACK; \
+ const std::initializer_list<const C2FieldDescriptor> C2##name##Struct::fieldList = {
+
+/// Define a flexible structure with matching baseIndex and start describing its fields.
+/// This must be at the end of the structure definition.
+#define DEFINE_AND_DESCRIBE_FLEX_C2STRUCT(name, flexMember) \
+ DEFINE_FLEX_C2STRUCT(name, flexMember) } C2_PACK; \
+ const std::initializer_list<const C2FieldDescriptor> C2##name##Struct::fieldList = {
+
+/**
+ * Parameter reflector class.
+ *
+ * This class centralizes the description of parameter structures. This can be shared
+ * by multiple components as describing a parameter does not imply support of that
+ * parameter. However, each supported parameter and any dependent structures within
+ * must be described by the parameter reflector provided by a component.
+ */
+class C2ParamReflector {
+public:
+ /**
+ * Describes a parameter structure.
+ *
+ * \param[in] paramIndex the base index of the parameter structure
+ *
+ * \return the description of the parameter structure
+ * \retval nullptr if the parameter is not supported by this reflector
+ *
+     * This method shall not block and shall return immediately.
+ *
+ * \note this class does not take a set of indices because we would then prefer
+ * to also return any dependent structures, and we don't want this logic to be
+ * repeated in each reflector. Alternately, this could just return a map of all
+ * descriptions, but we want to conserve memory if client only wants the description
+ * of a few indices.
+ */
+ virtual std::unique_ptr<C2StructDescriptor> describe(C2Param::BaseIndex paramIndex) = 0;
+
+protected:
+ virtual ~C2ParamReflector() = default;
+};
+
+/**
+ * A usable set of supported values for a field.
+ *
+ * This can be either a range or a set of values. The range can be linear or geometric with a
+ * clear minimum and maximum value, and can have an optional step size or geometric ratio. Values
+ * can optionally represent flags.
+ *
+ * \note Do not use flags to represent bitfields. Use individual values or separate fields instead.
+ */
+template<typename T>
+struct C2TypedFieldSupportedValues {
+//public:
+ enum Type {
+ RANGE, ///< a numeric range that can be continuous or discrete
+ VALUES, ///< a list of values
+ FLAGS ///< a list of flags that can be OR-ed
+ };
+
+ Type type;
+
+ struct {
+ T min;
+ T max;
+ T step;
+ T nom;
+ T denom;
+ } range;
+ std::vector<T> values;
+
+ C2TypedFieldSupportedValues(T min, T max, T step = T(std::is_floating_point<T>::value ? 0 : 1))
+ : type(RANGE),
+ range{min, max, step, (T)1, (T)1} { }
+
+ C2TypedFieldSupportedValues(T min, T max, T nom, T den) :
+ type(RANGE),
+ range{min, max, (T)0, nom, den} { }
+
+ C2TypedFieldSupportedValues(bool flags, std::initializer_list<T> list) :
+ type(flags ? FLAGS : VALUES),
+ values(list) {}
+};
+
+/**
+ * Generic supported values for a field.
+ *
+ * This can be either a range or a set of values. The range can be linear or geometric with a
+ * clear minimum and maximum value, and can have an optional step size or geometric ratio. Values
+ * can optionally represent flags.
+ *
+ * \note Do not use flags to represent bitfields. Use individual values or separate fields instead.
+ */
+struct C2FieldSupportedValues {
+//public:
+ enum Type {
+ RANGE, ///< a numeric range that can be continuous or discrete
+ VALUES, ///< a list of values
+ FLAGS ///< a list of flags that can be OR-ed
+ };
+
+ Type type;
+
+ typedef C2Value::Primitive Primitive;
+
+ struct {
+ Primitive min;
+ Primitive max;
+ Primitive step;
+ Primitive nom;
+ Primitive denom;
+ } range;
+ std::vector<Primitive> values;
+
+ template<typename T>
+ C2FieldSupportedValues(T min, T max, T step = T(std::is_floating_point<T>::value ? 0 : 1))
+ : type(RANGE),
+ range{min, max, step, (T)1, (T)1} { }
+
+ template<typename T>
+ C2FieldSupportedValues(T min, T max, T nom, T den) :
+ type(RANGE),
+ range{min, max, (T)0, nom, den} { }
+
+ template<typename T>
+ C2FieldSupportedValues(bool flags, std::initializer_list<T> list)
+ : type(flags ? FLAGS : VALUES),
+ range{(T)0, (T)0, (T)0, (T)0, (T)0} {
+ for(T value : list) {
+ values.emplace_back(value);
+ }
+ }
+
+ template<typename T, typename E=decltype(C2FieldDescriptor::namedValuesFor(*(T*)0))>
+ C2FieldSupportedValues(bool flags, const T*)
+ : type(flags ? FLAGS : VALUES),
+ range{(T)0, (T)0, (T)0, (T)0, (T)0} {
+ C2FieldDescriptor::named_values_type named = C2FieldDescriptor::namedValuesFor(*(T*)0);
+ for (const C2FieldDescriptor::named_value_type &item : named) {
+ values.emplace_back(item.second);
+ }
+ }
+};
+
+/// @}
+
+} // namespace android
+
+#endif // C2PARAM_H_
diff --git a/media/libstagefright/codec2/include/C2ParamDef.h b/media/libstagefright/codec2/include/C2ParamDef.h
new file mode 100644
index 0000000..f369617
--- /dev/null
+++ b/media/libstagefright/codec2/include/C2ParamDef.h
@@ -0,0 +1,901 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** \file
+ * Templates used to declare parameters.
+ */
+#ifndef C2PARAM_DEF_H_
+#define C2PARAM_DEF_H_
+
+#include <type_traits>
+
+#include <C2Param.h>
+
+namespace android {
+
+/// \addtogroup Parameters
+/// @{
+
+/* ======================== UTILITY TEMPLATES FOR PARAMETER DEFINITIONS ======================== */
+
+/// \addtogroup internal
+/// @{
+
+/// Helper class that checks if a type has equality and inequality operators.
+struct C2_HIDE _C2Comparable_impl
+{
+ template<typename S, typename=decltype(S() == S())>
+ static std::true_type __testEQ(int);
+ template<typename>
+ static std::false_type __testEQ(...);
+
+ template<typename S, typename=decltype(S() != S())>
+ static std::true_type __testNE(int);
+ template<typename>
+ static std::false_type __testNE(...);
+};
+
+/**
+ * Helper template that returns if a type has equality and inequality operators.
+ *
+ * Use as _C2Comparable<typename S>::value.
+ */
+template<typename S>
+struct C2_HIDE _C2Comparable
+ : public std::integral_constant<bool, decltype(_C2Comparable_impl::__testEQ<S>(0))::value
+ || decltype(_C2Comparable_impl::__testNE<S>(0))::value> {
+};
+
+/// Helper class that checks if a type has a baseIndex constant.
+struct C2_HIDE _C2BaseIndexHelper_impl
+{
+ template<typename S, int=S::baseIndex>
+ static std::true_type __testBaseIndex(int);
+ template<typename>
+ static std::false_type __testBaseIndex(...);
+};
+
+/// Helper template that verifies a type's baseIndex and creates it if the type does not have one.
+template<typename S, int BaseIndex,
+ bool HasBase=decltype(_C2BaseIndexHelper_impl::__testBaseIndex<S>(0))::value>
+struct C2_HIDE C2BaseIndexOverride {
+ // TODO: what if we allow structs without baseIndex?
+ static_assert(BaseIndex == S::baseIndex, "baseIndex differs from structure");
+};
+
+/// Specialization for types without a baseIndex.
+template<typename S, int BaseIndex>
+struct C2_HIDE C2BaseIndexOverride<S, BaseIndex, false> {
+public:
+ enum : uint32_t {
+ baseIndex = BaseIndex, ///< baseIndex override.
+ };
+};
+
+/// Helper template that adds a baseIndex to a type if it does not have one.
+template<typename S, int BaseIndex>
+struct C2_HIDE C2AddBaseIndex : public S, public C2BaseIndexOverride<S, BaseIndex> {};
+
+/**
+ * \brief Helper class to check struct requirements for parameters.
+ *
+ * Features:
+ * - verify default constructor, no virtual methods, and no equality operators.
+ * - expose typeIndex, and non-flex flexSize.
+ */
+template<typename S, int BaseIndex, unsigned TypeIndex>
+struct C2_HIDE C2StructCheck {
+ static_assert(
+ std::is_default_constructible<S>::value, "C2 structure must have default constructor");
+ static_assert(!std::is_polymorphic<S>::value, "C2 structure must not have virtual methods");
+ static_assert(!_C2Comparable<S>::value, "C2 structure must not have operator== or !=");
+
+public:
+ enum : uint32_t {
+ typeIndex = BaseIndex | TypeIndex
+ };
+
+protected:
+ enum : uint32_t {
+ flexSize = 0, // TODO: is this still needed? this may be confusing.
+ };
+};
+
+/// Helper class that checks if a type has an integer flexSize member.
+struct C2_HIDE _C2Flexible_impl {
+ /// specialization for types that have a flexSize member
+ template<typename S, unsigned=S::flexSize>
+ static std::true_type __testFlexSize(int);
+ template<typename>
+ static std::false_type __testFlexSize(...);
+};
+
+/// Helper template that returns if a type has an integer flexSize member.
+template<typename S>
+struct C2_HIDE _C2Flexible
+ : public std::integral_constant<bool, decltype(_C2Flexible_impl::__testFlexSize<S>(0))::value> {
+};
+
+/// Macro to test if a type is flexible (has a flexSize member).
+#define IF_FLEXIBLE(S) ENABLE_IF(_C2Flexible<S>::value)
+/// Shorthand for std::enable_if
+#define ENABLE_IF(cond) typename std::enable_if<cond>::type
+
+/// Helper template that exposes the flexible subtype of a struct.
+template<typename S, typename E=void>
+struct C2_HIDE _C2FlexHelper {
+ typedef void flexType;
+ enum : uint32_t { flexSize = 0 };
+};
+
+/// Specialization for flexible types.
+template<typename S>
+struct C2_HIDE _C2FlexHelper<S,
+ typename std::enable_if<!std::is_void<typename S::flexMemberType>::value>::type> {
+ typedef typename _C2FlexHelper<typename S::flexMemberType>::flexType flexType;
+ enum : uint32_t { flexSize = _C2FlexHelper<typename S::flexMemberType>::flexSize };
+};
+
+/// Specialization for flex arrays.
+template<typename S>
+struct C2_HIDE _C2FlexHelper<S[],
+ typename std::enable_if<std::is_void<typename _C2FlexHelper<S>::flexType>::value>::type> {
+ typedef S flexType;
+ enum : uint32_t { flexSize = sizeof(S) };
+};
+
+/**
+ * \brief Helper class to check flexible struct requirements and add common operations.
+ *
+ * Features:
+ * - expose baseIndex and fieldList (this is normally inherited from the struct, but flexible
+ * structs cannot be base classes and thus inherited from)
+ * - disable copy assignment and construction (TODO: this is already done in the FLEX macro for the
+ * flexible struct, so may not be needed here)
+ */
+template<typename S, int BaseIndex, unsigned TypeIndex>
+struct C2_HIDE C2FlexStructCheck : public C2StructCheck<S, BaseIndex, TypeIndex> {
+public:
+ enum : uint32_t {
+ /// \hideinitializer
+ baseIndex = BaseIndex | C2Param::BaseIndex::_kFlexibleFlag, ///< flexible struct base-index
+ };
+
+ const static std::initializer_list<const C2FieldDescriptor> fieldList; // TODO assign here
+
+ // default constructor needed because of the disabled copy constructor
+ inline C2FlexStructCheck() = default;
+
+protected:
+ // cannot copy flexible params
+ C2FlexStructCheck(const C2FlexStructCheck<S, BaseIndex, TypeIndex> &) = delete;
+ C2FlexStructCheck& operator= (const C2FlexStructCheck<S, BaseIndex, TypeIndex> &) = delete;
+
+ // constants used for helper methods
+ enum : uint32_t {
+ /// \hideinitializer
+ flexSize = _C2FlexHelper<S>::flexSize, ///< size of flexible type
+ /// \hideinitializer
+ maxSize = (uint32_t)std::min((size_t)UINT32_MAX, SIZE_MAX), // TODO: is this always u32 max?
+ /// \hideinitializer
+ baseSize = sizeof(S) + sizeof(C2Param), ///< size of the base param
+ };
+
+ /// returns the allocated size of this param with flexCount, or 0 if it would overflow.
+ inline static size_t calcSize(size_t flexCount, size_t size = baseSize) {
+ if (flexCount <= (maxSize - size) / S::flexSize) {
+ return size + S::flexSize * flexCount;
+ }
+ return 0;
+ }
+
+ /// dynamic new operator usable for params of type S
+ inline void* operator new(size_t size, size_t flexCount) noexcept {
+ // TODO: assert(size == baseSize);
+ size = calcSize(flexCount, size);
+ if (size > 0) {
+ return ::operator new(size);
+ }
+ return nullptr;
+ }
+};
+
+// TODO: this probably does not work.
+/// Expose fieldList from subClass;
+template<typename S, int BaseIndex, unsigned TypeIndex>
+const std::initializer_list<const C2FieldDescriptor> C2FlexStructCheck<S, BaseIndex, TypeIndex>::fieldList = S::fieldList;
+
+/// Define From() cast operators for params.
+#define DEFINE_CAST_OPERATORS(_type) \
+ inline static _type* From(C2Param *other) { \
+ return (_type*)C2Param::ifSuitable( \
+ other, sizeof(_type),_type::typeIndex, _type::flexSize, \
+ (_type::typeIndex & T::Index::kDirUndefined) != T::Index::kDirUndefined); \
+ } \
+ inline static const _type* From(const C2Param *other) { \
+ return const_cast<const _type*>(From(const_cast<C2Param *>(other))); \
+ } \
+ inline static _type* From(std::nullptr_t) { return nullptr; } \
+
+/**
+ * Define flexible allocators (alloc_shared or alloc_unique) for flexible params.
+ * - P::alloc_xyz(flexCount, args...): allocate for given flex-count.
+ * - P::alloc_xyz(args..., T[]): allocate for size of (and with) init array.
+ * - P::alloc_xyz(T[]): allocate for size of (and with) init array with no other args.
+ * - P::alloc_xyz(args..., std::initializer_list<T>): allocate for size of (and with) initializer
+ * list.
+ */
+#define DEFINE_FLEXIBLE_ALLOC(_type, S, ptr) \
+ template<typename ...Args> \
+ inline static std::ptr##_ptr<_type> alloc_##ptr(size_t flexCount, const Args(&... args)) { \
+ return std::ptr##_ptr<_type>(new(flexCount) _type(flexCount, args...)); \
+ } \
+ /* NOTE: unfortunately this is not supported by clang yet */ \
+ template<typename ...Args, typename U=typename S::flexType, unsigned N> \
+ inline static std::ptr##_ptr<_type> alloc_##ptr(const Args(&... args), const U(&init)[N]) { \
+ return std::ptr##_ptr<_type>(new(N) _type(N, args..., init)); \
+ } \
+ /* so for now, specialize for no args */ \
+ template<typename U=typename S::flexType, unsigned N> \
+ inline static std::ptr##_ptr<_type> alloc_##ptr(const U(&init)[N]) { \
+ return std::ptr##_ptr<_type>(new(N) _type(N, init)); \
+ } \
+ template<typename ...Args, typename U=typename S::flexType> \
+ inline static std::ptr##_ptr<_type> alloc_##ptr( \
+ const Args(&... args), const std::initializer_list<U> &init) { \
+ return std::ptr##_ptr<_type>(new(init.size()) _type(init.size(), args..., init)); \
+ } \
+
+/**
+ * Define flexible methods alloc_shared, alloc_unique and flexCount.
+ */
+#define DEFINE_FLEXIBLE_METHODS(_type, S) \
+ DEFINE_FLEXIBLE_ALLOC(_type, S, shared) \
+ DEFINE_FLEXIBLE_ALLOC(_type, S, unique) \
+ inline size_t flexCount() const { \
+ static_assert(sizeof(_type) == _type::baseSize, "incorrect baseSize"); \
+ size_t sz = this->size(); \
+ if (sz >= sizeof(_type)) { \
+ return (sz - sizeof(_type)) / _type::flexSize; \
+ } \
+ return 0; \
+ } \
+
+/// Mark flexible member variable and make structure flexible.
+#define FLEX(cls, m) \
+ C2_DO_NOT_COPY(cls) \
+private: \
+ C2PARAM_MAKE_FRIENDS \
+ /* default constructor with flexCount */ \
+ inline cls(size_t) : cls() {} \
+ /** \if 0 */ \
+ template<typename, typename> friend struct _C2FlexHelper; \
+ typedef decltype(m) flexMemberType; \
+public: \
+ /* constexpr static flexMemberType cls::* flexMember = &cls::m; */ \
+ typedef typename _C2FlexHelper<flexMemberType>::flexType flexType; \
+ static_assert(\
+ !std::is_void<flexType>::value, \
+ "member is not flexible, or a flexible array of a flexible type"); \
+ enum : uint32_t { flexSize = _C2FlexHelper<flexMemberType>::flexSize }; \
+ /** \endif */ \
+
+/// @}
+
+/**
+ * Global-parameter template.
+ *
+ * Base template to define a global setting/tuning or info based on a structure and
+ * an optional BaseIndex. Global parameters are not tied to a port (input or output).
+ *
+ * Parameters wrap structures by prepending a (parameter) header. The fields of the wrapped
+ * structure can be accessed directly, and constructors and potential public methods are also
+ * wrapped.
+ *
+ * \tparam T param type C2Setting, C2Tuning or C2Info
+ * \tparam S wrapped structure
+ * \tparam BaseIndex optional base-index override. Must be specified for common/reused structures.
+ */
+template<typename T, typename S, int BaseIndex=S::baseIndex, class Flex=void>
+struct C2_HIDE C2GlobalParam : public T, public S, public C2BaseIndexOverride<S, BaseIndex>,
+ public C2StructCheck<S, BaseIndex, T::indexFlags | T::Type::kDirGlobal> {
+private:
+ typedef C2GlobalParam<T, S, BaseIndex> _type;
+
+public:
+ /// Wrapper around base structure's constructor.
+ template<typename ...Args>
+ inline C2GlobalParam(const Args(&... args)) : T(sizeof(_type), _type::typeIndex), S(args...) { }
+
+ DEFINE_CAST_OPERATORS(_type)
+};
+
+/**
+ * Global-parameter template for flexible structures.
+ *
+ * Base template to define a global setting/tuning or info based on a flexible structure and
+ * an optional BaseIndex. Global parameters are not tied to a port (input or output).
+ *
+ * \tparam T param type C2Setting, C2Tuning or C2Info
+ * \tparam S wrapped flexible structure
+ * \tparam BaseIndex optional base-index override. Must be specified for common/reused structures.
+ *
+ * Parameters wrap structures by prepending a (parameter) header. The fields and methods of flexible
+ * structures can be accessed via the m member variable; however, the constructors of the structure
+ * are wrapped directly. (This is because flexible types cannot be subclassed.)
+ */
+template<typename T, typename S, int BaseIndex>
+struct C2_HIDE C2GlobalParam<T, S, BaseIndex, IF_FLEXIBLE(S)>
+ : public T, public C2FlexStructCheck<S, BaseIndex, T::indexFlags | T::Type::kDirGlobal> {
+private:
+ typedef C2GlobalParam<T, S, BaseIndex> _type;
+
+ /// Wrapper around base structure's constructor.
+ template<typename ...Args>
+ inline C2GlobalParam(size_t flexCount, const Args(&... args))
+ : T(_type::calcSize(flexCount), _type::typeIndex), m(flexCount, args...) { }
+
+public:
+ S m; ///< wrapped flexible structure
+
+ DEFINE_FLEXIBLE_METHODS(_type, S)
+ DEFINE_CAST_OPERATORS(_type)
+};
+
+/**
+ * Port-parameter template.
+ *
+ * Base template to define a port setting/tuning or info based on a structure and
+ * an optional BaseIndex. Port parameters are tied to a port (input or output), but not to a
+ * specific stream.
+ *
+ * \tparam T param type C2Setting, C2Tuning or C2Info
+ * \tparam S wrapped structure
+ * \tparam BaseIndex optional base-index override. Must be specified for common/reused structures.
+ *
+ * Parameters wrap structures by prepending a (parameter) header. The fields of the wrapped
+ * structure can be accessed directly, and constructors and potential public methods are also
+ * wrapped.
+ *
+ * There are 3 flavors of port parameters: unspecified, input and output. Parameters with
+ * unspecified port expose a setPort method, and add an initial port parameter to the constructor.
+ */
+template<typename T, typename S, int BaseIndex=S::baseIndex, class Flex=void>
+struct C2_HIDE C2PortParam : public T, public S, public C2BaseIndexOverride<S, BaseIndex>,
+ private C2StructCheck<S, BaseIndex, T::indexFlags | T::Index::kDirUndefined> {
+private:
+ typedef C2PortParam<T, S, BaseIndex> _type;
+
+public:
+ /// Default constructor.
+ inline C2PortParam() : T(sizeof(_type), _type::typeIndex) { }
+ template<typename ...Args>
+ /// Wrapper around base structure's constructor while specifying port/direction.
+ inline C2PortParam(bool _output, const Args(&... args))
+ : T(sizeof(_type), _output ? output::typeIndex : input::typeIndex), S(args...) { }
+ /// Set port/direction.
+ inline void setPort(bool output) { C2Param::setPort(output); }
+
+ DEFINE_CAST_OPERATORS(_type)
+
+ /// Specialization for an input port parameter.
+ struct input : public T, public S, public C2BaseIndexOverride<S, BaseIndex>,
+ public C2StructCheck<S, BaseIndex, T::indexFlags | T::Index::kDirInput> {
+ /// Wrapper around base structure's constructor.
+ template<typename ...Args>
+ inline input(const Args(&... args)) : T(sizeof(_type), input::typeIndex), S(args...) { }
+
+ DEFINE_CAST_OPERATORS(input)
+
+ };
+
+ /// Specialization for an output port parameter.
+ struct output : public T, public S, public C2BaseIndexOverride<S, BaseIndex>,
+ public C2StructCheck<S, BaseIndex, T::indexFlags | T::Index::kDirOutput> {
+ /// Wrapper around base structure's constructor.
+ template<typename ...Args>
+ inline output(const Args(&... args)) : T(sizeof(_type), output::typeIndex), S(args...) { }
+
+ DEFINE_CAST_OPERATORS(output)
+ };
+};
+
+/**
+ * Port-parameter template for flexible structures.
+ *
+ * Base template to define a port setting/tuning or info based on a flexible structure and
+ * an optional BaseIndex. Port parameters are tied to a port (input or output), but not to a
+ * specific stream.
+ *
+ * \tparam T param type C2Setting, C2Tuning or C2Info
+ * \tparam S wrapped flexible structure
+ * \tparam BaseIndex optional base-index override. Must be specified for common/reused structures.
+ *
+ * Parameters wrap structures by prepending a (parameter) header. The fields and methods of flexible
+ * structures can be accessed via the m member variable; however, the constructors of the structure
+ * are wrapped directly. (This is because flexible types cannot be subclassed.)
+ *
+ * There are 3 flavors of port parameters: unspecified, input and output. Parameters with
+ * unspecified port expose a setPort method, and add an initial port parameter to the constructor.
+ */
+template<typename T, typename S, int BaseIndex>
+struct C2_HIDE C2PortParam<T, S, BaseIndex, IF_FLEXIBLE(S)>
+ : public T, public C2FlexStructCheck<S, BaseIndex, T::indexFlags | T::Type::kDirUndefined> {
+private:
+ typedef C2PortParam<T, S, BaseIndex> _type;
+
+ /// Default constructor for basic allocation: new(flexCount) P.
+ inline C2PortParam(size_t flexCount) : T(_type::calcSize(flexCount), _type::typeIndex) { }
+ template<typename ...Args>
+ /// Wrapper around base structure's constructor while also specifying port/direction.
+ inline C2PortParam(size_t flexCount, bool _output, const Args(&... args))
+ : T(_type::calcSize(flexCount), _output ? output::typeIndex : input::typeIndex),
+ m(flexCount, args...) { }
+
+public:
+ /// Set port/direction.
+ inline void setPort(bool output) { C2Param::setPort(output); }
+
+ S m; ///< wrapped flexible structure
+
+ DEFINE_FLEXIBLE_METHODS(_type, S)
+ DEFINE_CAST_OPERATORS(_type)
+
+ /// Specialization for an input port parameter.
+ struct input : public T, public C2BaseIndexOverride<S, BaseIndex>,
+ public C2FlexStructCheck<S, BaseIndex, T::indexFlags | T::Index::kDirInput> {
+ private:
+ /// Wrapper around base structure's constructor while also specifying port/direction.
+ template<typename ...Args>
+ inline input(size_t flexCount, const Args(&... args))
+ : T(_type::calcSize(flexCount), input::typeIndex), m(flexCount, args...) { }
+
+ public:
+ S m; ///< wrapped flexible structure
+
+ DEFINE_FLEXIBLE_METHODS(input, S)
+ DEFINE_CAST_OPERATORS(input)
+ };
+
+ /// Specialization for an output port parameter.
+ struct output : public T, public C2BaseIndexOverride<S, BaseIndex>,
+ public C2FlexStructCheck<S, BaseIndex, T::indexFlags | T::Index::kDirOutput> {
+ private:
+ /// Wrapper around base structure's constructor while also specifying port/direction.
+ template<typename ...Args>
+ inline output(size_t flexCount, const Args(&... args))
+ : T(_type::calcSize(flexCount), output::typeIndex), m(flexCount, args...) { }
+
+ public:
+ S m; ///< wrapped flexible structure
+
+ DEFINE_FLEXIBLE_METHODS(output, S)
+ DEFINE_CAST_OPERATORS(output)
+ };
+};
+
+/**
+ * Stream-parameter template.
+ *
+ * Base template to define a stream setting/tuning or info based on a structure and
+ * an optional BaseIndex. Stream parameters are tied to a specific stream on a port (input or
+ * output).
+ *
+ * \tparam T param type C2Setting, C2Tuning or C2Info
+ * \tparam S wrapped structure
+ * \tparam BaseIndex optional base-index override. Must be specified for common/reused structures.
+ *
+ * Parameters wrap structures by prepending a (parameter) header. The fields of the wrapped
+ * structure can be accessed directly, and constructors and potential public methods are also
+ * wrapped.
+ *
+ * There are 3 flavors of stream parameters: unspecified port, input and output. All of these expose
+ * a setStream method and an extra initial streamID parameter for the constructor. Moreover,
+ * parameters with unspecified port expose a setPort method, and add an additional initial port
+ * parameter to the constructor.
+ */
+template<typename T, typename S, int BaseIndex=S::baseIndex, class Flex=void>
+struct C2_HIDE C2StreamParam : public T, public S, public C2BaseIndexOverride<S, BaseIndex>,
+ private C2StructCheck<S, BaseIndex,
+ T::indexFlags | T::Index::kStreamFlag | T::Index::kDirUndefined> {
+private:
+ typedef C2StreamParam<T, S, BaseIndex> _type;
+
+public:
+ /// Default constructor. Port/direction and stream-ID is undefined.
+ inline C2StreamParam() : T(sizeof(_type), _type::typeIndex) { }
+ /// Wrapper around base structure's constructor while also specifying port/direction and
+ /// stream-ID.
+ template<typename ...Args>
+ inline C2StreamParam(bool _output, unsigned stream, const Args(&... args))
+ : T(sizeof(_type), _output ? output::typeIndex : input::typeIndex, stream),
+ S(args...) { }
+ /// Set port/direction.
+ inline void setPort(bool output) { C2Param::setPort(output); }
+ /// Set stream-id. \retval true if the stream-id was successfully set.
+ inline bool setStream(unsigned stream) { return C2Param::setStream(stream); }
+
+ DEFINE_CAST_OPERATORS(_type)
+
+ /// Specialization for an input stream parameter.
+ struct input : public T, public S, public C2BaseIndexOverride<S, BaseIndex>,
+ public C2StructCheck<S, BaseIndex,
+ T::indexFlags | T::Index::kStreamFlag | T::Type::kDirInput> {
+ /// Default constructor. Stream-ID is undefined.
+ inline input() : T(sizeof(_type), input::typeIndex) { }
+ /// Wrapper around base structure's constructor while also specifying stream-ID.
+ template<typename ...Args>
+ inline input(unsigned stream, const Args(&... args))
+ : T(sizeof(_type), input::typeIndex, stream), S(args...) { }
+ /// Set stream-id. \retval true if the stream-id was successfully set.
+ inline bool setStream(unsigned stream) { return C2Param::setStream(stream); }
+
+ DEFINE_CAST_OPERATORS(input)
+ };
+
+ /// Specialization for an output stream parameter.
+ struct output : public T, public S, public C2BaseIndexOverride<S, BaseIndex>,
+ public C2StructCheck<S, BaseIndex,
+ T::indexFlags | T::Index::kStreamFlag | T::Type::kDirOutput> {
+ /// Default constructor. Stream-ID is undefined.
+ inline output() : T(sizeof(_type), output::typeIndex) { }
+ /// Wrapper around base structure's constructor while also specifying stream-ID.
+ template<typename ...Args>
+ inline output(unsigned stream, const Args(&... args))
+ : T(sizeof(_type), output::typeIndex, stream), S(args...) { }
+ /// Set stream-id. \retval true if the stream-id was successfully set.
+ inline bool setStream(unsigned stream) { return C2Param::setStream(stream); }
+
+ DEFINE_CAST_OPERATORS(output)
+ };
+};
+
+/**
+ * Stream-parameter template for flexible structures.
+ *
+ * Base template to define a stream setting/tuning or info based on a flexible structure and
+ * an optional BaseIndex. Stream parameters are tied to a specific stream on a port (input or
+ * output).
+ *
+ * \tparam T param type C2Setting, C2Tuning or C2Info
+ * \tparam S wrapped flexible structure
+ * \tparam BaseIndex optional base-index override. Must be specified for common/reused structures.
+ *
+ * Parameters wrap structures by prepending a (parameter) header. The fields and methods of flexible
+ * structures can be accessed via the m member variable; however, the constructors of the structure
+ * are wrapped directly. (This is because flexible types cannot be subclassed.)
+ *
+ * There are 3 flavors of stream parameters: unspecified port, input and output. All of these expose
+ * a setStream method and an extra initial streamID parameter for the constructor. Moreover,
+ * parameters with unspecified port expose a setPort method, and add an additional initial port
+ * parameter to the constructor.
+ */
+template<typename T, typename S, int BaseIndex>
+struct C2_HIDE C2StreamParam<T, S, BaseIndex, IF_FLEXIBLE(S)>
+ : public T, public C2BaseIndexOverride<S, BaseIndex>,
+ private C2FlexStructCheck<S, BaseIndex,
+ T::indexFlags | T::Index::kStreamFlag | T::Index::kDirUndefined> {
+private:
+ typedef C2StreamParam<T, S> _type;
+ /// Default constructor. Port/direction and stream-ID is undefined.
+ inline C2StreamParam(size_t flexCount) : T(_type::calcSize(flexCount), _type::typeIndex, 0u) { }
+ /// Wrapper around base structure's constructor while also specifying port/direction and
+ /// stream-ID.
+ template<typename ...Args>
+ inline C2StreamParam(size_t flexCount, bool _output, unsigned stream, const Args(&... args))
+ : T(_type::calcSize(flexCount), _output ? output::typeIndex : input::typeIndex, stream),
+ m(flexCount, args...) { }
+
+public:
+ S m; ///< wrapped flexible structure
+
+ /// Set port/direction.
+ inline void setPort(bool output) { C2Param::setPort(output); }
+ /// Set stream-id. \retval true if the stream-id was successfully set.
+ inline bool setStream(unsigned stream) { return C2Param::setStream(stream); }
+
+ DEFINE_FLEXIBLE_METHODS(_type, S)
+ DEFINE_CAST_OPERATORS(_type)
+
+ /// Specialization for an input stream parameter.
+ struct input : public T, public C2BaseIndexOverride<S, BaseIndex>,
+ public C2FlexStructCheck<S, BaseIndex,
+ T::indexFlags | T::Index::kStreamFlag | T::Type::kDirInput> {
+ private:
+ /// Default constructor. Stream-ID is undefined.
+ inline input(size_t flexCount) : T(_type::calcSize(flexCount), input::typeIndex) { }
+ /// Wrapper around base structure's constructor while also specifying stream-ID.
+ template<typename ...Args>
+ inline input(size_t flexCount, unsigned stream, const Args(&... args))
+ : T(_type::calcSize(flexCount), input::typeIndex, stream), m(flexCount, args...) { }
+
+ public:
+ S m; ///< wrapped flexible structure
+
+ /// Set stream-id. \retval true if the stream-id was successfully set.
+ inline bool setStream(unsigned stream) { return C2Param::setStream(stream); }
+
+ DEFINE_FLEXIBLE_METHODS(input, S)
+ DEFINE_CAST_OPERATORS(input)
+ };
+
+ /// Specialization for an output stream parameter.
+ struct output : public T, public C2BaseIndexOverride<S, BaseIndex>,
+ public C2FlexStructCheck<S, BaseIndex,
+ T::indexFlags | T::Index::kStreamFlag | T::Type::kDirOutput> {
+ private:
+ /// Default constructor. Stream-ID is undefined.
+ inline output(size_t flexCount) : T(_type::calcSize(flexCount), output::typeIndex) { }
+ /// Wrapper around base structure's constructor while also specifying stream-ID.
+ template<typename ...Args>
+ inline output(size_t flexCount, unsigned stream, const Args(&... args))
+ : T(_type::calcSize(flexCount), output::typeIndex, stream), m(flexCount, args...) { }
+
+ public:
+ S m; ///< wrapped flexible structure
+
+ /// Set stream-id. \retval true if the stream-id was successfully set.
+ inline bool setStream(unsigned stream) { return C2Param::setStream(stream); }
+
+ DEFINE_FLEXIBLE_METHODS(output, S)
+ DEFINE_CAST_OPERATORS(output)
+ };
+};
+
+/* ======================== SIMPLE VALUE PARAMETERS ======================== */
+
+/**
+ * \ingroup internal
+ * A structure template encapsulating a single element with default constructors and no base-index.
+ */
+template<typename T>
+struct C2SimpleValueStruct {
+ T mValue; ///< simple value of the structure
+ // Default constructor.
+ inline C2SimpleValueStruct() = default;
+ // Constructor with an initial value.
+ inline C2SimpleValueStruct(T value) : mValue(value) {}
+ DEFINE_C2STRUCT_NO_BASE(SimpleValue)
+};
+
+// TODO: move this and next to some generic place
+/**
+ * Interface to a block of (mapped) memory containing an array of some type (T).
+ */
+template<typename T>
+struct C2MemoryBlock {
+ /// \returns the number of elements in this block.
+ virtual size_t size() const = 0;
+ /// \returns a const pointer to the start of this block. Care must be taken to not read outside
+ /// the block.
+ virtual const T *data() const = 0; // TODO: should this be friend access only in some C2Memory module?
+ /// \returns a pointer to the start of this block. Care must be taken to not read or write
+ /// outside the block.
+ inline T *data() { return const_cast<T*>(data()); }
+protected:
+ // TODO: for now it should never be deleted as C2MemoryBlock
+ virtual ~C2MemoryBlock() = default;
+};
+
+/**
+ * Interface to a block of memory containing a constant (constexpr) array of some type (T).
+ */
+template<typename T>
+struct C2ConstMemoryBlock : public C2MemoryBlock<T> {
+ virtual const T * data() const { return mData; }
+ virtual size_t size() const { return mSize; }
+
+ /// Constructor.
+ template<unsigned N>
+ inline constexpr C2ConstMemoryBlock(const T(&init)[N]) : mData(init), mSize(N) {}
+
+private:
+ const T *mData;
+ const size_t mSize;
+};
+
+/// \addtogroup internal
+/// @{
+
+/// Helper class to initialize flexible arrays with various initializers.
+struct _C2ValueArrayHelper {
+ // char[]-s are used as null terminated strings, so the last element is never inited.
+
+ /// Initialize a flexible array using a constexpr memory block.
+ template<typename T>
+ static void init(T(&array)[], size_t arrayLen, const C2MemoryBlock<T> &block) {
+ // reserve last element for terminal 0 for strings
+ if (arrayLen && std::is_same<T, char>::value) {
+ --arrayLen;
+ }
+ if (block.data()) {
+ memcpy(array, block.data(), std::min(arrayLen, block.size()) * sizeof(T));
+ }
+ }
+
+ /// Initialize a flexible array using an initializer list.
+ template<typename T>
+ static void init(T(&array)[], size_t arrayLen, const std::initializer_list<T> &init) {
+ size_t ix = 0;
+ // reserve last element for terminal 0 for strings
+ if (arrayLen && std::is_same<T, char>::value) {
+ --arrayLen;
+ }
+ for (const T &item : init) {
+ if (ix == arrayLen) {
+ break;
+ }
+ array[ix++] = item;
+ }
+ }
+
+ /// Initialize a flexible array using another flexible array.
+ template<typename T, unsigned N>
+ static void init(T(&array)[], size_t arrayLen, const T(&str)[N]) {
+ // reserve last element for terminal 0 for strings
+ if (arrayLen && std::is_same<T, char>::value) {
+ --arrayLen;
+ }
+ if (arrayLen) {
+ strncpy(array, str, std::min(arrayLen, (size_t)N));
+ }
+ }
+};
+
+/**
+ * Specialization for flexible blob and string arrays. A structure template encapsulating a single
+ * flexible array member with default flexible constructors and no base-index. This type cannot be
+ * constructed on its own as its size is 0.
+ *
+ * \internal This is different from C2SimpleArrayStruct<T[]> simply because its member is named
+ * mValue to reflect that this is a single value.
+ */
+template<typename T>
+struct C2SimpleValueStruct<T[]> {
+ static_assert(std::is_same<T, char>::value || std::is_same<T, uint8_t>::value,
+ "C2SimpleValueStruct<T[]> is only for BLOB or STRING");
+ T mValue[];
+
+ inline C2SimpleValueStruct() = default;
+ DEFINE_C2STRUCT_NO_BASE(SimpleValue)
+ FLEX(C2SimpleValueStruct, mValue)
+
+private:
+ inline C2SimpleValueStruct(size_t flexCount, const C2MemoryBlock<T> &block) {
+ _C2ValueArrayHelper::init(mValue, flexCount, block);
+ }
+
+ inline C2SimpleValueStruct(size_t flexCount, const std::initializer_list<T> &init) {
+ _C2ValueArrayHelper::init(mValue, flexCount, init);
+ }
+
+ template<unsigned N>
+ inline C2SimpleValueStruct(size_t flexCount, const T(&init)[N]) {
+ _C2ValueArrayHelper::init(mValue, flexCount, init);
+ }
+};
+
+/// @}
+
+/**
+ * A structure template encapsulating a single flexible array element of a specific type (T) with
+ * default constructors and no base-index. This type cannot be constructed on its own as its size
+ * is 0. Instead, it is meant to be used as a parameter, e.g.
+ *
+ * typedef C2StreamParam<C2Info, C2SimpleArrayStruct<C2MyFancyStruct>,
+ * kParamIndexMyFancyArrayStreamParam> C2MyFancyArrayStreamInfo;
+ */
+template<typename T>
+struct C2SimpleArrayStruct {
+ static_assert(!std::is_same<T, char>::value && !std::is_same<T, uint8_t>::value,
+ "use C2SimpleValueStruct<T[]> is for BLOB or STRING");
+
+ T mValues[]; ///< array member
+ /// Default constructor
+ inline C2SimpleArrayStruct() = default;
+ DEFINE_C2STRUCT_NO_BASE(SimpleArray)
+ FLEX(C2SimpleArrayStruct, mValues)
+
+private:
+ /// Construct from a C2MemoryBlock.
+ /// Used only by the flexible parameter allocators (alloc_unique & alloc_shared).
+ inline C2SimpleArrayStruct(size_t flexCount, const C2MemoryBlock<T> &block) {
+ _C2ValueArrayHelper::init(mValues, flexCount, block);
+ }
+
+ /// Construct from an initializer list.
+ /// Used only by the flexible parameter allocators (alloc_unique & alloc_shared).
+ inline C2SimpleArrayStruct(size_t flexCount, const std::initializer_list<T> &init) {
+ _C2ValueArrayHelper::init(mValues, flexCount, init);
+ }
+
+ /// Construct from another flexible array.
+ /// Used only by the flexible parameter allocators (alloc_unique & alloc_shared).
+ template<unsigned N>
+ inline C2SimpleArrayStruct(size_t flexCount, const T(&init)[N]) {
+ _C2ValueArrayHelper::init(mValues, flexCount, init);
+ }
+};
+
+/**
+ * \addtogroup simplevalue Simple value and array structures.
+ * @{
+ *
+ * Simple value structures.
+ *
+ * Structures containing a single simple value. These can be reused to easily define simple
+ * parameters of various types:
+ *
+ * typedef C2PortParam<C2Tuning, C2Int32Value, kParamIndexMyIntegerPortParam>
+ * C2MyIntegerPortParamTuning;
+ *
+ * They contain a single member (mValue or mValues) that is described as "value" or "values".
+ */
+/// A 32-bit signed integer parameter in mValue, described as "value"
+typedef C2SimpleValueStruct<int32_t> C2Int32Value;
+/// A 32-bit signed integer array parameter in mValues, described as "values"
+typedef C2SimpleArrayStruct<int32_t> C2Int32Array;
+/// A 32-bit unsigned integer parameter in mValue, described as "value"
+typedef C2SimpleValueStruct<uint32_t> C2Uint32Value;
+/// A 32-bit unsigned integer array parameter in mValues, described as "values"
+typedef C2SimpleArrayStruct<uint32_t> C2Uint32Array;
+/// A 64-bit signed integer parameter in mValue, described as "value"
+typedef C2SimpleValueStruct<int64_t> C2Int64Value;
+/// A 64-bit signed integer array parameter in mValues, described as "values"
+typedef C2SimpleArrayStruct<int64_t> C2Int64Array;
+/// A 64-bit unsigned integer parameter in mValue, described as "value"
+typedef C2SimpleValueStruct<uint64_t> C2Uint64Value;
+/// A 64-bit unsigned integer array parameter in mValues, described as "values"
+typedef C2SimpleArrayStruct<uint64_t> C2Uint64Array;
+/// A float parameter in mValue, described as "value"
+typedef C2SimpleValueStruct<float> C2FloatValue;
+/// A float array parameter in mValues, described as "values"
+typedef C2SimpleArrayStruct<float> C2FloatArray;
+/// A blob flexible parameter in mValue, described as "value"
+typedef C2SimpleValueStruct<uint8_t[]> C2BlobValue;
+/// A string flexible parameter in mValue, described as "value"
+typedef C2SimpleValueStruct<char[]> C2StringValue;
+
+#if 1
+template<typename T>
+const std::initializer_list<const C2FieldDescriptor> C2SimpleValueStruct<T>::fieldList = { C2FIELD(mValue, "value") };
+template<typename T>
+const std::initializer_list<const C2FieldDescriptor> C2SimpleValueStruct<T[]>::fieldList = { C2FIELD(mValue, "value") };
+template<typename T>
+const std::initializer_list<const C2FieldDescriptor> C2SimpleArrayStruct<T>::fieldList = { C2FIELD(mValues, "values") };
+#else
+// This seems to be handled by the template above
+DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleValueStruct<int32_t>, { C2FIELD(mValue, "value") });
+DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleValueStruct<uint32_t>, { C2FIELD(mValue, "value") });
+DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleValueStruct<int64_t>, { C2FIELD(mValue, "value") });
+DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleValueStruct<uint64_t>, { C2FIELD(mValue, "value") });
+DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleValueStruct<float>, { C2FIELD(mValue, "value") });
+DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleValueStruct<uint8_t[]>, { C2FIELD(mValue, "value") });
+DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleValueStruct<char[]>, { C2FIELD(mValue, "value") });
+DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleArrayStruct<int32_t>, { C2FIELD(mValues, "values") });
+DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleArrayStruct<uint32_t>, { C2FIELD(mValues, "values") });
+DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleArrayStruct<int64_t>, { C2FIELD(mValues, "values") });
+DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleArrayStruct<uint64_t>, { C2FIELD(mValues, "values") });
+DESCRIBE_TEMPLATED_C2STRUCT(C2SimpleArrayStruct<float>, { C2FIELD(mValues, "values") });
+#endif
+
+/// @}
+
+/// @}
+
+} // namespace android
+
+#endif // C2PARAM_DEF_H_
diff --git a/media/libstagefright/codec2/include/C2Work.h b/media/libstagefright/codec2/include/C2Work.h
new file mode 100644
index 0000000..a42d11a
--- /dev/null
+++ b/media/libstagefright/codec2/include/C2Work.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef C2WORK_H_
+
+#define C2WORK_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <C2Param.h>
+#include <C2Buffer.h>
+#include <C2Config.h>
+
+#include <memory>
+#include <list>
+#include <vector>
+
+typedef int status_t;
+
+namespace android {
+
+/// \defgroup work Work and data processing
+/// @{
+
+struct C2SettingResult {
+ enum Failure {
+ READ_ONLY, ///< parameter is read-only and cannot be set
+ MISMATCH, ///< parameter mismatches input data
+ BAD_VALUE, ///< parameter does not accept value
+ BAD_TYPE, ///< parameter is not supported
+ BAD_PORT, ///< parameter is not supported on the specific port
+ BAD_INDEX, ///< parameter is not supported on the specific stream
+ CONFLICT, ///< parameter is in conflict with another setting
+ };
+
+ C2ParamField field;
+ Failure failure;
+ std::unique_ptr<C2FieldSupportedValues> supportedValues; //< if different from normal (e.g. in conflict w/another param or input data)
+ std::list<C2ParamField> conflictingFields;
+};
+
+// ================================================================================================
+// WORK
+// ================================================================================================
+
+// node_id-s
+typedef uint32_t node_id;
+
+enum flags_t : uint32_t {
+ BUFFERFLAG_CODEC_CONFIG,
+ BUFFERFLAG_DROP_FRAME,
+ BUFFERFLAG_END_OF_STREAM,
+};
+
+enum {
+ kParamIndexWorkOrdinal,
+};
+
+struct C2WorkOrdinalStruct {
+ uint64_t timestamp;
+ uint64_t frame_index; // submission ordinal on the initial component
+ uint64_t custom_ordinal; // can be given by the component, e.g. decode order
+
+ DEFINE_AND_DESCRIBE_C2STRUCT(WorkOrdinal)
+ C2FIELD(timestamp, "timestamp")
+ C2FIELD(frame_index, "frame-index")
+ C2FIELD(custom_ordinal, "custom-ordinal")
+};
+
+struct C2BufferPack {
+//public:
+ flags_t flags;
+ C2WorkOrdinalStruct ordinal;
+ std::vector<std::shared_ptr<C2Buffer>> buffers;
+ //< for initial work item, these may also come from the parser - if provided
+ //< for output buffers, these are the responses to requestedInfos
+ std::list<std::unique_ptr<C2Info>> infos;
+ std::list<std::shared_ptr<C2InfoBuffer>> infoBuffers;
+};
+
+struct C2Worklet {
+//public:
+ // IN
+ node_id component;
+
+ std::list<std::unique_ptr<C2Param>> tunings; //< tunings to be applied before processing this
+ // worklet
+ std::list<C2Param::Type> requestedInfos;
+ std::vector<std::shared_ptr<C2BlockAllocator>> allocators; //< This vector shall be the same size as
+ //< output.buffers.
+
+ // OUT
+ C2BufferPack output;
+ std::list<std::unique_ptr<C2SettingResult>> failures;
+};
+
+/**
+ * This structure holds information about a single work item.
+ *
+ * This structure shall be passed by the client to the component for the first worklet. As such,
+ * worklets must not be empty. The ownership of this object is passed.
+ *
+ * input:
+ * The input data to be processed. This is provided by the client with ownership. When the work
+ * is returned, the input buffer-pack's buffer vector shall contain nullptrs.
+ *
+ * worklets:
+ * The chain of components and associated allocators, tunings and info requests that the data
+ * must pass through. If this has more than a single element, the tunnels between successive
+ * components of the worklet chain must have been (successfully) pre-registered at the time
+ * the work is submitted. Allocating the output buffers in the worklets is the responsibility
+ * of each component. Upon work submission, each output buffer-pack shall be an appropriately
+ * sized vector containing nullptrs. When the work is completed/returned to the client,
+ *
+ * worklets_processed:
+ * It shall be initialized to 0 by the client when the work is submitted.
+ * It shall contain the number of worklets that were successfully processed when the work is
+ * returned. If this is less than the number of worklets, result must not be success.
+ * It must be in the range of [0, worklets.size()].
+ *
+ * result:
+ * The final outcome of the work. If 0 when work is returned, it is assumed that all worklets
+ * have been processed.
+ */
+struct C2Work {
+//public:
+ // pre-chain infos (for portions of a tunneling chain that happened before this work-chain for
+ // this work item - due to framework facilitated (non-tunneled) work-chaining)
+ std::list<std::pair<std::unique_ptr<C2PortMimeConfig>, std::unique_ptr<C2Info>>> preChainInfos;
+ std::list<std::pair<std::unique_ptr<C2PortMimeConfig>, std::unique_ptr<C2Buffer>>> preChainInfoBlobs;
+
+ C2BufferPack input;
+ std::list<std::unique_ptr<C2Worklet>> worklets;
+
+ uint32_t worklets_processed;
+ status_t result;
+};
+
+struct C2WorkOutline {
+//public:
+ C2WorkOrdinalStruct ordinal;
+ std::list<node_id> chain;
+};
+
+/// @}
+
+} // namespace android
+
+#endif // C2WORK_H_
diff --git a/media/libstagefright/codec2/tests/Android.mk b/media/libstagefright/codec2/tests/Android.mk
new file mode 100644
index 0000000..49c4253
--- /dev/null
+++ b/media/libstagefright/codec2/tests/Android.mk
@@ -0,0 +1,37 @@
+# Build the unit tests.
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
+
+LOCAL_MODULE := codec2_test
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_SRC_FILES := \
+ vndk/C2UtilTest.cpp \
+ C2_test.cpp \
+ C2Param_test.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+ libcutils \
+ libstagefright_codec2 \
+ liblog
+
+LOCAL_C_INCLUDES := \
+ frameworks/av/media/libstagefright/codec2/include \
+ frameworks/av/media/libstagefright/codec2/vndk/include \
+ $(TOP)/frameworks/native/include/media/openmax \
+
+LOCAL_CFLAGS += -Werror -Wall -std=c++14
+LOCAL_CLANG := true
+
+include $(BUILD_NATIVE_TEST)
+
+# Include subdirectory makefiles
+# ============================================================
+
+# If we're building with ONE_SHOT_MAKEFILE (mm, mmm), then what the framework
+# team really wants is to build the stuff defined by this makefile.
+ifeq (,$(ONE_SHOT_MAKEFILE))
+include $(call first-makefiles-under,$(LOCAL_PATH))
+endif
diff --git a/media/libstagefright/codec2/tests/C2Param_test.cpp b/media/libstagefright/codec2/tests/C2Param_test.cpp
new file mode 100644
index 0000000..ec82c84
--- /dev/null
+++ b/media/libstagefright/codec2/tests/C2Param_test.cpp
@@ -0,0 +1,2687 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "C2Param_test"
+
+#include <gtest/gtest.h>
+
+#include <util/C2ParamUtils.h>
+#include <C2ParamDef.h>
+
+namespace android {
+
+void PrintTo(const _C2FieldId &id, ::std::ostream* os) {
+ *os << "@" << id._mOffset << "+" << id._mSize;
+}
+
+void PrintTo(const C2FieldDescriptor &fd, ::std::ostream *os) {
+ using FD=C2FieldDescriptor;
+ switch (fd.type()) {
+ case FD::INT32: *os << "i32"; break;
+ case FD::INT64: *os << "i64"; break;
+ case FD::UINT32: *os << "u32"; break;
+ case FD::UINT64: *os << "u64"; break;
+ case FD::FLOAT: *os << "float"; break;
+ case FD::STRING: *os << "char"; break;
+ case FD::BLOB: *os << "u8"; break;
+ default:
+ if (fd.type() & FD::STRUCT_FLAG) {
+ *os << "struct-" << (fd.type() & ~FD::STRUCT_FLAG);
+ } else {
+ *os << "type-" << fd.type();
+ }
+ }
+ *os << " " << fd.name();
+ if (fd.length() > 1) {
+ *os << "[" << fd.length() << "]";
+ } else if (fd.length() == 0) {
+ *os << "[]";
+ }
+ *os << " (";
+ PrintTo(fd._mFieldId, os);
+ *os << "*" << fd.length() << ")";
+}
+
+enum C2ParamIndexType {
+ kParamIndexNumber,
+ kParamIndexNumbers,
+ kParamIndexNumber2,
+ kParamIndexVendorStart = C2Param::BaseIndex::kVendorStart,
+ kParamIndexVendorNumbers,
+};
+
+void ffff(int(*)(int)) {}
+
+/* ============================= STRUCT DECLARATION AND DESCRIPTION ============================= */
+
+typedef C2FieldDescriptor FD;
+
+class C2ParamTest : public ::testing::Test {
+};
+
+class C2ParamTest_ParamFieldList
+ : public ::testing::TestWithParam<std::initializer_list<const C2FieldDescriptor>> {
+};
+
+enum {
+ kParamIndexSize,
+ kParamIndexTestA,
+ kParamIndexTestB,
+ kParamIndexTestFlexS32,
+ kParamIndexTestFlexEndS32,
+ kParamIndexTestFlexS64,
+ kParamIndexTestFlexEndS64,
+ kParamIndexTestFlexSize,
+ kParamIndexTestFlexEndSize,
+};
+
+struct C2SizeStruct {
+ int32_t mNumber;
+ int32_t mHeight;
+ enum : uint32_t { baseIndex = kParamIndexSize }; // <= needed for C2FieldDescriptor
+ const static std::initializer_list<const C2FieldDescriptor> fieldList; // <= needed for C2FieldDescriptor
+ const static FD::Type TYPE = (FD::Type)(baseIndex | FD::STRUCT_FLAG);
+};
+
+DEFINE_NO_NAMED_VALUES_FOR(C2SizeStruct)
+
+// Test 1. define a structure without any helper methods
+
+bool operator==(const C2FieldDescriptor &a, const C2FieldDescriptor &b) {
+ return a.type() == b.type()
+ && a.length() == b.length()
+ && strcmp(a.name(), b.name()) == 0
+ && a._mFieldId == b._mFieldId;
+}
+
+struct C2TestStruct_A {
+ int32_t mSigned32;
+ int64_t mSigned64[2];
+ uint32_t mUnsigned32[1];
+ uint64_t mUnsigned64;
+ float mFloat;
+ C2SizeStruct mSize[3];
+ uint8_t mBlob[100];
+ char mString[100];
+ bool mYesNo[100];
+
+ const static std::initializer_list<const C2FieldDescriptor> fieldList;
+ // enum : uint32_t { baseIndex = kParamIndexTest };
+ // typedef C2TestStruct_A _type;
+} __attribute__((packed));
+
+const std::initializer_list<const C2FieldDescriptor> C2TestStruct_A::fieldList =
+ { { FD::INT32, 1, "s32", 0, 4 },
+ { FD::INT64, 2, "s64", 4, 8 },
+ { FD::UINT32, 1, "u32", 20, 4 },
+ { FD::UINT64, 1, "u64", 24, 8 },
+ { FD::FLOAT, 1, "fp", 32, 4 },
+ { C2SizeStruct::TYPE, 3, "size", 36, 8 },
+ { FD::BLOB, 100, "blob", 60, 1 },
+ { FD::STRING, 100, "str", 160, 1 },
+ { FD::BLOB, 100, "y-n", 260, 1 } };
+
+TEST_P(C2ParamTest_ParamFieldList, VerifyStruct) {
+ std::vector<const C2FieldDescriptor> fields = GetParam(), expected = C2TestStruct_A::fieldList;
+
+ // verify first field descriptor
+ EXPECT_EQ(FD::INT32, fields[0].type());
+ EXPECT_STREQ("s32", fields[0].name());
+ EXPECT_EQ(1u, fields[0].length());
+ EXPECT_EQ(_C2FieldId(0, 4), fields[0]._mFieldId);
+
+ EXPECT_EQ(expected[0], fields[0]);
+ EXPECT_EQ(expected[1], fields[1]);
+ EXPECT_EQ(expected[2], fields[2]);
+ EXPECT_EQ(expected[3], fields[3]);
+ EXPECT_EQ(expected[4], fields[4]);
+ EXPECT_EQ(expected[5], fields[5]);
+ EXPECT_EQ(expected[6], fields[6]);
+ EXPECT_EQ(expected[7], fields[7]);
+ for (size_t i = 8; i < fields.size() && i < expected.size(); ++i) {
+ EXPECT_EQ(expected[i], fields[i]);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(InitializerList, C2ParamTest_ParamFieldList, ::testing::Values(C2TestStruct_A::fieldList));
+
+// define fields using C2FieldDescriptor pointer constructor
+const std::initializer_list<const C2FieldDescriptor> C2TestStruct_A_FD_PTR_fieldList =
+ { C2FieldDescriptor(&((C2TestStruct_A*)(nullptr))->mSigned32, "s32"),
+ C2FieldDescriptor(&((C2TestStruct_A*)(nullptr))->mSigned64, "s64"),
+ C2FieldDescriptor(&((C2TestStruct_A*)(nullptr))->mUnsigned32, "u32"),
+ C2FieldDescriptor(&((C2TestStruct_A*)(nullptr))->mUnsigned64, "u64"),
+ C2FieldDescriptor(&((C2TestStruct_A*)(nullptr))->mFloat, "fp"),
+ C2FieldDescriptor(&((C2TestStruct_A*)(nullptr))->mSize, "size"),
+ C2FieldDescriptor(&((C2TestStruct_A*)(nullptr))->mBlob, "blob"),
+ C2FieldDescriptor(&((C2TestStruct_A*)(nullptr))->mString, "str"),
+ // C2FieldDescriptor(&((C2TestStruct_A*)(nullptr))->mYesNo, "y-n")
+ };
+
+INSTANTIATE_TEST_CASE_P(PointerConstructor, C2ParamTest_ParamFieldList, ::testing::Values(C2TestStruct_A_FD_PTR_fieldList));
+
+// define fields using C2FieldDescriptor member-pointer constructor
+const std::initializer_list<const C2FieldDescriptor> C2TestStruct_A_FD_MEM_PTR_fieldList =
+ { C2FieldDescriptor((C2TestStruct_A*)0, &C2TestStruct_A::mSigned32, "s32"),
+ C2FieldDescriptor((C2TestStruct_A*)0, &C2TestStruct_A::mSigned64, "s64"),
+ C2FieldDescriptor((C2TestStruct_A*)0, &C2TestStruct_A::mUnsigned32, "u32"),
+ C2FieldDescriptor((C2TestStruct_A*)0, &C2TestStruct_A::mUnsigned64, "u64"),
+ C2FieldDescriptor((C2TestStruct_A*)0, &C2TestStruct_A::mFloat, "fp"),
+ C2FieldDescriptor((C2TestStruct_A*)0, &C2TestStruct_A::mSize, "size"),
+ C2FieldDescriptor((C2TestStruct_A*)0, &C2TestStruct_A::mBlob, "blob"),
+ C2FieldDescriptor((C2TestStruct_A*)0, &C2TestStruct_A::mString, "str"),
+ // C2FieldDescriptor((C2TestStruct_A*)0, &C2TestStruct_A::mYesNo, "y-n")
+ };
+
+INSTANTIATE_TEST_CASE_P(MemberPointerConstructor, C2ParamTest_ParamFieldList, ::testing::Values(C2TestStruct_A_FD_MEM_PTR_fieldList));
+
+// Test 2. define a structure with two-step helper methods
+
+struct C2TestAStruct {
+ int32_t mSigned32;
+ int64_t mSigned64[2];
+ uint32_t mUnsigned32[1];
+ uint64_t mUnsigned64;
+ float mFloat;
+ C2SizeStruct mSize[3];
+ uint8_t mBlob[100];
+ char mString[100];
+ bool mYesNo[100];
+
+private: // test access level
+ DEFINE_C2STRUCT(TestA)
+} C2_PACK;
+
+DESCRIBE_C2STRUCT(TestA, {
+ C2FIELD(mSigned32, "s32")
+ C2FIELD(mSigned64, "s64")
+ C2FIELD(mUnsigned32, "u32")
+ C2FIELD(mUnsigned64, "u64")
+ C2FIELD(mFloat, "fp")
+ C2FIELD(mSize, "size")
+ C2FIELD(mBlob, "blob")
+ C2FIELD(mString, "str")
+ // C2FIELD(mYesNo, "y-n")
+}) // ; optional
+
+INSTANTIATE_TEST_CASE_P(DescribeStruct2Step, C2ParamTest_ParamFieldList, ::testing::Values(C2TestAStruct::fieldList));
+
+// Test 3. define a structure with one-step helper method
+
+struct C2TestBStruct {
+ int32_t mSigned32;
+ int64_t mSigned64[2];
+ uint32_t mUnsigned32[1];
+ uint64_t mUnsigned64;
+ float mFloat;
+ C2SizeStruct mSize[3];
+ uint8_t mBlob[100];
+ char mString[100];
+ bool mYesNo[100];
+
+private: // test access level
+ DEFINE_AND_DESCRIBE_C2STRUCT(TestB)
+
+ C2FIELD(mSigned32, "s32")
+ C2FIELD(mSigned64, "s64")
+ C2FIELD(mUnsigned32, "u32")
+ C2FIELD(mUnsigned64, "u64")
+ C2FIELD(mFloat, "fp")
+ C2FIELD(mSize, "size")
+ C2FIELD(mBlob, "blob")
+ C2FIELD(mString, "str")
+ // C2FIELD(mYesNo, "y-n")
+};
+
+INSTANTIATE_TEST_CASE_P(DescribeStruct1Step, C2ParamTest_ParamFieldList, ::testing::Values(C2TestBStruct::fieldList));
+
+// Test 4. flexible members
+
+template<typename T>
+class C2ParamTest_FlexParamFieldList : public ::testing::Test {
+protected:
+ using Type=FD::Type;
+
+ // static std::initializer_list<std::initializer_list<const C2FieldDescriptor>>
+ static std::vector<std::vector<const C2FieldDescriptor>>
+ GetLists();
+
+ constexpr static Type flexType =
+ std::is_same<T, int32_t>::value ? FD::INT32 :
+ std::is_same<T, int64_t>::value ? FD::INT64 :
+ std::is_same<T, uint32_t>::value ? FD::UINT32 :
+ std::is_same<T, uint64_t>::value ? FD::UINT64 :
+ std::is_same<T, float>::value ? FD::FLOAT :
+ std::is_same<T, uint8_t>::value ? FD::BLOB :
+ std::is_same<T, char>::value ? FD::STRING :
+ std::is_same<T, C2SizeStruct>::value ? C2SizeStruct::TYPE : (Type)0;
+ constexpr static size_t flexSize = sizeof(T);
+};
+
+typedef ::testing::Types<int32_t, int64_t, C2SizeStruct> FlexTypes;
+TYPED_TEST_CASE(C2ParamTest_FlexParamFieldList, FlexTypes);
+
+TYPED_TEST(C2ParamTest_FlexParamFieldList, VerifyStruct) {
+ for (auto a : this->GetLists()) {
+ std::vector<const C2FieldDescriptor> fields = a;
+ if (fields.size() > 1) {
+ EXPECT_EQ(2u, fields.size());
+ EXPECT_EQ(C2FieldDescriptor(FD::INT32, 1, "s32", 0, 4), fields[0]);
+ EXPECT_EQ(C2FieldDescriptor(this->flexType, 0, "flex", 4, this->flexSize),
+ fields[1]);
+ } else {
+ EXPECT_EQ(1u, fields.size());
+ EXPECT_EQ(C2FieldDescriptor(this->flexType, 0, "flex", 0, this->flexSize),
+ fields[0]);
+ }
+ }
+}
+
+struct C2TestStruct_FlexS32 {
+ int32_t mFlex[];
+
+ const static std::initializer_list<const C2FieldDescriptor> fieldList;
+ // enum : uint32_t { baseIndex = kParamIndexTestFlex, flexSize = 4 };
+ // typedef C2TestStruct_FlexS32 _type;
+ // typedef int32_t flexType;
+};
+
+const std::initializer_list<const C2FieldDescriptor> C2TestStruct_FlexS32::fieldList = {
+ { FD::INT32, 0, "flex", 0, 4 }
+};
+
+struct C2TestStruct_FlexEndS32 {
+ int32_t mSigned32;
+ int32_t mFlex[];
+
+ const static std::initializer_list<const C2FieldDescriptor> fieldList;
+ // enum : uint32_t { baseIndex = kParamIndexTestFlexEnd, flexSize = 4 };
+ // typedef C2TestStruct_FlexEnd _type;
+ // typedef int32_t flexType;
+};
+
+const std::initializer_list<const C2FieldDescriptor> C2TestStruct_FlexEndS32::fieldList = {
+ { FD::INT32, 1, "s32", 0, 4 },
+ { FD::INT32, 0, "flex", 4, 4 },
+};
+
+const static std::initializer_list<const C2FieldDescriptor> C2TestStruct_FlexEndS32_ptr_fieldList = {
+ C2FieldDescriptor(&((C2TestStruct_FlexEndS32*)0)->mSigned32, "s32"),
+ C2FieldDescriptor(&((C2TestStruct_FlexEndS32*)0)->mFlex, "flex"),
+};
+
+struct C2TestFlexS32Struct {
+ int32_t mFlexSigned32[];
+private: // test access level
+ C2TestFlexS32Struct() {}
+
+ DEFINE_AND_DESCRIBE_FLEX_C2STRUCT(TestFlexS32, mFlexSigned32)
+ C2FIELD(mFlexSigned32, "flex")
+};
+
+struct C2TestFlexEndS32Struct {
+ int32_t mSigned32;
+ int32_t mFlexSigned32[];
+private: // test access level
+ C2TestFlexEndS32Struct() {}
+
+ DEFINE_FLEX_C2STRUCT(TestFlexEndS32, mFlexSigned32)
+} C2_PACK;
+
+DESCRIBE_C2STRUCT(TestFlexEndS32, {
+ C2FIELD(mSigned32, "s32")
+ C2FIELD(mFlexSigned32, "flex")
+}) // ; optional
+
+template<>
+std::vector<std::vector<const C2FieldDescriptor>>
+//std::initializer_list<std::initializer_list<const C2FieldDescriptor>>
+C2ParamTest_FlexParamFieldList<int32_t>::GetLists() {
+ return {
+ C2TestStruct_FlexS32::fieldList,
+ C2TestStruct_FlexEndS32::fieldList,
+ C2TestStruct_FlexEndS32_ptr_fieldList,
+ C2TestFlexS32Struct::fieldList,
+ C2TestFlexEndS32Struct::fieldList,
+ };
+}
+
+struct C2TestStruct_FlexS64 {
+ int64_t mFlexSigned64[];
+
+ const static std::initializer_list<const C2FieldDescriptor> fieldList;
+ // enum : uint32_t { baseIndex = kParamIndexTestFlexS64, flexSize = 8 };
+ // typedef C2TestStruct_FlexS64 _type;
+ // typedef int64_t flexType;
+};
+
+const std::initializer_list<const C2FieldDescriptor> C2TestStruct_FlexS64::fieldList = {
+ { FD::INT64, 0, "flex", 0, 8 }
+};
+
+struct C2TestStruct_FlexEndS64 {
+ int32_t mSigned32;
+ int64_t mSigned64Flex[];
+
+ const static std::initializer_list<const C2FieldDescriptor> fieldList;
+ // enum : uint32_t { baseIndex = kParamIndexTestFlexEndS64, flexSize = 8 };
+ // typedef C2TestStruct_FlexEndS64 _type;
+ // typedef int64_t flexType;
+};
+
+const std::initializer_list<const C2FieldDescriptor> C2TestStruct_FlexEndS64::fieldList = {
+ { FD::INT32, 1, "s32", 0, 4 },
+ { FD::INT64, 0, "flex", 4, 8 },
+};
+
+struct C2TestFlexS64Struct {
+ int64_t mFlexSigned64[];
+ C2TestFlexS64Struct() {}
+
+ DEFINE_AND_DESCRIBE_FLEX_C2STRUCT(TestFlexS64, mFlexSigned64)
+ C2FIELD(mFlexSigned64, "flex")
+};
+
+struct C2TestFlexEndS64Struct {
+ int32_t mSigned32;
+ int64_t mFlexSigned64[];
+ C2TestFlexEndS64Struct() {}
+
+ DEFINE_FLEX_C2STRUCT(TestFlexEndS64, mFlexSigned64)
+} C2_PACK;
+
+DESCRIBE_C2STRUCT(TestFlexEndS64, {
+ C2FIELD(mSigned32, "s32")
+ C2FIELD(mFlexSigned64, "flex")
+}) // ; optional
+
+template<>
+std::vector<std::vector<const C2FieldDescriptor>>
+//std::initializer_list<std::initializer_list<const C2FieldDescriptor>>
+C2ParamTest_FlexParamFieldList<int64_t>::GetLists() {
+ return {
+ C2TestStruct_FlexS64::fieldList,
+ C2TestStruct_FlexEndS64::fieldList,
+ C2TestFlexS64Struct::fieldList,
+ C2TestFlexEndS64Struct::fieldList,
+ };
+}
+
+struct C2TestStruct_FlexSize {
+ C2SizeStruct mFlexSize[];
+
+ const static std::initializer_list<const C2FieldDescriptor> fieldList;
+ // enum : uint32_t { baseIndex = kParamIndexTestFlexSize, flexSize = 8 };
+ // typedef C2TestStruct_FlexSize _type;
+ // typedef C2SizeStruct flexType;
+};
+
+const std::initializer_list<const C2FieldDescriptor> C2TestStruct_FlexSize::fieldList = {
+ { C2SizeStruct::TYPE, 0, "flex", 0, sizeof(C2SizeStruct) }
+};
+
+struct C2TestStruct_FlexEndSize {
+ int32_t mSigned32;
+ C2SizeStruct mSizeFlex[];
+
+ const static std::initializer_list<const C2FieldDescriptor> fieldList;
+ // enum : uint32_t { baseIndex = kParamIndexTestFlexEndSize, flexSize = 8 };
+ // typedef C2TestStruct_FlexEndSize _type;
+ // typedef C2SizeStruct flexType;
+};
+
+const std::initializer_list<const C2FieldDescriptor> C2TestStruct_FlexEndSize::fieldList = {
+ { FD::INT32, 1, "s32", 0, 4 },
+ { C2SizeStruct::TYPE, 0, "flex", 4, sizeof(C2SizeStruct) },
+};
+
+struct C2TestFlexSizeStruct {
+ C2SizeStruct mFlexSize[];
+ C2TestFlexSizeStruct() {}
+
+ DEFINE_AND_DESCRIBE_FLEX_C2STRUCT(TestFlexSize, mFlexSize)
+ C2FIELD(mFlexSize, "flex")
+};
+
+struct C2TestFlexEndSizeStruct {
+ int32_t mSigned32;
+ C2SizeStruct mFlexSize[];
+ C2TestFlexEndSizeStruct() {}
+
+ DEFINE_FLEX_C2STRUCT(TestFlexEndSize, mFlexSize)
+} C2_PACK;
+
+DESCRIBE_C2STRUCT(TestFlexEndSize, {
+ C2FIELD(mSigned32, "s32")
+ C2FIELD(mFlexSize, "flex")
+}) // ; optional
+
+template<>
+std::vector<std::vector<const C2FieldDescriptor>>
+//std::initializer_list<std::initializer_list<const C2FieldDescriptor>>
+C2ParamTest_FlexParamFieldList<C2SizeStruct>::GetLists() {
+ return {
+ C2TestStruct_FlexSize::fieldList,
+ C2TestStruct_FlexEndSize::fieldList,
+ C2TestFlexSizeStruct::fieldList,
+ C2TestFlexEndSizeStruct::fieldList,
+ };
+}
+
+TEST_F(C2ParamTest, FieldId) {
+ // pointer constructor
+ EXPECT_EQ(_C2FieldId(0, 4), _C2FieldId(&((C2TestStruct_A*)0)->mSigned32));
+ EXPECT_EQ(_C2FieldId(4, 8), _C2FieldId(&((C2TestStruct_A*)0)->mSigned64));
+ EXPECT_EQ(_C2FieldId(20, 4), _C2FieldId(&((C2TestStruct_A*)0)->mUnsigned32));
+ EXPECT_EQ(_C2FieldId(24, 8), _C2FieldId(&((C2TestStruct_A*)0)->mUnsigned64));
+ EXPECT_EQ(_C2FieldId(32, 4), _C2FieldId(&((C2TestStruct_A*)0)->mFloat));
+ EXPECT_EQ(_C2FieldId(36, 8), _C2FieldId(&((C2TestStruct_A*)0)->mSize));
+ EXPECT_EQ(_C2FieldId(60, 1), _C2FieldId(&((C2TestStruct_A*)0)->mBlob));
+ EXPECT_EQ(_C2FieldId(160, 1), _C2FieldId(&((C2TestStruct_A*)0)->mString));
+ EXPECT_EQ(_C2FieldId(260, 1), _C2FieldId(&((C2TestStruct_A*)0)->mYesNo));
+
+ EXPECT_EQ(_C2FieldId(0, 4), _C2FieldId(&((C2TestFlexEndSizeStruct*)0)->mSigned32));
+ EXPECT_EQ(_C2FieldId(4, 8), _C2FieldId(&((C2TestFlexEndSizeStruct*)0)->mFlexSize));
+
+ // member pointer constructor
+ EXPECT_EQ(_C2FieldId(0, 4), _C2FieldId((C2TestStruct_A*)0, &C2TestStruct_A::mSigned32));
+ EXPECT_EQ(_C2FieldId(4, 8), _C2FieldId((C2TestStruct_A*)0, &C2TestStruct_A::mSigned64));
+ EXPECT_EQ(_C2FieldId(20, 4), _C2FieldId((C2TestStruct_A*)0, &C2TestStruct_A::mUnsigned32));
+ EXPECT_EQ(_C2FieldId(24, 8), _C2FieldId((C2TestStruct_A*)0, &C2TestStruct_A::mUnsigned64));
+ EXPECT_EQ(_C2FieldId(32, 4), _C2FieldId((C2TestStruct_A*)0, &C2TestStruct_A::mFloat));
+ EXPECT_EQ(_C2FieldId(36, 8), _C2FieldId((C2TestStruct_A*)0, &C2TestStruct_A::mSize));
+ EXPECT_EQ(_C2FieldId(60, 1), _C2FieldId((C2TestStruct_A*)0, &C2TestStruct_A::mBlob));
+ EXPECT_EQ(_C2FieldId(160, 1), _C2FieldId((C2TestStruct_A*)0, &C2TestStruct_A::mString));
+ EXPECT_EQ(_C2FieldId(260, 1), _C2FieldId((C2TestStruct_A*)0, &C2TestStruct_A::mYesNo));
+
+ EXPECT_EQ(_C2FieldId(0, 4), _C2FieldId((C2TestFlexEndSizeStruct*)0, &C2TestFlexEndSizeStruct::mSigned32));
+ EXPECT_EQ(_C2FieldId(4, 8), _C2FieldId((C2TestFlexEndSizeStruct*)0, &C2TestFlexEndSizeStruct::mFlexSize));
+
+ // member pointer sans type pointer
+ EXPECT_EQ(_C2FieldId(0, 4), _C2FieldId(&C2TestStruct_A::mSigned32));
+ EXPECT_EQ(_C2FieldId(4, 8), _C2FieldId(&C2TestStruct_A::mSigned64));
+ EXPECT_EQ(_C2FieldId(20, 4), _C2FieldId(&C2TestStruct_A::mUnsigned32));
+ EXPECT_EQ(_C2FieldId(24, 8), _C2FieldId(&C2TestStruct_A::mUnsigned64));
+ EXPECT_EQ(_C2FieldId(32, 4), _C2FieldId(&C2TestStruct_A::mFloat));
+ EXPECT_EQ(_C2FieldId(36, 8), _C2FieldId(&C2TestStruct_A::mSize));
+ EXPECT_EQ(_C2FieldId(60, 1), _C2FieldId(&C2TestStruct_A::mBlob));
+ EXPECT_EQ(_C2FieldId(160, 1), _C2FieldId(&C2TestStruct_A::mString));
+ EXPECT_EQ(_C2FieldId(260, 1), _C2FieldId(&C2TestStruct_A::mYesNo));
+
+ EXPECT_EQ(_C2FieldId(0, 4), _C2FieldId(&C2TestFlexEndSizeStruct::mSigned32));
+ EXPECT_EQ(_C2FieldId(4, 8), _C2FieldId(&C2TestFlexEndSizeStruct::mFlexSize));
+
+ typedef C2GlobalParam<C2Info, C2TestAStruct> C2TestAInfo;
+ typedef C2GlobalParam<C2Info, C2TestFlexEndSizeStruct> C2TestFlexEndSizeInfo;
+
+ // pointer constructor in C2Param
+ EXPECT_EQ(_C2FieldId(8, 4), _C2FieldId(&((C2TestAInfo*)0)->mSigned32));
+ EXPECT_EQ(_C2FieldId(12, 8), _C2FieldId(&((C2TestAInfo*)0)->mSigned64));
+ EXPECT_EQ(_C2FieldId(28, 4), _C2FieldId(&((C2TestAInfo*)0)->mUnsigned32));
+ EXPECT_EQ(_C2FieldId(32, 8), _C2FieldId(&((C2TestAInfo*)0)->mUnsigned64));
+ EXPECT_EQ(_C2FieldId(40, 4), _C2FieldId(&((C2TestAInfo*)0)->mFloat));
+ EXPECT_EQ(_C2FieldId(44, 8), _C2FieldId(&((C2TestAInfo*)0)->mSize));
+ EXPECT_EQ(_C2FieldId(68, 1), _C2FieldId(&((C2TestAInfo*)0)->mBlob));
+ EXPECT_EQ(_C2FieldId(168, 1), _C2FieldId(&((C2TestAInfo*)0)->mString));
+ EXPECT_EQ(_C2FieldId(268, 1), _C2FieldId(&((C2TestAInfo*)0)->mYesNo));
+
+ EXPECT_EQ(_C2FieldId(8, 4), _C2FieldId(&((C2TestFlexEndSizeInfo*)0)->m.mSigned32));
+ EXPECT_EQ(_C2FieldId(12, 8), _C2FieldId(&((C2TestFlexEndSizeInfo*)0)->m.mFlexSize));
+
+ // member pointer in C2Param
+ EXPECT_EQ(_C2FieldId(8, 4), _C2FieldId((C2TestAInfo*)0, &C2TestAInfo::mSigned32));
+ EXPECT_EQ(_C2FieldId(12, 8), _C2FieldId((C2TestAInfo*)0, &C2TestAInfo::mSigned64));
+ EXPECT_EQ(_C2FieldId(28, 4), _C2FieldId((C2TestAInfo*)0, &C2TestAInfo::mUnsigned32));
+ EXPECT_EQ(_C2FieldId(32, 8), _C2FieldId((C2TestAInfo*)0, &C2TestAInfo::mUnsigned64));
+ EXPECT_EQ(_C2FieldId(40, 4), _C2FieldId((C2TestAInfo*)0, &C2TestAInfo::mFloat));
+ EXPECT_EQ(_C2FieldId(44, 8), _C2FieldId((C2TestAInfo*)0, &C2TestAInfo::mSize));
+ EXPECT_EQ(_C2FieldId(68, 1), _C2FieldId((C2TestAInfo*)0, &C2TestAInfo::mBlob));
+ EXPECT_EQ(_C2FieldId(168, 1), _C2FieldId((C2TestAInfo*)0, &C2TestAInfo::mString));
+ EXPECT_EQ(_C2FieldId(268, 1), _C2FieldId((C2TestAInfo*)0, &C2TestAInfo::mYesNo));
+
+ // NOTE: cannot use a member pointer for flex params due to introduction of 'm'
+ // EXPECT_EQ(_C2FieldId(8, 4), _C2FieldId(&C2TestFlexEndSizeInfo::m.mSigned32));
+ // EXPECT_EQ(_C2FieldId(12, 8), _C2FieldId(&C2TestFlexEndSizeInfo::m.mFlexSize));
+
+
+
+}
+
+struct S32 {
+ template<typename T, class B=typename std::remove_extent<T>::type>
+ inline S32(const T*) {
+ static_assert(!std::is_array<T>::value, "should not be an array");
+ static_assert(std::is_same<B, int32_t>::value, "should be int32_t");
+ }
+};
+
+struct FLX {
+ template<typename U, typename T, class B=typename std::remove_extent<T>::type>
+ inline FLX(const T*, const U*) {
+ static_assert(std::is_array<T>::value, "should be an array");
+ static_assert(std::extent<T>::value == 0, "should be an array of 0 extent");
+ static_assert(std::is_same<B, U>::value, "should be type U");
+ }
+};
+
+struct MP {
+ template<typename U, typename T, typename ExpectedU, typename UnexpectedU>
+ inline MP(T U::*, const ExpectedU*, const UnexpectedU*) {
+ static_assert(!std::is_same<U, UnexpectedU>::value, "should not be member pointer of the base type");
+ static_assert(std::is_same<U, ExpectedU>::value, "should be member pointer of the derived type");
+ }
+
+ template<typename U, typename T, typename B, typename D>
+ inline MP(T D::*, const D*) { }
+};
+
+void compiledStatic_arrayTypePropagationTest() {
+ (void)S32(&((C2TestFlexEndS32Struct *)0)->mSigned32);
+ (void)FLX(&((C2TestFlexEndS32Struct *)0)->mFlexSigned32, (int32_t*)0);
+ (void)FLX(&((C2TestFlexS32Struct *)0)->mFlexSigned32, (int32_t*)0);
+
+ typedef C2GlobalParam<C2Info, C2TestAStruct> C2TestAInfo;
+
+ // TRICKY: &derivedClass::baseMember has type of baseClass::*
+ static_assert(std::is_same<decltype(&C2TestAInfo::mSigned32), int32_t C2TestAStruct::*>::value,
+ "base member pointer should have base class in type");
+
+ // therefore, member pointer expands to baseClass::* in templates
+ (void)MP(&C2TestAInfo::mSigned32,
+ (C2TestAStruct*)0 /* expected */, (C2TestAInfo*)0 /* unexpected */);
+ // but can be cast to derivedClass::*
+ (void)MP((int32_t C2TestAInfo::*)&C2TestAInfo::mSigned32,
+ (C2TestAInfo*)0 /* expected */, (C2TestAStruct*)0 /* unexpected */);
+
+ // TRICKY: baseClass::* does not autoconvert to derivedClass::* even in templates
+ // (void)MP(&C2TestAInfo::mSigned32, (C2TestAInfo*)0);
+}
+
+TEST_F(C2ParamTest, MemberPointerCast) {
+ typedef C2GlobalParam<C2Info, C2TestAStruct> C2TestAInfo;
+
+ static_assert(offsetof(C2TestAInfo, mSigned32) == 8, "offset should be 8");
+ constexpr int32_t C2TestAStruct::* s32ptr = &C2TestAInfo::mSigned32;
+ constexpr int32_t C2TestAInfo::* s32ptr_derived = (int32_t C2TestAStruct::*)&C2TestAInfo::mSigned32;
+ constexpr int32_t C2TestAInfo::* s32ptr_cast2derived = (int32_t C2TestAInfo::*)s32ptr;
+ C2TestAInfo *info = (C2TestAInfo *)256;
+ C2TestAStruct *strukt = (C2TestAStruct *)info;
+ int32_t *info_s32_derived = &(info->*s32ptr_derived);
+ int32_t *info_s32_cast2derived = &(info->*s32ptr_cast2derived);
+ int32_t *info_s32 = &(info->*s32ptr);
+ int32_t *strukt_s32 = &(strukt->*s32ptr);
+
+ EXPECT_EQ(256u, (uintptr_t)info);
+ EXPECT_EQ(264u, (uintptr_t)strukt);
+ EXPECT_EQ(264u, (uintptr_t)info_s32_derived);
+ EXPECT_EQ(264u, (uintptr_t)info_s32_cast2derived);
+ EXPECT_EQ(264u, (uintptr_t)info_s32);
+ EXPECT_EQ(264u, (uintptr_t)strukt_s32);
+
+ typedef C2GlobalParam<C2Info, C2TestFlexEndSizeStruct> C2TestFlexEndSizeInfo;
+ static_assert(offsetof(C2TestFlexEndSizeInfo, m.mSigned32) == 8, "offset should be 8");
+ static_assert(offsetof(C2TestFlexEndSizeInfo, m.mFlexSize) == 12, "offset should be 12");
+}
+
+/* ===================================== PARAM USAGE TESTS ===================================== */
+
+struct C2NumberStruct {
+ int32_t mNumber;
+ C2NumberStruct() {}
+ C2NumberStruct(int32_t _number) : mNumber(_number) {}
+
+ DEFINE_AND_DESCRIBE_C2STRUCT(Number)
+ C2FIELD(mNumber, "number")
+};
+
+struct C2NumbersStruct {
+ int32_t mNumbers[];
+ C2NumbersStruct() {}
+
+ DEFINE_AND_DESCRIBE_FLEX_C2STRUCT(Numbers, mNumbers)
+ C2FIELD(mNumbers, "numbers")
+};
+static_assert(sizeof(C2NumbersStruct) == 0, "C2NumbersStruct has incorrect size");
+
+typedef C2GlobalParam<C2Tuning, C2NumberStruct> C2NumberTuning;
+typedef C2PortParam<C2Tuning, C2NumberStruct> C2NumberPortTuning;
+typedef C2StreamParam<C2Tuning, C2NumberStruct> C2NumberStreamTuning;
+
+typedef C2GlobalParam<C2Tuning, C2NumbersStruct> C2NumbersTuning;
+typedef C2PortParam<C2Tuning, C2NumbersStruct> C2NumbersPortTuning;
+typedef C2StreamParam<C2Tuning, C2NumbersStruct> C2NumbersStreamTuning;
+
+//
+#if 0
+
+void test() {
+ C2NumberStruct s(10);
+ (void)C2NumberStruct::fieldList;
+};
+
+typedef C2StreamParam<C2Tuning, C2Int64Value, kParamIndexNumberB> C2NumberConfig4;
+typedef C2PortParam<C2Tuning, C2Int32Value, kParamIndexNumber> C2NumberConfig3;
+typedef C2GlobalParam<C2Tuning, C2StringValue, kParamIndexNumber> C2VideoNameConfig;
+
+void test3() {
+ C2NumberConfig3 s(10);
+ s.mValue = 11;
+ s = 12;
+ (void)C2NumberConfig3::fieldList;
+ std::shared_ptr<C2VideoNameConfig> n = C2VideoNameConfig::alloc_shared(25);
+ strcpy(n->m.mValue, "lajos");
+ C2NumberConfig4 t(false, 0, 11);
+ t.mValue = 15;
+};
+
+struct C2NumbersStruct {
+ int32_t mNumbers[];
+ enum { baseIndex = kParamIndexNumber };
+ const static std::initializer_list<const C2FieldDescriptor> fieldList;
+ C2NumbersStruct() {}
+
+ FLEX(C2NumbersStruct, mNumbers);
+};
+
+static_assert(sizeof(C2NumbersStruct) == 0, "yes");
+
+
+typedef C2GlobalParam<C2Info, C2NumbersStruct> C2NumbersInfo;
+
+const std::initializer_list<const C2FieldDescriptor> C2NumbersStruct::fieldList =
+// { { FD::INT32, 0, "widths" } };
+ { C2FieldDescriptor(&((C2NumbersStruct*)(nullptr))->mNumbers, "number") };
+
+typedef C2PortParam<C2Tuning, C2NumberStruct> C2NumberConfig;
+
+std::list<const C2FieldDescriptor> myList = C2NumberConfig::fieldList;
+
+ std::unique_ptr<android::C2ParamDescriptor> __test_describe(uint32_t paramType) {
+ std::list<const C2FieldDescriptor> fields = describeC2Params<C2NumberConfig>();
+
+ auto widths = C2NumbersInfo::alloc_shared(5);
+ widths->flexCount();
+ widths->m.mNumbers[4] = 1;
+
+ test();
+ test3();
+
+ C2NumberConfig outputWidth(false, 123);
+
+ C2Param::Index index(paramType);
+ switch (paramType) {
+ case C2NumberConfig::baseIndex:
+ return std::unique_ptr<C2ParamDescriptor>(new C2ParamDescriptor{
+ true /* isRequired */,
+ "number",
+ index,
+ });
+ }
+ return nullptr;
+ }
+
+
+} // namespace android
+
+#endif
+//
+
+template<typename T>
+bool canSetPort(T &o, bool output) { return o.setPort(output); }
+bool canSetPort(...) { return false; }
+
+template<typename S, typename=decltype(((S*)0)->setPort(true))>
+static std::true_type _canCallSetPort(int);
+template<typename>
+static std::false_type _canCallSetPort(...);
+#define canCallSetPort(x) decltype(_canCallSetPort<std::remove_reference<decltype(x)>::type>(0))::value
+
+/* ======================================= STATIC TESTS ======================================= */
+
+static_assert(_C2Comparable<int>::value, "int is not comparable");
+static_assert(!_C2Comparable<void>::value, "void is comparable");
+
+struct C2_HIDE _test0 {
+ bool operator==(const _test0&);
+ bool operator!=(const _test0&);
+};
+struct C2_HIDE _test1 {
+ bool operator==(const _test1&);
+};
+struct C2_HIDE _test2 {
+ bool operator!=(const _test2&);
+};
+static_assert(_C2Comparable<_test0>::value, "class with == and != is not comparable");
+static_assert(_C2Comparable<_test1>::value, "class with == is not comparable");
+static_assert(_C2Comparable<_test2>::value, "class with != is not comparable");
+
+/* ======================================= C2PARAM TESTS ======================================= */
+
+struct _C2ParamInspector {
+ static void StaticTest();
+ static void StaticFlexTest();
+};
+
+// TEST_F(_C2ParamInspector, StaticTest) {
+void _C2ParamInspector::StaticTest() {
+ typedef C2Param::Index I;
+
+ // C2NumberStruct: baseIndex = kIndex (args)
+ static_assert(C2NumberStruct::baseIndex == kParamIndexNumber, "bad index");
+ static_assert(sizeof(C2NumberStruct) == 4, "bad size");
+
+ // C2NumberTuning: kIndex | tun | global (args)
+ static_assert(C2NumberTuning::baseIndex == kParamIndexNumber, "bad index");
+ static_assert(C2NumberTuning::typeIndex == (kParamIndexNumber | I::kTypeTuning | I::kDirGlobal), "bad index");
+ static_assert(sizeof(C2NumberTuning) == 12, "bad size");
+
+ static_assert(offsetof(C2NumberTuning, _mSize) == 0, "bad size");
+ static_assert(offsetof(C2NumberTuning, _mIndex) == 4, "bad offset");
+ static_assert(offsetof(C2NumberTuning, mNumber) == 8, "bad offset");
+
+ // C2NumberPortTuning: kIndex | tun | port (bool, args)
+ static_assert(sizeof(C2NumberPortTuning) == 12, "bad size");
+ // C2NumberPortTuning::input: kIndex | tun | port | input (args)
+ // C2NumberPortTuning::output: kIndex | tun | port | output (args)
+ static_assert(C2NumberPortTuning::input::baseIndex ==
+ kParamIndexNumber, "bad index");
+ static_assert(C2NumberPortTuning::input::typeIndex ==
+ (kParamIndexNumber | I::kTypeTuning | I::kDirInput), "bad index");
+ static_assert(C2NumberPortTuning::output::baseIndex ==
+ kParamIndexNumber, "bad index");
+ static_assert(C2NumberPortTuning::output::typeIndex ==
+ (kParamIndexNumber | I::kTypeTuning | I::kDirOutput), "bad index");
+ static_assert(sizeof(C2NumberPortTuning::input) == 12, "bad size");
+ static_assert(sizeof(C2NumberPortTuning::output) == 12, "bad size");
+ static_assert(offsetof(C2NumberPortTuning::input, _mSize) == 0, "bad size");
+ static_assert(offsetof(C2NumberPortTuning::input, _mIndex) == 4, "bad offset");
+ static_assert(offsetof(C2NumberPortTuning::input, mNumber) == 8, "bad offset");
+ static_assert(offsetof(C2NumberPortTuning::output, _mSize) == 0, "bad size");
+ static_assert(offsetof(C2NumberPortTuning::output, _mIndex) == 4, "bad offset");
+ static_assert(offsetof(C2NumberPortTuning::output, mNumber) == 8, "bad offset");
+
+ // C2NumberStreamTuning: kIndex | tun | str (bool, uint, args)
+ static_assert(sizeof(C2NumberStreamTuning) == 12u, "bad size");
+ // C2NumberStreamTuning::input kIndex | tun | str | input (int, args)
+ // C2NumberStreamTuning::output kIx | tun | str | output (int, args)
+ static_assert(C2NumberStreamTuning::input::baseIndex ==
+ kParamIndexNumber, "bad index");
+ static_assert(C2NumberStreamTuning::input::typeIndex ==
+ (kParamIndexNumber | I::kTypeTuning | I::kDirInput | I::kStreamFlag), "bad index");
+ static_assert(C2NumberStreamTuning::output::baseIndex ==
+ kParamIndexNumber, "bad index");
+ static_assert(C2NumberStreamTuning::output::typeIndex ==
+ (kParamIndexNumber | I::kTypeTuning | I::kDirOutput | I::kStreamFlag), "bad index");
+ static_assert(sizeof(C2NumberStreamTuning::input) == 12u, "bad size");
+ static_assert(sizeof(C2NumberStreamTuning::output) == 12u, "bad size");
+ static_assert(offsetof(C2NumberStreamTuning::input, _mSize) == 0, "bad size");
+ static_assert(offsetof(C2NumberStreamTuning::input, _mIndex) == 4, "bad offset");
+ static_assert(offsetof(C2NumberStreamTuning::input, mNumber) == 8, "bad offset");
+ static_assert(offsetof(C2NumberStreamTuning::output, _mSize) == 0, "bad size");
+ static_assert(offsetof(C2NumberStreamTuning::output, _mIndex) == 4, "bad offset");
+ static_assert(offsetof(C2NumberStreamTuning::output, mNumber) == 8, "bad offset");
+}
+
+void _C2ParamInspector::StaticFlexTest() {
+ typedef C2Param::Index I;
+
+ // C2NumbersStruct: baseIndex = kIndex (args)
+ static_assert(C2NumbersStruct::baseIndex == (I::kFlexibleFlag | kParamIndexNumbers), "bad index");
+ static_assert(sizeof(C2NumbersStruct) == 0, "bad size");
+
+ // C2NumbersTuning: kIndex | tun | global (args)
+ static_assert(C2NumbersTuning::baseIndex == (I::kFlexibleFlag | kParamIndexNumbers), "bad index");
+ static_assert(C2NumbersTuning::typeIndex == (I::kFlexibleFlag | kParamIndexNumbers | I::kTypeTuning | I::kDirGlobal), "bad index");
+ static_assert(sizeof(C2NumbersTuning) == 8, "bad size");
+
+ static_assert(offsetof(C2NumbersTuning, _mSize) == 0, "bad size");
+ static_assert(offsetof(C2NumbersTuning, _mIndex) == 4, "bad offset");
+ static_assert(offsetof(C2NumbersTuning, m.mNumbers) == 8, "bad offset");
+
+ // C2NumbersPortTuning: kIndex | tun | port (bool, args)
+ static_assert(sizeof(C2NumbersPortTuning) == 8, "bad size");
+ // C2NumbersPortTuning::input: kIndex | tun | port | input (args)
+ // C2NumbersPortTuning::output: kIndex | tun | port | output (args)
+ static_assert(C2NumbersPortTuning::input::baseIndex ==
+ (I::kFlexibleFlag | kParamIndexNumbers), "bad index");
+ static_assert(C2NumbersPortTuning::input::typeIndex ==
+ (I::kFlexibleFlag | kParamIndexNumbers | I::kTypeTuning | I::kDirInput), "bad index");
+ static_assert(C2NumbersPortTuning::output::baseIndex ==
+ (I::kFlexibleFlag | kParamIndexNumbers), "bad index");
+ static_assert(C2NumbersPortTuning::output::typeIndex ==
+ (I::kFlexibleFlag | kParamIndexNumbers | I::kTypeTuning | I::kDirOutput), "bad index");
+ static_assert(sizeof(C2NumbersPortTuning::input) == 8, "bad size");
+ static_assert(sizeof(C2NumbersPortTuning::output) == 8, "bad size");
+ static_assert(offsetof(C2NumbersPortTuning::input, _mSize) == 0, "bad size");
+ static_assert(offsetof(C2NumbersPortTuning::input, _mIndex) == 4, "bad offset");
+ static_assert(offsetof(C2NumbersPortTuning::input, m.mNumbers) == 8, "bad offset");
+ static_assert(offsetof(C2NumbersPortTuning::output, _mSize) == 0, "bad size");
+ static_assert(offsetof(C2NumbersPortTuning::output, _mIndex) == 4, "bad offset");
+ static_assert(offsetof(C2NumbersPortTuning::output, m.mNumbers) == 8, "bad offset");
+
+ // C2NumbersStreamTuning: kIndex | tun | str (bool, uint, args)
+ static_assert(sizeof(C2NumbersStreamTuning) == 8, "bad size");
+ // C2NumbersStreamTuning::input kIndex | tun | str | input (int, args)
+ // C2NumbersStreamTuning::output kIx | tun | str | output (int, args)
+ static_assert(C2NumbersStreamTuning::input::baseIndex ==
+ (I::kFlexibleFlag | kParamIndexNumbers), "bad index");
+ static_assert(C2NumbersStreamTuning::input::typeIndex ==
+ (I::kFlexibleFlag | kParamIndexNumbers | I::kTypeTuning | I::kDirInput | I::kStreamFlag), "bad index");
+ static_assert(C2NumbersStreamTuning::output::baseIndex ==
+ (I::kFlexibleFlag | kParamIndexNumbers), "bad index");
+ static_assert(C2NumbersStreamTuning::output::typeIndex ==
+ (I::kFlexibleFlag | kParamIndexNumbers | I::kTypeTuning | I::kDirOutput | I::kStreamFlag), "bad index");
+ static_assert(sizeof(C2NumbersStreamTuning::input) == 8, "bad size");
+ static_assert(sizeof(C2NumbersStreamTuning::output) == 8, "bad size");
+ static_assert(offsetof(C2NumbersStreamTuning::input, _mSize) == 0, "bad size");
+ static_assert(offsetof(C2NumbersStreamTuning::input, _mIndex) == 4, "bad offset");
+ static_assert(offsetof(C2NumbersStreamTuning::input, m.mNumbers) == 8, "bad offset");
+ static_assert(offsetof(C2NumbersStreamTuning::output, _mSize) == 0, "bad size");
+ static_assert(offsetof(C2NumbersStreamTuning::output, _mIndex) == 4, "bad offset");
+ static_assert(offsetof(C2NumbersStreamTuning::output, m.mNumbers) == 8, "bad offset");
+}
+
+TEST_F(C2ParamTest, ParamOpsTest) {
+ const C2NumberStruct str(100);
+ C2NumberStruct bstr;
+
+ {
+ EXPECT_EQ(100, str.mNumber);
+ bstr.mNumber = 100;
+
+ C2Param::BaseIndex index = C2NumberStruct::baseIndex;
+ EXPECT_FALSE(index.isVendor());
+ EXPECT_FALSE(index.isFlexible());
+ EXPECT_EQ(index.baseIndex(), kParamIndexNumber);
+ EXPECT_EQ(index.paramIndex(), kParamIndexNumber);
+ }
+
+ const C2NumberTuning tun(100);
+ C2NumberTuning btun;
+
+ {
+ // flags & invariables
+ for (const auto &p : { tun, btun }) {
+ EXPECT_TRUE((bool)p);
+ EXPECT_FALSE(!p);
+ EXPECT_EQ(12u, p.size());
+
+ EXPECT_FALSE(p.isVendor());
+ EXPECT_FALSE(p.isFlexible());
+ EXPECT_TRUE(p.isGlobal());
+ EXPECT_FALSE(p.forInput());
+ EXPECT_FALSE(p.forOutput());
+ EXPECT_FALSE(p.forStream());
+ EXPECT_FALSE(p.forPort());
+ }
+
+ // value
+ EXPECT_EQ(100, tun.mNumber);
+ EXPECT_EQ(0, btun.mNumber);
+ EXPECT_FALSE(tun == btun);
+ EXPECT_FALSE(tun.operator==(btun));
+ EXPECT_TRUE(tun != btun);
+ EXPECT_TRUE(tun.operator!=(btun));
+ btun.mNumber = 100;
+ EXPECT_EQ(tun, btun);
+
+ // index
+ EXPECT_EQ(C2Param::Type(tun.type()).baseIndex(), C2NumberStruct::baseIndex);
+ EXPECT_EQ(C2Param::Type(tun.type()).paramIndex(), kParamIndexNumber);
+ EXPECT_EQ(tun.type(), C2NumberTuning::typeIndex);
+ EXPECT_EQ(tun.stream(), ~0u);
+
+ C2Param::BaseIndex index = C2NumberTuning::baseIndex;
+ EXPECT_FALSE(index.isVendor());
+ EXPECT_FALSE(index.isFlexible());
+ EXPECT_EQ(index.baseIndex(), kParamIndexNumber);
+ EXPECT_EQ(index.paramIndex(), kParamIndexNumber);
+
+ C2Param::Type type = C2NumberTuning::typeIndex;
+ EXPECT_FALSE(type.isVendor());
+ EXPECT_FALSE(type.isFlexible());
+ EXPECT_TRUE(type.isGlobal());
+ EXPECT_FALSE(type.forInput());
+ EXPECT_FALSE(type.forOutput());
+ EXPECT_FALSE(type.forStream());
+ EXPECT_FALSE(type.forPort());
+
+ EXPECT_EQ(C2NumberTuning::From(nullptr), nullptr);
+ EXPECT_EQ(C2NumberTuning::From(&tun), &tun);
+ EXPECT_EQ(C2NumberPortTuning::From(&tun), nullptr);
+ EXPECT_EQ(C2NumberPortTuning::input::From(&tun), nullptr);
+ EXPECT_EQ(C2NumberPortTuning::output::From(&tun), nullptr);
+ EXPECT_EQ(C2NumberStreamTuning::From(&tun), nullptr);
+ EXPECT_EQ(C2NumberStreamTuning::input::From(&tun), nullptr);
+ EXPECT_EQ(C2NumberStreamTuning::output::From(&tun), nullptr);
+ }
+
+ const C2NumberPortTuning outp1(true, 100), inp1(false, 100);
+ C2NumberPortTuning boutp1, binp1, binp3(false, 100);
+ const C2NumberPortTuning::input inp2(100);
+ C2NumberPortTuning::input binp2;
+ const C2NumberPortTuning::output outp2(100);
+ C2NumberPortTuning::output boutp2;
+
+ {
+ static_assert(canCallSetPort(binp3), "should be able to");
+ static_assert(canCallSetPort(binp1), "should be able to");
+ static_assert(!canCallSetPort(inp1), "should not be able to (const)");
+ static_assert(!canCallSetPort(inp2), "should not be able to (const & type)");
+ static_assert(!canCallSetPort(binp2), "should not be able to (type)");
+
+ // flags & invariables
+ for (const auto &p : { outp1, inp1, boutp1 }) {
+ EXPECT_EQ(12u, p.size());
+ EXPECT_FALSE(p.isVendor());
+ EXPECT_FALSE(p.isFlexible());
+ EXPECT_FALSE(p.isGlobal());
+ EXPECT_FALSE(p.forStream());
+ EXPECT_TRUE(p.forPort());
+ }
+ for (const auto &p : { inp2, binp2 }) {
+ EXPECT_EQ(12u, p.size());
+ EXPECT_FALSE(p.isVendor());
+ EXPECT_FALSE(p.isFlexible());
+ EXPECT_FALSE(p.isGlobal());
+ EXPECT_FALSE(p.forStream());
+ EXPECT_TRUE(p.forPort());
+ }
+ for (const auto &p : { outp2, boutp2 }) {
+ EXPECT_EQ(12u, p.size());
+ EXPECT_FALSE(p.isVendor());
+ EXPECT_FALSE(p.isFlexible());
+ EXPECT_FALSE(p.isGlobal());
+ EXPECT_FALSE(p.forStream());
+ EXPECT_TRUE(p.forPort());
+ }
+
+ // port specific flags & invariables
+ EXPECT_FALSE(outp1.forInput());
+ EXPECT_TRUE(outp1.forOutput());
+
+ EXPECT_TRUE(inp1.forInput());
+ EXPECT_FALSE(inp1.forOutput());
+
+ for (const auto &p : { outp1, inp1 }) {
+ EXPECT_TRUE((bool)p);
+ EXPECT_FALSE(!p);
+ EXPECT_EQ(100, p.mNumber);
+ }
+ for (const auto &p : { outp2, boutp2 }) {
+ EXPECT_TRUE((bool)p);
+ EXPECT_FALSE(!p);
+
+ EXPECT_FALSE(p.forInput());
+ EXPECT_TRUE(p.forOutput());
+ }
+ for (const auto &p : { inp2, binp2 }) {
+ EXPECT_TRUE((bool)p);
+ EXPECT_FALSE(!p);
+
+ EXPECT_TRUE(p.forInput());
+ EXPECT_FALSE(p.forOutput());
+ }
+ for (const auto &p : { boutp1 } ) {
+ EXPECT_FALSE((bool)p);
+ EXPECT_TRUE(!p);
+
+ EXPECT_FALSE(p.forInput());
+ EXPECT_FALSE(p.forOutput());
+ EXPECT_EQ(0, p.mNumber);
+ }
+
+ // values
+ EXPECT_EQ(100, inp2.mNumber);
+ EXPECT_EQ(100, outp2.mNumber);
+ EXPECT_EQ(0, binp1.mNumber);
+ EXPECT_EQ(0, binp2.mNumber);
+ EXPECT_EQ(0, boutp1.mNumber);
+ EXPECT_EQ(0, boutp2.mNumber);
+
+ EXPECT_TRUE(inp1 != outp1);
+ EXPECT_TRUE(inp1 == inp2);
+ EXPECT_TRUE(outp1 == outp2);
+ EXPECT_TRUE(binp1 == boutp1);
+ EXPECT_TRUE(binp2 != boutp2);
+
+ EXPECT_TRUE(inp1 != binp1);
+ binp1.mNumber = 100;
+ EXPECT_TRUE(inp1 != binp1);
+ binp1.setPort(false /* output */);
+ EXPECT_TRUE((bool)binp1);
+ EXPECT_FALSE(!binp1);
+ EXPECT_TRUE(inp1 == binp1);
+
+ EXPECT_TRUE(inp2 != binp2);
+ binp2.mNumber = 100;
+ EXPECT_TRUE(inp2 == binp2);
+
+ binp1.setPort(true /* output */);
+ EXPECT_TRUE(outp1 == binp1);
+
+ EXPECT_TRUE(outp1 != boutp1);
+ boutp1.mNumber = 100;
+ EXPECT_TRUE(outp1 != boutp1);
+ boutp1.setPort(true /* output */);
+ EXPECT_TRUE((bool)boutp1);
+ EXPECT_FALSE(!boutp1);
+ EXPECT_TRUE(outp1 == boutp1);
+
+ EXPECT_TRUE(outp2 != boutp2);
+ boutp2.mNumber = 100;
+ EXPECT_TRUE(outp2 == boutp2);
+
+ boutp1.setPort(false /* output */);
+ EXPECT_TRUE(inp1 == boutp1);
+
+ // index
+ EXPECT_EQ(C2Param::Type(inp1.type()).baseIndex(), C2NumberStruct::baseIndex);
+ EXPECT_EQ(C2Param::Type(inp1.type()).paramIndex(), kParamIndexNumber);
+ EXPECT_EQ(inp1.type(), C2NumberPortTuning::input::typeIndex);
+ EXPECT_EQ(inp1.stream(), ~0u);
+
+ EXPECT_EQ(C2Param::Type(inp2.type()).baseIndex(), C2NumberStruct::baseIndex);
+ EXPECT_EQ(C2Param::Type(inp2.type()).paramIndex(), kParamIndexNumber);
+ EXPECT_EQ(inp2.type(), C2NumberPortTuning::input::typeIndex);
+ EXPECT_EQ(inp2.stream(), ~0u);
+
+ EXPECT_EQ(C2Param::Type(outp1.type()).baseIndex(), C2NumberStruct::baseIndex);
+ EXPECT_EQ(C2Param::Type(outp1.type()).paramIndex(), kParamIndexNumber);
+ EXPECT_EQ(outp1.type(), C2NumberPortTuning::output::typeIndex);
+ EXPECT_EQ(outp1.stream(), ~0u);
+
+ EXPECT_EQ(C2Param::Type(outp2.type()).baseIndex(), C2NumberStruct::baseIndex);
+ EXPECT_EQ(C2Param::Type(outp2.type()).paramIndex(), kParamIndexNumber);
+ EXPECT_EQ(outp2.type(), C2NumberPortTuning::output::typeIndex);
+ EXPECT_EQ(outp2.stream(), ~0u);
+
+ C2Param::BaseIndex index = C2NumberPortTuning::input::typeIndex;
+ EXPECT_FALSE(index.isVendor());
+ EXPECT_FALSE(index.isFlexible());
+ EXPECT_EQ(index.baseIndex(), kParamIndexNumber);
+ EXPECT_EQ(index.paramIndex(), kParamIndexNumber);
+
+ index = C2NumberPortTuning::output::typeIndex;
+ EXPECT_FALSE(index.isVendor());
+ EXPECT_FALSE(index.isFlexible());
+ EXPECT_EQ(index.baseIndex(), kParamIndexNumber);
+ EXPECT_EQ(index.paramIndex(), kParamIndexNumber);
+
+ C2Param::Type type = C2NumberPortTuning::input::typeIndex;
+ EXPECT_FALSE(type.isVendor());
+ EXPECT_FALSE(type.isFlexible());
+ EXPECT_FALSE(type.isGlobal());
+ EXPECT_TRUE(type.forInput());
+ EXPECT_FALSE(type.forOutput());
+ EXPECT_FALSE(type.forStream());
+ EXPECT_TRUE(type.forPort());
+
+ type = C2NumberPortTuning::output::typeIndex;
+ EXPECT_FALSE(type.isVendor());
+ EXPECT_FALSE(type.isFlexible());
+ EXPECT_FALSE(type.isGlobal());
+ EXPECT_FALSE(type.forInput());
+ EXPECT_TRUE(type.forOutput());
+ EXPECT_FALSE(type.forStream());
+ EXPECT_TRUE(type.forPort());
+
+ EXPECT_EQ(C2NumberPortTuning::From(nullptr), nullptr);
+ EXPECT_EQ(C2NumberPortTuning::input::From(nullptr), nullptr);
+ EXPECT_EQ(C2NumberPortTuning::output::From(nullptr), nullptr);
+ EXPECT_EQ(C2NumberTuning::From(&inp1), nullptr);
+ EXPECT_EQ(C2NumberTuning::From(&inp2), nullptr);
+ EXPECT_EQ(C2NumberTuning::From(&outp1), nullptr);
+ EXPECT_EQ(C2NumberTuning::From(&outp2), nullptr);
+ EXPECT_EQ(C2NumberPortTuning::From(&inp1), &inp1);
+ EXPECT_EQ(C2NumberPortTuning::From(&inp2), (C2NumberPortTuning*)&inp2);
+ EXPECT_EQ(C2NumberPortTuning::From(&outp1), &outp1);
+ EXPECT_EQ(C2NumberPortTuning::From(&outp2), (C2NumberPortTuning*)&outp2);
+ EXPECT_EQ(C2NumberPortTuning::input::From(&inp1), (C2NumberPortTuning::input*)&inp1);
+ EXPECT_EQ(C2NumberPortTuning::input::From(&inp2), &inp2);
+ EXPECT_EQ(C2NumberPortTuning::input::From(&outp1), nullptr);
+ EXPECT_EQ(C2NumberPortTuning::input::From(&outp2), nullptr);
+ EXPECT_EQ(C2NumberPortTuning::output::From(&inp1), nullptr);
+ EXPECT_EQ(C2NumberPortTuning::output::From(&inp2), nullptr);
+ EXPECT_EQ(C2NumberPortTuning::output::From(&outp1), (C2NumberPortTuning::output*)&outp1);
+ EXPECT_EQ(C2NumberPortTuning::output::From(&outp2), &outp2);
+ EXPECT_EQ(C2NumberStreamTuning::From(&inp1), nullptr);
+ EXPECT_EQ(C2NumberStreamTuning::From(&inp2), nullptr);
+ EXPECT_EQ(C2NumberStreamTuning::From(&outp1), nullptr);
+ EXPECT_EQ(C2NumberStreamTuning::From(&outp2), nullptr);
+ EXPECT_EQ(C2NumberStreamTuning::input::From(&inp1), nullptr);
+ EXPECT_EQ(C2NumberStreamTuning::input::From(&inp2), nullptr);
+ EXPECT_EQ(C2NumberStreamTuning::input::From(&outp1), nullptr);
+ EXPECT_EQ(C2NumberStreamTuning::input::From(&outp2), nullptr);
+ EXPECT_EQ(C2NumberStreamTuning::output::From(&inp1), nullptr);
+ EXPECT_EQ(C2NumberStreamTuning::output::From(&inp2), nullptr);
+ EXPECT_EQ(C2NumberStreamTuning::output::From(&outp1), nullptr);
+ EXPECT_EQ(C2NumberStreamTuning::output::From(&outp2), nullptr);
+ }
+
+ const C2NumberStreamTuning outs1(true, 1u, 100), ins1(false, 1u, 100);
+ C2NumberStreamTuning bouts1, bins1, bins3(false, 1u, 100);
+ const C2NumberStreamTuning::input ins2(1u, 100);
+ C2NumberStreamTuning::input bins2;
+ const C2NumberStreamTuning::output outs2(1u, 100);
+ C2NumberStreamTuning::output bouts2;
+
+ {
+ static_assert(canCallSetPort(bins3), "should be able to");
+ static_assert(canCallSetPort(bins1), "should be able to");
+ static_assert(!canCallSetPort(ins1), "should not be able to (const)");
+ static_assert(!canCallSetPort(ins2), "should not be able to (const & type)");
+ static_assert(!canCallSetPort(bins2), "should not be able to (type)");
+
+ // flags & invariables
+ for (const auto &p : { outs1, ins1, bouts1 }) {
+ EXPECT_EQ(12u, p.size());
+ EXPECT_FALSE(p.isVendor());
+ EXPECT_FALSE(p.isFlexible());
+ EXPECT_FALSE(p.isGlobal());
+ EXPECT_TRUE(p.forStream());
+ EXPECT_FALSE(p.forPort());
+ }
+ for (const auto &p : { ins2, bins2 }) {
+ EXPECT_EQ(12u, p.size());
+ EXPECT_FALSE(p.isVendor());
+ EXPECT_FALSE(p.isFlexible());
+ EXPECT_FALSE(p.isGlobal());
+ EXPECT_TRUE(p.forStream());
+ EXPECT_FALSE(p.forPort());
+ }
+ for (const auto &p : { outs2, bouts2 }) {
+ EXPECT_EQ(12u, p.size());
+ EXPECT_FALSE(p.isVendor());
+ EXPECT_FALSE(p.isFlexible());
+ EXPECT_FALSE(p.isGlobal());
+ EXPECT_TRUE(p.forStream());
+ EXPECT_FALSE(p.forPort());
+ }
+
+ // port specific flags & invariables
+ EXPECT_FALSE(outs1.forInput());
+ EXPECT_TRUE(outs1.forOutput());
+
+ EXPECT_TRUE(ins1.forInput());
+ EXPECT_FALSE(ins1.forOutput());
+
+ for (const auto &p : { outs1, ins1 }) {
+ EXPECT_TRUE((bool)p);
+ EXPECT_FALSE(!p);
+ EXPECT_EQ(100, p.mNumber);
+ EXPECT_EQ(1u, p.stream());
+ }
+ for (const auto &p : { outs2, bouts2 }) {
+ EXPECT_TRUE((bool)p);
+ EXPECT_FALSE(!p);
+
+ EXPECT_FALSE(p.forInput());
+ EXPECT_TRUE(p.forOutput());
+ }
+ for (const auto &p : { ins2, bins2 }) {
+ EXPECT_TRUE((bool)p);
+ EXPECT_FALSE(!p);
+
+ EXPECT_TRUE(p.forInput());
+ EXPECT_FALSE(p.forOutput());
+ }
+ for (const auto &p : { bouts1 } ) {
+ EXPECT_FALSE((bool)p);
+ EXPECT_TRUE(!p);
+
+ EXPECT_FALSE(p.forInput());
+ EXPECT_FALSE(p.forOutput());
+ EXPECT_EQ(0, p.mNumber);
+ }
+
+ // values
+ EXPECT_EQ(100, ins2.mNumber);
+ EXPECT_EQ(100, outs2.mNumber);
+ EXPECT_EQ(0, bins1.mNumber);
+ EXPECT_EQ(0, bins2.mNumber);
+ EXPECT_EQ(0, bouts1.mNumber);
+ EXPECT_EQ(0, bouts2.mNumber);
+
+ EXPECT_EQ(1u, ins2.stream());
+ EXPECT_EQ(1u, outs2.stream());
+ EXPECT_EQ(0u, bins1.stream());
+ EXPECT_EQ(0u, bins2.stream());
+ EXPECT_EQ(0u, bouts1.stream());
+ EXPECT_EQ(0u, bouts2.stream());
+
+ EXPECT_TRUE(ins1 != outs1);
+ EXPECT_TRUE(ins1 == ins2);
+ EXPECT_TRUE(outs1 == outs2);
+ EXPECT_TRUE(bins1 == bouts1);
+ EXPECT_TRUE(bins2 != bouts2);
+
+ EXPECT_TRUE(ins1 != bins1);
+ bins1.mNumber = 100;
+ EXPECT_TRUE(ins1 != bins1);
+ bins1.setPort(false /* output */);
+ EXPECT_TRUE(ins1 != bins1);
+ bins1.setStream(1u);
+ EXPECT_TRUE(ins1 == bins1);
+
+ EXPECT_TRUE(ins2 != bins2);
+ bins2.mNumber = 100;
+ EXPECT_TRUE(ins2 != bins2);
+ bins2.setStream(1u);
+ EXPECT_TRUE(ins2 == bins2);
+
+ bins1.setPort(true /* output */);
+ EXPECT_TRUE(outs1 == bins1);
+
+ EXPECT_TRUE(outs1 != bouts1);
+ bouts1.mNumber = 100;
+ EXPECT_TRUE(outs1 != bouts1);
+ bouts1.setPort(true /* output */);
+ EXPECT_TRUE(outs1 != bouts1);
+ bouts1.setStream(1u);
+ EXPECT_TRUE(outs1 == bouts1);
+
+ EXPECT_TRUE(outs2 != bouts2);
+ bouts2.mNumber = 100;
+ EXPECT_TRUE(outs2 != bouts2);
+ bouts2.setStream(1u);
+ EXPECT_TRUE(outs2 == bouts2);
+
+ bouts1.setPort(false /* output */);
+ EXPECT_TRUE(ins1 == bouts1);
+
+ // index
+ EXPECT_EQ(C2Param::Type(ins1.type()).baseIndex(), C2NumberStruct::baseIndex);
+ EXPECT_EQ(C2Param::Type(ins1.type()).paramIndex(), kParamIndexNumber);
+ EXPECT_EQ(ins1.type(), C2NumberStreamTuning::input::typeIndex);
+
+ EXPECT_EQ(C2Param::Type(ins2.type()).baseIndex(), C2NumberStruct::baseIndex);
+ EXPECT_EQ(C2Param::Type(ins2.type()).paramIndex(), kParamIndexNumber);
+ EXPECT_EQ(ins2.type(), C2NumberStreamTuning::input::typeIndex);
+
+ EXPECT_EQ(C2Param::Type(outs1.type()).baseIndex(), C2NumberStruct::baseIndex);
+ EXPECT_EQ(C2Param::Type(outs1.type()).paramIndex(), kParamIndexNumber);
+ EXPECT_EQ(outs1.type(), C2NumberStreamTuning::output::typeIndex);
+
+ EXPECT_EQ(C2Param::Type(outs2.type()).baseIndex(), C2NumberStruct::baseIndex);
+ EXPECT_EQ(C2Param::Type(outs2.type()).paramIndex(), kParamIndexNumber);
+ EXPECT_EQ(outs2.type(), C2NumberStreamTuning::output::typeIndex);
+
+ C2Param::BaseIndex index = C2NumberStreamTuning::input::typeIndex;
+ EXPECT_FALSE(index.isVendor());
+ EXPECT_FALSE(index.isFlexible());
+ EXPECT_EQ(index.baseIndex(), kParamIndexNumber);
+ EXPECT_EQ(index.paramIndex(), kParamIndexNumber);
+
+ index = C2NumberStreamTuning::output::typeIndex;
+ EXPECT_FALSE(index.isVendor());
+ EXPECT_FALSE(index.isFlexible());
+ EXPECT_EQ(index.baseIndex(), kParamIndexNumber);
+ EXPECT_EQ(index.paramIndex(), kParamIndexNumber);
+
+ C2Param::Type type = C2NumberStreamTuning::input::typeIndex;
+ EXPECT_FALSE(type.isVendor());
+ EXPECT_FALSE(type.isFlexible());
+ EXPECT_FALSE(type.isGlobal());
+ EXPECT_TRUE(type.forInput());
+ EXPECT_FALSE(type.forOutput());
+ EXPECT_TRUE(type.forStream());
+ EXPECT_FALSE(type.forPort());
+
+ type = C2NumberStreamTuning::output::typeIndex;
+ EXPECT_FALSE(type.isVendor());
+ EXPECT_FALSE(type.isFlexible());
+ EXPECT_FALSE(type.isGlobal());
+ EXPECT_FALSE(type.forInput());
+ EXPECT_TRUE(type.forOutput());
+ EXPECT_TRUE(type.forStream());
+ EXPECT_FALSE(type.forPort());
+
+ EXPECT_EQ(C2NumberPortTuning::From(nullptr), nullptr);
+ EXPECT_EQ(C2NumberPortTuning::input::From(nullptr), nullptr);
+ EXPECT_EQ(C2NumberPortTuning::output::From(nullptr), nullptr);
+ EXPECT_EQ(C2NumberTuning::From(&ins1), nullptr);
+ EXPECT_EQ(C2NumberTuning::From(&ins2), nullptr);
+ EXPECT_EQ(C2NumberTuning::From(&outs1), nullptr);
+ EXPECT_EQ(C2NumberTuning::From(&outs2), nullptr);
+ EXPECT_EQ(C2NumberPortTuning::From(&ins1), nullptr);
+ EXPECT_EQ(C2NumberPortTuning::From(&ins2), nullptr);
+ EXPECT_EQ(C2NumberPortTuning::From(&outs1), nullptr);
+ EXPECT_EQ(C2NumberPortTuning::From(&outs2), nullptr);
+ EXPECT_EQ(C2NumberPortTuning::input::From(&ins1), nullptr);
+ EXPECT_EQ(C2NumberPortTuning::input::From(&ins2), nullptr);
+ EXPECT_EQ(C2NumberPortTuning::input::From(&outs1), nullptr);
+ EXPECT_EQ(C2NumberPortTuning::input::From(&outs2), nullptr);
+ EXPECT_EQ(C2NumberPortTuning::output::From(&ins1), nullptr);
+ EXPECT_EQ(C2NumberPortTuning::output::From(&ins2), nullptr);
+ EXPECT_EQ(C2NumberPortTuning::output::From(&outs1), nullptr);
+ EXPECT_EQ(C2NumberPortTuning::output::From(&outs2), nullptr);
+ EXPECT_EQ(C2NumberStreamTuning::From(&ins1), &ins1);
+ EXPECT_EQ(C2NumberStreamTuning::From(&ins2), (C2NumberStreamTuning*)&ins2);
+ EXPECT_EQ(C2NumberStreamTuning::From(&outs1), &outs1);
+ EXPECT_EQ(C2NumberStreamTuning::From(&outs2), (C2NumberStreamTuning*)&outs2);
+ EXPECT_EQ(C2NumberStreamTuning::input::From(&ins1), (C2NumberStreamTuning::input*)&ins1);
+ EXPECT_EQ(C2NumberStreamTuning::input::From(&ins2), &ins2);
+ EXPECT_EQ(C2NumberStreamTuning::input::From(&outs1), nullptr);
+ EXPECT_EQ(C2NumberStreamTuning::input::From(&outs2), nullptr);
+ EXPECT_EQ(C2NumberStreamTuning::output::From(&ins1), nullptr);
+ EXPECT_EQ(C2NumberStreamTuning::output::From(&ins2), nullptr);
+ EXPECT_EQ(C2NumberStreamTuning::output::From(&outs1), (C2NumberStreamTuning::output*)&outs1);
+ EXPECT_EQ(C2NumberStreamTuning::output::From(&outs2), &outs2);
+
+ }
+
+ {
+ uint32_t videoWidth[] = { 12u, C2NumberStreamTuning::output::typeIndex, 100 };
+ C2Param *p1 = C2Param::From(videoWidth, sizeof(videoWidth));
+ EXPECT_NE(p1, nullptr);
+ EXPECT_EQ(12u, p1->size());
+ EXPECT_EQ(p1->type(), C2NumberStreamTuning::output::typeIndex);
+
+ p1 = C2Param::From(videoWidth, sizeof(videoWidth) + 2);
+ EXPECT_EQ(p1, nullptr);
+
+ p1 = C2Param::From(videoWidth, sizeof(videoWidth) - 2);
+ EXPECT_EQ(p1, nullptr);
+
+ p1 = C2Param::From(videoWidth, 3);
+ EXPECT_EQ(p1, nullptr);
+
+ p1 = C2Param::From(videoWidth, 0);
+ EXPECT_EQ(p1, nullptr);
+ }
+}
+
+void StaticTestAddBaseIndex() {
+ struct nobase {};
+ struct base { enum : uint32_t { baseIndex = 1 }; };
+ static_assert(C2AddBaseIndex<nobase, 2>::baseIndex == 2, "should be 2");
+ static_assert(C2AddBaseIndex<base, 1>::baseIndex == 1, "should be 1");
+}
+
+class TestFlexHelper {
+ struct _Flex {
+ int32_t a;
+ char b[];
+ _Flex() {}
+ FLEX(_Flex, b);
+ };
+
+ struct _BoFlex {
+ _Flex a;
+ _BoFlex() {}
+ FLEX(_BoFlex, a);
+ };
+
+ struct _NonFlex {
+ };
+
+
+ static void StaticTest() {
+ static_assert(std::is_same<_C2FlexHelper<char>::flexType, void>::value, "should be void");
+ static_assert(std::is_same<_C2FlexHelper<char[]>::flexType, char>::value, "should be char");
+ static_assert(std::is_same<_C2FlexHelper<_Flex>::flexType, char>::value, "should be char");
+
+ static_assert(std::is_same<_C2FlexHelper<_BoFlex>::flexType, char>::value, "should be void");
+
+ static_assert(_C2Flexible<_Flex>::value, "should be flexible");
+ static_assert(!_C2Flexible<_NonFlex>::value, "should not be flexible");
+ }
+};
+
+TEST_F(C2ParamTest, FlexParamOpsTest) {
+// const C2NumbersStruct str{100};
+ C2NumbersStruct bstr;
+ {
+// EXPECT_EQ(100, str->m.mNumbers[0]);
+ (void)&bstr.mNumbers[0];
+
+ C2Param::BaseIndex index = C2NumbersStruct::baseIndex;
+ EXPECT_FALSE(index.isVendor());
+ EXPECT_TRUE(index.isFlexible());
+ EXPECT_EQ(index.baseIndex(), kParamIndexNumbers | C2Param::BaseIndex::_kFlexibleFlag);
+ EXPECT_EQ(index.paramIndex(), kParamIndexNumbers);
+ }
+
+ std::unique_ptr<C2NumbersTuning> tun_ = C2NumbersTuning::alloc_unique(1);
+ tun_->m.mNumbers[0] = 100;
+ std::unique_ptr<const C2NumbersTuning> tun = std::move(tun_);
+ std::shared_ptr<C2NumbersTuning> btun = C2NumbersTuning::alloc_shared(1);
+
+ {
+ // flags & invariables
+ const C2NumbersTuning *T[] = { tun.get(), btun.get() };
+ for (const auto p : T) {
+ EXPECT_TRUE((bool)(*p));
+ EXPECT_FALSE(!(*p));
+ EXPECT_EQ(12u, p->size());
+
+ EXPECT_FALSE(p->isVendor());
+ EXPECT_TRUE(p->isFlexible());
+ EXPECT_TRUE(p->isGlobal());
+ EXPECT_FALSE(p->forInput());
+ EXPECT_FALSE(p->forOutput());
+ EXPECT_FALSE(p->forStream());
+ EXPECT_FALSE(p->forPort());
+ }
+
+ // value
+ EXPECT_EQ(100, tun->m.mNumbers[0]);
+ EXPECT_EQ(0, btun->m.mNumbers[0]);
+ EXPECT_FALSE(*tun == *btun);
+ EXPECT_FALSE(tun->operator==(*btun));
+ EXPECT_TRUE(*tun != *btun);
+ EXPECT_TRUE(tun->operator!=(*btun));
+ btun->m.mNumbers[0] = 100;
+ EXPECT_EQ(*tun, *btun);
+
+ // index
+ EXPECT_EQ(C2Param::Type(tun->type()).baseIndex(), C2NumbersStruct::baseIndex);
+ EXPECT_EQ(C2Param::Type(tun->type()).paramIndex(), kParamIndexNumbers);
+ EXPECT_EQ(tun->type(), C2NumbersTuning::typeIndex);
+ EXPECT_EQ(tun->stream(), ~0u);
+
+ C2Param::BaseIndex index = C2NumbersTuning::baseIndex;
+ EXPECT_FALSE(index.isVendor());
+ EXPECT_TRUE(index.isFlexible());
+ EXPECT_EQ(index.baseIndex(), kParamIndexNumbers | C2Param::BaseIndex::_kFlexibleFlag);
+ EXPECT_EQ(index.paramIndex(), kParamIndexNumbers);
+
+ C2Param::Type type = C2NumbersTuning::typeIndex;
+ EXPECT_FALSE(type.isVendor());
+ EXPECT_TRUE(type.isFlexible());
+ EXPECT_TRUE(type.isGlobal());
+ EXPECT_FALSE(type.forInput());
+ EXPECT_FALSE(type.forOutput());
+ EXPECT_FALSE(type.forStream());
+ EXPECT_FALSE(type.forPort());
+
+ EXPECT_EQ(C2NumbersTuning::From(nullptr), nullptr);
+ EXPECT_EQ(C2NumbersTuning::From(tun.get()), tun.get());
+ EXPECT_EQ(C2NumbersPortTuning::From(tun.get()), nullptr);
+ EXPECT_EQ(C2NumbersPortTuning::input::From(tun.get()), nullptr);
+ EXPECT_EQ(C2NumbersPortTuning::output::From(tun.get()), nullptr);
+ EXPECT_EQ(C2NumbersStreamTuning::From(tun.get()), nullptr);
+ EXPECT_EQ(C2NumbersStreamTuning::input::From(tun.get()), nullptr);
+ EXPECT_EQ(C2NumbersStreamTuning::output::From(tun.get()), nullptr);
+ }
+
+ std::unique_ptr<C2NumbersPortTuning> outp1_(C2NumbersPortTuning::alloc_unique(1, true)),
+ inp1_ = C2NumbersPortTuning::alloc_unique(1, false);
+ outp1_->m.mNumbers[0] = 100;
+ inp1_->m.mNumbers[0] = 100;
+ std::unique_ptr<const C2NumbersPortTuning> outp1 = std::move(outp1_);
+ std::unique_ptr<const C2NumbersPortTuning> inp1 = std::move(inp1_);
+ std::shared_ptr<C2NumbersPortTuning> boutp1(C2NumbersPortTuning::alloc_shared(1)),
+ binp1 = C2NumbersPortTuning::alloc_shared(1),
+ binp3 = C2NumbersPortTuning::alloc_shared(1, false);
+ binp3->m.mNumbers[0] = 100;
+ std::unique_ptr<C2NumbersPortTuning::input> inp2_(C2NumbersPortTuning::input::alloc_unique(1));
+ inp2_->m.mNumbers[0] = 100;
+ std::unique_ptr<const C2NumbersPortTuning::input> inp2 = std::move(inp2_);
+ std::shared_ptr<C2NumbersPortTuning::input> binp2(C2NumbersPortTuning::input::alloc_shared(1));
+ std::unique_ptr<C2NumbersPortTuning::output> outp2_(C2NumbersPortTuning::output::alloc_unique(1));
+ outp2_->m.mNumbers[0] = 100;
+ std::unique_ptr<const C2NumbersPortTuning::output> outp2 = std::move(outp2_);
+ std::shared_ptr<C2NumbersPortTuning::output> boutp2(C2NumbersPortTuning::output::alloc_shared(1));
+
+ {
+ static_assert(canCallSetPort(*binp3), "should be able to");
+ static_assert(canCallSetPort(*binp1), "should be able to");
+ static_assert(!canCallSetPort(*inp1), "should not be able to (const)");
+ static_assert(!canCallSetPort(*inp2), "should not be able to (const & type)");
+ static_assert(!canCallSetPort(*binp2), "should not be able to (type)");
+
+ // flags & invariables
+ const C2NumbersPortTuning *P[] = { outp1.get(), inp1.get(), boutp1.get() };
+ for (const auto p : P) {
+ EXPECT_EQ(12u, p->size());
+ EXPECT_FALSE(p->isVendor());
+ EXPECT_TRUE(p->isFlexible());
+ EXPECT_FALSE(p->isGlobal());
+ EXPECT_FALSE(p->forStream());
+ EXPECT_TRUE(p->forPort());
+ }
+ const C2NumbersPortTuning::input *PI[] = { inp2.get(), binp2.get() };
+ for (const auto p : PI) {
+ EXPECT_EQ(12u, p->size());
+ EXPECT_FALSE(p->isVendor());
+ EXPECT_TRUE(p->isFlexible());
+ EXPECT_FALSE(p->isGlobal());
+ EXPECT_FALSE(p->forStream());
+ EXPECT_TRUE(p->forPort());
+ }
+ const C2NumbersPortTuning::output *PO[] = { outp2.get(), boutp2.get() };
+ for (const auto p : PO) {
+ EXPECT_EQ(12u, p->size());
+ EXPECT_FALSE(p->isVendor());
+ EXPECT_TRUE(p->isFlexible());
+ EXPECT_FALSE(p->isGlobal());
+ EXPECT_FALSE(p->forStream());
+ EXPECT_TRUE(p->forPort());
+ }
+
+ // port specific flags & invariables
+ EXPECT_FALSE(outp1->forInput());
+ EXPECT_TRUE(outp1->forOutput());
+
+ EXPECT_TRUE(inp1->forInput());
+ EXPECT_FALSE(inp1->forOutput());
+
+ const C2NumbersPortTuning *P2[] = { outp1.get(), inp1.get() };
+ for (const auto p : P2) {
+ EXPECT_TRUE((bool)(*p));
+ EXPECT_FALSE(!(*p));
+ EXPECT_EQ(100, p->m.mNumbers[0]);
+ }
+ for (const auto p : PO) {
+ EXPECT_TRUE((bool)(*p));
+ EXPECT_FALSE(!(*p));
+
+ EXPECT_FALSE(p->forInput());
+ EXPECT_TRUE(p->forOutput());
+ }
+ for (const auto p : PI) {
+ EXPECT_TRUE((bool)(*p));
+ EXPECT_FALSE(!(*p));
+
+ EXPECT_TRUE(p->forInput());
+ EXPECT_FALSE(p->forOutput());
+ }
+ const C2NumbersPortTuning *P3[] = { boutp1.get() };
+ for (const auto p : P3) {
+ EXPECT_FALSE((bool)(*p));
+ EXPECT_TRUE(!(*p));
+
+ EXPECT_FALSE(p->forInput());
+ EXPECT_FALSE(p->forOutput());
+ EXPECT_EQ(0, p->m.mNumbers[0]);
+ }
+
+ // values
+ EXPECT_EQ(100, inp2->m.mNumbers[0]);
+ EXPECT_EQ(100, outp2->m.mNumbers[0]);
+ EXPECT_EQ(0, binp1->m.mNumbers[0]);
+ EXPECT_EQ(0, binp2->m.mNumbers[0]);
+ EXPECT_EQ(0, boutp1->m.mNumbers[0]);
+ EXPECT_EQ(0, boutp2->m.mNumbers[0]);
+
+ EXPECT_TRUE(*inp1 != *outp1);
+ EXPECT_TRUE(*inp1 == *inp2);
+ EXPECT_TRUE(*outp1 == *outp2);
+ EXPECT_TRUE(*binp1 == *boutp1);
+ EXPECT_TRUE(*binp2 != *boutp2);
+
+ EXPECT_TRUE(*inp1 != *binp1);
+ binp1->m.mNumbers[0] = 100;
+ EXPECT_TRUE(*inp1 != *binp1);
+ binp1->setPort(false /* output */);
+ EXPECT_TRUE((bool)*binp1);
+ EXPECT_FALSE(!*binp1);
+ EXPECT_TRUE(*inp1 == *binp1);
+
+ EXPECT_TRUE(*inp2 != *binp2);
+ binp2->m.mNumbers[0] = 100;
+ EXPECT_TRUE(*inp2 == *binp2);
+
+ binp1->setPort(true /* output */);
+ EXPECT_TRUE(*outp1 == *binp1);
+
+ EXPECT_TRUE(*outp1 != *boutp1);
+ boutp1->m.mNumbers[0] = 100;
+ EXPECT_TRUE(*outp1 != *boutp1);
+ boutp1->setPort(true /* output */);
+ EXPECT_TRUE((bool)*boutp1);
+ EXPECT_FALSE(!*boutp1);
+ EXPECT_TRUE(*outp1 == *boutp1);
+
+ EXPECT_TRUE(*outp2 != *boutp2);
+ boutp2->m.mNumbers[0] = 100;
+ EXPECT_TRUE(*outp2 == *boutp2);
+
+ boutp1->setPort(false /* output */);
+ EXPECT_TRUE(*inp1 == *boutp1);
+
+ // index
+ EXPECT_EQ(C2Param::Type(inp1->type()).baseIndex(), C2NumbersStruct::baseIndex);
+ EXPECT_EQ(C2Param::Type(inp1->type()).paramIndex(), kParamIndexNumbers);
+ EXPECT_EQ(inp1->type(), C2NumbersPortTuning::input::typeIndex);
+ EXPECT_EQ(inp1->stream(), ~0u);
+
+ EXPECT_EQ(C2Param::Type(inp2->type()).baseIndex(), C2NumbersStruct::baseIndex);
+ EXPECT_EQ(C2Param::Type(inp2->type()).paramIndex(), kParamIndexNumbers);
+ EXPECT_EQ(inp2->type(), C2NumbersPortTuning::input::typeIndex);
+ EXPECT_EQ(inp2->stream(), ~0u);
+
+ EXPECT_EQ(C2Param::Type(outp1->type()).baseIndex(), C2NumbersStruct::baseIndex);
+ EXPECT_EQ(C2Param::Type(outp1->type()).paramIndex(), kParamIndexNumbers);
+ EXPECT_EQ(outp1->type(), C2NumbersPortTuning::output::typeIndex);
+ EXPECT_EQ(outp1->stream(), ~0u);
+
+ EXPECT_EQ(C2Param::Type(outp2->type()).baseIndex(), C2NumbersStruct::baseIndex);
+ EXPECT_EQ(C2Param::Type(outp2->type()).paramIndex(), kParamIndexNumbers);
+ EXPECT_EQ(outp2->type(), C2NumbersPortTuning::output::typeIndex);
+ EXPECT_EQ(outp2->stream(), ~0u);
+
+ C2Param::BaseIndex index = C2NumbersPortTuning::input::typeIndex;
+ EXPECT_FALSE(index.isVendor());
+ EXPECT_TRUE(index.isFlexible());
+ EXPECT_EQ(index.baseIndex(), kParamIndexNumbers | C2Param::BaseIndex::_kFlexibleFlag);
+ EXPECT_EQ(index.paramIndex(), kParamIndexNumbers);
+
+ index = C2NumbersPortTuning::output::typeIndex;
+ EXPECT_FALSE(index.isVendor());
+ EXPECT_TRUE(index.isFlexible());
+ EXPECT_EQ(index.baseIndex(), kParamIndexNumbers | C2Param::BaseIndex::_kFlexibleFlag);
+ EXPECT_EQ(index.paramIndex(), kParamIndexNumbers);
+
+ C2Param::Type type = C2NumbersPortTuning::input::typeIndex;
+ EXPECT_FALSE(type.isVendor());
+ EXPECT_TRUE(type.isFlexible());
+ EXPECT_FALSE(type.isGlobal());
+ EXPECT_TRUE(type.forInput());
+ EXPECT_FALSE(type.forOutput());
+ EXPECT_FALSE(type.forStream());
+ EXPECT_TRUE(type.forPort());
+
+ type = C2NumbersPortTuning::output::typeIndex;
+ EXPECT_FALSE(type.isVendor());
+ EXPECT_TRUE(type.isFlexible());
+ EXPECT_FALSE(type.isGlobal());
+ EXPECT_FALSE(type.forInput());
+ EXPECT_TRUE(type.forOutput());
+ EXPECT_FALSE(type.forStream());
+ EXPECT_TRUE(type.forPort());
+
+ EXPECT_EQ(C2NumbersPortTuning::From(nullptr), nullptr);
+ EXPECT_EQ(C2NumbersPortTuning::input::From(nullptr), nullptr);
+ EXPECT_EQ(C2NumbersPortTuning::output::From(nullptr), nullptr);
+ EXPECT_EQ(C2NumbersTuning::From(inp1.get()), nullptr);
+ EXPECT_EQ(C2NumbersTuning::From(inp2.get()), nullptr);
+ EXPECT_EQ(C2NumbersTuning::From(outp1.get()), nullptr);
+ EXPECT_EQ(C2NumbersTuning::From(outp2.get()), nullptr);
+ EXPECT_EQ(C2NumbersPortTuning::From(inp1.get()), inp1.get());
+ EXPECT_EQ(C2NumbersPortTuning::From(inp2.get()), (C2NumbersPortTuning*)inp2.get());
+ EXPECT_EQ(C2NumbersPortTuning::From(outp1.get()), outp1.get());
+ EXPECT_EQ(C2NumbersPortTuning::From(outp2.get()), (C2NumbersPortTuning*)outp2.get());
+ EXPECT_EQ(C2NumbersPortTuning::input::From(inp1.get()), (C2NumbersPortTuning::input*)inp1.get());
+ EXPECT_EQ(C2NumbersPortTuning::input::From(inp2.get()), inp2.get());
+ EXPECT_EQ(C2NumbersPortTuning::input::From(outp1.get()), nullptr);
+ EXPECT_EQ(C2NumbersPortTuning::input::From(outp2.get()), nullptr);
+ EXPECT_EQ(C2NumbersPortTuning::output::From(inp1.get()), nullptr);
+ EXPECT_EQ(C2NumbersPortTuning::output::From(inp2.get()), nullptr);
+ EXPECT_EQ(C2NumbersPortTuning::output::From(outp1.get()), (C2NumbersPortTuning::output*)outp1.get());
+ EXPECT_EQ(C2NumbersPortTuning::output::From(outp2.get()), outp2.get());
+ EXPECT_EQ(C2NumbersStreamTuning::From(inp1.get()), nullptr);
+ EXPECT_EQ(C2NumbersStreamTuning::From(inp2.get()), nullptr);
+ EXPECT_EQ(C2NumbersStreamTuning::From(outp1.get()), nullptr);
+ EXPECT_EQ(C2NumbersStreamTuning::From(outp2.get()), nullptr);
+ EXPECT_EQ(C2NumbersStreamTuning::input::From(inp1.get()), nullptr);
+ EXPECT_EQ(C2NumbersStreamTuning::input::From(inp2.get()), nullptr);
+ EXPECT_EQ(C2NumbersStreamTuning::input::From(outp1.get()), nullptr);
+ EXPECT_EQ(C2NumbersStreamTuning::input::From(outp2.get()), nullptr);
+ EXPECT_EQ(C2NumbersStreamTuning::output::From(inp1.get()), nullptr);
+ EXPECT_EQ(C2NumbersStreamTuning::output::From(inp2.get()), nullptr);
+ EXPECT_EQ(C2NumbersStreamTuning::output::From(outp1.get()), nullptr);
+ EXPECT_EQ(C2NumbersStreamTuning::output::From(outp2.get()), nullptr);
+
+ }
+
+ std::unique_ptr<C2NumbersStreamTuning> outs1_(C2NumbersStreamTuning::alloc_unique(1, true, 1u));
+ outs1_->m.mNumbers[0] = 100;
+ std::unique_ptr<const C2NumbersStreamTuning> outs1 = std::move(outs1_);
+ std::unique_ptr<C2NumbersStreamTuning> ins1_(C2NumbersStreamTuning::alloc_unique(1, false, 1u));
+ ins1_->m.mNumbers[0] = 100;
+ std::unique_ptr<const C2NumbersStreamTuning> ins1 = std::move(ins1_);
+ std::shared_ptr<C2NumbersStreamTuning> bouts1(C2NumbersStreamTuning::alloc_shared(1));
+ std::shared_ptr<C2NumbersStreamTuning> bins1(C2NumbersStreamTuning::alloc_shared(1));
+ std::shared_ptr<C2NumbersStreamTuning> bins3(C2NumbersStreamTuning::alloc_shared(1, false, 1u));
+ bins3->m.mNumbers[0] = 100;
+ std::unique_ptr<C2NumbersStreamTuning::input> ins2_(C2NumbersStreamTuning::input::alloc_unique(1, 1u));
+ ins2_->m.mNumbers[0] = 100;
+ std::unique_ptr<const C2NumbersStreamTuning::input> ins2 = std::move(ins2_);
+ std::shared_ptr<C2NumbersStreamTuning::input> bins2(C2NumbersStreamTuning::input::alloc_shared(1));
+ std::unique_ptr<C2NumbersStreamTuning::output> outs2_(C2NumbersStreamTuning::output::alloc_unique(1, 1u));
+ outs2_->m.mNumbers[0] = 100;
+ std::unique_ptr<const C2NumbersStreamTuning::output> outs2 = std::move(outs2_);
+ std::shared_ptr<C2NumbersStreamTuning::output> bouts2(C2NumbersStreamTuning::output::alloc_shared(1));
+
+ {
+ static_assert(canCallSetPort(*bins3), "should be able to");
+ static_assert(canCallSetPort(*bins1), "should be able to");
+ static_assert(!canCallSetPort(*ins1), "should not be able to (const)");
+ static_assert(!canCallSetPort(*ins2), "should not be able to (const & type)");
+ static_assert(!canCallSetPort(*bins2), "should not be able to (type)");
+
+ // flags & invariables
+ const C2NumbersStreamTuning *S[] = { outs1.get(), ins1.get(), bouts1.get() };
+ for (const auto p : S) {
+ EXPECT_EQ(12u, p->size());
+ EXPECT_FALSE(p->isVendor());
+ EXPECT_TRUE(p->isFlexible());
+ EXPECT_FALSE(p->isGlobal());
+ EXPECT_TRUE(p->forStream());
+ EXPECT_FALSE(p->forPort());
+ }
+ const C2NumbersStreamTuning::input *SI[] = { ins2.get(), bins2.get() };
+ for (const auto p : SI) {
+ EXPECT_EQ(12u, p->size());
+ EXPECT_FALSE(p->isVendor());
+ EXPECT_TRUE(p->isFlexible());
+ EXPECT_FALSE(p->isGlobal());
+ EXPECT_TRUE(p->forStream());
+ EXPECT_FALSE(p->forPort());
+ }
+ const C2NumbersStreamTuning::output *SO[] = { outs2.get(), bouts2.get() };
+ for (const auto p : SO) {
+ EXPECT_EQ(12u, p->size());
+ EXPECT_FALSE(p->isVendor());
+ EXPECT_TRUE(p->isFlexible());
+ EXPECT_FALSE(p->isGlobal());
+ EXPECT_TRUE(p->forStream());
+ EXPECT_FALSE(p->forPort());
+ }
+
+ // port specific flags & invariables
+ EXPECT_FALSE(outs1->forInput());
+ EXPECT_TRUE(outs1->forOutput());
+
+ EXPECT_TRUE(ins1->forInput());
+ EXPECT_FALSE(ins1->forOutput());
+
+ const C2NumbersStreamTuning *S2[] = { outs1.get(), ins1.get() };
+ for (const auto p : S2) {
+ EXPECT_TRUE((bool)(*p));
+ EXPECT_FALSE(!(*p));
+ EXPECT_EQ(100, p->m.mNumbers[0]);
+ EXPECT_EQ(1u, p->stream());
+ }
+ for (const auto p : SO) {
+ EXPECT_TRUE((bool)(*p));
+ EXPECT_FALSE(!(*p));
+
+ EXPECT_FALSE(p->forInput());
+ EXPECT_TRUE(p->forOutput());
+ }
+ for (const auto p : SI) {
+ EXPECT_TRUE((bool)(*p));
+ EXPECT_FALSE(!(*p));
+
+ EXPECT_TRUE(p->forInput());
+ EXPECT_FALSE(p->forOutput());
+ }
+ const C2NumbersStreamTuning *S3[] = { bouts1.get() };
+ for (const auto p : S3) {
+ EXPECT_FALSE((bool)(*p));
+ EXPECT_TRUE(!(*p));
+
+ EXPECT_FALSE(p->forInput());
+ EXPECT_FALSE(p->forOutput());
+ EXPECT_EQ(0, p->m.mNumbers[0]);
+ }
+
+ // values
+ EXPECT_EQ(100, ins2->m.mNumbers[0]);
+ EXPECT_EQ(100, outs2->m.mNumbers[0]);
+ EXPECT_EQ(0, bins1->m.mNumbers[0]);
+ EXPECT_EQ(0, bins2->m.mNumbers[0]);
+ EXPECT_EQ(0, bouts1->m.mNumbers[0]);
+ EXPECT_EQ(0, bouts2->m.mNumbers[0]);
+
+ EXPECT_EQ(1u, ins2->stream());
+ EXPECT_EQ(1u, outs2->stream());
+ EXPECT_EQ(0u, bins1->stream());
+ EXPECT_EQ(0u, bins2->stream());
+ EXPECT_EQ(0u, bouts1->stream());
+ EXPECT_EQ(0u, bouts2->stream());
+
+ EXPECT_TRUE(*ins1 != *outs1);
+ EXPECT_TRUE(*ins1 == *ins2);
+ EXPECT_TRUE(*outs1 == *outs2);
+ EXPECT_TRUE(*bins1 == *bouts1);
+ EXPECT_TRUE(*bins2 != *bouts2);
+
+ EXPECT_TRUE(*ins1 != *bins1);
+ bins1->m.mNumbers[0] = 100;
+ EXPECT_TRUE(*ins1 != *bins1);
+ bins1->setPort(false /* output */);
+ EXPECT_TRUE(*ins1 != *bins1);
+ bins1->setStream(1u);
+ EXPECT_TRUE(*ins1 == *bins1);
+
+ EXPECT_TRUE(*ins2 != *bins2);
+ bins2->m.mNumbers[0] = 100;
+ EXPECT_TRUE(*ins2 != *bins2);
+ bins2->setStream(1u);
+ EXPECT_TRUE(*ins2 == *bins2);
+
+ bins1->setPort(true /* output */);
+ EXPECT_TRUE(*outs1 == *bins1);
+
+ EXPECT_TRUE(*outs1 != *bouts1);
+ bouts1->m.mNumbers[0] = 100;
+ EXPECT_TRUE(*outs1 != *bouts1);
+ bouts1->setPort(true /* output */);
+ EXPECT_TRUE(*outs1 != *bouts1);
+ bouts1->setStream(1u);
+ EXPECT_TRUE(*outs1 == *bouts1);
+
+ EXPECT_TRUE(*outs2 != *bouts2);
+ bouts2->m.mNumbers[0] = 100;
+ EXPECT_TRUE(*outs2 != *bouts2);
+ bouts2->setStream(1u);
+ EXPECT_TRUE(*outs2 == *bouts2);
+
+ bouts1->setPort(false /* output */);
+ EXPECT_TRUE(*ins1 == *bouts1);
+
+ // index
+ EXPECT_EQ(C2Param::Type(ins1->type()).baseIndex(), C2NumbersStruct::baseIndex);
+ EXPECT_EQ(C2Param::Type(ins1->type()).paramIndex(), kParamIndexNumbers);
+ EXPECT_EQ(ins1->type(), C2NumbersStreamTuning::input::typeIndex);
+
+ EXPECT_EQ(C2Param::Type(ins2->type()).baseIndex(), C2NumbersStruct::baseIndex);
+ EXPECT_EQ(C2Param::Type(ins2->type()).paramIndex(), kParamIndexNumbers);
+ EXPECT_EQ(ins2->type(), C2NumbersStreamTuning::input::typeIndex);
+
+ EXPECT_EQ(C2Param::Type(outs1->type()).baseIndex(), C2NumbersStruct::baseIndex);
+ EXPECT_EQ(C2Param::Type(outs1->type()).paramIndex(), kParamIndexNumbers);
+ EXPECT_EQ(outs1->type(), C2NumbersStreamTuning::output::typeIndex);
+
+ EXPECT_EQ(C2Param::Type(outs2->type()).baseIndex(), C2NumbersStruct::baseIndex);
+ EXPECT_EQ(C2Param::Type(outs2->type()).paramIndex(), kParamIndexNumbers);
+ EXPECT_EQ(outs2->type(), C2NumbersStreamTuning::output::typeIndex);
+
+ C2Param::BaseIndex index = C2NumbersStreamTuning::input::typeIndex;
+ EXPECT_FALSE(index.isVendor());
+ EXPECT_TRUE(index.isFlexible());
+ EXPECT_EQ(index.baseIndex(), kParamIndexNumbers | C2Param::BaseIndex::_kFlexibleFlag);
+ EXPECT_EQ(index.paramIndex(), kParamIndexNumbers);
+
+ index = C2NumbersStreamTuning::output::typeIndex;
+ EXPECT_FALSE(index.isVendor());
+ EXPECT_TRUE(index.isFlexible());
+ EXPECT_EQ(index.baseIndex(), kParamIndexNumbers | C2Param::BaseIndex::_kFlexibleFlag);
+ EXPECT_EQ(index.paramIndex(), kParamIndexNumbers);
+
+ C2Param::Type type = C2NumbersStreamTuning::input::typeIndex;
+ EXPECT_FALSE(type.isVendor());
+ EXPECT_TRUE(type.isFlexible());
+ EXPECT_FALSE(type.isGlobal());
+ EXPECT_TRUE(type.forInput());
+ EXPECT_FALSE(type.forOutput());
+ EXPECT_TRUE(type.forStream());
+ EXPECT_FALSE(type.forPort());
+
+ type = C2NumbersStreamTuning::output::typeIndex;
+ EXPECT_FALSE(type.isVendor());
+ EXPECT_TRUE(type.isFlexible());
+ EXPECT_FALSE(type.isGlobal());
+ EXPECT_FALSE(type.forInput());
+ EXPECT_TRUE(type.forOutput());
+ EXPECT_TRUE(type.forStream());
+ EXPECT_FALSE(type.forPort());
+
+ EXPECT_EQ(C2NumbersPortTuning::From(nullptr), nullptr);
+ EXPECT_EQ(C2NumbersPortTuning::input::From(nullptr), nullptr);
+ EXPECT_EQ(C2NumbersPortTuning::output::From(nullptr), nullptr);
+ EXPECT_EQ(C2NumbersTuning::From(ins1.get()), nullptr);
+ EXPECT_EQ(C2NumbersTuning::From(ins2.get()), nullptr);
+ EXPECT_EQ(C2NumbersTuning::From(outs1.get()), nullptr);
+ EXPECT_EQ(C2NumbersTuning::From(outs2.get()), nullptr);
+ EXPECT_EQ(C2NumbersPortTuning::From(ins1.get()), nullptr);
+ EXPECT_EQ(C2NumbersPortTuning::From(ins2.get()), nullptr);
+ EXPECT_EQ(C2NumbersPortTuning::From(outs1.get()), nullptr);
+ EXPECT_EQ(C2NumbersPortTuning::From(outs2.get()), nullptr);
+ EXPECT_EQ(C2NumbersPortTuning::input::From(ins1.get()), nullptr);
+ EXPECT_EQ(C2NumbersPortTuning::input::From(ins2.get()), nullptr);
+ EXPECT_EQ(C2NumbersPortTuning::input::From(outs1.get()), nullptr);
+ EXPECT_EQ(C2NumbersPortTuning::input::From(outs2.get()), nullptr);
+ EXPECT_EQ(C2NumbersPortTuning::output::From(ins1.get()), nullptr);
+ EXPECT_EQ(C2NumbersPortTuning::output::From(ins2.get()), nullptr);
+ EXPECT_EQ(C2NumbersPortTuning::output::From(outs1.get()), nullptr);
+ EXPECT_EQ(C2NumbersPortTuning::output::From(outs2.get()), nullptr);
+ EXPECT_EQ(C2NumbersStreamTuning::From(ins1.get()), ins1.get());
+ EXPECT_EQ(C2NumbersStreamTuning::From(ins2.get()), (C2NumbersStreamTuning*)ins2.get());
+ EXPECT_EQ(C2NumbersStreamTuning::From(outs1.get()), outs1.get());
+ EXPECT_EQ(C2NumbersStreamTuning::From(outs2.get()), (C2NumbersStreamTuning*)outs2.get());
+ EXPECT_EQ(C2NumbersStreamTuning::input::From(ins1.get()), (C2NumbersStreamTuning::input*)ins1.get());
+ EXPECT_EQ(C2NumbersStreamTuning::input::From(ins2.get()), ins2.get());
+ EXPECT_EQ(C2NumbersStreamTuning::input::From(outs1.get()), nullptr);
+ EXPECT_EQ(C2NumbersStreamTuning::input::From(outs2.get()), nullptr);
+ EXPECT_EQ(C2NumbersStreamTuning::output::From(ins1.get()), nullptr);
+ EXPECT_EQ(C2NumbersStreamTuning::output::From(ins2.get()), nullptr);
+ EXPECT_EQ(C2NumbersStreamTuning::output::From(outs1.get()), (C2NumbersStreamTuning::output*)outs1.get());
+ EXPECT_EQ(C2NumbersStreamTuning::output::From(outs2.get()), outs2.get());
+
+ }
+
+ {
+ C2Int32Value int32Value(INT32_MIN);
+ static_assert(std::is_same<decltype(int32Value.mValue), int32_t>::value, "should be int32_t");
+ EXPECT_EQ(INT32_MIN, int32Value.mValue);
+ std::list<const C2FieldDescriptor> fields = int32Value.fieldList;
+ EXPECT_EQ(1u, fields.size());
+ EXPECT_EQ(FD::INT32, fields.cbegin()->type());
+ EXPECT_EQ(1u, fields.cbegin()->length());
+ EXPECT_EQ(C2String("value"), fields.cbegin()->name());
+ }
+
+ {
+ C2Uint32Value uint32Value(UINT32_MAX);
+ static_assert(std::is_same<decltype(uint32Value.mValue), uint32_t>::value, "should be uint32_t");
+ EXPECT_EQ(UINT32_MAX, uint32Value.mValue);
+ std::list<const C2FieldDescriptor> fields = uint32Value.fieldList;
+ EXPECT_EQ(1u, fields.size());
+ EXPECT_EQ(FD::UINT32, fields.cbegin()->type());
+ EXPECT_EQ(1u, fields.cbegin()->length());
+ EXPECT_EQ(C2String("value"), fields.cbegin()->name());
+ }
+
+ {
+ C2Int64Value int64Value(INT64_MIN);
+ static_assert(std::is_same<decltype(int64Value.mValue), int64_t>::value, "should be int64_t");
+ EXPECT_EQ(INT64_MIN, int64Value.mValue);
+ std::list<const C2FieldDescriptor> fields = int64Value.fieldList;
+ EXPECT_EQ(1u, fields.size());
+ EXPECT_EQ(FD::INT64, fields.cbegin()->type());
+ EXPECT_EQ(1u, fields.cbegin()->length());
+ EXPECT_EQ(C2String("value"), fields.cbegin()->name());
+ }
+
+ {
+ C2Uint64Value uint64Value(UINT64_MAX);
+ static_assert(std::is_same<decltype(uint64Value.mValue), uint64_t>::value, "should be uint64_t");
+ EXPECT_EQ(UINT64_MAX, uint64Value.mValue);
+ std::list<const C2FieldDescriptor> fields = uint64Value.fieldList;
+ EXPECT_EQ(1u, fields.size());
+ EXPECT_EQ(FD::UINT64, fields.cbegin()->type());
+ EXPECT_EQ(1u, fields.cbegin()->length());
+ EXPECT_EQ(C2String("value"), fields.cbegin()->name());
+ }
+
+ {
+ C2FloatValue floatValue(123.4f);
+ static_assert(std::is_same<decltype(floatValue.mValue), float>::value, "should be float");
+ EXPECT_EQ(123.4f, floatValue.mValue);
+ std::list<const C2FieldDescriptor> fields = floatValue.fieldList;
+ EXPECT_EQ(1u, fields.size());
+ EXPECT_EQ(FD::FLOAT, fields.cbegin()->type());
+ EXPECT_EQ(1u, fields.cbegin()->length());
+ EXPECT_EQ(C2String("value"), fields.cbegin()->name());
+ }
+
+ {
+ uint8_t initValue[] = "ABCD";
+ typedef C2GlobalParam<C2Setting, C2BlobValue, 0> BlobSetting;
+ std::unique_ptr<BlobSetting> blobValue = BlobSetting::alloc_unique(6, C2ConstMemoryBlock<uint8_t>(initValue));
+ static_assert(std::is_same<decltype(blobValue->m.mValue), uint8_t[]>::value, "should be uint8_t[]");
+ EXPECT_EQ(0, memcmp(blobValue->m.mValue, "ABCD\0", 6));
+ EXPECT_EQ(6u, blobValue->flexCount());
+ std::list<const C2FieldDescriptor> fields = blobValue->fieldList;
+ EXPECT_EQ(1u, fields.size());
+ EXPECT_EQ(FD::BLOB, fields.cbegin()->type());
+ EXPECT_EQ(0u, fields.cbegin()->length());
+ EXPECT_EQ(C2String("value"), fields.cbegin()->name());
+
+ blobValue = BlobSetting::alloc_unique(3, C2ConstMemoryBlock<uint8_t>(initValue));
+ EXPECT_EQ(0, memcmp(blobValue->m.mValue, "ABC", 3));
+ EXPECT_EQ(3u, blobValue->flexCount());
+ }
+
+ {
+ constexpr char initValue[] = "ABCD";
+ typedef C2GlobalParam<C2Setting, C2StringValue, 0> StringSetting;
+ std::unique_ptr<StringSetting> stringValue = StringSetting::alloc_unique(6, C2ConstMemoryBlock<char>(initValue));
+ stringValue = StringSetting::alloc_unique(6, initValue);
+ static_assert(std::is_same<decltype(stringValue->m.mValue), char[]>::value, "should be char[]");
+ EXPECT_EQ(0, memcmp(stringValue->m.mValue, "ABCD\0", 6));
+ EXPECT_EQ(6u, stringValue->flexCount());
+ std::list<const C2FieldDescriptor> fields = stringValue->fieldList;
+ EXPECT_EQ(1u, fields.size());
+ EXPECT_EQ(FD::STRING, fields.cbegin()->type());
+ EXPECT_EQ(0u, fields.cbegin()->length());
+ EXPECT_EQ(C2String("value"), fields.cbegin()->name());
+
+ stringValue = StringSetting::alloc_unique(3, C2ConstMemoryBlock<char>(initValue));
+ EXPECT_EQ(0, memcmp(stringValue->m.mValue, "AB", 3));
+ EXPECT_EQ(3u, stringValue->flexCount());
+
+ stringValue = StringSetting::alloc_unique(11, "initValue");
+ EXPECT_EQ(0, memcmp(stringValue->m.mValue, "initValue\0", 11));
+ EXPECT_EQ(11u, stringValue->flexCount());
+
+ stringValue = StringSetting::alloc_unique(initValue);
+ EXPECT_EQ(0, memcmp(stringValue->m.mValue, "ABCD", 5));
+ EXPECT_EQ(5u, stringValue->flexCount());
+
+ stringValue = StringSetting::alloc_unique({ 'A', 'B', 'C', 'D' });
+ EXPECT_EQ(0, memcmp(stringValue->m.mValue, "ABC", 4));
+ EXPECT_EQ(4u, stringValue->flexCount());
+ }
+
+ {
+ uint32_t videoWidth[] = { 12u, C2NumbersStreamTuning::output::typeIndex, 100 };
+ C2Param *p1 = C2Param::From(videoWidth, sizeof(videoWidth));
+ EXPECT_NE(nullptr, p1);
+ EXPECT_EQ(12u, p1->size());
+ EXPECT_EQ(C2NumbersStreamTuning::output::typeIndex, p1->type());
+
+ C2NumbersStreamTuning::output *vst = C2NumbersStreamTuning::output::From(p1);
+ EXPECT_NE(nullptr, vst);
+ if (vst) {
+ EXPECT_EQ(1u, vst->flexCount());
+ EXPECT_EQ(100, vst->m.mNumbers[0]);
+ }
+
+ p1 = C2Param::From(videoWidth, sizeof(videoWidth) + 2);
+ EXPECT_EQ(nullptr, p1);
+
+ p1 = C2Param::From(videoWidth, sizeof(videoWidth) - 2);
+ EXPECT_EQ(nullptr, p1);
+
+ p1 = C2Param::From(videoWidth, 3);
+ EXPECT_EQ(nullptr, p1);
+
+ p1 = C2Param::From(videoWidth, 0);
+ EXPECT_EQ(nullptr, p1);
+ }
+
+ {
+ uint32_t videoWidth[] = { 16u, C2NumbersPortTuning::input::typeIndex, 101, 102 };
+
+ C2Param *p1 = C2Param::From(videoWidth, sizeof(videoWidth));
+ EXPECT_NE(nullptr, p1);
+ EXPECT_EQ(16u, p1->size());
+ EXPECT_EQ(C2NumbersPortTuning::input::typeIndex, p1->type());
+
+ C2NumbersPortTuning::input *vpt = C2NumbersPortTuning::input::From(p1);
+ EXPECT_NE(nullptr, vpt);
+ if (vpt) {
+ EXPECT_EQ(2u, vpt->flexCount());
+ EXPECT_EQ(101, vpt->m.mNumbers[0]);
+ EXPECT_EQ(102, vpt->m.mNumbers[1]);
+ }
+
+ p1 = C2Param::From(videoWidth, sizeof(videoWidth) + 2);
+ EXPECT_EQ(nullptr, p1);
+
+ p1 = C2Param::From(videoWidth, sizeof(videoWidth) - 2);
+ EXPECT_EQ(nullptr, p1);
+
+ p1 = C2Param::From(videoWidth, 3);
+ EXPECT_EQ(nullptr, p1);
+
+ p1 = C2Param::From(videoWidth, 0);
+ EXPECT_EQ(nullptr, p1);
+ }
+}
+
+// ***********************
+
+}
+
+#include <util/C2ParamUtils.h>
+#include <C2Config.h>
+#include <C2Component.h>
+#include <unordered_map>
+
+namespace android {
+
+C2ENUM(
+ MetadataType, int32_t,
+ kInvalid = -1,
+ kNone = 0,
+ kGralloc,
+ kNativeHandle,
+ kANativeWindow,
+ kCamera,
+)
+
+enum {
+ kParamIndexVideoConfig = 0x1234,
+};
+
+struct C2VideoConfigStruct {
+ int32_t mWidth;
+ uint32_t mHeight;
+ MetadataType mMetadataType;
+ int32_t mSupportedFormats[];
+
+ C2VideoConfigStruct() {}
+
+ DEFINE_AND_DESCRIBE_FLEX_C2STRUCT(VideoConfig, mSupportedFormats)
+ C2FIELD(mWidth, "width")
+ C2FIELD(mHeight, "height")
+ C2FIELD(mMetadataType, "metadata-type")
+ C2FIELD(mSupportedFormats, "formats")
+};
+
+typedef C2PortParam<C2Tuning, C2VideoConfigStruct> C2VideoConfigPortTuning;
+
+class MyReflector : public C2ParamReflector {
+private:
+ std::unique_ptr<C2VideoConfigPortTuning::input> inputVideoConfigTuning;
+ std::unique_ptr<C2VideoConfigPortTuning::output> outputVideoConfigTuning;
+
+public:
+ void describeSupportedValues() {
+ C2TypedFieldSupportedValues<int32_t> supportedWidths(16, 1920, 8);
+ C2FieldSupportedValues supportedWidths2(16, 1920, 8);
+
+
+ std::list<C2FieldSupportedValues> supported;
+ //supported.emplace_push(inputVideoConfigTuning->mNumber, range(16, 1920, 8));
+ //supported.emplace_push(inputVideoConfigTuning->mHeight, range(16, 1088, 8));
+ //supported.emplace_push(inputVideoConfigTuning->mMetadataType, all_enums);
+ //supported.emplace_push(inputVideoConfigTuning->mSupportedFormats, { 0, 1, 5, 7 });
+ }
+
+ virtual std::unique_ptr<android::C2StructDescriptor> describe(C2Param::BaseIndex paramType) {
+ switch (paramType.baseIndex()) {
+ case C2VideoConfigPortTuning::baseIndex:
+ return std::unique_ptr<C2StructDescriptor>(new C2StructDescriptor{
+ paramType.baseIndex(),
+ C2VideoConfigPortTuning::fieldList,
+ });
+ }
+ return nullptr;
+ }
+};
+
+class MyComponentInstance : public C2ComponentInterface {
+public:
+ virtual C2String getName() const {
+ /// \todo this seems too specific
+ return "sample.interface";
+ };
+
+ virtual node_id getId() const {
+ /// \todo how are these shared?
+ return 0;
+ }
+
+ virtual status_t commit_sm(
+ const std::vector<C2Param* const> ¶ms,
+ std::vector<std::unique_ptr<C2SettingResult>>* const failures) {
+ (void)params;
+ (void)failures;
+ return C2_UNSUPPORTED;
+ }
+
+ virtual status_t config_nb(
+ const std::vector<C2Param* const> ¶ms,
+ std::vector<std::unique_ptr<C2SettingResult>>* const failures) {
+ (void)params;
+ (void)failures;
+ return C2_UNSUPPORTED;
+ }
+
+ virtual status_t createTunnel_sm(node_id targetComponent) {
+ (void)targetComponent;
+ return C2_UNSUPPORTED;
+ }
+
+ virtual status_t query_nb(
+ const std::vector<C2Param* const> &stackParams,
+ const std::vector<C2Param::Index> &heapParamIndices,
+ std::vector<std::unique_ptr<C2Param>>* const heapParams) const {
+ for (C2Param* const param : stackParams) {
+ if (!*param) { // param is already invalid - remember it
+ continue;
+ }
+
+ // note: this does not handle stream params (should use index...)
+ if (!mMyParams.count(param->type())) {
+ continue; // not my param
+ }
+
+ C2Param & myParam = mMyParams.find(param->type())->second;
+ if (myParam.size() != param->size()) { // incorrect size
+ param->invalidate();
+ continue;
+ }
+
+ param->updateFrom(myParam);
+ }
+
+ for (const C2Param::Index index : heapParamIndices) {
+ if (mMyParams.count(index)) {
+ C2Param & myParam = mMyParams.find(index)->second;
+ std::unique_ptr<C2Param> paramCopy(C2Param::From(&myParam, myParam.size()));
+ heapParams->push_back(std::move(paramCopy));
+ }
+ }
+
+ return C2_OK;
+ }
+
+ std::unordered_map<uint32_t, C2Param &> mMyParams;
+
+ C2ComponentDomainInfo mDomainInfo;
+
+ MyComponentInstance() {
+ mMyParams.insert({mDomainInfo.type(), mDomainInfo});
+ }
+
+ virtual status_t releaseTunnel_sm(node_id targetComponent) {
+ (void)targetComponent;
+ return C2_UNSUPPORTED;
+ }
+
+ class MyParamReflector : public C2ParamReflector {
+ const MyComponentInstance *instance;
+
+ public:
+ MyParamReflector(const MyComponentInstance *i) : instance(i) { }
+
+ virtual std::unique_ptr<C2StructDescriptor> describe(C2Param::BaseIndex paramIndex) {
+ switch (paramIndex.baseIndex()) {
+ case decltype(instance->mDomainInfo)::baseIndex:
+ default:
+ return std::unique_ptr<C2StructDescriptor>(new C2StructDescriptor{
+ instance->mDomainInfo.type(),
+ decltype(instance->mDomainInfo)::fieldList,
+ });
+ }
+ return nullptr;
+ }
+ };
+
+ virtual status_t getSupportedValues(
+ const std::vector<const C2ParamField> fields,
+ std::vector<C2FieldSupportedValues>* const values) const {
+ for (const C2ParamField &field : fields) {
+ if (field == C2ParamField(&mDomainInfo, &C2ComponentDomainInfo::mValue)) {
+ values->push_back(C2FieldSupportedValues(
+ false /* flag */,
+ &mDomainInfo.mValue
+ //,
+ //{(int32_t)C2DomainVideo}
+ ));
+ }
+ }
+ return C2_OK;
+ }
+
+ virtual std::shared_ptr<C2ParamReflector> getParamReflector() const {
+ return std::shared_ptr<C2ParamReflector>(new MyParamReflector(this));
+ }
+
+ virtual status_t getSupportedParams(std::vector<std::shared_ptr<C2ParamDescriptor>> * const params) const {
+ params->push_back(std::make_shared<C2ParamDescriptor>(
+ true /* required */, "_domain", &mDomainInfo));
+ return C2_OK;
+ }
+
+ status_t getSupportedParams2(std::vector<std::shared_ptr<C2ParamDescriptor>> * const params) {
+ params->push_back(std::shared_ptr<C2ParamDescriptor>(
+ new C2ParamDescriptor(true /* required */, "_domain", &mDomainInfo)));
+ return C2_OK;
+ }
+
+};
+
+template<typename E, bool S=std::is_enum<E>::value>
+struct getter {
+ int32_t get(const C2FieldSupportedValues::Primitive &p, int32_t*) {
+ return p.i32;
+ }
+ int64_t get(const C2FieldSupportedValues::Primitive &p, int64_t*) {
+ return p.i64;
+ }
+ uint32_t get(const C2FieldSupportedValues::Primitive &p, uint32_t*) {
+ return p.u32;
+ }
+ uint64_t get(const C2FieldSupportedValues::Primitive &p, uint64_t*) {
+ return p.u64;
+ }
+ float get(const C2FieldSupportedValues::Primitive &p, float*) {
+ return p.fp;
+ }
+};
+
+template<typename E>
+struct getter<E, true> {
+ typename std::underlying_type<E>::type get(const C2FieldSupportedValues::Primitive &p, E*) {
+ using u=typename std::underlying_type<E>::type;
+ return getter<u>().get(p, (u*)0);
+ }
+};
+
+template<typename T, bool E=std::is_enum<T>::value>
+struct lax_underlying_type {
+ typedef typename std::underlying_type<T>::type type;
+};
+
+template<typename T>
+struct lax_underlying_type<T, false> {
+ typedef T type;
+};
+
+template<typename E>
+typename lax_underlying_type<E>::type get(
+ const C2FieldSupportedValues::Primitive &p, E*) {
+ return getter<E>().get(p, (E*)0);
+}
+
+template<typename T>
+void dumpFSV(const C2FieldSupportedValues &sv, T*t) {
+ using namespace std;
+ cout << (std::is_enum<T>::value ? (std::is_signed<typename std::underlying_type<T>::type>::value ? "i" : "u")
+ : std::is_integral<T>::value ? std::is_signed<T>::value ? "i" : "u" : "f")
+ << (8 * sizeof(T));
+ if (sv.type == sv.RANGE) {
+ cout << ".range(" << get(sv.range.min, t);
+ if (get(sv.range.step, t) != std::is_integral<T>::value) {
+ cout << ":" << get(sv.range.step, t);
+ }
+ if (get(sv.range.nom, t) != 1 || get(sv.range.denom, t) != 1) {
+ cout << ":" << get(sv.range.nom, t) << "/" << get(sv.range.denom, t);
+ }
+ cout << get(sv.range.max, t) << ")";
+ }
+ if (sv.values.size()) {
+ cout << (sv.type == sv.FLAGS ? ".flags(" : ".list(");
+ const char *sep = "";
+ for (const C2FieldSupportedValues::Primitive &p : sv.values) {
+ cout << sep << get(p, t);
+ sep = ",";
+ }
+ cout << ")";
+ }
+ cout << endl;
+}
+
+void dumpType(C2Param::Type type) {
+ using namespace std;
+ cout << (type.isVendor() ? "Vendor" : "C2");
+ if (type.forInput()) {
+ cout << "Input";
+ } else if (type.forOutput()) {
+ cout << "Output";
+ } else if (type.forPort() && !type.forStream()) {
+ cout << "Port";
+ }
+ if (type.forStream()) {
+ cout << "Stream";
+ }
+
+ if (type.isFlexible()) {
+ cout << "Flex";
+ }
+
+ cout << type.paramIndex();
+
+ switch (type.kind()) {
+ case C2Param::INFO: cout << "Info"; break;
+ case C2Param::SETTING: cout << "Setting"; break;
+ case C2Param::TUNING: cout << "Tuning"; break;
+ case C2Param::STRUCT: cout << "Struct"; break;
+ default: cout << "Kind" << (int32_t)type.kind(); break;
+ }
+}
+
+void dumpType(C2Param::BaseIndex type) {
+ using namespace std;
+ cout << (type.isVendor() ? "Vendor" : "C2");
+ if (type.isFlexible()) {
+ cout << "Flex";
+ }
+
+ cout << type.paramIndex() << "Struct";
+}
+
+void dumpType(FD::Type type) {
+ using namespace std;
+ switch (type) {
+ case FD::BLOB: cout << "blob "; break;
+ case FD::FLOAT: cout << "float "; break;
+ case FD::INT32: cout << "int32_t "; break;
+ case FD::INT64: cout << "int64_t "; break;
+ case FD::UINT32: cout << "uint32_t "; break;
+ case FD::UINT64: cout << "uint64_t "; break;
+ case FD::STRING: cout << "char "; break;
+ default:
+ cout << "struct ";
+ dumpType((C2Param::Type)type);
+ break;
+ }
+}
+
+void dumpStruct(const C2StructDescriptor &sd) {
+ using namespace std;
+ cout << "struct ";
+ dumpType(sd.baseIndex());
+ cout << " {" << endl;
+ //C2FieldDescriptor &f;
+ for (const C2FieldDescriptor &f : sd) {
+ PrintTo(f, &cout);
+ cout << endl;
+
+ if (f.namedValues().size()) {
+ cout << ".named(";
+ const char *sep = "";
+ for (const FD::named_value_type &p : f.namedValues()) {
+ cout << sep << p.first << "=";
+ switch (f.type()) {
+ case C2Value::INT32: cout << get(p.second, (int32_t *)0); break;
+ case C2Value::INT64: cout << get(p.second, (int64_t *)0); break;
+ case C2Value::UINT32: cout << get(p.second, (uint32_t *)0); break;
+ case C2Value::UINT64: cout << get(p.second, (uint64_t *)0); break;
+ case C2Value::FLOAT: cout << get(p.second, (float *)0); break;
+ default: cout << "???"; break;
+ }
+ sep = ",";
+ }
+ cout << ")";
+ }
+ }
+
+ cout << "};" << endl;
+}
+
+void dumpDesc(const C2ParamDescriptor &pd) {
+ using namespace std;
+ if (pd.isRequired()) {
+ cout << "required ";
+ }
+ if (pd.isPersistent()) {
+ cout << "persistent ";
+ }
+ cout << "struct ";
+ dumpType(pd.type());
+ cout << " " << pd.name() << ";" << endl;
+}
+
+TEST_F(C2ParamTest, ReflectorTest) {
+ C2ComponentDomainInfo domainInfo;
+ std::shared_ptr<C2ComponentInterface> comp(new MyComponentInstance);
+ std::vector<C2FieldSupportedValues> values;
+
+ std::unique_ptr<C2StructDescriptor> desc{
+ comp->getParamReflector()->describe(C2ComponentDomainInfo::indexFlags)};
+ dumpStruct(*desc);
+
+ EXPECT_EQ(
+ C2_OK,
+ comp->getSupportedValues(
+ { C2ParamField(&domainInfo, &C2ComponentDomainInfo::mValue) },
+ &values)
+ );
+
+ for (const C2FieldSupportedValues &sv : values) {
+ dumpFSV(sv, &domainInfo.mValue);
+ }
+}
+
+C2ENUM(Enum1, uint32_t,
+ Enum1Value1,
+ Enum1Value2,
+ Enum1Value4 = Enum1Value2 + 2,
+);
+
+C2ENUM_CUSTOM_PREFIX(Enum2, uint32_t, "Enum",
+ Enum2Value1,
+ Enum2Value2,
+ Enum2Value4 = Enum1Value2 + 2,
+);
+
+C2ENUM_CUSTOM_NAMES(Enum3, uint8_t,
+ ({ { "value1", Enum3Value1 },
+ { "value2", Enum3Value2 },
+ { "value4", Enum3Value4 },
+ { "invalid", Invalid } }),
+ Enum3Value1,
+ Enum3Value2,
+ Enum3Value4 = Enum3Value2 + 2,
+ Invalid,
+);
+
+TEST_F(C2ParamTest, EnumUtilsTest) {
+ std::vector<std::pair<C2String, Enum3>> pairs ( { { "value1", Enum3Value1 },
+ { "value2", Enum3Value2 },
+ { "value4", Enum3Value4 },
+ { "invalid", Invalid } });
+ Enum3 e3;
+ FD::namedValuesFor(e3);
+}
+
+TEST_F(C2ParamTest, ParamUtilsTest) {
+ // upper case
+ EXPECT_EQ("yes", C2ParamUtils::camelCaseToDashed("YES"));
+ EXPECT_EQ("no", C2ParamUtils::camelCaseToDashed("NO"));
+ EXPECT_EQ("yes-no", C2ParamUtils::camelCaseToDashed("YES_NO"));
+ EXPECT_EQ("yes-no", C2ParamUtils::camelCaseToDashed("YES__NO"));
+ EXPECT_EQ("a2dp", C2ParamUtils::camelCaseToDashed("A2DP"));
+ EXPECT_EQ("mp2-ts", C2ParamUtils::camelCaseToDashed("MP2_TS"));
+ EXPECT_EQ("block-2d", C2ParamUtils::camelCaseToDashed("BLOCK_2D"));
+ EXPECT_EQ("mpeg-2-ts", C2ParamUtils::camelCaseToDashed("MPEG_2_TS"));
+ EXPECT_EQ("_hidden-value", C2ParamUtils::camelCaseToDashed("_HIDDEN_VALUE"));
+ EXPECT_EQ("__hidden-value2", C2ParamUtils::camelCaseToDashed("__HIDDEN_VALUE2"));
+ EXPECT_EQ("__hidden-value-2", C2ParamUtils::camelCaseToDashed("__HIDDEN_VALUE_2"));
+
+ // camel case
+ EXPECT_EQ("yes", C2ParamUtils::camelCaseToDashed("Yes"));
+ EXPECT_EQ("no", C2ParamUtils::camelCaseToDashed("No"));
+ EXPECT_EQ("yes-no", C2ParamUtils::camelCaseToDashed("YesNo"));
+ EXPECT_EQ("yes-no", C2ParamUtils::camelCaseToDashed("Yes_No"));
+ EXPECT_EQ("mp2-ts", C2ParamUtils::camelCaseToDashed("MP2Ts"));
+ EXPECT_EQ("block-2d", C2ParamUtils::camelCaseToDashed("Block2D"));
+ EXPECT_EQ("mpeg-2-ts", C2ParamUtils::camelCaseToDashed("Mpeg2ts"));
+ EXPECT_EQ("_hidden-value", C2ParamUtils::camelCaseToDashed("_HiddenValue"));
+ EXPECT_EQ("__hidden-value-2", C2ParamUtils::camelCaseToDashed("__HiddenValue2"));
+
+ // mixed case
+ EXPECT_EQ("mp2t-s", C2ParamUtils::camelCaseToDashed("MP2T_s"));
+ EXPECT_EQ("block-2d", C2ParamUtils::camelCaseToDashed("Block_2D"));
+ EXPECT_EQ("block-2-d", C2ParamUtils::camelCaseToDashed("Block2_D"));
+ EXPECT_EQ("mpeg-2-ts", C2ParamUtils::camelCaseToDashed("Mpeg_2ts"));
+ EXPECT_EQ("mpeg-2-ts", C2ParamUtils::camelCaseToDashed("Mpeg_2_TS"));
+ EXPECT_EQ("_hidden-value", C2ParamUtils::camelCaseToDashed("_Hidden__VALUE"));
+ EXPECT_EQ("__hidden-value-2", C2ParamUtils::camelCaseToDashed("__HiddenValue_2"));
+ EXPECT_EQ("_2", C2ParamUtils::camelCaseToDashed("_2"));
+ EXPECT_EQ("__23", C2ParamUtils::camelCaseToDashed("__23"));
+}
+
+TEST_F(C2ParamTest, C2ValueTest) {
+ C2Value val;
+ int32_t i32 = -32;
+ int64_t i64 = -64;
+ uint32_t u32 = 32;
+ uint64_t u64 = 64;
+ float fp = 1.5f;
+
+ EXPECT_EQ(C2Value::NO_INIT, val.type());
+ EXPECT_EQ(false, val.get(&i32));
+ EXPECT_EQ(-32, i32);
+ EXPECT_EQ(false, val.get(&i64));
+ EXPECT_EQ(-64, i64);
+ EXPECT_EQ(false, val.get(&u32));
+ EXPECT_EQ(32u, u32);
+ EXPECT_EQ(false, val.get(&u64));
+ EXPECT_EQ(64u, u64);
+ EXPECT_EQ(false, val.get(&fp));
+ EXPECT_EQ(1.5f, fp);
+
+ val = int32_t(-3216);
+ EXPECT_EQ(C2Value::INT32, val.type());
+ EXPECT_EQ(true, val.get(&i32));
+ EXPECT_EQ(-3216, i32);
+ EXPECT_EQ(false, val.get(&i64));
+ EXPECT_EQ(-64, i64);
+ EXPECT_EQ(false, val.get(&u32));
+ EXPECT_EQ(32u, u32);
+ EXPECT_EQ(false, val.get(&u64));
+ EXPECT_EQ(64u, u64);
+ EXPECT_EQ(false, val.get(&fp));
+ EXPECT_EQ(1.5f, fp);
+
+ val = uint32_t(3216);
+ EXPECT_EQ(C2Value::UINT32, val.type());
+ EXPECT_EQ(false, val.get(&i32));
+ EXPECT_EQ(-3216, i32);
+ EXPECT_EQ(false, val.get(&i64));
+ EXPECT_EQ(-64, i64);
+ EXPECT_EQ(true, val.get(&u32));
+ EXPECT_EQ(3216u, u32);
+ EXPECT_EQ(false, val.get(&u64));
+ EXPECT_EQ(64u, u64);
+ EXPECT_EQ(false, val.get(&fp));
+ EXPECT_EQ(1.5f, fp);
+
+ val = int64_t(-6432);
+ EXPECT_EQ(C2Value::INT64, val.type());
+ EXPECT_EQ(false, val.get(&i32));
+ EXPECT_EQ(-3216, i32);
+ EXPECT_EQ(true, val.get(&i64));
+ EXPECT_EQ(-6432, i64);
+ EXPECT_EQ(false, val.get(&u32));
+ EXPECT_EQ(3216u, u32);
+ EXPECT_EQ(false, val.get(&u64));
+ EXPECT_EQ(64u, u64);
+ EXPECT_EQ(false, val.get(&fp));
+ EXPECT_EQ(1.5f, fp);
+
+ val = uint64_t(6432);
+ EXPECT_EQ(C2Value::UINT64, val.type());
+ EXPECT_EQ(false, val.get(&i32));
+ EXPECT_EQ(-3216, i32);
+ EXPECT_EQ(false, val.get(&i64));
+ EXPECT_EQ(-6432, i64);
+ EXPECT_EQ(false, val.get(&u32));
+ EXPECT_EQ(3216u, u32);
+ EXPECT_EQ(true, val.get(&u64));
+ EXPECT_EQ(6432u, u64);
+ EXPECT_EQ(false, val.get(&fp));
+ EXPECT_EQ(1.5f, fp);
+
+ val = 15.25f;
+ EXPECT_EQ(C2Value::FLOAT, val.type());
+ EXPECT_EQ(false, val.get(&i32));
+ EXPECT_EQ(-3216, i32);
+ EXPECT_EQ(false, val.get(&i64));
+ EXPECT_EQ(-6432, i64);
+ EXPECT_EQ(false, val.get(&u32));
+ EXPECT_EQ(3216u, u32);
+ EXPECT_EQ(false, val.get(&u64));
+ EXPECT_EQ(6432u, u64);
+ EXPECT_EQ(true, val.get(&fp));
+ EXPECT_EQ(15.25f, fp);
+}
+
+} // namespace android
diff --git a/media/libstagefright/codec2/tests/C2_test.cpp b/media/libstagefright/codec2/tests/C2_test.cpp
new file mode 100644
index 0000000..92a3d91
--- /dev/null
+++ b/media/libstagefright/codec2/tests/C2_test.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "C2_test"
+
+#include <gtest/gtest.h>
+
+#include <C2.h>
+
+namespace android {
+
+/* ======================================= STATIC TESTS ======================================= */
+
+template<int N>
+struct c2_const_checker
+{
+ inline constexpr static int num() { return N; }
+};
+
+constexpr auto min_i32_i32 = c2_min(int32_t(1), int32_t(2));
+static_assert(std::is_same<decltype(min_i32_i32), const int32_t>::value, "should be int32_t");
+constexpr auto min_i32_i64 = c2_min(int32_t(3), int64_t(2));
+static_assert(std::is_same<decltype(min_i32_i64), const int64_t>::value, "should be int64_t");
+constexpr auto min_i8_i32 = c2_min(int8_t(0xff), int32_t(0xffffffff));
+static_assert(std::is_same<decltype(min_i8_i32), const int32_t>::value, "should be int32_t");
+
+static_assert(c2_const_checker<min_i32_i32>::num() == 1, "should be 1");
+static_assert(c2_const_checker<min_i32_i64>::num() == 2, "should be 2");
+static_assert(c2_const_checker<min_i8_i32>::num() == 0xffffffff, "should be 0xffffffff");
+
+constexpr auto min_u32_u32 = c2_min(uint32_t(1), uint32_t(2));
+static_assert(std::is_same<decltype(min_u32_u32), const uint32_t>::value, "should be uint32_t");
+constexpr auto min_u32_u64 = c2_min(uint32_t(3), uint64_t(2));
+static_assert(std::is_same<decltype(min_u32_u64), const uint32_t>::value, "should be uint32_t");
+constexpr auto min_u32_u8 = c2_min(uint32_t(0xffffffff), uint8_t(0xff));
+static_assert(std::is_same<decltype(min_u32_u8), const uint8_t>::value, "should be uint8_t");
+
+static_assert(c2_const_checker<min_u32_u32>::num() == 1, "should be 1");
+static_assert(c2_const_checker<min_u32_u64>::num() == 2, "should be 2");
+static_assert(c2_const_checker<min_u32_u8>::num() == 0xff, "should be 0xff");
+
+constexpr auto max_i32_i32 = c2_max(int32_t(1), int32_t(2));
+static_assert(std::is_same<decltype(max_i32_i32), const int32_t>::value, "should be int32_t");
+constexpr auto max_i32_i64 = c2_max(int32_t(3), int64_t(2));
+static_assert(std::is_same<decltype(max_i32_i64), const int64_t>::value, "should be int64_t");
+constexpr auto max_i8_i32 = c2_max(int8_t(0xff), int32_t(0xffffffff));
+static_assert(std::is_same<decltype(max_i8_i32), const int32_t>::value, "should be int32_t");
+
+static_assert(c2_const_checker<max_i32_i32>::num() == 2, "should be 2");
+static_assert(c2_const_checker<max_i32_i64>::num() == 3, "should be 3");
+static_assert(c2_const_checker<max_i8_i32>::num() == 0xffffffff, "should be 0xffffffff");
+
+constexpr auto max_u32_u32 = c2_max(uint32_t(1), uint32_t(2));
+static_assert(std::is_same<decltype(max_u32_u32), const uint32_t>::value, "should be uint32_t");
+constexpr auto max_u32_u64 = c2_max(uint32_t(3), uint64_t(2));
+static_assert(std::is_same<decltype(max_u32_u64), const uint64_t>::value, "should be uint64_t");
+constexpr auto max_u32_u8 = c2_max(uint32_t(0x7fffffff), uint8_t(0xff));
+static_assert(std::is_same<decltype(max_u32_u8), const uint32_t>::value, "should be uint32_t");
+
+static_assert(c2_const_checker<max_u32_u32>::num() == 2, "should be 2");
+static_assert(c2_const_checker<max_u32_u64>::num() == 3, "should be 3");
+static_assert(c2_const_checker<max_u32_u8>::num() == 0x7fffffff, "should be 0x7fffffff");
+
+} // namespace android
diff --git a/media/libstagefright/codec2/tests/vndk/C2UtilTest.cpp b/media/libstagefright/codec2/tests/vndk/C2UtilTest.cpp
new file mode 100644
index 0000000..7a1374bb
--- /dev/null
+++ b/media/libstagefright/codec2/tests/vndk/C2UtilTest.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <util/_C2MacroUtils.h>
+
+/** \file
+ * Tests for vndk/util.
+ */
+
+/* --------------------------------------- _C2MacroUtils --------------------------------------- */
+
+static_assert(0 == _C2_ARGC(), "should be 0");
+static_assert(1 == _C2_ARGC(1), "should be 1");
+static_assert(2 == _C2_ARGC(1, 2), "should be 2");
+static_assert(64 == _C2_ARGC(
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+ 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64), "should be 64");
+
+static_assert(0 == _C2_ARGC(,), "should be 0");
+static_assert(1 == _C2_ARGC(1,), "should be 1");
+static_assert(2 == _C2_ARGC(1, 2,), "should be 2");
+static_assert(64 == _C2_ARGC(
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+ 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,), "should be 64");
+
diff --git a/media/libstagefright/codec2/vndk/include/util/C2ParamUtils.h b/media/libstagefright/codec2/vndk/include/util/C2ParamUtils.h
new file mode 100644
index 0000000..edae303
--- /dev/null
+++ b/media/libstagefright/codec2/vndk/include/util/C2ParamUtils.h
@@ -0,0 +1,302 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef C2UTILS_PARAM_UTILS_H_
+#define C2UTILS_PARAM_UTILS_H_
+
+#include <C2Param.h>
+#include <util/_C2MacroUtils.h>
+
+#include <iostream>
+
+/** \file
+ * Utilities for parameter handling to be used by Codec2 implementations.
+ */
+
+namespace android {
+
+/// \cond INTERNAL
+
+/* ---------------------------- UTILITIES FOR ENUMERATION REFLECTION ---------------------------- */
+
+/**
+ * Utility class that allows ignoring enum value assignment (e.g. both '(_C2EnumConst)kValue = x'
+ * and '(_C2EnumConst)kValue' will eval to kValue).
+ */
+template<typename T>
+class _C2EnumConst {
+public:
+ // implicit conversion from T
+ inline _C2EnumConst(T value) : _mValue(value) {}
+ // implicit conversion to T
+ inline operator T() { return _mValue; }
+ // implicit conversion to C2Value::Primitive
+ inline operator C2Value::Primitive() { return (T)_mValue; }
+ // ignore assignment and return T here to avoid implicit conversion to T later
+ inline T &operator =(T value __unused) { return _mValue; }
+private:
+ T _mValue;
+};
+
+/// mapper to get name of enum
+/// \note this will contain any initialization, which we will remove when converting to lower-case
+#define _C2_GET_ENUM_NAME(x, y) #x
+/// mapper to get value of enum
+#define _C2_GET_ENUM_VALUE(x, type) (_C2EnumConst<type>)x
+
+/// \endcond
+
+#define DEFINE_C2_ENUM_VALUE_AUTO_HELPER(name, type, prefix, ...) \
+template<> C2FieldDescriptor::named_values_type C2FieldDescriptor::namedValuesFor(const name &r __unused) { \
+ return C2ParamUtils::sanitizeEnumValues( \
+ std::vector<C2Value::Primitive> { _C2_MAP(_C2_GET_ENUM_VALUE, type, __VA_ARGS__) }, \
+ { _C2_MAP(_C2_GET_ENUM_NAME, type, __VA_ARGS__) }, \
+ prefix); \
+}
+
+#define DEFINE_C2_ENUM_VALUE_CUSTOM_HELPER(name, type, names, ...) \
+template<> C2FieldDescriptor::named_values_type C2FieldDescriptor::namedValuesFor(const name &r __unused) { \
+ return C2ParamUtils::customEnumValues( \
+ std::vector<std::pair<C2StringLiteral, name>> names); \
+}
+
+
+class C2ParamUtils {
+private:
+ static size_t countLeadingUnderscores(C2StringLiteral a) {
+ size_t i = 0;
+ while (a[i] == '_') {
+ ++i;
+ }
+ return i;
+ }
+
+ static size_t countMatching(C2StringLiteral a, const C2String &b) {
+ for (size_t i = 0; i < b.size(); ++i) {
+ if (!a[i] || a[i] != b[i]) {
+ return i;
+ }
+ }
+ return b.size();
+ }
+
+ // ABCDef => abc-def
+ // ABCD2ef => abcd2-ef // 0
+ // ABCD2Ef => abcd2-ef // -1
+ // AbcDef => abc-def // -1
+ // Abc2Def => abc-2def
+ // Abc2def => abc-2-def
+ // _Yo => _yo
+ // _yo => _yo
+ // C2_yo => c2-yo
+ // C2__yo => c2-yo
+
+ static C2String camelCaseToDashed(C2String name) {
+ enum {
+ kNone = '.',
+ kLower = 'a',
+ kUpper = 'A',
+ kDigit = '1',
+ kDash = '-',
+ kUnderscore = '_',
+ } type = kNone;
+ size_t word_start = 0;
+ for (size_t ix = 0; ix < name.size(); ++ix) {
+ /* std::cout << name.substr(0, word_start) << "|"
+ << name.substr(word_start, ix - word_start) << "["
+ << name.substr(ix, 1) << "]" << name.substr(ix + 1)
+ << ": " << (char)type << std::endl; */
+ if (isupper(name[ix])) {
+ if (type == kLower) {
+ name.insert(ix++, 1, '-');
+ word_start = ix;
+ }
+ name[ix] = tolower(name[ix]);
+ type = kUpper;
+ } else if (islower(name[ix])) {
+ if (type == kDigit && ix > 0) {
+ name.insert(ix++, 1, '-');
+ word_start = ix;
+ } else if (type == kUpper && ix > word_start + 1) {
+ name.insert(ix++ - 1, 1, '-');
+ word_start = ix - 1;
+ }
+ type = kLower;
+ } else if (isdigit(name[ix])) {
+ if (type == kLower) {
+ name.insert(ix++, 1, '-');
+ word_start = ix;
+ }
+ type = kDigit;
+ } else if (name[ix] == '_') {
+ if (type == kDash) {
+ name.erase(ix--, 1);
+ } else if (type != kNone && type != kUnderscore) {
+ name[ix] = '-';
+ type = kDash;
+ word_start = ix + 1;
+ } else {
+ type = kUnderscore;
+ word_start = ix + 1;
+ }
+ } else {
+ name.resize(ix);
+ }
+ }
+ // std::cout << "=> " << name << std::endl;
+ return name;
+ }
+
+ static std::vector<C2String> sanitizeEnumValueNames(
+ const std::vector<C2StringLiteral> names,
+ C2StringLiteral _prefix = NULL) {
+ std::vector<C2String> sanitizedNames;
+ C2String prefix;
+ size_t extraUnderscores = 0;
+ bool first = true;
+ if (_prefix) {
+ extraUnderscores = countLeadingUnderscores(_prefix);
+ prefix = _prefix + extraUnderscores;
+ first = false;
+ // std::cout << "prefix:" << prefix << ", underscores:" << extraUnderscores << std::endl;
+ }
+
+ // calculate prefix and minimum leading underscores
+ for (C2StringLiteral s : names) {
+ // std::cout << s << std::endl;
+ size_t underscores = countLeadingUnderscores(s);
+ if (first) {
+ extraUnderscores = underscores;
+ prefix = s + underscores;
+ first = false;
+ } else {
+ size_t matching = countMatching(
+ s + underscores,
+ prefix);
+ prefix.resize(matching);
+ extraUnderscores = std::min(underscores, extraUnderscores);
+ }
+ // std::cout << "prefix:" << prefix << ", underscores:" << extraUnderscores << std::endl;
+ if (prefix.size() == 0 && extraUnderscores == 0) {
+ break;
+ }
+ }
+
+ // we swallow the first underscore after upper case prefixes
+ bool upperCasePrefix = true;
+ for (size_t i = 0; i < prefix.size(); ++i) {
+ if (islower(prefix[i])) {
+ upperCasePrefix = false;
+ break;
+ }
+ }
+
+ for (C2StringLiteral s : names) {
+ size_t underscores = countLeadingUnderscores(s);
+ C2String sanitized = C2String(s, underscores - extraUnderscores);
+ sanitized.append(s + prefix.size() + underscores +
+ (upperCasePrefix && s[prefix.size() + underscores] == '_'));
+ sanitizedNames.push_back(camelCaseToDashed(sanitized));
+ }
+
+ for (C2String s : sanitizedNames) {
+ std::cout << s << std::endl;
+ }
+
+ return sanitizedNames;
+ }
+
+ friend class C2ParamTest_ParamUtilsTest_Test;
+
+public:
+ static std::vector<C2String> getEnumValuesFromString(C2StringLiteral value) {
+ std::vector<C2String> foundNames;
+ size_t pos = 0, len = strlen(value);
+ do {
+ size_t endPos = strcspn(value + pos, " ,=") + pos;
+ if (endPos > pos) {
+ foundNames.emplace_back(value + pos, endPos - pos);
+ }
+ if (value[endPos] && value[endPos] != ',') {
+ endPos += strcspn(value + endPos, ",");
+ }
+ pos = strspn(value + endPos, " ,") + endPos;
+ } while (pos < len);
+ return foundNames;
+ }
+
+ template<typename T>
+ static C2FieldDescriptor::named_values_type sanitizeEnumValues(
+ std::vector<T> values,
+ std::vector<C2StringLiteral> names,
+ C2StringLiteral prefix = NULL) {
+ C2FieldDescriptor::named_values_type namedValues;
+ std::vector<C2String> sanitizedNames = sanitizeEnumValueNames(names, prefix);
+ for (size_t i = 0; i < values.size() && i < sanitizedNames.size(); ++i) {
+ namedValues.emplace_back(sanitizedNames[i], values[i]);
+ }
+ return namedValues;
+ }
+
+ template<typename E>
+ static C2FieldDescriptor::named_values_type customEnumValues(
+ std::vector<std::pair<C2StringLiteral, E>> items) {
+ C2FieldDescriptor::named_values_type namedValues;
+ for (auto &item : items) {
+ namedValues.emplace_back(item.first, item.second);
+ }
+ return namedValues;
+ }
+};
+
+/* ---------------------------- UTILITIES FOR PARAMETER REFLECTION ---------------------------- */
+
+/* ======================== UTILITY TEMPLATES FOR PARAMETER REFLECTION ======================== */
+
+#if 1
+template<typename... Params>
+class C2_HIDE _C2Tuple { };
+
+C2_HIDE
+void addC2Params(std::list<const C2FieldDescriptor> &, _C2Tuple<> *) {
+}
+
+template<typename T, typename... Params>
+C2_HIDE
+void addC2Params(std::list<const C2FieldDescriptor> &fields, _C2Tuple<T, Params...> *)
+{
+ //C2Param::index_t index = T::baseIndex;
+ //(void)index;
+ fields.insert(fields.end(), T::fieldList);
+ addC2Params(fields, (_C2Tuple<Params...> *)nullptr);
+}
+
+template<typename... Params>
+C2_HIDE
+std::list<const C2FieldDescriptor> describeC2Params() {
+ std::list<const C2FieldDescriptor> fields;
+ addC2Params(fields, (_C2Tuple<Params...> *)nullptr);
+ return fields;
+}
+
+#endif
+
+/* ---------------------------- UTILITIES FOR ENUMERATION REFLECTION ---------------------------- */
+
+} // namespace android
+
+#endif // C2UTILS_PARAM_UTILS_H_
+
diff --git a/media/libstagefright/codec2/vndk/include/util/_C2MacroUtils.h b/media/libstagefright/codec2/vndk/include/util/_C2MacroUtils.h
new file mode 100644
index 0000000..04e9ba5
--- /dev/null
+++ b/media/libstagefright/codec2/vndk/include/util/_C2MacroUtils.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef C2UTILS_MACRO_UTILS_H_
+#define C2UTILS_MACRO_UTILS_H_
+
+/** \file
+ * Macro utilities for the utils library used by Codec2 implementations.
+ */
+
+/// \if 0
+
+/* --------------------------------- VARIABLE ARGUMENT COUNTING --------------------------------- */
+
+// remove empty arguments - _C2_ARG() expands to '', while _C2_ARG(x) expands to ', x'
+// _C2_ARGn(...) does the same for n arguments
+#define _C2_ARG(...) , ##__VA_ARGS__
+#define _C2_ARG2(_1, _2) _C2_ARG(_1) _C2_ARG(_2)
+#define _C2_ARG4(_1, _2, _3, _4) _C2_ARG2(_1, _2) _C2_ARG2(_3, _4)
+#define _C2_ARG8(_1, _2, _3, _4, _5, _6, _7, _8) _C2_ARG4(_1, _2, _3, _4) _C2_ARG4(_5, _6, _7, _8)
+#define _C2_ARG16(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16) \
+ _C2_ARG8(_1, _2, _3, _4, _5, _6, _7, _8) _C2_ARG8(_9, _10, _11, _12, _13, _14, _15, _16)
+
+// return the 65th argument
+#define _C2_ARGC_3(_, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, \
+ _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, \
+ _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, \
+ _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, ...) _64
+
+/// \endif
+
+/**
+ * Returns the number of arguments.
+ */
+// We do this by prepending one dummy argument (0) and appending 65 designed values such
+// that the 65th element will be the number of arguments.
+#define _C2_ARGC(...) _C2_ARGC_1(0, ##__VA_ARGS__, \
+ 64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, \
+ 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, \
+ 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+
+/// \if 0
+
+// step 1. remove empty arguments - this is needed to allow trailing comma in enum definitions
+// (NOTE: we don't know which argument will have this trailing comma so we have to try all)
+#define _C2_ARGC_1(_, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, \
+ _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, \
+ _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, \
+ _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, ...) \
+ _C2_ARGC_2(_ _C2_ARG(_0) \
+ _C2_ARG16(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16) \
+ _C2_ARG16(_17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32) \
+ _C2_ARG16(_33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48) \
+ _C2_ARG16(_49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64), \
+ ##__VA_ARGS__)
+
+// step 2. this is needed as removed arguments cannot be passed directly as empty into a macro
+#define _C2_ARGC_2(...) _C2_ARGC_3(__VA_ARGS__)
+
+/// \endif
+
+/* -------------------------------- VARIABLE ARGUMENT CONVERSION -------------------------------- */
+
+/// \if 0
+
+// macros that convert _1, _2, _3, ... to fn(_1, arg), fn(_2, arg), fn(_3, arg), ...
+#define _C2_MAP_64(fn, arg, head, ...) fn(head, arg), _C2_MAP_63(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_63(fn, arg, head, ...) fn(head, arg), _C2_MAP_62(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_62(fn, arg, head, ...) fn(head, arg), _C2_MAP_61(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_61(fn, arg, head, ...) fn(head, arg), _C2_MAP_60(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_60(fn, arg, head, ...) fn(head, arg), _C2_MAP_59(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_59(fn, arg, head, ...) fn(head, arg), _C2_MAP_58(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_58(fn, arg, head, ...) fn(head, arg), _C2_MAP_57(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_57(fn, arg, head, ...) fn(head, arg), _C2_MAP_56(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_56(fn, arg, head, ...) fn(head, arg), _C2_MAP_55(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_55(fn, arg, head, ...) fn(head, arg), _C2_MAP_54(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_54(fn, arg, head, ...) fn(head, arg), _C2_MAP_53(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_53(fn, arg, head, ...) fn(head, arg), _C2_MAP_52(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_52(fn, arg, head, ...) fn(head, arg), _C2_MAP_51(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_51(fn, arg, head, ...) fn(head, arg), _C2_MAP_50(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_50(fn, arg, head, ...) fn(head, arg), _C2_MAP_49(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_49(fn, arg, head, ...) fn(head, arg), _C2_MAP_48(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_48(fn, arg, head, ...) fn(head, arg), _C2_MAP_47(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_47(fn, arg, head, ...) fn(head, arg), _C2_MAP_46(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_46(fn, arg, head, ...) fn(head, arg), _C2_MAP_45(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_45(fn, arg, head, ...) fn(head, arg), _C2_MAP_44(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_44(fn, arg, head, ...) fn(head, arg), _C2_MAP_43(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_43(fn, arg, head, ...) fn(head, arg), _C2_MAP_42(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_42(fn, arg, head, ...) fn(head, arg), _C2_MAP_41(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_41(fn, arg, head, ...) fn(head, arg), _C2_MAP_40(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_40(fn, arg, head, ...) fn(head, arg), _C2_MAP_39(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_39(fn, arg, head, ...) fn(head, arg), _C2_MAP_38(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_38(fn, arg, head, ...) fn(head, arg), _C2_MAP_37(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_37(fn, arg, head, ...) fn(head, arg), _C2_MAP_36(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_36(fn, arg, head, ...) fn(head, arg), _C2_MAP_35(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_35(fn, arg, head, ...) fn(head, arg), _C2_MAP_34(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_34(fn, arg, head, ...) fn(head, arg), _C2_MAP_33(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_33(fn, arg, head, ...) fn(head, arg), _C2_MAP_32(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_32(fn, arg, head, ...) fn(head, arg), _C2_MAP_31(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_31(fn, arg, head, ...) fn(head, arg), _C2_MAP_30(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_30(fn, arg, head, ...) fn(head, arg), _C2_MAP_29(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_29(fn, arg, head, ...) fn(head, arg), _C2_MAP_28(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_28(fn, arg, head, ...) fn(head, arg), _C2_MAP_27(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_27(fn, arg, head, ...) fn(head, arg), _C2_MAP_26(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_26(fn, arg, head, ...) fn(head, arg), _C2_MAP_25(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_25(fn, arg, head, ...) fn(head, arg), _C2_MAP_24(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_24(fn, arg, head, ...) fn(head, arg), _C2_MAP_23(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_23(fn, arg, head, ...) fn(head, arg), _C2_MAP_22(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_22(fn, arg, head, ...) fn(head, arg), _C2_MAP_21(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_21(fn, arg, head, ...) fn(head, arg), _C2_MAP_20(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_20(fn, arg, head, ...) fn(head, arg), _C2_MAP_19(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_19(fn, arg, head, ...) fn(head, arg), _C2_MAP_18(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_18(fn, arg, head, ...) fn(head, arg), _C2_MAP_17(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_17(fn, arg, head, ...) fn(head, arg), _C2_MAP_16(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_16(fn, arg, head, ...) fn(head, arg), _C2_MAP_15(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_15(fn, arg, head, ...) fn(head, arg), _C2_MAP_14(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_14(fn, arg, head, ...) fn(head, arg), _C2_MAP_13(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_13(fn, arg, head, ...) fn(head, arg), _C2_MAP_12(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_12(fn, arg, head, ...) fn(head, arg), _C2_MAP_11(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_11(fn, arg, head, ...) fn(head, arg), _C2_MAP_10(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_10(fn, arg, head, ...) fn(head, arg), _C2_MAP_9(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_9(fn, arg, head, ...) fn(head, arg), _C2_MAP_8(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_8(fn, arg, head, ...) fn(head, arg), _C2_MAP_7(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_7(fn, arg, head, ...) fn(head, arg), _C2_MAP_6(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_6(fn, arg, head, ...) fn(head, arg), _C2_MAP_5(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_5(fn, arg, head, ...) fn(head, arg), _C2_MAP_4(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_4(fn, arg, head, ...) fn(head, arg), _C2_MAP_3(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_3(fn, arg, head, ...) fn(head, arg), _C2_MAP_2(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_2(fn, arg, head, ...) fn(head, arg), _C2_MAP_1(fn, arg, ##__VA_ARGS__)
+#define _C2_MAP_1(fn, arg, head, ...) fn(head, arg)
+
+/// \endif
+
+/**
+ * Maps each argument using another macro x -> fn(x, arg)
+ */
+// use wrapper to call the proper mapper based on the number of arguments
+#define _C2_MAP(fn, arg, ...) _C2_MAP__(_C2_ARGC(__VA_ARGS__), fn, arg, ##__VA_ARGS__)
+
+/// \if 0
+
+// evaluate _n so it becomes a number
+#define _C2_MAP__(_n, fn, arg, ...) _C2_MAP_(_n, fn, arg, __VA_ARGS__)
+// call the proper mapper
+#define _C2_MAP_(_n, fn, arg, ...) _C2_MAP_##_n (fn, arg, __VA_ARGS__)
+
+/// \endif
+
+#endif // C2UTILS_MACRO_UTILS_H_
diff --git a/media/libstagefright/codecs/aacdec/Android.bp b/media/libstagefright/codecs/aacdec/Android.bp
index 9ee9b9d..6e04c1e 100644
--- a/media/libstagefright/codecs/aacdec/Android.bp
+++ b/media/libstagefright/codecs/aacdec/Android.bp
@@ -18,6 +18,10 @@
"signed-integer-overflow",
"unsigned-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
static_libs: ["libFraunhoferAAC"],
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index 1c5e3c6..9fbdb72 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -62,6 +62,7 @@
OMX_AUDIO_AACObjectHE_PS,
OMX_AUDIO_AACObjectLD,
OMX_AUDIO_AACObjectELD,
+ OMX_AUDIO_AACObjectER_Scalable,
};
SoftAAC2::SoftAAC2(
diff --git a/media/libstagefright/codecs/aacenc/Android.bp b/media/libstagefright/codecs/aacenc/Android.bp
index 0297630..1a7ffca 100644
--- a/media/libstagefright/codecs/aacenc/Android.bp
+++ b/media/libstagefright/codecs/aacenc/Android.bp
@@ -126,6 +126,10 @@
"signed-integer-overflow",
"unsigned-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
static_libs: ["libFraunhoferAAC"],
diff --git a/media/libstagefright/codecs/aacenc/SoftAACEncoder.cpp b/media/libstagefright/codecs/aacenc/SoftAACEncoder.cpp
index e16ea33..0704294 100644
--- a/media/libstagefright/codecs/aacenc/SoftAACEncoder.cpp
+++ b/media/libstagefright/codecs/aacenc/SoftAACEncoder.cpp
@@ -266,10 +266,6 @@
return OMX_ErrorUndefined;
}
- if (formatParams->nIndex > 0) {
- return OMX_ErrorNoMore;
- }
-
if ((formatParams->nPortIndex == 0
&& formatParams->eEncoding != OMX_AUDIO_CodingPCM)
|| (formatParams->nPortIndex == 1
diff --git a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp
index 5f516cb..96e668e 100644
--- a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp
+++ b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp
@@ -16,6 +16,7 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "SoftAACEncoder2"
+#include <log/log.h>
#include <utils/Log.h>
#include "SoftAACEncoder2.h"
@@ -61,6 +62,7 @@
mSentCodecSpecificData(false),
mInputSize(0),
mInputFrame(NULL),
+ mAllocatedFrameSize(0),
mInputTimeUs(-1ll),
mSawInputEOS(false),
mSignalledError(false) {
@@ -303,10 +305,6 @@
return OMX_ErrorUndefined;
}
- if (formatParams->nIndex > 0) {
- return OMX_ErrorNoMore;
- }
-
if ((formatParams->nPortIndex == 0
&& formatParams->eEncoding != OMX_AUDIO_CodingPCM)
|| (formatParams->nPortIndex == 1
@@ -510,6 +508,15 @@
BufferInfo *outInfo = *outQueue.begin();
OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
+
+ if (outHeader->nOffset + encInfo.confSize > outHeader->nAllocLen) {
+ ALOGE("b/34617444");
+ android_errorWriteLog(0x534e4554,"34617444");
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ mSignalledError = true;
+ return;
+ }
+
outHeader->nFilledLen = encInfo.confSize;
outHeader->nFlags = OMX_BUFFERFLAG_CODECCONFIG;
@@ -556,6 +563,15 @@
if (mInputFrame == NULL) {
mInputFrame = new int16_t[numBytesPerInputFrame / sizeof(int16_t)];
+ mAllocatedFrameSize = numBytesPerInputFrame;
+ } else if (mAllocatedFrameSize != numBytesPerInputFrame) {
+ ALOGE("b/34621073: changed size from %d to %d",
+ (int)mAllocatedFrameSize, (int)numBytesPerInputFrame);
+ android_errorWriteLog(0x534e4554,"34621073");
+ delete[] mInputFrame;
+ mInputFrame = new int16_t[numBytesPerInputFrame / sizeof(int16_t)];
+ mAllocatedFrameSize = numBytesPerInputFrame;
+
}
if (mInputSize == 0) {
@@ -706,6 +722,7 @@
delete[] mInputFrame;
mInputFrame = NULL;
mInputSize = 0;
+ mAllocatedFrameSize = 0;
mSentCodecSpecificData = false;
mInputTimeUs = -1ll;
diff --git a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.h b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.h
index f1b81e1..123fd25 100644
--- a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.h
+++ b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.h
@@ -62,6 +62,7 @@
bool mSentCodecSpecificData;
size_t mInputSize;
int16_t *mInputFrame;
+ size_t mAllocatedFrameSize;
int64_t mInputTimeUs;
bool mSawInputEOS;
diff --git a/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp b/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
index edf648d..7553153 100644
--- a/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
+++ b/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
@@ -309,7 +309,7 @@
BufferInfo *outInfo = *outQueue.begin();
OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
- if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+ if ((inHeader->nFlags & OMX_BUFFERFLAG_EOS) && inHeader->nFilledLen == 0) {
inQueue.erase(inQueue.begin());
inInfo->mOwnedByUs = false;
notifyEmptyBufferDone(inHeader);
@@ -471,7 +471,7 @@
mNumSamplesOutput += kNumSamplesPerFrameWB;
}
- if (inHeader->nFilledLen == 0) {
+ if (inHeader->nFilledLen == 0 && (inHeader->nFlags & OMX_BUFFERFLAG_EOS) == 0) {
inInfo->mOwnedByUs = false;
inQueue.erase(inQueue.begin());
inInfo = NULL;
diff --git a/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp b/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp
index aaa6731..f97c44f 100644
--- a/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp
+++ b/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp
@@ -226,10 +226,6 @@
return OMX_ErrorUndefined;
}
- if (formatParams->nIndex > 0) {
- return OMX_ErrorNoMore;
- }
-
if ((formatParams->nPortIndex == 0
&& formatParams->eEncoding != OMX_AUDIO_CodingPCM)
|| (formatParams->nPortIndex == 1
diff --git a/media/libstagefright/codecs/amrwbenc/Android.bp b/media/libstagefright/codecs/amrwbenc/Android.bp
index a3068b6..5c5a122 100644
--- a/media/libstagefright/codecs/amrwbenc/Android.bp
+++ b/media/libstagefright/codecs/amrwbenc/Android.bp
@@ -131,6 +131,13 @@
],
cflags: ["-Werror"],
+ sanitize: {
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
+ },
+
}
//###############################################################################
@@ -151,6 +158,10 @@
misc_undefined: [
"signed-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
static_libs: ["libstagefright_amrwbenc"],
diff --git a/media/libstagefright/codecs/amrwbenc/SampleCode/Android.bp b/media/libstagefright/codecs/amrwbenc/SampleCode/Android.bp
index 9091296..d52fed3 100644
--- a/media/libstagefright/codecs/amrwbenc/SampleCode/Android.bp
+++ b/media/libstagefright/codecs/amrwbenc/SampleCode/Android.bp
@@ -18,4 +18,11 @@
"libstagefright_amrwbenc",
"libstagefright_enc_common",
],
+
+ sanitize: {
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
+ },
}
diff --git a/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.cpp b/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.cpp
index c7973d6..a644b66 100644
--- a/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.cpp
+++ b/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.cpp
@@ -264,10 +264,6 @@
return OMX_ErrorUndefined;
}
- if (formatParams->nIndex > 0) {
- return OMX_ErrorNoMore;
- }
-
if ((formatParams->nPortIndex == 0
&& formatParams->eEncoding != OMX_AUDIO_CodingPCM)
|| (formatParams->nPortIndex == 1
diff --git a/media/libstagefright/codecs/avcdec/Android.bp b/media/libstagefright/codecs/avcdec/Android.bp
index d5c2469..6b996a7 100644
--- a/media/libstagefright/codecs/avcdec/Android.bp
+++ b/media/libstagefright/codecs/avcdec/Android.bp
@@ -23,6 +23,10 @@
misc_undefined: [
"signed-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
ldflags: ["-Wl,-Bsymbolic"],
diff --git a/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp b/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
index 8694c73..248ab6d 100644
--- a/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
+++ b/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
@@ -27,11 +27,10 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/MediaDefs.h>
#include <OMX_VideoExt.h>
+#include <inttypes.h>
namespace android {
-#define PRINT_TIME ALOGV
-
#define componentName "video_decoder.avc"
#define codingType OMX_VIDEO_CodingAVC
#define CODEC_MIME_TYPE MEDIA_MIMETYPE_VIDEO_AVC
@@ -49,58 +48,10 @@
(IVD_CONTROL_API_COMMAND_TYPE_T)IH264D_CMD_CTL_SET_NUM_CORES
static const CodecProfileLevel kProfileLevels[] = {
- { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel1 },
- { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel1b },
- { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel11 },
- { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel12 },
- { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel13 },
- { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel2 },
- { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel21 },
- { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel22 },
- { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel3 },
- { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel31 },
- { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel32 },
- { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel4 },
- { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel41 },
- { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel42 },
- { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel5 },
- { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel51 },
{ OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel52 },
- { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel1 },
- { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel1b },
- { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel11 },
- { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel12 },
- { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel13 },
- { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel2 },
- { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel21 },
- { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel22 },
- { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel3 },
- { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel31 },
- { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel32 },
- { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel4 },
- { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel41 },
- { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel42 },
- { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel5 },
- { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel51 },
{ OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel52 },
- { OMX_VIDEO_AVCProfileHigh, OMX_VIDEO_AVCLevel1 },
- { OMX_VIDEO_AVCProfileHigh, OMX_VIDEO_AVCLevel1b },
- { OMX_VIDEO_AVCProfileHigh, OMX_VIDEO_AVCLevel11 },
- { OMX_VIDEO_AVCProfileHigh, OMX_VIDEO_AVCLevel12 },
- { OMX_VIDEO_AVCProfileHigh, OMX_VIDEO_AVCLevel13 },
- { OMX_VIDEO_AVCProfileHigh, OMX_VIDEO_AVCLevel2 },
- { OMX_VIDEO_AVCProfileHigh, OMX_VIDEO_AVCLevel21 },
- { OMX_VIDEO_AVCProfileHigh, OMX_VIDEO_AVCLevel22 },
- { OMX_VIDEO_AVCProfileHigh, OMX_VIDEO_AVCLevel3 },
- { OMX_VIDEO_AVCProfileHigh, OMX_VIDEO_AVCLevel31 },
- { OMX_VIDEO_AVCProfileHigh, OMX_VIDEO_AVCLevel32 },
- { OMX_VIDEO_AVCProfileHigh, OMX_VIDEO_AVCLevel4 },
- { OMX_VIDEO_AVCProfileHigh, OMX_VIDEO_AVCLevel41 },
- { OMX_VIDEO_AVCProfileHigh, OMX_VIDEO_AVCLevel42 },
- { OMX_VIDEO_AVCProfileHigh, OMX_VIDEO_AVCLevel5 },
- { OMX_VIDEO_AVCProfileHigh, OMX_VIDEO_AVCLevel51 },
{ OMX_VIDEO_AVCProfileHigh, OMX_VIDEO_AVCLevel52 },
};
@@ -126,7 +77,7 @@
1 /* numMinInputBuffers */, kNumBuffers, INPUT_BUF_SIZE,
1 /* numMinOutputBuffers */, kNumBuffers, CODEC_MIME_TYPE);
- GETTIME(&mTimeStart, NULL);
+ mTimeStart = mTimeEnd = systemTime();
// If input dump is enabled, then open create an empty file
GENERATE_FILE_NAMES();
@@ -221,8 +172,7 @@
memset(mTimeStampsValid, 0, sizeof(mTimeStampsValid));
/* Initialize both start and end times */
- gettimeofday(&mTimeStart, NULL);
- gettimeofday(&mTimeEnd, NULL);
+ mTimeStart = mTimeEnd = systemTime();
return OK;
}
@@ -606,7 +556,7 @@
{
ivd_video_decode_ip_t s_dec_ip;
ivd_video_decode_op_t s_dec_op;
- WORD32 timeDelay, timeTaken;
+ nsecs_t timeDelay, timeTaken;
size_t sizeY, sizeUV;
if (!setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, timeStampIx)) {
@@ -618,10 +568,10 @@
// If input dump is enabled, then write to file
DUMP_TO_FILE(mInFile, s_dec_ip.pv_stream_buffer, s_dec_ip.u4_num_Bytes, mInputOffset);
- GETTIME(&mTimeStart, NULL);
+ mTimeStart = systemTime();
/* Compute time elapsed between end of previous decode()
* to start of current decode() */
- TIME_DIFF(mTimeEnd, mTimeStart, timeDelay);
+ timeDelay = mTimeStart - mTimeEnd;
IV_API_CALL_STATUS_T status;
status = ivdec_api_function(mCodecCtx, (void *)&s_dec_ip, (void *)&s_dec_op);
@@ -649,11 +599,12 @@
getVUIParams();
- GETTIME(&mTimeEnd, NULL);
+ mTimeEnd = systemTime();
/* Compute time taken for decode() */
- TIME_DIFF(mTimeStart, mTimeEnd, timeTaken);
+ timeTaken = mTimeEnd - mTimeStart;
- PRINT_TIME("timeTaken=%6d delay=%6d numBytes=%6d", timeTaken, timeDelay,
+ ALOGV("timeTaken=%6lldus delay=%6lldus numBytes=%6d",
+ (long long) (timeTaken / 1000ll), (long long) (timeDelay / 1000ll),
s_dec_op.u4_num_bytes_consumed);
if (s_dec_op.u4_frame_decoded_flag && !mFlushNeeded) {
mFlushNeeded = true;
@@ -693,6 +644,7 @@
handlePortSettingsChange(&portWillReset, width, height);
if (portWillReset) {
resetDecoder();
+ resetPlugin();
return;
}
} else if (mUpdateColorAspects) {
diff --git a/media/libstagefright/codecs/avcdec/SoftAVCDec.h b/media/libstagefright/codecs/avcdec/SoftAVCDec.h
index 2a71188..18b7556 100644
--- a/media/libstagefright/codecs/avcdec/SoftAVCDec.h
+++ b/media/libstagefright/codecs/avcdec/SoftAVCDec.h
@@ -41,14 +41,6 @@
/** Used to remove warnings about unused parameters */
#define UNUSED(x) ((void)(x))
-/** Get time */
-#define GETTIME(a, b) gettimeofday(a, b);
-
-/** Compute difference between start and end */
-#define TIME_DIFF(start, end, diff) \
- diff = (((end).tv_sec - (start).tv_sec) * 1000000) + \
- ((end).tv_usec - (start).tv_usec);
-
struct SoftAVC : public SoftVideoDecoderOMXComponent {
SoftAVC(const char *name, const OMX_CALLBACKTYPE *callbacks,
OMX_PTR appData, OMX_COMPONENTTYPE **component);
@@ -70,8 +62,8 @@
size_t mNumCores; // Number of cores to be uesd by the codec
- struct timeval mTimeStart; // Time at the start of decode()
- struct timeval mTimeEnd; // Time at the end of decode()
+ nsecs_t mTimeStart; // Time at the start of decode()
+ nsecs_t mTimeEnd; // Time at the end of decode()
// Internal buffer to be used to flush out the buffers from decoder
uint8_t *mFlushOutBuffer;
@@ -129,10 +121,9 @@
#define INPUT_DUMP_EXT "h264"
#define GENERATE_FILE_NAMES() { \
- GETTIME(&mTimeStart, NULL); \
strcpy(mInFile, ""); \
- sprintf(mInFile, "%s_%ld.%ld.%s", INPUT_DUMP_PATH, \
- mTimeStart.tv_sec, mTimeStart.tv_usec, \
+ sprintf(mInFile, "%s_%lld.%s", INPUT_DUMP_PATH, \
+ (long long) mTimeStart, \
INPUT_DUMP_EXT); \
}
diff --git a/media/libstagefright/codecs/avcenc/Android.bp b/media/libstagefright/codecs/avcenc/Android.bp
index c766a34..49021a9 100644
--- a/media/libstagefright/codecs/avcenc/Android.bp
+++ b/media/libstagefright/codecs/avcenc/Android.bp
@@ -23,6 +23,10 @@
misc_undefined: [
"signed-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
ldflags: ["-Wl,-Bsymbolic"],
diff --git a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
index 9e7a3be..b1af17b 100644
--- a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
+++ b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
@@ -30,7 +30,6 @@
#include <media/stagefright/Utils.h>
#include <OMX_IndexExt.h>
#include <OMX_VideoExt.h>
-#include <ui/Rect.h>
#include "ih264_typedefs.h"
#include "iv2.h"
@@ -614,6 +613,7 @@
IV_STATUS_T status;
WORD32 level;
uint32_t displaySizeY;
+
CHECK(!mStarted);
OMX_ERRORTYPE errType = OMX_ErrorNone;
@@ -917,6 +917,9 @@
}
}
+ // clear other pointers into the space being free()d
+ mCodecCtx = NULL;
+
mStarted = false;
return OMX_ErrorNone;
@@ -1509,6 +1512,14 @@
return;
}
+void SoftAVC::onReset() {
+ SoftVideoEncoderOMXComponent::onReset();
+
+ if (releaseEncoder() != OMX_ErrorNone) {
+ ALOGW("releaseEncoder failed");
+ }
+}
+
} // namespace android
android::SoftOMXComponent *createSoftOMXComponent(
diff --git a/media/libstagefright/codecs/avcenc/SoftAVCEnc.h b/media/libstagefright/codecs/avcenc/SoftAVCEnc.h
index 4d30ba0..818e4a1 100644
--- a/media/libstagefright/codecs/avcenc/SoftAVCEnc.h
+++ b/media/libstagefright/codecs/avcenc/SoftAVCEnc.h
@@ -136,6 +136,8 @@
protected:
virtual ~SoftAVC();
+ virtual void onReset();
+
private:
enum {
kNumBuffers = 2,
diff --git a/media/libstagefright/codecs/flac/enc/Android.bp b/media/libstagefright/codecs/flac/enc/Android.bp
index 5416949..d1413f6 100644
--- a/media/libstagefright/codecs/flac/enc/Android.bp
+++ b/media/libstagefright/codecs/flac/enc/Android.bp
@@ -15,6 +15,10 @@
"signed-integer-overflow",
"unsigned-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
shared_libs: [
diff --git a/media/libstagefright/codecs/g711/dec/Android.bp b/media/libstagefright/codecs/g711/dec/Android.bp
index 581ed4f..b78b689 100644
--- a/media/libstagefright/codecs/g711/dec/Android.bp
+++ b/media/libstagefright/codecs/g711/dec/Android.bp
@@ -22,5 +22,9 @@
"signed-integer-overflow",
"unsigned-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
}
diff --git a/media/libstagefright/codecs/g711/dec/SoftG711.cpp b/media/libstagefright/codecs/g711/dec/SoftG711.cpp
index 9f7b590..f7c0429 100644
--- a/media/libstagefright/codecs/g711/dec/SoftG711.cpp
+++ b/media/libstagefright/codecs/g711/dec/SoftG711.cpp
@@ -219,7 +219,7 @@
BufferInfo *outInfo = *outQueue.begin();
OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
- if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+ if ((inHeader->nFlags & OMX_BUFFERFLAG_EOS) && inHeader->nFilledLen == 0) {
inQueue.erase(inQueue.begin());
inInfo->mOwnedByUs = false;
notifyEmptyBufferDone(inHeader);
@@ -266,11 +266,15 @@
outHeader->nFilledLen = inHeader->nFilledLen * sizeof(int16_t);
outHeader->nFlags = 0;
- inInfo->mOwnedByUs = false;
- inQueue.erase(inQueue.begin());
- inInfo = NULL;
- notifyEmptyBufferDone(inHeader);
- inHeader = NULL;
+ if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+ inHeader->nFilledLen = 0;
+ } else {
+ inInfo->mOwnedByUs = false;
+ inQueue.erase(inQueue.begin());
+ inInfo = NULL;
+ notifyEmptyBufferDone(inHeader);
+ inHeader = NULL;
+ }
outInfo->mOwnedByUs = false;
outQueue.erase(outQueue.begin());
diff --git a/media/libstagefright/codecs/gsm/dec/Android.bp b/media/libstagefright/codecs/gsm/dec/Android.bp
index 03b4f88..8e86ad6 100644
--- a/media/libstagefright/codecs/gsm/dec/Android.bp
+++ b/media/libstagefright/codecs/gsm/dec/Android.bp
@@ -16,6 +16,10 @@
"signed-integer-overflow",
"unsigned-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
shared_libs: [
diff --git a/media/libstagefright/codecs/gsm/dec/SoftGSM.cpp b/media/libstagefright/codecs/gsm/dec/SoftGSM.cpp
index 04d5a33..11999b4 100644
--- a/media/libstagefright/codecs/gsm/dec/SoftGSM.cpp
+++ b/media/libstagefright/codecs/gsm/dec/SoftGSM.cpp
@@ -202,7 +202,7 @@
BufferInfo *outInfo = *outQueue.begin();
OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
- if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+ if ((inHeader->nFlags & OMX_BUFFERFLAG_EOS) && inHeader->nFilledLen == 0) {
inQueue.erase(inQueue.begin());
inInfo->mOwnedByUs = false;
notifyEmptyBufferDone(inHeader);
@@ -246,11 +246,15 @@
outHeader->nFilledLen = n * sizeof(int16_t);
outHeader->nFlags = 0;
- inInfo->mOwnedByUs = false;
- inQueue.erase(inQueue.begin());
- inInfo = NULL;
- notifyEmptyBufferDone(inHeader);
- inHeader = NULL;
+ if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+ inHeader->nFilledLen = 0;
+ } else {
+ inInfo->mOwnedByUs = false;
+ inQueue.erase(inQueue.begin());
+ inInfo = NULL;
+ notifyEmptyBufferDone(inHeader);
+ inHeader = NULL;
+ }
outInfo->mOwnedByUs = false;
outQueue.erase(outQueue.begin());
diff --git a/media/libstagefright/codecs/hevcdec/Android.bp b/media/libstagefright/codecs/hevcdec/Android.bp
index 6e4df50..cd75c97 100644
--- a/media/libstagefright/codecs/hevcdec/Android.bp
+++ b/media/libstagefright/codecs/hevcdec/Android.bp
@@ -15,6 +15,10 @@
misc_undefined: [
"signed-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
shared_libs: [
diff --git a/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp b/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
index 5c70387..2745087 100644
--- a/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
+++ b/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
@@ -48,14 +48,6 @@
(IVD_CONTROL_API_COMMAND_TYPE_T)IHEVCD_CXA_CMD_CTL_SET_NUM_CORES
static const CodecProfileLevel kProfileLevels[] = {
- { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel1 },
- { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel2 },
- { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel21 },
- { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel3 },
- { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel31 },
- { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel4 },
- { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel41 },
- { OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel5 },
{ OMX_VIDEO_HEVCProfileMain, OMX_VIDEO_HEVCMainTierLevel51 },
};
@@ -643,6 +635,7 @@
if (portWillReset) {
resetDecoder();
+ resetPlugin();
return;
}
} else if (mUpdateColorAspects) {
diff --git a/media/libstagefright/codecs/m4v_h263/dec/Android.bp b/media/libstagefright/codecs/m4v_h263/dec/Android.bp
index af75420..04ea075 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/Android.bp
+++ b/media/libstagefright/codecs/m4v_h263/dec/Android.bp
@@ -55,6 +55,10 @@
misc_undefined: [
"signed-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
}
@@ -94,5 +98,9 @@
misc_undefined: [
"signed-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
}
diff --git a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
index 1dd631a..411a251 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
@@ -255,13 +255,28 @@
mSignalledError = true;
return;
}
+
+ // Need to check if header contains new info, e.g., width/height, etc.
+ VopHeaderInfo header_info;
+ uint8_t *bitstreamTmp = bitstream;
+ if (PVDecodeVopHeader(
+ mHandle, &bitstreamTmp, ×tamp, &tmp,
+ &header_info, &useExtTimestamp,
+ outHeader->pBuffer) != PV_TRUE) {
+ ALOGE("failed to decode vop header.");
+
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+ mSignalledError = true;
+ return;
+ }
+ if (handlePortSettingsChange()) {
+ return;
+ }
+
// The PV decoder is lying to us, sometimes it'll claim to only have
// consumed a subset of the buffer when it clearly consumed all of it.
// ignore whatever it says...
- if (PVDecodeVideoFrame(
- mHandle, &bitstream, ×tamp, &tmp,
- &useExtTimestamp,
- outHeader->pBuffer) != PV_TRUE) {
+ if (PVDecodeVopBody(mHandle, &tmp) != PV_TRUE) {
ALOGE("failed to decode video frame.");
notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
diff --git a/media/libstagefright/codecs/m4v_h263/dec/src/mb_motion_comp.cpp b/media/libstagefright/codecs/m4v_h263/dec/src/mb_motion_comp.cpp
index fbc7be1..877723d 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/src/mb_motion_comp.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/src/mb_motion_comp.cpp
@@ -15,6 +15,10 @@
* and limitations under the License.
* -------------------------------------------------------------------
*/
+
+#define LOG_TAG "m4v_h263"
+#include <log/log.h>
+
/*
------------------------------------------------------------------------------
INPUT AND OUTPUT DEFINITIONS
@@ -236,6 +240,11 @@
/* Pointer to previous luminance frame */
c_prev = prev->yChan;
+ if (!c_prev) {
+ ALOGE("b/35269635");
+ android_errorWriteLog(0x534e4554, "35269635");
+ return;
+ }
pred_block = video->mblock->pred_block;
@@ -574,7 +583,14 @@
/* zero motion compensation for previous frame */
/*mby*width + mbx;*/
- c_prev = prev->yChan + offset;
+ c_prev = prev->yChan;
+ if (!c_prev) {
+ ALOGE("b/35269635");
+ android_errorWriteLog(0x534e4554, "35269635");
+ return;
+ }
+ c_prev += offset;
+
/*by*width_uv + bx;*/
cu_prev = prev->uChan + (offset >> 2) + (xpos >> 2);
/*by*width_uv + bx;*/
diff --git a/media/libstagefright/codecs/m4v_h263/dec/src/pvdec_api.cpp b/media/libstagefright/codecs/m4v_h263/dec/src/pvdec_api.cpp
index c1720c6..8d5d071 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/src/pvdec_api.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/src/pvdec_api.cpp
@@ -15,6 +15,8 @@
* and limitations under the License.
* -------------------------------------------------------------------
*/
+#define LOG_TAG "pvdec_api"
+#include <log/log.h>
#include "mp4dec_lib.h"
#include "vlc_decode.h"
#include "bitstream.h"
@@ -1335,6 +1337,11 @@
}
}
+ if (!video->prevVop->yChan) {
+ ALOGE("b/35269635");
+ android_errorWriteLog(0x534e4554, "35269635");
+ return PV_FALSE;
+ }
oscl_memcpy(currVop->yChan, video->prevVop->yChan, (decCtrl->size*3) / 2);
video->prevVop = prevVop;
diff --git a/media/libstagefright/codecs/m4v_h263/dec/src/vop.cpp b/media/libstagefright/codecs/m4v_h263/dec/src/vop.cpp
index 60c79a6..f18f789 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/src/vop.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/src/vop.cpp
@@ -15,6 +15,8 @@
* and limitations under the License.
* -------------------------------------------------------------------
*/
+#include "log/log.h"
+
#include "mp4dec_lib.h"
#include "bitstream.h"
#include "vlc_decode.h"
@@ -1336,8 +1338,7 @@
}
tmpvar = BitstreamReadBits16(stream, 9);
- video->displayWidth = (tmpvar + 1) << 2;
- video->width = (video->displayWidth + 15) & -16;
+ int tmpDisplayWidth = (tmpvar + 1) << 2;
/* marker bit */
if (!BitstreamRead1Bits(stream))
{
@@ -1350,14 +1351,21 @@
status = PV_FAIL;
goto return_point;
}
- video->displayHeight = tmpvar << 2;
- video->height = (video->displayHeight + 15) & -16;
+ int tmpDisplayHeight = tmpvar << 2;
+ int tmpHeight = (tmpDisplayHeight + 15) & -16;
+ int tmpWidth = (tmpDisplayWidth + 15) & -16;
- if (video->height * video->width > video->size)
+ if (tmpHeight * tmpWidth > video->size)
{
+ // This is just possibly "b/37079296".
+ ALOGE("b/37079296");
status = PV_FAIL;
goto return_point;
}
+ video->displayWidth = tmpDisplayWidth;
+ video->width = tmpWidth;
+ video->displayHeight = tmpDisplayHeight;
+ video->height = tmpHeight;
video->nTotalMB = video->width / MB_SIZE * video->height / MB_SIZE;
diff --git a/media/libstagefright/codecs/m4v_h263/enc/Android.bp b/media/libstagefright/codecs/m4v_h263/enc/Android.bp
index 4a127f3..da5b162 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/Android.bp
+++ b/media/libstagefright/codecs/m4v_h263/enc/Android.bp
@@ -41,6 +41,10 @@
misc_undefined: [
"signed-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
}
@@ -81,6 +85,10 @@
misc_undefined: [
"signed-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
}
@@ -104,6 +112,10 @@
misc_undefined: [
"signed-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
static_libs: ["libstagefright_m4vh263enc"],
diff --git a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
index d5a26d3..6d4cb69 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
+++ b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
@@ -30,8 +30,6 @@
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
-#include <ui/Rect.h>
-#include <ui/GraphicBufferMapper.h>
#include "SoftMPEG4Encoder.h"
diff --git a/media/libstagefright/codecs/m4v_h263/enc/src/mp4enc_api.cpp b/media/libstagefright/codecs/m4v_h263/enc/src/mp4enc_api.cpp
index c2b7c8d..7ab8f45 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/src/mp4enc_api.cpp
+++ b/media/libstagefright/codecs/m4v_h263/enc/src/mp4enc_api.cpp
@@ -773,7 +773,7 @@
|| (size_t)(size + (size >> 1)) > SIZE_MAX / sizeof(PIXEL)) {
goto CLEAN_UP;
}
- video->currVop->yChan = (PIXEL *)M4VENC_MALLOC(sizeof(PIXEL) * (size + (size >> 1))); /* Memory for currVop Y */
+ video->currVop->allChan = video->currVop->yChan = (PIXEL *)M4VENC_MALLOC(sizeof(PIXEL) * (size + (size >> 1))); /* Memory for currVop Y */
if (video->currVop->yChan == NULL) goto CLEAN_UP;
video->currVop->uChan = video->currVop->yChan + size;/* Memory for currVop U */
video->currVop->vChan = video->currVop->uChan + (size >> 2);/* Memory for currVop V */
@@ -791,7 +791,7 @@
video->prevBaseVop = (Vop *) M4VENC_MALLOC(sizeof(Vop)); /* Memory for Previous Base Vop */
if (video->prevBaseVop == NULL) goto CLEAN_UP;
- video->prevBaseVop->yChan = (PIXEL *) M4VENC_MALLOC(sizeof(PIXEL) * (size + (size >> 1))); /* Memory for prevBaseVop Y */
+ video->prevBaseVop->allChan = video->prevBaseVop->yChan = (PIXEL *) M4VENC_MALLOC(sizeof(PIXEL) * (size + (size >> 1))); /* Memory for prevBaseVop Y */
if (video->prevBaseVop->yChan == NULL) goto CLEAN_UP;
video->prevBaseVop->uChan = video->prevBaseVop->yChan + size; /* Memory for prevBaseVop U */
video->prevBaseVop->vChan = video->prevBaseVop->uChan + (size >> 2); /* Memory for prevBaseVop V */
@@ -808,7 +808,7 @@
{
video->nextBaseVop = (Vop *) M4VENC_MALLOC(sizeof(Vop)); /* Memory for Next Base Vop */
if (video->nextBaseVop == NULL) goto CLEAN_UP;
- video->nextBaseVop->yChan = (PIXEL *) M4VENC_MALLOC(sizeof(PIXEL) * (size + (size >> 1))); /* Memory for nextBaseVop Y */
+ video->nextBaseVop->allChan = video->nextBaseVop->yChan = (PIXEL *) M4VENC_MALLOC(sizeof(PIXEL) * (size + (size >> 1))); /* Memory for nextBaseVop Y */
if (video->nextBaseVop->yChan == NULL) goto CLEAN_UP;
video->nextBaseVop->uChan = video->nextBaseVop->yChan + size; /* Memory for nextBaseVop U */
video->nextBaseVop->vChan = video->nextBaseVop->uChan + (size >> 2); /* Memory for nextBaseVop V */
@@ -825,7 +825,7 @@
{
video->prevEnhanceVop = (Vop *) M4VENC_MALLOC(sizeof(Vop)); /* Memory for Previous Enhancement Vop */
if (video->prevEnhanceVop == NULL) goto CLEAN_UP;
- video->prevEnhanceVop->yChan = (PIXEL *) M4VENC_MALLOC(sizeof(PIXEL) * (size + (size >> 1))); /* Memory for Previous Ehancement Y */
+ video->prevEnhanceVop->allChan = video->prevEnhanceVop->yChan = (PIXEL *) M4VENC_MALLOC(sizeof(PIXEL) * (size + (size >> 1))); /* Memory for Previous Ehancement Y */
if (video->prevEnhanceVop->yChan == NULL) goto CLEAN_UP;
video->prevEnhanceVop->uChan = video->prevEnhanceVop->yChan + size; /* Memory for Previous Enhancement U */
video->prevEnhanceVop->vChan = video->prevEnhanceVop->uChan + (size >> 2); /* Memory for Previous Enhancement V */
@@ -1196,39 +1196,35 @@
if (video->currVop)
{
- if (video->currVop->yChan)
+ if (video->currVop->allChan)
{
- video->currVop->yChan -= offset;
- M4VENC_FREE(video->currVop->yChan);
+ M4VENC_FREE(video->currVop->allChan);
}
M4VENC_FREE(video->currVop);
}
if (video->nextBaseVop)
{
- if (video->nextBaseVop->yChan)
+ if (video->nextBaseVop->allChan)
{
- video->nextBaseVop->yChan -= offset;
- M4VENC_FREE(video->nextBaseVop->yChan);
+ M4VENC_FREE(video->nextBaseVop->allChan);
}
M4VENC_FREE(video->nextBaseVop);
}
if (video->prevBaseVop)
{
- if (video->prevBaseVop->yChan)
+ if (video->prevBaseVop->allChan)
{
- video->prevBaseVop->yChan -= offset;
- M4VENC_FREE(video->prevBaseVop->yChan);
+ M4VENC_FREE(video->prevBaseVop->allChan);
}
M4VENC_FREE(video->prevBaseVop);
}
if (video->prevEnhanceVop)
{
- if (video->prevEnhanceVop->yChan)
+ if (video->prevEnhanceVop->allChan)
{
- video->prevEnhanceVop->yChan -= offset;
- M4VENC_FREE(video->prevEnhanceVop->yChan);
+ M4VENC_FREE(video->prevEnhanceVop->allChan);
}
M4VENC_FREE(video->prevEnhanceVop);
}
diff --git a/media/libstagefright/codecs/m4v_h263/enc/src/mp4lib_int.h b/media/libstagefright/codecs/m4v_h263/enc/src/mp4lib_int.h
index 3bc9421..b05099c 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/src/mp4lib_int.h
+++ b/media/libstagefright/codecs/m4v_h263/enc/src/mp4lib_int.h
@@ -39,6 +39,7 @@
typedef struct tagVOP
{
+ PIXEL *allChan; /* [yuv]Chan point into this buffer */
PIXEL *yChan; /* The Y component */
PIXEL *uChan; /* The U component */
PIXEL *vChan; /* The V component */
diff --git a/media/libstagefright/codecs/mp3dec/Android.bp b/media/libstagefright/codecs/mp3dec/Android.bp
index 8b2a761..0d0a2c6 100644
--- a/media/libstagefright/codecs/mp3dec/Android.bp
+++ b/media/libstagefright/codecs/mp3dec/Android.bp
@@ -57,6 +57,10 @@
misc_undefined: [
"signed-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
include_dirs: ["frameworks/av/media/libstagefright/include"],
@@ -91,6 +95,10 @@
misc_undefined: [
"signed-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
shared_libs: [
@@ -123,6 +131,10 @@
misc_undefined: [
"signed-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
static_libs: [
diff --git a/media/libstagefright/codecs/mpeg2dec/Android.bp b/media/libstagefright/codecs/mpeg2dec/Android.bp
index 666e08f..0144581 100644
--- a/media/libstagefright/codecs/mpeg2dec/Android.bp
+++ b/media/libstagefright/codecs/mpeg2dec/Android.bp
@@ -25,5 +25,9 @@
misc_undefined: [
"signed-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
}
diff --git a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp
index 5ed037a..6e70ded 100644
--- a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp
+++ b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp
@@ -47,14 +47,8 @@
(IVD_CONTROL_API_COMMAND_TYPE_T)IMPEG2D_CMD_CTL_SET_NUM_CORES
static const CodecProfileLevel kProfileLevels[] = {
- { OMX_VIDEO_MPEG2ProfileSimple, OMX_VIDEO_MPEG2LevelLL },
- { OMX_VIDEO_MPEG2ProfileSimple, OMX_VIDEO_MPEG2LevelML },
- { OMX_VIDEO_MPEG2ProfileSimple, OMX_VIDEO_MPEG2LevelH14 },
{ OMX_VIDEO_MPEG2ProfileSimple, OMX_VIDEO_MPEG2LevelHL },
- { OMX_VIDEO_MPEG2ProfileMain , OMX_VIDEO_MPEG2LevelLL },
- { OMX_VIDEO_MPEG2ProfileMain , OMX_VIDEO_MPEG2LevelML },
- { OMX_VIDEO_MPEG2ProfileMain , OMX_VIDEO_MPEG2LevelH14 },
{ OMX_VIDEO_MPEG2ProfileMain , OMX_VIDEO_MPEG2LevelHL },
};
@@ -68,6 +62,7 @@
kProfileLevels, ARRAY_SIZE(kProfileLevels),
320 /* width */, 240 /* height */, callbacks,
appData, component),
+ mCodecCtx(NULL),
mMemRecords(NULL),
mFlushOutBuffer(NULL),
mOmxColorFormat(OMX_COLOR_FormatYUV420Planar),
@@ -75,18 +70,22 @@
mNewWidth(mWidth),
mNewHeight(mHeight),
mChangingResolution(false),
+ mSignalledError(false),
mStride(mWidth) {
initPorts(kNumBuffers, INPUT_BUF_SIZE, kNumBuffers, CODEC_MIME_TYPE);
// If input dump is enabled, then open create an empty file
GENERATE_FILE_NAMES();
CREATE_DUMP_FILE(mInFile);
-
- CHECK_EQ(initDecoder(), (status_t)OK);
}
SoftMPEG2::~SoftMPEG2() {
- CHECK_EQ(deInitDecoder(), (status_t)OK);
+ if (OK != deInitDecoder()) {
+ ALOGE("Failed to deinit decoder");
+ notify(OMX_EventError, OMX_ErrorUnsupportedSetting, 0, NULL);
+ mSignalledError = true;
+ return;
+ }
}
@@ -204,6 +203,7 @@
setNumCores();
mStride = 0;
+ mSignalledError = false;
return OK;
}
@@ -433,6 +433,7 @@
mInitNeeded = true;
mChangingResolution = false;
+ mCodecCtx = NULL;
return OK;
}
@@ -444,10 +445,11 @@
ret = initDecoder();
if (OK != ret) {
- ALOGE("Create failure");
+ ALOGE("Failed to initialize decoder");
deInitDecoder();
- return NO_MEMORY;
+ return ret;
}
+ mSignalledError = false;
return OK;
}
@@ -586,10 +588,22 @@
void SoftMPEG2::onQueueFilled(OMX_U32 portIndex) {
UNUSED(portIndex);
+ if (mSignalledError) {
+ return;
+ }
if (mOutputPortSettingsChange != NONE) {
return;
}
+ if (NULL == mCodecCtx) {
+ if (OK != initDecoder()) {
+ ALOGE("Failed to initialize decoder");
+ notify(OMX_EventError, OMX_ErrorUnsupportedSetting, 0, NULL);
+ mSignalledError = true;
+ return;
+ }
+ }
+
List<BufferInfo *> &inQueue = getPortQueue(kInputPortIndex);
List<BufferInfo *> &outQueue = getPortQueue(kOutputPortIndex);
@@ -642,7 +656,9 @@
bool portWillReset = false;
handlePortSettingsChange(&portWillReset, mNewWidth, mNewHeight);
- CHECK_EQ(reInitDecoder(), (status_t)OK);
+ if (OK != reInitDecoder()) {
+ ALOGE("Failed to reinitialize decoder");
+ }
return;
}
@@ -715,7 +731,10 @@
bool portWillReset = false;
handlePortSettingsChange(&portWillReset, s_dec_op.u4_pic_wd, s_dec_op.u4_pic_ht);
- CHECK_EQ(reInitDecoder(), (status_t)OK);
+ if (OK != reInitDecoder()) {
+ ALOGE("Failed to reinitialize decoder");
+ return;
+ }
if (setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, timeStampIx)) {
ivdec_api_function(mCodecCtx, (void *)&s_dec_ip, (void *)&s_dec_op);
@@ -758,6 +777,7 @@
if (portWillReset) {
resetDecoder();
+ resetPlugin();
return;
}
} else if (mUpdateColorAspects) {
diff --git a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.h b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.h
index 1921a23..6729a54 100644
--- a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.h
+++ b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.h
@@ -106,6 +106,7 @@
// codec. So the codec is switching to decode the new resolution.
bool mChangingResolution;
bool mFlushNeeded;
+ bool mSignalledError;
bool mWaitForI;
size_t mStride;
diff --git a/media/libstagefright/codecs/on2/dec/Android.bp b/media/libstagefright/codecs/on2/dec/Android.bp
index 24539c0..c4242c2 100644
--- a/media/libstagefright/codecs/on2/dec/Android.bp
+++ b/media/libstagefright/codecs/on2/dec/Android.bp
@@ -25,5 +25,9 @@
"signed-integer-overflow",
"unsigned-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
}
diff --git a/media/libstagefright/codecs/on2/enc/Android.bp b/media/libstagefright/codecs/on2/enc/Android.bp
index 6c63fc9..114c1be 100644
--- a/media/libstagefright/codecs/on2/enc/Android.bp
+++ b/media/libstagefright/codecs/on2/enc/Android.bp
@@ -1,7 +1,11 @@
cc_library_shared {
name: "libstagefright_soft_vpxenc",
- srcs: ["SoftVPXEncoder.cpp"],
+ srcs: [
+ "SoftVPXEncoder.cpp",
+ "SoftVP8Encoder.cpp",
+ "SoftVP9Encoder.cpp",
+ ],
include_dirs: [
"frameworks/av/media/libstagefright/include",
@@ -13,6 +17,10 @@
"signed-integer-overflow",
"unsigned-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
static_libs: ["libvpx"],
diff --git a/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.cpp
new file mode 100644
index 0000000..04737a9
--- /dev/null
+++ b/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.cpp
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "SoftVP8Encoder"
+#include "SoftVP8Encoder.h"
+
+#include <utils/Log.h>
+#include <utils/misc.h>
+
+#include <media/hardware/HardwareAPI.h>
+#include <media/hardware/MetadataBufferType.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaDefs.h>
+
+#ifndef INT32_MAX
+#define INT32_MAX 2147483647
+#endif
+
+namespace android {
+
+static const CodecProfileLevel kVp8ProfileLevels[] = {
+ { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version0 },
+ { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version1 },
+ { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version2 },
+ { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version3 },
+};
+
+SoftVP8Encoder::SoftVP8Encoder(const char *name,
+ const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData,
+ OMX_COMPONENTTYPE **component)
+ : SoftVPXEncoder(
+ name, callbacks, appData, component, "video_encoder.vp8",
+ OMX_VIDEO_CodingVP8, MEDIA_MIMETYPE_VIDEO_VP8, 2,
+ kVp8ProfileLevels, NELEM(kVp8ProfileLevels)),
+ mDCTPartitions(0),
+ mLevel(OMX_VIDEO_VP8Level_Version0) {
+}
+
+void SoftVP8Encoder::setCodecSpecificInterface() {
+ mCodecInterface = vpx_codec_vp8_cx();
+}
+
+void SoftVP8Encoder::setCodecSpecificConfiguration() {
+ switch (mLevel) {
+ case OMX_VIDEO_VP8Level_Version0:
+ mCodecConfiguration->g_profile = 0;
+ break;
+
+ case OMX_VIDEO_VP8Level_Version1:
+ mCodecConfiguration->g_profile = 1;
+ break;
+
+ case OMX_VIDEO_VP8Level_Version2:
+ mCodecConfiguration->g_profile = 2;
+ break;
+
+ case OMX_VIDEO_VP8Level_Version3:
+ mCodecConfiguration->g_profile = 3;
+ break;
+
+ default:
+ mCodecConfiguration->g_profile = 0;
+ }
+}
+
+vpx_codec_err_t SoftVP8Encoder::setCodecSpecificControls() {
+ vpx_codec_err_t codec_return = vpx_codec_control(mCodecContext,
+ VP8E_SET_TOKEN_PARTITIONS,
+ mDCTPartitions);
+ if (codec_return != VPX_CODEC_OK) {
+ ALOGE("Error setting dct partitions for vpx encoder.");
+ }
+ return codec_return;
+}
+
+OMX_ERRORTYPE SoftVP8Encoder::internalGetParameter(OMX_INDEXTYPE index,
+ OMX_PTR param) {
+ // can include extension index OMX_INDEXEXTTYPE
+ const int32_t indexFull = index;
+
+ switch (indexFull) {
+ case OMX_IndexParamVideoVp8:
+ return internalGetVp8Params(
+ (OMX_VIDEO_PARAM_VP8TYPE *)param);
+
+ default:
+ return SoftVPXEncoder::internalGetParameter(index, param);
+ }
+}
+
+OMX_ERRORTYPE SoftVP8Encoder::internalSetParameter(OMX_INDEXTYPE index,
+ const OMX_PTR param) {
+ // can include extension index OMX_INDEXEXTTYPE
+ const int32_t indexFull = index;
+
+ switch (indexFull) {
+ case OMX_IndexParamVideoVp8:
+ return internalSetVp8Params(
+ (const OMX_VIDEO_PARAM_VP8TYPE *)param);
+
+ default:
+ return SoftVPXEncoder::internalSetParameter(index, param);
+ }
+}
+
+OMX_ERRORTYPE SoftVP8Encoder::internalGetVp8Params(
+ OMX_VIDEO_PARAM_VP8TYPE* vp8Params) {
+ if (vp8Params->nPortIndex != kOutputPortIndex) {
+ return OMX_ErrorUnsupportedIndex;
+ }
+
+ vp8Params->eProfile = OMX_VIDEO_VP8ProfileMain;
+ vp8Params->eLevel = mLevel;
+ vp8Params->bErrorResilientMode = mErrorResilience;
+ vp8Params->nDCTPartitions = mDCTPartitions;
+ return OMX_ErrorNone;
+}
+
+OMX_ERRORTYPE SoftVP8Encoder::internalSetVp8Params(
+ const OMX_VIDEO_PARAM_VP8TYPE* vp8Params) {
+ if (vp8Params->nPortIndex != kOutputPortIndex) {
+ return OMX_ErrorUnsupportedIndex;
+ }
+
+ if (vp8Params->eProfile != OMX_VIDEO_VP8ProfileMain) {
+ return OMX_ErrorBadParameter;
+ }
+
+ if (vp8Params->eLevel == OMX_VIDEO_VP8Level_Version0 ||
+ vp8Params->eLevel == OMX_VIDEO_VP8Level_Version1 ||
+ vp8Params->eLevel == OMX_VIDEO_VP8Level_Version2 ||
+ vp8Params->eLevel == OMX_VIDEO_VP8Level_Version3) {
+ mLevel = vp8Params->eLevel;
+ } else {
+ return OMX_ErrorBadParameter;
+ }
+
+ mErrorResilience = vp8Params->bErrorResilientMode;
+ if (vp8Params->nDCTPartitions <= kMaxDCTPartitions) {
+ mDCTPartitions = vp8Params->nDCTPartitions;
+ } else {
+ return OMX_ErrorBadParameter;
+ }
+ return OMX_ErrorNone;
+}
+
+} // namespace android
diff --git a/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.h b/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.h
new file mode 100644
index 0000000..b4904bfd
--- /dev/null
+++ b/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SOFT_VP8_ENCODER_H_
+
+#define SOFT_VP8_ENCODER_H_
+
+#include "SoftVPXEncoder.h"
+
+#include <OMX_VideoExt.h>
+#include <OMX_IndexExt.h>
+
+#include <hardware/gralloc.h>
+
+#include "vpx/vpx_encoder.h"
+#include "vpx/vpx_codec.h"
+#include "vpx/vp8cx.h"
+
+namespace android {
+
+// Exposes a vp8 encoder as an OMX Component
+//
+// In addition to the base class settings, Only following encoder settings are
+// available:
+// - token partitioning
+struct SoftVP8Encoder : public SoftVPXEncoder {
+ SoftVP8Encoder(const char *name,
+ const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData,
+ OMX_COMPONENTTYPE **component);
+
+protected:
+ // Returns current values for requested OMX
+ // parameters
+ virtual OMX_ERRORTYPE internalGetParameter(
+ OMX_INDEXTYPE index, OMX_PTR param);
+
+ // Validates, extracts and stores relevant OMX
+ // parameters
+ virtual OMX_ERRORTYPE internalSetParameter(
+ OMX_INDEXTYPE index, const OMX_PTR param);
+
+ // Populates |mCodecInterface| with codec specific settings.
+ virtual void setCodecSpecificInterface();
+
+ // Sets codec specific configuration.
+ virtual void setCodecSpecificConfiguration();
+
+ // Initializes codec specific encoder settings.
+ virtual vpx_codec_err_t setCodecSpecificControls();
+
+ // Gets vp8 specific parameters.
+ OMX_ERRORTYPE internalGetVp8Params(
+ OMX_VIDEO_PARAM_VP8TYPE* vp8Params);
+
+ // Handles vp8 specific parameters.
+ OMX_ERRORTYPE internalSetVp8Params(
+ const OMX_VIDEO_PARAM_VP8TYPE* vp8Params);
+
+private:
+ // Max value supported for DCT partitions
+ static const uint32_t kMaxDCTPartitions = 3;
+
+ // vp8 specific configuration parameter
+ // that enables token partitioning of
+ // the stream into substreams
+ int32_t mDCTPartitions;
+
+ // Encoder profile corresponding to OMX level parameter
+ //
+ // The inconsistency in the naming is caused by
+ // OMX spec referring vpx profiles (g_profile)
+ // as "levels" whereas using the name "profile" for
+ // something else.
+ OMX_VIDEO_VP8LEVELTYPE mLevel;
+
+ DISALLOW_EVIL_CONSTRUCTORS(SoftVP8Encoder);
+};
+
+} // namespace android
+
+#endif // SOFT_VP8_ENCODER_H_
diff --git a/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.cpp
new file mode 100644
index 0000000..4c7290d
--- /dev/null
+++ b/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.cpp
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "SoftVP9Encoder"
+#include "SoftVP9Encoder.h"
+
+#include <utils/Log.h>
+#include <utils/misc.h>
+
+#include <media/hardware/HardwareAPI.h>
+#include <media/hardware/MetadataBufferType.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaDefs.h>
+
+namespace android {
+
+static const CodecProfileLevel kVp9ProfileLevels[] = {
+ { OMX_VIDEO_VP9Profile0, OMX_VIDEO_VP9Level41 },
+};
+
+SoftVP9Encoder::SoftVP9Encoder(
+ const char *name, const OMX_CALLBACKTYPE *callbacks, OMX_PTR appData,
+ OMX_COMPONENTTYPE **component)
+ : SoftVPXEncoder(
+ name, callbacks, appData, component, "video_encoder.vp9",
+ OMX_VIDEO_CodingVP9, MEDIA_MIMETYPE_VIDEO_VP9, 4,
+ kVp9ProfileLevels, NELEM(kVp9ProfileLevels)),
+ mLevel(OMX_VIDEO_VP9Level1),
+ mTileColumns(0),
+ mFrameParallelDecoding(OMX_FALSE) {
+}
+
+void SoftVP9Encoder::setCodecSpecificInterface() {
+ mCodecInterface = vpx_codec_vp9_cx();
+}
+
+void SoftVP9Encoder::setCodecSpecificConfiguration() {
+ mCodecConfiguration->g_profile = 0;
+}
+
+vpx_codec_err_t SoftVP9Encoder::setCodecSpecificControls() {
+ vpx_codec_err_t codecReturn = vpx_codec_control(
+ mCodecContext, VP9E_SET_TILE_COLUMNS, mTileColumns);
+ if (codecReturn != VPX_CODEC_OK) {
+ ALOGE("Error setting VP9E_SET_TILE_COLUMNS to %d. vpx_codec_control() "
+ "returned %d", mTileColumns, codecReturn);
+ return codecReturn;
+ }
+ codecReturn = vpx_codec_control(
+ mCodecContext, VP9E_SET_FRAME_PARALLEL_DECODING,
+ mFrameParallelDecoding);
+ if (codecReturn != VPX_CODEC_OK) {
+ ALOGE("Error setting VP9E_SET_FRAME_PARALLEL_DECODING to %d."
+ "vpx_codec_control() returned %d", mFrameParallelDecoding,
+ codecReturn);
+ return codecReturn;
+ }
+ // For VP9, we always set CPU_USED to 8 (because the realtime default is 0
+ // which is too slow).
+ codecReturn = vpx_codec_control(mCodecContext, VP8E_SET_CPUUSED, 8);
+ if (codecReturn != VPX_CODEC_OK) {
+ ALOGE("Error setting VP8E_SET_CPUUSED to 8. vpx_codec_control() "
+ "returned %d", codecReturn);
+ return codecReturn;
+ }
+ return codecReturn;
+}
+
+OMX_ERRORTYPE SoftVP9Encoder::internalGetParameter(
+ OMX_INDEXTYPE index, OMX_PTR param) {
+ // can include extension index OMX_INDEXEXTTYPE
+ const int32_t indexFull = index;
+
+ switch (indexFull) {
+ case OMX_IndexParamVideoVp9:
+ return internalGetVp9Params(
+ (OMX_VIDEO_PARAM_VP9TYPE *)param);
+
+ default:
+ return SoftVPXEncoder::internalGetParameter(index, param);
+ }
+}
+
+OMX_ERRORTYPE SoftVP9Encoder::internalSetParameter(
+ OMX_INDEXTYPE index, const OMX_PTR param) {
+ // can include extension index OMX_INDEXEXTTYPE
+ const int32_t indexFull = index;
+
+ switch (indexFull) {
+ case OMX_IndexParamVideoVp9:
+ return internalSetVp9Params(
+ (const OMX_VIDEO_PARAM_VP9TYPE *)param);
+
+ default:
+ return SoftVPXEncoder::internalSetParameter(index, param);
+ }
+}
+
+OMX_ERRORTYPE SoftVP9Encoder::internalGetVp9Params(
+ OMX_VIDEO_PARAM_VP9TYPE *vp9Params) {
+ if (vp9Params->nPortIndex != kOutputPortIndex) {
+ return OMX_ErrorUnsupportedIndex;
+ }
+
+ vp9Params->eProfile = OMX_VIDEO_VP9Profile0;
+ vp9Params->eLevel = mLevel;
+ vp9Params->bErrorResilientMode = mErrorResilience;
+ vp9Params->nTileColumns = mTileColumns;
+ vp9Params->bEnableFrameParallelDecoding = mFrameParallelDecoding;
+ return OMX_ErrorNone;
+}
+
+OMX_ERRORTYPE SoftVP9Encoder::internalSetVp9Params(
+ const OMX_VIDEO_PARAM_VP9TYPE *vp9Params) {
+ if (vp9Params->nPortIndex != kOutputPortIndex) {
+ return OMX_ErrorUnsupportedIndex;
+ }
+
+ if (vp9Params->eProfile != OMX_VIDEO_VP9Profile0) {
+ return OMX_ErrorBadParameter;
+ }
+
+ if (vp9Params->eLevel == OMX_VIDEO_VP9Level1 ||
+ vp9Params->eLevel == OMX_VIDEO_VP9Level11 ||
+ vp9Params->eLevel == OMX_VIDEO_VP9Level2 ||
+ vp9Params->eLevel == OMX_VIDEO_VP9Level21 ||
+ vp9Params->eLevel == OMX_VIDEO_VP9Level3 ||
+ vp9Params->eLevel == OMX_VIDEO_VP9Level31 ||
+ vp9Params->eLevel == OMX_VIDEO_VP9Level4 ||
+ vp9Params->eLevel == OMX_VIDEO_VP9Level41 ||
+ vp9Params->eLevel == OMX_VIDEO_VP9Level5 ||
+ vp9Params->eLevel == OMX_VIDEO_VP9Level51 ||
+ vp9Params->eLevel == OMX_VIDEO_VP9Level52 ||
+ vp9Params->eLevel == OMX_VIDEO_VP9Level6 ||
+ vp9Params->eLevel == OMX_VIDEO_VP9Level61 ||
+ vp9Params->eLevel == OMX_VIDEO_VP9Level62) {
+ mLevel = vp9Params->eLevel;
+ } else {
+ return OMX_ErrorBadParameter;
+ }
+
+ mErrorResilience = vp9Params->bErrorResilientMode;
+ mTileColumns = vp9Params->nTileColumns;
+ mFrameParallelDecoding = vp9Params->bEnableFrameParallelDecoding;
+ return OMX_ErrorNone;
+}
+
+} // namespace android
diff --git a/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.h b/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.h
new file mode 100644
index 0000000..85df69a
--- /dev/null
+++ b/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SOFT_VP9_ENCODER_H_
+
+#define SOFT_VP9_ENCODER_H_
+
+#include "SoftVPXEncoder.h"
+
+#include <OMX_VideoExt.h>
+#include <OMX_IndexExt.h>
+
+#include <hardware/gralloc.h>
+
+#include "vpx/vpx_encoder.h"
+#include "vpx/vpx_codec.h"
+#include "vpx/vp8cx.h"
+
+namespace android {
+
+// Exposes a VP9 encoder as an OMX Component
+//
+// In addition to the base class settings, Only following encoder settings are
+// available:
+// - tile rows
+// - tile columns
+// - frame parallel mode
+struct SoftVP9Encoder : public SoftVPXEncoder {
+ SoftVP9Encoder(const char *name,
+ const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData,
+ OMX_COMPONENTTYPE **component);
+
+protected:
+ // Returns current values for requested OMX
+ // parameters
+ virtual OMX_ERRORTYPE internalGetParameter(
+ OMX_INDEXTYPE index, OMX_PTR param);
+
+ // Validates, extracts and stores relevant OMX
+ // parameters
+ virtual OMX_ERRORTYPE internalSetParameter(
+ OMX_INDEXTYPE index, const OMX_PTR param);
+
+ // Populates |mCodecInterface| with codec specific settings.
+ virtual void setCodecSpecificInterface();
+
+ // Sets codec specific configuration.
+ virtual void setCodecSpecificConfiguration();
+
+ // Initializes codec specific encoder settings.
+ virtual vpx_codec_err_t setCodecSpecificControls();
+
+ // Gets vp9 specific parameters.
+ OMX_ERRORTYPE internalGetVp9Params(
+ OMX_VIDEO_PARAM_VP9TYPE* vp9Params);
+
+ // Handles vp9 specific parameters.
+ OMX_ERRORTYPE internalSetVp9Params(
+ const OMX_VIDEO_PARAM_VP9TYPE* vp9Params);
+
+private:
+ // Encoder profile corresponding to OMX level parameter
+ //
+ // The inconsistency in the naming is caused by
+ // OMX spec referring vpx profiles (g_profile)
+ // as "levels" whereas using the name "profile" for
+ // something else.
+ OMX_VIDEO_VP9LEVELTYPE mLevel;
+
+ int32_t mTileColumns;
+
+ OMX_BOOL mFrameParallelDecoding;
+
+ DISALLOW_EVIL_CONSTRUCTORS(SoftVP9Encoder);
+};
+
+} // namespace android
+
+#endif // SOFT_VP9_ENCODER_H_
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
index 56e1f77..a5666da 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
@@ -18,6 +18,9 @@
#define LOG_TAG "SoftVPXEncoder"
#include "SoftVPXEncoder.h"
+#include "SoftVP8Encoder.h"
+#include "SoftVP9Encoder.h"
+
#include <utils/Log.h>
#include <utils/misc.h>
@@ -42,7 +45,6 @@
params->nVersion.s.nStep = 0;
}
-
static int GetCPUCoreCount() {
int cpuCoreCount = 1;
#if defined(_SC_NPROCESSORS_ONLN)
@@ -55,30 +57,26 @@
return cpuCoreCount;
}
-static const CodecProfileLevel kProfileLevels[] = {
- { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version0 },
- { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version1 },
- { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version2 },
- { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version3 },
-};
-
SoftVPXEncoder::SoftVPXEncoder(const char *name,
const OMX_CALLBACKTYPE *callbacks,
OMX_PTR appData,
- OMX_COMPONENTTYPE **component)
+ OMX_COMPONENTTYPE **component,
+ const char* role,
+ OMX_VIDEO_CODINGTYPE codingType,
+ const char* mimeType,
+ int32_t minCompressionRatio,
+ const CodecProfileLevel *profileLevels,
+ size_t numProfileLevels)
: SoftVideoEncoderOMXComponent(
- name, "video_encoder.vp8", OMX_VIDEO_CodingVP8,
- kProfileLevels, NELEM(kProfileLevels),
+ name, role, codingType, profileLevels, numProfileLevels,
176 /* width */, 144 /* height */,
callbacks, appData, component),
mCodecContext(NULL),
mCodecConfiguration(NULL),
mCodecInterface(NULL),
mBitrateUpdated(false),
- mBitrateControlMode(VPX_VBR), // variable bitrate
- mDCTPartitions(0),
+ mBitrateControlMode(VPX_VBR),
mErrorResilience(OMX_FALSE),
- mLevel(OMX_VIDEO_VP8Level_Version0),
mKeyFrameInterval(0),
mMinQuantizer(0),
mMaxQuantizer(0),
@@ -96,10 +94,9 @@
initPorts(
kNumBuffers, kNumBuffers, kMinOutputBufferSize,
- MEDIA_MIMETYPE_VIDEO_VP8, 2 /* minCompressionRatio */);
+ mimeType, minCompressionRatio);
}
-
SoftVPXEncoder::~SoftVPXEncoder() {
releaseEncoder();
}
@@ -108,18 +105,18 @@
vpx_codec_err_t codec_return;
status_t result = UNKNOWN_ERROR;
- mCodecInterface = vpx_codec_vp8_cx();
+ setCodecSpecificInterface();
if (mCodecInterface == NULL) {
goto CLEAN_UP;
}
- ALOGD("VP8: initEncoder. BRMode: %u. TSLayers: %zu. KF: %u. QP: %u - %u",
+ ALOGD("VPx: initEncoder. BRMode: %u. TSLayers: %zu. KF: %u. QP: %u - %u",
(uint32_t)mBitrateControlMode, mTemporalLayers, mKeyFrameInterval,
mMinQuantizer, mMaxQuantizer);
mCodecConfiguration = new vpx_codec_enc_cfg_t;
codec_return = vpx_codec_enc_config_default(mCodecInterface,
mCodecConfiguration,
- 0); // Codec specific flags
+ 0);
if (codec_return != VPX_CODEC_OK) {
ALOGE("Error populating default configuration for vpx encoder.");
@@ -131,27 +128,6 @@
mCodecConfiguration->g_threads = GetCPUCoreCount();
mCodecConfiguration->g_error_resilient = mErrorResilience;
- switch (mLevel) {
- case OMX_VIDEO_VP8Level_Version0:
- mCodecConfiguration->g_profile = 0;
- break;
-
- case OMX_VIDEO_VP8Level_Version1:
- mCodecConfiguration->g_profile = 1;
- break;
-
- case OMX_VIDEO_VP8Level_Version2:
- mCodecConfiguration->g_profile = 2;
- break;
-
- case OMX_VIDEO_VP8Level_Version3:
- mCodecConfiguration->g_profile = 3;
- break;
-
- default:
- mCodecConfiguration->g_profile = 0;
- }
-
// OMX timebase unit is microsecond
// g_timebase is in seconds (i.e. 1/1000000 seconds)
mCodecConfiguration->g_timebase.num = 1;
@@ -161,6 +137,8 @@
mCodecConfiguration->rc_end_usage = mBitrateControlMode;
// Disable frame drop - not allowed in MediaCodec now.
mCodecConfiguration->rc_dropframe_thresh = 0;
+ // Disable lagged encoding.
+ mCodecConfiguration->g_lag_in_frames = 0;
if (mBitrateControlMode == VPX_CBR) {
// Disable spatial resizing.
mCodecConfiguration->rc_resize_allowed = 0;
@@ -181,8 +159,6 @@
mCodecConfiguration->rc_buf_sz = 1000;
// Enable error resilience - needed for packet loss.
mCodecConfiguration->g_error_resilient = 1;
- // Disable lagged encoding.
- mCodecConfiguration->g_lag_in_frames = 0;
// Maximum key frame interval - for CBR boost to 3000
mCodecConfiguration->kf_max_dist = 3000;
// Encoder determines optimal key frame placement automatically.
@@ -253,7 +229,6 @@
goto CLEAN_UP;
}
}
-
// Set bitrate values for each layer
for (size_t i = 0; i < mCodecConfiguration->ts_number_layers; i++) {
mCodecConfiguration->ts_target_bitrate[i] =
@@ -271,7 +246,7 @@
if (mMaxQuantizer > 0) {
mCodecConfiguration->rc_max_quantizer = mMaxQuantizer;
}
-
+ setCodecSpecificConfiguration();
mCodecContext = new vpx_codec_ctx_t;
codec_return = vpx_codec_enc_init(mCodecContext,
mCodecInterface,
@@ -283,14 +258,6 @@
goto CLEAN_UP;
}
- codec_return = vpx_codec_control(mCodecContext,
- VP8E_SET_TOKEN_PARTITIONS,
- mDCTPartitions);
- if (codec_return != VPX_CODEC_OK) {
- ALOGE("Error setting dct partitions for vpx encoder.");
- goto CLEAN_UP;
- }
-
// Extra CBR settings
if (mBitrateControlMode == VPX_CBR) {
codec_return = vpx_codec_control(mCodecContext,
@@ -318,6 +285,13 @@
}
}
+ codec_return = setCodecSpecificControls();
+
+ if (codec_return != VPX_CODEC_OK) {
+ // The codec specific method would have logged the error.
+ goto CLEAN_UP;
+ }
+
if (mColorFormat != OMX_COLOR_FormatYUV420Planar || mInputDataIsMeta) {
free(mConversionBuffer);
mConversionBuffer = NULL;
@@ -338,7 +312,6 @@
return result;
}
-
status_t SoftVPXEncoder::releaseEncoder() {
if (mCodecContext != NULL) {
vpx_codec_destroy(mCodecContext);
@@ -362,7 +335,6 @@
return OK;
}
-
OMX_ERRORTYPE SoftVPXEncoder::internalGetParameter(OMX_INDEXTYPE index,
OMX_PTR param) {
// can include extension index OMX_INDEXEXTTYPE
@@ -393,54 +365,15 @@
return OMX_ErrorNone;
}
- // VP8 specific parameters that use extension headers
- case OMX_IndexParamVideoVp8: {
- OMX_VIDEO_PARAM_VP8TYPE *vp8Params =
- (OMX_VIDEO_PARAM_VP8TYPE *)param;
-
- if (!isValidOMXParam(vp8Params)) {
- return OMX_ErrorBadParameter;
- }
-
- if (vp8Params->nPortIndex != kOutputPortIndex) {
- return OMX_ErrorUnsupportedIndex;
- }
-
- vp8Params->eProfile = OMX_VIDEO_VP8ProfileMain;
- vp8Params->eLevel = mLevel;
- vp8Params->nDCTPartitions = mDCTPartitions;
- vp8Params->bErrorResilientMode = mErrorResilience;
- return OMX_ErrorNone;
- }
-
- case OMX_IndexParamVideoAndroidVp8Encoder: {
- OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vp8AndroidParams =
- (OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *)param;
-
- if (!isValidOMXParam(vp8AndroidParams)) {
- return OMX_ErrorBadParameter;
- }
-
- if (vp8AndroidParams->nPortIndex != kOutputPortIndex) {
- return OMX_ErrorUnsupportedIndex;
- }
-
- vp8AndroidParams->nKeyFrameInterval = mKeyFrameInterval;
- vp8AndroidParams->eTemporalPattern = mTemporalPatternType;
- vp8AndroidParams->nTemporalLayerCount = mTemporalLayers;
- vp8AndroidParams->nMinQuantizer = mMinQuantizer;
- vp8AndroidParams->nMaxQuantizer = mMaxQuantizer;
- memcpy(vp8AndroidParams->nTemporalLayerBitrateRatio,
- mTemporalLayerBitrateRatio, sizeof(mTemporalLayerBitrateRatio));
- return OMX_ErrorNone;
- }
+ case OMX_IndexParamVideoAndroidVp8Encoder:
+ return internalGetAndroidVpxParams(
+ (OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *)param);
default:
return SoftVideoEncoderOMXComponent::internalGetParameter(index, param);
}
}
-
OMX_ERRORTYPE SoftVPXEncoder::internalSetParameter(OMX_INDEXTYPE index,
const OMX_PTR param) {
// can include extension index OMX_INDEXEXTTYPE
@@ -458,27 +391,9 @@
return internalSetBitrateParams(bitRate);
}
- case OMX_IndexParamVideoVp8: {
- const OMX_VIDEO_PARAM_VP8TYPE *vp8Params =
- (const OMX_VIDEO_PARAM_VP8TYPE*) param;
-
- if (!isValidOMXParam(vp8Params)) {
- return OMX_ErrorBadParameter;
- }
-
- return internalSetVp8Params(vp8Params);
- }
-
- case OMX_IndexParamVideoAndroidVp8Encoder: {
- const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vp8AndroidParams =
- (const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE*) param;
-
- if (!isValidOMXParam(vp8AndroidParams)) {
- return OMX_ErrorBadParameter;
- }
-
- return internalSetAndroidVp8Params(vp8AndroidParams);
- }
+ case OMX_IndexParamVideoAndroidVp8Encoder:
+ return internalSetAndroidVpxParams(
+ (const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *)param);
default:
return SoftVideoEncoderOMXComponent::internalSetParameter(index, param);
@@ -530,77 +445,21 @@
}
}
-OMX_ERRORTYPE SoftVPXEncoder::internalSetVp8Params(
- const OMX_VIDEO_PARAM_VP8TYPE* vp8Params) {
- if (vp8Params->nPortIndex != kOutputPortIndex) {
+OMX_ERRORTYPE SoftVPXEncoder::internalGetBitrateParams(
+ OMX_VIDEO_PARAM_BITRATETYPE* bitrate) {
+ if (bitrate->nPortIndex != kOutputPortIndex) {
return OMX_ErrorUnsupportedIndex;
}
- if (vp8Params->eProfile != OMX_VIDEO_VP8ProfileMain) {
- return OMX_ErrorBadParameter;
- }
+ bitrate->nTargetBitrate = mBitrate;
- if (vp8Params->eLevel == OMX_VIDEO_VP8Level_Version0 ||
- vp8Params->eLevel == OMX_VIDEO_VP8Level_Version1 ||
- vp8Params->eLevel == OMX_VIDEO_VP8Level_Version2 ||
- vp8Params->eLevel == OMX_VIDEO_VP8Level_Version3) {
- mLevel = vp8Params->eLevel;
+ if (mBitrateControlMode == VPX_VBR) {
+ bitrate->eControlRate = OMX_Video_ControlRateVariable;
+ } else if (mBitrateControlMode == VPX_CBR) {
+ bitrate->eControlRate = OMX_Video_ControlRateConstant;
} else {
- return OMX_ErrorBadParameter;
+ return OMX_ErrorUnsupportedSetting;
}
-
- if (vp8Params->nDCTPartitions <= kMaxDCTPartitions) {
- mDCTPartitions = vp8Params->nDCTPartitions;
- } else {
- return OMX_ErrorBadParameter;
- }
-
- mErrorResilience = vp8Params->bErrorResilientMode;
- return OMX_ErrorNone;
-}
-
-OMX_ERRORTYPE SoftVPXEncoder::internalSetAndroidVp8Params(
- const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE* vp8AndroidParams) {
- if (vp8AndroidParams->nPortIndex != kOutputPortIndex) {
- return OMX_ErrorUnsupportedIndex;
- }
- if (vp8AndroidParams->eTemporalPattern != OMX_VIDEO_VPXTemporalLayerPatternNone &&
- vp8AndroidParams->eTemporalPattern != OMX_VIDEO_VPXTemporalLayerPatternWebRTC) {
- return OMX_ErrorBadParameter;
- }
- if (vp8AndroidParams->nTemporalLayerCount > OMX_VIDEO_ANDROID_MAXVP8TEMPORALLAYERS) {
- return OMX_ErrorBadParameter;
- }
- if (vp8AndroidParams->nMinQuantizer > vp8AndroidParams->nMaxQuantizer) {
- return OMX_ErrorBadParameter;
- }
-
- mTemporalPatternType = vp8AndroidParams->eTemporalPattern;
- if (vp8AndroidParams->eTemporalPattern == OMX_VIDEO_VPXTemporalLayerPatternWebRTC) {
- mTemporalLayers = vp8AndroidParams->nTemporalLayerCount;
- } else if (vp8AndroidParams->eTemporalPattern == OMX_VIDEO_VPXTemporalLayerPatternNone) {
- mTemporalLayers = 0;
- }
- // Check the bitrate distribution between layers is in increasing order
- if (mTemporalLayers > 1) {
- for (size_t i = 0; i < mTemporalLayers - 1; i++) {
- if (vp8AndroidParams->nTemporalLayerBitrateRatio[i + 1] <=
- vp8AndroidParams->nTemporalLayerBitrateRatio[i]) {
- ALOGE("Wrong bitrate ratio - should be in increasing order.");
- return OMX_ErrorBadParameter;
- }
- }
- }
- mKeyFrameInterval = vp8AndroidParams->nKeyFrameInterval;
- mMinQuantizer = vp8AndroidParams->nMinQuantizer;
- mMaxQuantizer = vp8AndroidParams->nMaxQuantizer;
- memcpy(mTemporalLayerBitrateRatio, vp8AndroidParams->nTemporalLayerBitrateRatio,
- sizeof(mTemporalLayerBitrateRatio));
- ALOGD("VP8: internalSetAndroidVp8Params. BRMode: %u. TS: %zu. KF: %u."
- " QP: %u - %u BR0: %u. BR1: %u. BR2: %u",
- (uint32_t)mBitrateControlMode, mTemporalLayers, mKeyFrameInterval,
- mMinQuantizer, mMaxQuantizer, mTemporalLayerBitrateRatio[0],
- mTemporalLayerBitrateRatio[1], mTemporalLayerBitrateRatio[2]);
return OMX_ErrorNone;
}
@@ -623,71 +482,134 @@
return OMX_ErrorNone;
}
+OMX_ERRORTYPE SoftVPXEncoder::internalGetAndroidVpxParams(
+ OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vpxAndroidParams) {
+ if (vpxAndroidParams->nPortIndex != kOutputPortIndex) {
+ return OMX_ErrorUnsupportedIndex;
+ }
+
+ vpxAndroidParams->nKeyFrameInterval = mKeyFrameInterval;
+ vpxAndroidParams->eTemporalPattern = mTemporalPatternType;
+ vpxAndroidParams->nTemporalLayerCount = mTemporalLayers;
+ vpxAndroidParams->nMinQuantizer = mMinQuantizer;
+ vpxAndroidParams->nMaxQuantizer = mMaxQuantizer;
+ memcpy(vpxAndroidParams->nTemporalLayerBitrateRatio,
+ mTemporalLayerBitrateRatio, sizeof(mTemporalLayerBitrateRatio));
+ return OMX_ErrorNone;
+}
+
+OMX_ERRORTYPE SoftVPXEncoder::internalSetAndroidVpxParams(
+ const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vpxAndroidParams) {
+ if (vpxAndroidParams->nPortIndex != kOutputPortIndex) {
+ return OMX_ErrorUnsupportedIndex;
+ }
+ if (vpxAndroidParams->eTemporalPattern != OMX_VIDEO_VPXTemporalLayerPatternNone &&
+ vpxAndroidParams->eTemporalPattern != OMX_VIDEO_VPXTemporalLayerPatternWebRTC) {
+ return OMX_ErrorBadParameter;
+ }
+ if (vpxAndroidParams->nTemporalLayerCount > OMX_VIDEO_ANDROID_MAXVP8TEMPORALLAYERS) {
+ return OMX_ErrorBadParameter;
+ }
+ if (vpxAndroidParams->nMinQuantizer > vpxAndroidParams->nMaxQuantizer) {
+ return OMX_ErrorBadParameter;
+ }
+
+ mTemporalPatternType = vpxAndroidParams->eTemporalPattern;
+ if (vpxAndroidParams->eTemporalPattern == OMX_VIDEO_VPXTemporalLayerPatternWebRTC) {
+ mTemporalLayers = vpxAndroidParams->nTemporalLayerCount;
+ } else if (vpxAndroidParams->eTemporalPattern == OMX_VIDEO_VPXTemporalLayerPatternNone) {
+ mTemporalLayers = 0;
+ }
+ // Check the bitrate distribution between layers is in increasing order
+ if (mTemporalLayers > 1) {
+ for (size_t i = 0; i < mTemporalLayers - 1; i++) {
+ if (vpxAndroidParams->nTemporalLayerBitrateRatio[i + 1] <=
+ vpxAndroidParams->nTemporalLayerBitrateRatio[i]) {
+ ALOGE("Wrong bitrate ratio - should be in increasing order.");
+ return OMX_ErrorBadParameter;
+ }
+ }
+ }
+ mKeyFrameInterval = vpxAndroidParams->nKeyFrameInterval;
+ mMinQuantizer = vpxAndroidParams->nMinQuantizer;
+ mMaxQuantizer = vpxAndroidParams->nMaxQuantizer;
+ memcpy(mTemporalLayerBitrateRatio, vpxAndroidParams->nTemporalLayerBitrateRatio,
+ sizeof(mTemporalLayerBitrateRatio));
+ ALOGD("VPx: internalSetAndroidVpxParams. BRMode: %u. TS: %zu. KF: %u."
+ " QP: %u - %u BR0: %u. BR1: %u. BR2: %u",
+ (uint32_t)mBitrateControlMode, mTemporalLayers, mKeyFrameInterval,
+ mMinQuantizer, mMaxQuantizer, mTemporalLayerBitrateRatio[0],
+ mTemporalLayerBitrateRatio[1], mTemporalLayerBitrateRatio[2]);
+ return OMX_ErrorNone;
+}
+
vpx_enc_frame_flags_t SoftVPXEncoder::getEncodeFlags() {
vpx_enc_frame_flags_t flags = 0;
- int patternIdx = mTemporalPatternIdx % mTemporalPatternLength;
- mTemporalPatternIdx++;
- switch (mTemporalPattern[patternIdx]) {
- case kTemporalUpdateLast:
- flags |= VP8_EFLAG_NO_UPD_GF;
- flags |= VP8_EFLAG_NO_UPD_ARF;
- flags |= VP8_EFLAG_NO_REF_GF;
- flags |= VP8_EFLAG_NO_REF_ARF;
- break;
- case kTemporalUpdateGoldenWithoutDependency:
- flags |= VP8_EFLAG_NO_REF_GF;
- // Deliberately no break here.
- case kTemporalUpdateGolden:
- flags |= VP8_EFLAG_NO_REF_ARF;
- flags |= VP8_EFLAG_NO_UPD_ARF;
- flags |= VP8_EFLAG_NO_UPD_LAST;
- break;
- case kTemporalUpdateAltrefWithoutDependency:
- flags |= VP8_EFLAG_NO_REF_ARF;
- flags |= VP8_EFLAG_NO_REF_GF;
- // Deliberately no break here.
- case kTemporalUpdateAltref:
- flags |= VP8_EFLAG_NO_UPD_GF;
- flags |= VP8_EFLAG_NO_UPD_LAST;
- break;
- case kTemporalUpdateNoneNoRefAltref:
- flags |= VP8_EFLAG_NO_REF_ARF;
- // Deliberately no break here.
- case kTemporalUpdateNone:
- flags |= VP8_EFLAG_NO_UPD_GF;
- flags |= VP8_EFLAG_NO_UPD_ARF;
- flags |= VP8_EFLAG_NO_UPD_LAST;
- flags |= VP8_EFLAG_NO_UPD_ENTROPY;
- break;
- case kTemporalUpdateNoneNoRefGoldenRefAltRef:
- flags |= VP8_EFLAG_NO_REF_GF;
- flags |= VP8_EFLAG_NO_UPD_GF;
- flags |= VP8_EFLAG_NO_UPD_ARF;
- flags |= VP8_EFLAG_NO_UPD_LAST;
- flags |= VP8_EFLAG_NO_UPD_ENTROPY;
- break;
- case kTemporalUpdateGoldenWithoutDependencyRefAltRef:
- flags |= VP8_EFLAG_NO_REF_GF;
- flags |= VP8_EFLAG_NO_UPD_ARF;
- flags |= VP8_EFLAG_NO_UPD_LAST;
- break;
- case kTemporalUpdateLastRefAltRef:
- flags |= VP8_EFLAG_NO_UPD_GF;
- flags |= VP8_EFLAG_NO_UPD_ARF;
- flags |= VP8_EFLAG_NO_REF_GF;
- break;
- case kTemporalUpdateGoldenRefAltRef:
- flags |= VP8_EFLAG_NO_UPD_ARF;
- flags |= VP8_EFLAG_NO_UPD_LAST;
- break;
- case kTemporalUpdateLastAndGoldenRefAltRef:
- flags |= VP8_EFLAG_NO_UPD_ARF;
- flags |= VP8_EFLAG_NO_REF_GF;
- break;
- case kTemporalUpdateLastRefAll:
- flags |= VP8_EFLAG_NO_UPD_ARF;
- flags |= VP8_EFLAG_NO_UPD_GF;
- break;
+ if (mTemporalPatternLength > 0) {
+ int patternIdx = mTemporalPatternIdx % mTemporalPatternLength;
+ mTemporalPatternIdx++;
+ switch (mTemporalPattern[patternIdx]) {
+ case kTemporalUpdateLast:
+ flags |= VP8_EFLAG_NO_UPD_GF;
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ flags |= VP8_EFLAG_NO_REF_GF;
+ flags |= VP8_EFLAG_NO_REF_ARF;
+ break;
+ case kTemporalUpdateGoldenWithoutDependency:
+ flags |= VP8_EFLAG_NO_REF_GF;
+ // Deliberately no break here.
+ case kTemporalUpdateGolden:
+ flags |= VP8_EFLAG_NO_REF_ARF;
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ flags |= VP8_EFLAG_NO_UPD_LAST;
+ break;
+ case kTemporalUpdateAltrefWithoutDependency:
+ flags |= VP8_EFLAG_NO_REF_ARF;
+ flags |= VP8_EFLAG_NO_REF_GF;
+ // Deliberately no break here.
+ case kTemporalUpdateAltref:
+ flags |= VP8_EFLAG_NO_UPD_GF;
+ flags |= VP8_EFLAG_NO_UPD_LAST;
+ break;
+ case kTemporalUpdateNoneNoRefAltref:
+ flags |= VP8_EFLAG_NO_REF_ARF;
+ // Deliberately no break here.
+ case kTemporalUpdateNone:
+ flags |= VP8_EFLAG_NO_UPD_GF;
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ flags |= VP8_EFLAG_NO_UPD_LAST;
+ flags |= VP8_EFLAG_NO_UPD_ENTROPY;
+ break;
+ case kTemporalUpdateNoneNoRefGoldenRefAltRef:
+ flags |= VP8_EFLAG_NO_REF_GF;
+ flags |= VP8_EFLAG_NO_UPD_GF;
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ flags |= VP8_EFLAG_NO_UPD_LAST;
+ flags |= VP8_EFLAG_NO_UPD_ENTROPY;
+ break;
+ case kTemporalUpdateGoldenWithoutDependencyRefAltRef:
+ flags |= VP8_EFLAG_NO_REF_GF;
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ flags |= VP8_EFLAG_NO_UPD_LAST;
+ break;
+ case kTemporalUpdateLastRefAltRef:
+ flags |= VP8_EFLAG_NO_UPD_GF;
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ flags |= VP8_EFLAG_NO_REF_GF;
+ break;
+ case kTemporalUpdateGoldenRefAltRef:
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ flags |= VP8_EFLAG_NO_UPD_LAST;
+ break;
+ case kTemporalUpdateLastAndGoldenRefAltRef:
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ flags |= VP8_EFLAG_NO_REF_GF;
+ break;
+ case kTemporalUpdateLastRefAll:
+ flags |= VP8_EFLAG_NO_UPD_ARF;
+ flags |= VP8_EFLAG_NO_UPD_GF;
+ break;
+ }
}
return flags;
}
@@ -765,10 +687,7 @@
vpx_img_wrap(&raw_frame, VPX_IMG_FMT_I420, mWidth, mHeight,
kInputBufferAlignment, (uint8_t *)source);
- vpx_enc_frame_flags_t flags = 0;
- if (mTemporalPatternLength > 0) {
- flags = getEncodeFlags();
- }
+ vpx_enc_frame_flags_t flags = getEncodeFlags();
if (mKeyFrameRequested) {
flags |= VPX_EFLAG_FORCE_KF;
mKeyFrameRequested = false;
@@ -779,7 +698,7 @@
vpx_codec_err_t res = vpx_codec_enc_config_set(mCodecContext,
mCodecConfiguration);
if (res != VPX_CODEC_OK) {
- ALOGE("vp8 encoder failed to update bitrate: %s",
+ ALOGE("vpx encoder failed to update bitrate: %s",
vpx_codec_err_to_string(res));
notify(OMX_EventError,
OMX_ErrorUndefined,
@@ -856,9 +775,15 @@
} // namespace android
-
android::SoftOMXComponent *createSoftOMXComponent(
const char *name, const OMX_CALLBACKTYPE *callbacks,
OMX_PTR appData, OMX_COMPONENTTYPE **component) {
- return new android::SoftVPXEncoder(name, callbacks, appData, component);
+ if (!strcmp(name, "OMX.google.vp8.encoder")) {
+ return new android::SoftVP8Encoder(name, callbacks, appData, component);
+ } else if (!strcmp(name, "OMX.google.vp9.encoder")) {
+ return new android::SoftVP9Encoder(name, callbacks, appData, component);
+ } else {
+ CHECK(!"Unknown component");
+ }
+ return NULL;
}
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
index 2033e64..86dfad7 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
@@ -31,18 +31,18 @@
namespace android {
-// Exposes a vpx encoder as an OMX Component
+// Base class for a VPX Encoder OMX Component
//
// Boilerplate for callback bindings are taken care
// by the base class SimpleSoftOMXComponent and its
// parent SoftOMXComponent.
//
-// Only following encoder settings are available
+// Only following encoder settings are available (codec specific settings might
+// be available in the sub-classes):
// - target bitrate
// - rate control (constant / variable)
// - frame rate
// - error resilience
-// - token partitioning
// - reconstruction & loop filters (g_profile)
//
// Only following color formats are recognized
@@ -54,7 +54,7 @@
// - encoding deadline is realtime
// - multithreaded encoding utilizes a number of threads equal
// to online cpu's available
-// - the algorithm interface for encoder is vp8
+// - the algorithm interface for encoder is decided by the sub-class in use
// - fractional bits of frame rate is discarded
// - OMX timestamps are in microseconds, therefore
// encoder timebase is fixed to 1/1000000
@@ -63,7 +63,13 @@
SoftVPXEncoder(const char *name,
const OMX_CALLBACKTYPE *callbacks,
OMX_PTR appData,
- OMX_COMPONENTTYPE **component);
+ OMX_COMPONENTTYPE **component,
+ const char* role,
+ OMX_VIDEO_CODINGTYPE codingType,
+ const char* mimeType,
+ int32_t minCompressionRatio,
+ const CodecProfileLevel *profileLevels,
+ size_t numProfileLevels);
protected:
virtual ~SoftVPXEncoder();
@@ -89,7 +95,44 @@
virtual void onReset();
-private:
+ // Initializes vpx encoder with available settings.
+ status_t initEncoder();
+
+ // Populates mCodecInterface with codec specific settings.
+ virtual void setCodecSpecificInterface() = 0;
+
+ // Sets codec specific configuration.
+ virtual void setCodecSpecificConfiguration() = 0;
+
+ // Sets codec specific encoder controls.
+ virtual vpx_codec_err_t setCodecSpecificControls() = 0;
+
+ // Get current encode flags.
+ virtual vpx_enc_frame_flags_t getEncodeFlags();
+
+ // Releases vpx encoder instance, with it's associated
+ // data structures.
+ //
+ // Unless called earlier, this is handled by the
+ // dtor.
+ status_t releaseEncoder();
+
+ // Get bitrate parameters.
+ virtual OMX_ERRORTYPE internalGetBitrateParams(
+ OMX_VIDEO_PARAM_BITRATETYPE* bitrate);
+
+ // Updates bitrate to reflect port settings.
+ virtual OMX_ERRORTYPE internalSetBitrateParams(
+ const OMX_VIDEO_PARAM_BITRATETYPE* bitrate);
+
+ // Gets Android vpx specific parameters.
+ OMX_ERRORTYPE internalGetAndroidVpxParams(
+ OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vpxAndroidParams);
+
+ // Handles Android vpx specific parameters.
+ OMX_ERRORTYPE internalSetAndroidVpxParams(
+ const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vpxAndroidParams);
+
enum TemporalReferences {
// For 1 layer case: reference all (last, golden, and alt ref), but only
// update last.
@@ -139,9 +182,6 @@
static const uint32_t kInputBufferAlignment = 1;
static const uint32_t kOutputBufferAlignment = 2;
- // Max value supported for DCT partitions
- static const uint32_t kMaxDCTPartitions = 3;
-
// Number of supported input color formats
static const uint32_t kNumberOfSupportedColorFormats = 3;
@@ -163,23 +203,10 @@
// Bitrate control mode, either constant or variable
vpx_rc_mode mBitrateControlMode;
- // vp8 specific configuration parameter
- // that enables token partitioning of
- // the stream into substreams
- int32_t mDCTPartitions;
-
// Parameter that denotes whether error resilience
// is enabled in encoder
OMX_BOOL mErrorResilience;
- // Encoder profile corresponding to OMX level parameter
- //
- // The inconsistency in the naming is caused by
- // OMX spec referring vpx profiles (g_profile)
- // as "levels" whereas using the name "profile" for
- // something else.
- OMX_VIDEO_VP8LEVELTYPE mLevel;
-
// Key frame interval in frames
uint32_t mKeyFrameInterval;
@@ -218,31 +245,6 @@
bool mKeyFrameRequested;
- // Initializes vpx encoder with available settings.
- status_t initEncoder();
-
- // Releases vpx encoder instance, with it's associated
- // data structures.
- //
- // Unless called earlier, this is handled by the
- // dtor.
- status_t releaseEncoder();
-
- // Get current encode flags
- vpx_enc_frame_flags_t getEncodeFlags();
-
- // Updates bitrate to reflect port settings.
- OMX_ERRORTYPE internalSetBitrateParams(
- const OMX_VIDEO_PARAM_BITRATETYPE* bitrate);
-
- // Handles vp8 specific parameters.
- OMX_ERRORTYPE internalSetVp8Params(
- const OMX_VIDEO_PARAM_VP8TYPE* vp8Params);
-
- // Handles Android vp8 specific parameters.
- OMX_ERRORTYPE internalSetAndroidVp8Params(
- const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE* vp8AndroidParams);
-
DISALLOW_EVIL_CONSTRUCTORS(SoftVPXEncoder);
};
diff --git a/media/libstagefright/codecs/on2/h264dec/Android.bp b/media/libstagefright/codecs/on2/h264dec/Android.bp
index ca9c1d4..95c2075 100644
--- a/media/libstagefright/codecs/on2/h264dec/Android.bp
+++ b/media/libstagefright/codecs/on2/h264dec/Android.bp
@@ -107,6 +107,10 @@
misc_undefined: [
"signed-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
shared_libs: [
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/ARM_DELIVERY.TXT b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/ARM_DELIVERY.TXT
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/ARM_MANIFEST.TXT b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/ARM_MANIFEST.TXT
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/filelist_vc.txt b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/filelist_vc.txt
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/comm/src/omxVCCOMM_Copy16x16_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/comm/src/omxVCCOMM_Copy16x16_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/comm/src/omxVCCOMM_Copy8x8_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/comm/src/omxVCCOMM_Copy8x8_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/comm/src/omxVCCOMM_ExpandFrame_I_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/comm/src/omxVCCOMM_ExpandFrame_I_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_Average_4x_Align_unsafe_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_Average_4x_Align_unsafe_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_DeblockingChroma_unsafe_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_DeblockingChroma_unsafe_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_DeblockingLuma_unsafe_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_DeblockingLuma_unsafe_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_DecodeCoeffsToPair_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_DecodeCoeffsToPair_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_DequantTables_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_DequantTables_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_Align_unsafe_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_Align_unsafe_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_Copy_unsafe_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_Copy_unsafe_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_DiagCopy_unsafe_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_DiagCopy_unsafe_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_HalfDiagHorVer4x4_unsafe_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_HalfDiagHorVer4x4_unsafe_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_HalfDiagVerHor4x4_unsafe_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_HalfDiagVerHor4x4_unsafe_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_HalfHor4x4_unsafe_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_HalfHor4x4_unsafe_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_HalfVer4x4_unsafe_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_InterpolateLuma_HalfVer4x4_unsafe_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_Interpolate_Chroma_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_Interpolate_Chroma_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_QuantTables_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_QuantTables_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_TransformResidual4x4_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_TransformResidual4x4_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_UnpackBlock4x4_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/armVCM4P10_UnpackBlock4x4_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_DequantTransformResidualFromPairAndAdd_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_DequantTransformResidualFromPairAndAdd_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_FilterDeblockingLuma_HorEdge_I_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_FilterDeblockingLuma_HorEdge_I_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_FilterDeblockingLuma_VerEdge_I_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_FilterDeblockingLuma_VerEdge_I_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_InterpolateLuma_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_InterpolateLuma_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_PredictIntraChroma_8x8_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_PredictIntraChroma_8x8_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_PredictIntra_16x16_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_PredictIntra_16x16_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_PredictIntra_4x4_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_PredictIntra_4x4_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_TransformDequantChromaDCFromPair_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_TransformDequantChromaDCFromPair_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_TransformDequantLumaDCFromPair_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p10/src/omxVCM4P10_TransformDequantLumaDCFromPair_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/armVCM4P2_Clip8_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/armVCM4P2_Clip8_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/armVCM4P2_DecodeVLCZigzag_AC_unsafe_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/armVCM4P2_DecodeVLCZigzag_AC_unsafe_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/armVCM4P2_SetPredDir_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/armVCM4P2_SetPredDir_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_DecodePadMV_PVOP_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_DecodePadMV_PVOP_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_DecodeVLCZigzag_Inter_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_DecodeVLCZigzag_Inter_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_DecodeVLCZigzag_IntraACVLC_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_DecodeVLCZigzag_IntraACVLC_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_DecodeVLCZigzag_IntraDCVLC_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_DecodeVLCZigzag_IntraDCVLC_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_FindMVpred_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_FindMVpred_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_IDCT8x8blk_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_IDCT8x8blk_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_MCReconBlock_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_MCReconBlock_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_PredictReconCoefIntra_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_PredictReconCoefIntra_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_QuantInvInter_I_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_QuantInvInter_I_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_QuantInvIntra_I_s.s b/media/libstagefright/codecs/on2/h264dec/omxdl/arm_neon/vc/m4p2/src/omxVCM4P2_QuantInvIntra_I_s.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_chroma_hor.s b/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_chroma_hor.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_chroma_hor_ver.s b/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_chroma_hor_ver.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_chroma_ver.s b/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_chroma_ver.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_hor_half.s b/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_hor_half.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_hor_quarter.s b/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_hor_quarter.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_hor_ver_quarter.s b/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_hor_ver_quarter.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_mid_hor.s b/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_mid_hor.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_ver_half.s b/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_ver_half.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_ver_quarter.s b/media/libstagefright/codecs/on2/h264dec/source/arm11_asm/h264bsd_interpolate_ver_quarter.s
old mode 100755
new mode 100644
diff --git a/media/libstagefright/codecs/opus/dec/Android.bp b/media/libstagefright/codecs/opus/dec/Android.bp
index 56a9eb5..5d9c4c8 100644
--- a/media/libstagefright/codecs/opus/dec/Android.bp
+++ b/media/libstagefright/codecs/opus/dec/Android.bp
@@ -22,5 +22,9 @@
"signed-integer-overflow",
"unsigned-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
}
diff --git a/media/libstagefright/codecs/opus/dec/SoftOpus.cpp b/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
index 2e44ed7..2ac6ce0 100644
--- a/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
+++ b/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
@@ -430,7 +430,7 @@
BufferInfo *outInfo = *outQueue.begin();
OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
- if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+ if ((inHeader->nFlags & OMX_BUFFERFLAG_EOS) && inHeader->nFilledLen == 0) {
inQueue.erase(inQueue.begin());
inInfo->mOwnedByUs = false;
notifyEmptyBufferDone(inHeader);
@@ -498,11 +498,15 @@
mNumFramesOutput += numFrames;
- inInfo->mOwnedByUs = false;
- inQueue.erase(inQueue.begin());
- inInfo = NULL;
- notifyEmptyBufferDone(inHeader);
- inHeader = NULL;
+ if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+ inHeader->nFilledLen = 0;
+ } else {
+ inInfo->mOwnedByUs = false;
+ inQueue.erase(inQueue.begin());
+ inInfo = NULL;
+ notifyEmptyBufferDone(inHeader);
+ inHeader = NULL;
+ }
outInfo->mOwnedByUs = false;
outQueue.erase(outQueue.begin());
diff --git a/media/libstagefright/codecs/raw/Android.bp b/media/libstagefright/codecs/raw/Android.bp
index 628c61e..c64027b 100644
--- a/media/libstagefright/codecs/raw/Android.bp
+++ b/media/libstagefright/codecs/raw/Android.bp
@@ -15,6 +15,10 @@
"signed-integer-overflow",
"unsigned-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
shared_libs: [
diff --git a/media/libstagefright/colorconversion/Android.bp b/media/libstagefright/colorconversion/Android.bp
index 3a5ce9c..11fe5eb 100644
--- a/media/libstagefright/colorconversion/Android.bp
+++ b/media/libstagefright/colorconversion/Android.bp
@@ -10,6 +10,8 @@
"frameworks/native/include/media/openmax",
],
+ shared_libs: ["libui"],
+
static_libs: ["libyuv_static"],
cflags: ["-Werror"],
@@ -18,5 +20,9 @@
misc_undefined: [
"signed-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
}
diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
index bbc4d26..536d40d 100644
--- a/media/libstagefright/colorconversion/SoftwareRenderer.cpp
+++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
@@ -23,8 +23,10 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <system/window.h>
+#include <ui/Fence.h>
#include <ui/GraphicBufferMapper.h>
-#include <gui/IGraphicBufferProducer.h>
+#include <ui/GraphicBuffer.h>
+#include <ui/Rect.h>
namespace android {
diff --git a/media/libstagefright/data/media_codecs_google_video.xml b/media/libstagefright/data/media_codecs_google_video.xml
index b03c769..ce164a2 100644
--- a/media/libstagefright/data/media_codecs_google_video.xml
+++ b/media/libstagefright/data/media_codecs_google_video.xml
@@ -101,5 +101,12 @@
<Limit name="bitrate" range="1-40000000" />
<Feature name="bitrate-modes" value="VBR,CBR" />
</MediaCodec>
+ <MediaCodec name="OMX.google.vp9.encoder" type="video/x-vnd.on2.vp9">
+ <!-- profiles and levels: ProfileMain : Level_Version0-3 -->
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="bitrate" range="1-40000000" />
+ <Feature name="bitrate-modes" value="VBR,CBR" />
+ </MediaCodec>
</Encoders>
</Included>
diff --git a/media/libstagefright/filters/Android.bp b/media/libstagefright/filters/Android.bp
index dce8644..e944224 100644
--- a/media/libstagefright/filters/Android.bp
+++ b/media/libstagefright/filters/Android.bp
@@ -24,6 +24,15 @@
],
shared_libs: [
+ "libgui",
"libmedia",
+ "libhidlmemory",
],
+
+ sanitize: {
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
+ },
}
diff --git a/media/libstagefright/filters/GraphicBufferListener.cpp b/media/libstagefright/filters/GraphicBufferListener.cpp
index c1aaa17..db061c1 100644
--- a/media/libstagefright/filters/GraphicBufferListener.cpp
+++ b/media/libstagefright/filters/GraphicBufferListener.cpp
@@ -22,6 +22,7 @@
#include <media/stagefright/MediaErrors.h>
#include <gui/BufferItem.h>
+#include <utils/String8.h>
#include "GraphicBufferListener.h"
diff --git a/media/libstagefright/filters/IntrinsicBlurFilter.cpp b/media/libstagefright/filters/IntrinsicBlurFilter.cpp
index cbcf699..e00afd9 100644
--- a/media/libstagefright/filters/IntrinsicBlurFilter.cpp
+++ b/media/libstagefright/filters/IntrinsicBlurFilter.cpp
@@ -19,7 +19,7 @@
#include <utils/Log.h>
-#include <media/stagefright/foundation/ABuffer.h>
+#include <media/MediaCodecBuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -88,7 +88,7 @@
}
status_t IntrinsicBlurFilter::processBuffers(
- const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer) {
+ const sp<MediaCodecBuffer> &srcBuffer, const sp<MediaCodecBuffer> &outBuffer) {
mAllocIn->copy1DRangeFrom(0, mWidth * mHeight, srcBuffer->data());
mBlur->forEach(mAllocOut);
mAllocOut->copy1DRangeTo(0, mWidth * mHeight, outBuffer->data());
diff --git a/media/libstagefright/filters/IntrinsicBlurFilter.h b/media/libstagefright/filters/IntrinsicBlurFilter.h
index 4707ab7..a2aabfa 100644
--- a/media/libstagefright/filters/IntrinsicBlurFilter.h
+++ b/media/libstagefright/filters/IntrinsicBlurFilter.h
@@ -31,7 +31,7 @@
virtual void reset();
virtual status_t setParameters(const sp<AMessage> &msg);
virtual status_t processBuffers(
- const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer);
+ const sp<MediaCodecBuffer> &srcBuffer, const sp<MediaCodecBuffer> &outBuffer);
protected:
virtual ~IntrinsicBlurFilter() {};
diff --git a/media/libstagefright/filters/MediaFilter.cpp b/media/libstagefright/filters/MediaFilter.cpp
index cd69418..777ab5b 100644
--- a/media/libstagefright/filters/MediaFilter.cpp
+++ b/media/libstagefright/filters/MediaFilter.cpp
@@ -31,6 +31,8 @@
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MediaFilter.h>
+#include <media/MediaCodecBuffer.h>
+
#include <gui/BufferItem.h>
#include "ColorConvert.h"
@@ -40,6 +42,9 @@
#include "SaturationFilter.h"
#include "ZeroFilter.h"
+#include "../include/ACodecBufferChannel.h"
+#include "../include/SharedMemoryBuffer.h"
+
namespace android {
// parameter: number of input and output buffers
@@ -49,6 +54,9 @@
: mState(UNINITIALIZED),
mGeneration(0),
mGraphicBufferListener(NULL) {
+ mBufferChannel = std::make_shared<ACodecBufferChannel>(
+ new AMessage(kWhatInputBufferFilled, this),
+ new AMessage(kWhatOutputBufferDrained, this));
}
MediaFilter::~MediaFilter() {
@@ -56,8 +64,8 @@
//////////////////// PUBLIC FUNCTIONS //////////////////////////////////////////
-void MediaFilter::setNotificationMessage(const sp<AMessage> &msg) {
- mNotify = msg;
+std::shared_ptr<BufferChannelBase> MediaFilter::getBufferChannel() {
+ return mBufferChannel;
}
void MediaFilter::initiateAllocateComponent(const sp<AMessage> &msg) {
@@ -189,29 +197,6 @@
}
}
-//////////////////// PORT DESCRIPTION //////////////////////////////////////////
-
-MediaFilter::PortDescription::PortDescription() {
-}
-
-void MediaFilter::PortDescription::addBuffer(
- IOMX::buffer_id id, const sp<ABuffer> &buffer) {
- mBufferIDs.push_back(id);
- mBuffers.push_back(buffer);
-}
-
-size_t MediaFilter::PortDescription::countBuffers() {
- return mBufferIDs.size();
-}
-
-IOMX::buffer_id MediaFilter::PortDescription::bufferIDAt(size_t index) const {
- return mBufferIDs.itemAt(index);
-}
-
-sp<ABuffer> MediaFilter::PortDescription::bufferAt(size_t index) const {
- return mBuffers.itemAt(index);
-}
-
//////////////////// HELPER FUNCTIONS //////////////////////////////////////////
void MediaFilter::signalProcessBuffers() {
@@ -219,10 +204,7 @@
}
void MediaFilter::signalError(status_t error) {
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatError);
- notify->setInt32("err", error);
- notify->post();
+ mCallback->onError(error, ACTION_CODE_FATAL);
}
status_t MediaFilter::allocateBuffersOnPort(OMX_U32 portIndex) {
@@ -250,7 +232,8 @@
info.mBufferID = i;
info.mGeneration = mGeneration;
info.mOutputFlags = 0;
- info.mData = new ABuffer(mem->pointer(), bufferSize);
+ info.mData = new SharedMemoryBuffer(
+ isInput ? mInputFormat : mOutputFormat, mem);
info.mData->meta()->setInt64("timeUs", 0);
mBuffers[portIndex].push_back(info);
@@ -261,21 +244,15 @@
}
}
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatBuffersAllocated);
-
- notify->setInt32("portIndex", portIndex);
-
- sp<PortDescription> desc = new PortDescription;
-
+ std::vector<ACodecBufferChannel::BufferAndId> array(mBuffers[portIndex].size());
for (size_t i = 0; i < mBuffers[portIndex].size(); ++i) {
- const BufferInfo &info = mBuffers[portIndex][i];
-
- desc->addBuffer(info.mBufferID, info.mData);
+ array[i] = {mBuffers[portIndex][i].mData, mBuffers[portIndex][i].mBufferID};
}
-
- notify->setObject("portDesc", desc);
- notify->post();
+ if (portIndex == kPortIndexInput) {
+ mBufferChannel->setInputBufferArray(array);
+ } else {
+ mBufferChannel->setOutputBufferArray(array);
+ }
return OK;
}
@@ -309,20 +286,14 @@
info->mGeneration = mGeneration;
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatFillThisBuffer);
- notify->setInt32("buffer-id", info->mBufferID);
-
info->mData->meta()->clear();
- notify->setBuffer("buffer", info->mData);
sp<AMessage> reply = new AMessage(kWhatInputBufferFilled, this);
reply->setInt32("buffer-id", info->mBufferID);
- notify->setMessage("reply", reply);
-
info->mStatus = BufferInfo::OWNED_BY_UPSTREAM;
- notify->post();
+
+ mBufferChannel->fillThisBuffer(info->mBufferID);
}
void MediaFilter::postDrainThisBuffer(BufferInfo *info) {
@@ -330,50 +301,20 @@
info->mGeneration = mGeneration;
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatDrainThisBuffer);
- notify->setInt32("buffer-id", info->mBufferID);
- notify->setInt32("flags", info->mOutputFlags);
- notify->setBuffer("buffer", info->mData);
-
sp<AMessage> reply = new AMessage(kWhatOutputBufferDrained, this);
reply->setInt32("buffer-id", info->mBufferID);
- notify->setMessage("reply", reply);
-
- notify->post();
+ mBufferChannel->drainThisBuffer(info->mBufferID, info->mOutputFlags);
info->mStatus = BufferInfo::OWNED_BY_UPSTREAM;
}
void MediaFilter::postEOS() {
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatEOS);
- notify->setInt32("err", ERROR_END_OF_STREAM);
- notify->post();
+ mCallback->onEos(ERROR_END_OF_STREAM);
ALOGV("Sent kWhatEOS.");
}
-void MediaFilter::sendFormatChange() {
- sp<AMessage> notify = mNotify->dup();
-
- notify->setInt32("what", kWhatOutputFormatChanged);
-
- AString mime;
- CHECK(mOutputFormat->findString("mime", &mime));
- notify->setString("mime", mime.c_str());
-
- notify->setInt32("stride", mStride);
- notify->setInt32("slice-height", mSliceHeight);
- notify->setInt32("color-format", mColorFormatOut);
- notify->setRect("crop", 0, 0, mStride - 1, mSliceHeight - 1);
- notify->setInt32("width", mWidth);
- notify->setInt32("height", mHeight);
-
- notify->post();
-}
-
void MediaFilter::requestFillEmptyInput() {
if (mPortEOS[kPortIndexInput]) {
return;
@@ -459,11 +400,8 @@
return;
}
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatComponentAllocated);
// HACK - need "OMX.google" to use MediaCodec's software renderer
- notify->setString("componentName", "OMX.google.MediaFilter");
- notify->post();
+ mCallback->onComponentAllocated("OMX.google.MediaFilter");
mState = INITIALIZED;
ALOGV("Handled kWhatAllocateComponent.");
}
@@ -540,16 +478,9 @@
mOutputFormat->setInt32("width", mWidth);
mOutputFormat->setInt32("height", mHeight);
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatComponentConfigured);
- notify->setString("componentName", "MediaFilter");
- notify->setMessage("input-format", mInputFormat);
- notify->setMessage("output-format", mOutputFormat);
- notify->post();
+ mCallback->onComponentConfigured(mInputFormat, mOutputFormat);
mState = CONFIGURED;
ALOGV("Handled kWhatConfigureComponent.");
-
- sendFormatChange();
}
void MediaFilter::onStart() {
@@ -559,6 +490,8 @@
allocateBuffersOnPort(kPortIndexOutput);
+ mCallback->onStartCompleted();
+
status_t err = mFilter->start();
if (err != (status_t)OK) {
ALOGE("Failed to start filter component, err %d", err);
@@ -597,11 +530,12 @@
CHECK_EQ(info->mStatus, BufferInfo::OWNED_BY_UPSTREAM);
info->mStatus = BufferInfo::OWNED_BY_US;
- sp<ABuffer> buffer;
+ sp<MediaCodecBuffer> buffer;
int32_t err = OK;
bool eos = false;
- if (!msg->findBuffer("buffer", &buffer)) {
+ sp<RefBase> obj;
+ if (!msg->findObject("buffer", &obj)) {
// these are unfilled buffers returned by client
CHECK(msg->findInt32("err", &err));
@@ -616,6 +550,8 @@
}
buffer.clear();
+ } else {
+ buffer = static_cast<MediaCodecBuffer *>(obj.get());
}
int32_t isCSD;
@@ -688,9 +624,11 @@
mState = INITIALIZED;
}
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatShutdownCompleted);
- notify->post();
+ if (keepComponentAllocated) {
+ mCallback->onStopCompleted();
+ } else {
+ mCallback->onReleaseCompleted();
+ }
}
void MediaFilter::onFlush() {
@@ -712,9 +650,7 @@
mPortEOS[kPortIndexOutput] = false;
mInputEOSResult = OK;
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatFlushCompleted);
- notify->post();
+ mCallback->onFlushCompleted();
ALOGV("Posted kWhatFlushCompleted");
// MediaCodec returns all input buffers after flush, so in
@@ -746,13 +682,10 @@
return;
}
- sp<AMessage> reply = mNotify->dup();
- reply->setInt32("what", CodecBase::kWhatInputSurfaceCreated);
- reply->setObject(
- "input-surface",
+ mCallback->onInputSurfaceCreated(
+ nullptr, nullptr,
new BufferProducerWrapper(
mGraphicBufferListener->getIGraphicBufferProducer()));
- reply->post();
}
void MediaFilter::onInputFrameAvailable() {
@@ -768,7 +701,8 @@
// TODO: check input format and convert only if necessary
// copy RGBA graphic buffer into temporary ARGB input buffer
BufferInfo *inputInfo = new BufferInfo;
- inputInfo->mData = new ABuffer(buf->getWidth() * buf->getHeight() * 4);
+ inputInfo->mData = new MediaCodecBuffer(
+ mInputFormat, new ABuffer(buf->getWidth() * buf->getHeight() * 4));
ALOGV("Copying surface data into temp buffer.");
convertRGBAToARGB(
(uint8_t*)bufPtr, buf->getWidth(), buf->getHeight(),
@@ -813,9 +747,7 @@
}
mPortEOS[kPortIndexOutput] = true;
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", CodecBase::kWhatSignaledInputEOS);
- notify->post();
+ mCallback->onSignaledInputEOS(OK);
ALOGV("Output stream saw EOS.");
}
diff --git a/media/libstagefright/filters/RSFilter.cpp b/media/libstagefright/filters/RSFilter.cpp
index b569945..225a375 100644
--- a/media/libstagefright/filters/RSFilter.cpp
+++ b/media/libstagefright/filters/RSFilter.cpp
@@ -19,7 +19,7 @@
#include <utils/Log.h>
-#include <media/stagefright/foundation/ABuffer.h>
+#include <media/MediaCodecBuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -85,7 +85,7 @@
}
status_t RSFilter::processBuffers(
- const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer) {
+ const sp<MediaCodecBuffer> &srcBuffer, const sp<MediaCodecBuffer> &outBuffer) {
mAllocIn->copy1DRangeFrom(0, mWidth * mHeight, srcBuffer->data());
mCallback->processBuffers(mAllocIn.get(), mAllocOut.get());
mAllocOut->copy1DRangeTo(0, mWidth * mHeight, outBuffer->data());
diff --git a/media/libstagefright/filters/RSFilter.h b/media/libstagefright/filters/RSFilter.h
index c5b5074..3326284 100644
--- a/media/libstagefright/filters/RSFilter.h
+++ b/media/libstagefright/filters/RSFilter.h
@@ -35,7 +35,7 @@
virtual void reset();
virtual status_t setParameters(const sp<AMessage> &msg);
virtual status_t processBuffers(
- const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer);
+ const sp<MediaCodecBuffer> &srcBuffer, const sp<MediaCodecBuffer> &outBuffer);
protected:
virtual ~RSFilter();
diff --git a/media/libstagefright/filters/SaturationFilter.cpp b/media/libstagefright/filters/SaturationFilter.cpp
index ba5f75a..0a1df05 100644
--- a/media/libstagefright/filters/SaturationFilter.cpp
+++ b/media/libstagefright/filters/SaturationFilter.cpp
@@ -19,7 +19,7 @@
#include <utils/Log.h>
-#include <media/stagefright/foundation/ABuffer.h>
+#include <media/MediaCodecBuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -88,7 +88,7 @@
}
status_t SaturationFilter::processBuffers(
- const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer) {
+ const sp<MediaCodecBuffer> &srcBuffer, const sp<MediaCodecBuffer> &outBuffer) {
mAllocIn->copy1DRangeFrom(0, mWidth * mHeight, srcBuffer->data());
mScript->forEach_root(mAllocIn, mAllocOut);
mAllocOut->copy1DRangeTo(0, mWidth * mHeight, outBuffer->data());
diff --git a/media/libstagefright/filters/SaturationFilter.h b/media/libstagefright/filters/SaturationFilter.h
index 0545021..317e469 100644
--- a/media/libstagefright/filters/SaturationFilter.h
+++ b/media/libstagefright/filters/SaturationFilter.h
@@ -33,7 +33,7 @@
virtual void reset();
virtual status_t setParameters(const sp<AMessage> &msg);
virtual status_t processBuffers(
- const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer);
+ const sp<MediaCodecBuffer> &srcBuffer, const sp<MediaCodecBuffer> &outBuffer);
protected:
virtual ~SaturationFilter() {};
diff --git a/media/libstagefright/filters/SimpleFilter.h b/media/libstagefright/filters/SimpleFilter.h
index 4cd37ef..a3c2d76 100644
--- a/media/libstagefright/filters/SimpleFilter.h
+++ b/media/libstagefright/filters/SimpleFilter.h
@@ -21,11 +21,11 @@
#include <utils/Errors.h>
#include <utils/RefBase.h>
-struct ABuffer;
-struct AMessage;
-
namespace android {
+struct AMessage;
+class MediaCodecBuffer;
+
struct SimpleFilter : public RefBase {
public:
SimpleFilter() : mWidth(0), mHeight(0), mStride(0), mSliceHeight(0),
@@ -37,7 +37,7 @@
virtual void reset() = 0;
virtual status_t setParameters(const sp<AMessage> &msg) = 0;
virtual status_t processBuffers(
- const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer) = 0;
+ const sp<MediaCodecBuffer> &srcBuffer, const sp<MediaCodecBuffer> &outBuffer) = 0;
protected:
int32_t mWidth, mHeight;
diff --git a/media/libstagefright/filters/ZeroFilter.cpp b/media/libstagefright/filters/ZeroFilter.cpp
index 3f1243c..74b94b7 100644
--- a/media/libstagefright/filters/ZeroFilter.cpp
+++ b/media/libstagefright/filters/ZeroFilter.cpp
@@ -17,7 +17,7 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "ZeroFilter"
-#include <media/stagefright/foundation/ABuffer.h>
+#include <media/MediaCodecBuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -38,7 +38,7 @@
}
status_t ZeroFilter::processBuffers(
- const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer) {
+ const sp<MediaCodecBuffer> &srcBuffer, const sp<MediaCodecBuffer> &outBuffer) {
// assuming identical input & output buffers, since we're a copy filter
if (mInvertData) {
uint32_t* src = (uint32_t*)srcBuffer->data();
diff --git a/media/libstagefright/filters/ZeroFilter.h b/media/libstagefright/filters/ZeroFilter.h
index bd34dfb..f941cc8 100644
--- a/media/libstagefright/filters/ZeroFilter.h
+++ b/media/libstagefright/filters/ZeroFilter.h
@@ -29,7 +29,7 @@
virtual void reset() {};
virtual status_t setParameters(const sp<AMessage> &msg);
virtual status_t processBuffers(
- const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer);
+ const sp<MediaCodecBuffer> &srcBuffer, const sp<MediaCodecBuffer> &outBuffer);
protected:
virtual ~ZeroFilter() {};
diff --git a/media/libstagefright/foundation/ALooperRoster.cpp b/media/libstagefright/foundation/ALooperRoster.cpp
index 5f11fb6..8a7c3eb 100644
--- a/media/libstagefright/foundation/ALooperRoster.cpp
+++ b/media/libstagefright/foundation/ALooperRoster.cpp
@@ -100,7 +100,7 @@
}
}
-static void makeFourCC(uint32_t fourcc, char *s) {
+static void makeFourCC(uint32_t fourcc, char *s, size_t bufsz) {
s[0] = (fourcc >> 24) & 0xff;
if (s[0]) {
s[1] = (fourcc >> 16) & 0xff;
@@ -108,7 +108,7 @@
s[3] = fourcc & 0xff;
s[4] = 0;
} else {
- sprintf(s, "%u", fourcc);
+ snprintf(s, bufsz, "%u", fourcc);
}
}
@@ -146,7 +146,7 @@
if (verboseStats) {
for (size_t j = 0; j < handler->mMessages.size(); j++) {
char fourcc[15];
- makeFourCC(handler->mMessages.keyAt(j), fourcc);
+ makeFourCC(handler->mMessages.keyAt(j), fourcc, sizeof(fourcc));
s.appendFormat("\n %s: %u",
fourcc,
handler->mMessages.valueAt(j));
diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp
index 1b0db33..f55de64 100644
--- a/media/libstagefright/foundation/AMessage.cpp
+++ b/media/libstagefright/foundation/AMessage.cpp
@@ -240,6 +240,24 @@
return false;
}
+bool AMessage::findAsInt64(const char *name, int64_t *value) const {
+ size_t i = findItemIndex(name, strlen(name));
+ if (i < mNumItems) {
+ const Item *item = &mItems[i];
+ switch (item->mType) {
+ case kTypeInt64:
+ *value = item->u.int64Value;
+ return true;
+ case kTypeInt32:
+ *value = item->u.int32Value;
+ return true;
+ default:
+ return false;
+ }
+ }
+ return false;
+}
+
bool AMessage::contains(const char *name) const {
size_t i = findItemIndex(name, strlen(name));
return i < mNumItems;
diff --git a/media/libstagefright/foundation/AString.cpp b/media/libstagefright/foundation/AString.cpp
index b167543..04fac19 100644
--- a/media/libstagefright/foundation/AString.cpp
+++ b/media/libstagefright/foundation/AString.cpp
@@ -14,6 +14,9 @@
* limitations under the License.
*/
+#define LOG_TAG "AString"
+#include <utils/Log.h>
+
#include <ctype.h>
#include <stdarg.h>
#include <stdio.h>
@@ -40,14 +43,24 @@
: mData(NULL),
mSize(0),
mAllocSize(1) {
- setTo(s);
+ if (!s) {
+ ALOGW("ctor got NULL, using empty string instead");
+ clear();
+ } else {
+ setTo(s);
+ }
}
AString::AString(const char *s, size_t size)
: mData(NULL),
mSize(0),
mAllocSize(1) {
- setTo(s, size);
+ if (!s) {
+ ALOGW("ctor got NULL, using empty string instead");
+ clear();
+ } else {
+ setTo(s, size);
+ }
}
AString::AString(const String8 &from)
diff --git a/media/libstagefright/foundation/Android.bp b/media/libstagefright/foundation/Android.bp
index 8387e1a..eeeb284 100644
--- a/media/libstagefright/foundation/Android.bp
+++ b/media/libstagefright/foundation/Android.bp
@@ -23,6 +23,7 @@
COMMON_LIBS = [
"libbinder",
"libutils",
+ "libui",
"libcutils",
"liblog",
]
@@ -47,7 +48,10 @@
"libhardware_headers",
],
- export_shared_lib_headers: ["libbinder"],
+ export_shared_lib_headers: [
+ "libbinder",
+ "libui",
+ ],
cflags: [
"-Wno-multichar",
@@ -62,6 +66,10 @@
"unsigned-integer-overflow",
"signed-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
}
diff --git a/media/libstagefright/foundation/ColorUtils.cpp b/media/libstagefright/foundation/ColorUtils.cpp
index d7439b2..88a8351 100644
--- a/media/libstagefright/foundation/ColorUtils.cpp
+++ b/media/libstagefright/foundation/ColorUtils.cpp
@@ -343,6 +343,23 @@
}
// static
+ColorAspects ColorUtils::unpackToColorAspects(uint32_t packed) {
+ ColorAspects aspects;
+ aspects.mRange = (ColorAspects::Range)((packed >> 24) & 0xFF);
+ aspects.mPrimaries = (ColorAspects::Primaries)((packed >> 16) & 0xFF);
+ aspects.mMatrixCoeffs = (ColorAspects::MatrixCoeffs)((packed >> 8) & 0xFF);
+ aspects.mTransfer = (ColorAspects::Transfer)(packed & 0xFF);
+
+ return aspects;
+}
+
+// static
+uint32_t ColorUtils::packToU32(const ColorAspects &aspects) {
+ return (aspects.mRange << 24) | (aspects.mPrimaries << 16)
+ | (aspects.mMatrixCoeffs << 8) | aspects.mTransfer;
+}
+
+// static
void ColorUtils::setDefaultCodecColorAspectsIfNeeded(
ColorAspects &aspects, int32_t width, int32_t height) {
ColorAspects::MatrixCoeffs coeffs;
diff --git a/media/libstagefright/foundation/MediaBufferGroup.cpp b/media/libstagefright/foundation/MediaBufferGroup.cpp
index 8e4d064..cb62d92 100644
--- a/media/libstagefright/foundation/MediaBufferGroup.cpp
+++ b/media/libstagefright/foundation/MediaBufferGroup.cpp
@@ -199,6 +199,7 @@
}
void MediaBufferGroup::signalBufferReturned(MediaBuffer *) {
+ Mutex::Autolock autoLock(mLock);
mCondition.signal();
}
diff --git a/media/libstagefright/foundation/hexdump.cpp b/media/libstagefright/foundation/hexdump.cpp
index a44d832..872c5f3 100644
--- a/media/libstagefright/foundation/hexdump.cpp
+++ b/media/libstagefright/foundation/hexdump.cpp
@@ -49,7 +49,7 @@
appendIndent(&line, indent);
char tmp[32];
- sprintf(tmp, "%08lx: ", (unsigned long)offset);
+ snprintf(tmp, sizeof(tmp), "%08lx: ", (unsigned long)offset);
line.append(tmp);
@@ -60,7 +60,7 @@
if (offset + i >= size) {
line.append(" ");
} else {
- sprintf(tmp, "%02x ", data[offset + i]);
+ snprintf(tmp, sizeof(tmp), "%02x ", data[offset + i]);
line.append(tmp);
}
}
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/AData.h b/media/libstagefright/foundation/include/media/stagefright/foundation/AData.h
new file mode 100644
index 0000000..49aa0dc
--- /dev/null
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/AData.h
@@ -0,0 +1,843 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef STAGEFRIGHT_FOUNDATION_A_DATA_H_
+#define STAGEFRIGHT_FOUNDATION_A_DATA_H_
+
+#include <memory> // for std::shared_ptr, weak_ptr and unique_ptr
+#include <type_traits> // for std::aligned_union
+
+#include <utils/StrongPointer.h> // for android::sp and wp
+
+#include <media/stagefright/foundation/TypeTraits.h>
+#include <media/stagefright/foundation/Flagged.h>
+
+namespace android {
+
+/**
+ * AData is a flexible union type that supports non-POD members. It supports arbitrary types as long
+ * as they are either moveable or copyable.
+ *
+ * Internally, AData is using AUnion - a structure providing the union support. AUnion should not
+ * be used by generic code as it is very unsafe - it opens type aliasing errors where an object of
+ * one type can be easily accessed as an object of another type. AData prevents this.
+ *
+ * AData allows a custom type flagger to be used for future extensions (e.g. allowing automatic
+ * type conversion). A strict and a relaxed flagger are provided as internal types.
+ *
+ * Use as follows:
+ *
+ * AData<int, float>::Basic data; // strict type support
+ * int i = 1;
+ * float f = 7.0f;
+ *
+ * data.set(5);
+ * EXPECT_TRUE(data.find(&i));
+ * EXPECT_FALSE(data.find(&f));
+ * EXPECT_EQ(i, 5);
+ *
+ * data.set(6.0f);
+ * EXPECT_FALSE(data.find(&i));
+ * EXPECT_TRUE(data.find(&f));
+ * EXPECT_EQ(f, 6.0f);
+ *
+ * AData<int, sp<RefBase>>::RelaxedBasic objdata; // relaxed type support
+ * sp<ABuffer> buf = new ABuffer(16), buf2;
+ * sp<RefBase> obj;
+ *
+ * objdata.set(buf);
+ * EXPECT_TRUE(objdata.find(&buf2));
+ * EXPECT_EQ(buf, buf2);
+ * EXPECT_FALSE(objdata.find(&i));
+ * EXPECT_TRUE(objdata.find(&obj));
+ * EXPECT_TRUE(obj == buf);
+ *
+ * obj = buf;
+ * objdata.set(obj); // storing as sp<RefBase>
+ * EXPECT_FALSE(objdata.find(&buf2)); // not stored as ABuffer(!)
+ * EXPECT_TRUE(objdata.find(&obj));
+ */
+
+/// \cond Internal
+
+/**
+ * Helper class to call constructor and destructor for a specific type in AUnion.
+ * This class is needed as member function specialization is not allowed for a
+ * templated class.
+ */
+struct _AUnion_impl {
+ /**
+     * Calls placement constructor for type T with arbitrary arguments for a storage at an address.
+ * Storage MUST be large enough to contain T.
+ * Also clears the slack space after type T. \todo This is not technically needed, so we may
+ * choose to do this just for debugging.
+ *
+ * \param totalSize size of the storage
+ * \param addr pointer to where object T should be constructed
+ * \param args arbitrary arguments for constructor
+ */
+ template<typename T, typename ...Args>
+ inline static void emplace(size_t totalSize, T *addr, Args&&... args) {
+ new(addr)T(std::forward<Args>(args)...);
+ // clear slack space - this is not technically required
+ constexpr size_t size = sizeof(T);
+ memset(reinterpret_cast<uint8_t*>(addr) + size, 0, totalSize - size);
+ }
+
+ /**
+     * Calls destructor for an object of type T located at a specific address.
+ *
+ * \note we do not clear the storage in this case as the storage should not be used
+ * until another object is placed there, at which case the storage will be cleared.
+ *
+ * \param addr pointer to where object T is stored
+ */
+ template<typename T>
+ inline static void del(T *addr) {
+ addr->~T();
+ }
+};
+
+/** Constructor specialization for void type */
+template<>
+inline void _AUnion_impl::emplace<void>(size_t totalSize, void *addr) {
+ memset(addr, 0, totalSize);
+}
+
+/** Destructor specialization for void type */
+template<>
+inline void _AUnion_impl::del<void>(void *) {
+}
+
+/// \endcond
+
+/**
+ * A templated union class that can contain specific types of data, and provides
+ * constructors, destructor and access methods strictly for those types.
+ *
+ * \note This class is VERY UNSAFE compared to a union, but it allows non-POD unions.
+ * In particular care must be taken that methods are called in a careful order to
+ * prevent accessing objects of one type as another type. This class provides no
+ * facilities to help with this ordering. This is meant to be wrapped by safer
+ * utility classes that do that.
+ *
+ * \param Ts types stored in this union.
+ */
+template<typename ...Ts>
+struct AUnion {
+private:
+ using _type = typename std::aligned_union<0, Ts...>::type; ///< storage type
+ _type mValue; ///< storage
+
+public:
+ /**
+ * Constructs an object of type T with arbitrary arguments in this union. After this call,
+ * this union will contain this object.
+ *
+ * This method MUST be called only when either 1) no object or 2) a void object (equivalent to
+ * no object) is contained in this union.
+ *
+ * \param T type of object to be constructed. This must be one of the template parameters of
+ * the union class with the same cv-qualification, or void.
+ * \param args arbitrary arguments for the constructor
+ */
+ template<
+ typename T, typename ...Args,
+ typename=typename std::enable_if<is_one_of<T, void, Ts...>::value>::type>
+ inline void emplace(Args&&... args) {
+ _AUnion_impl::emplace(
+ sizeof(_type), reinterpret_cast<T*>(&mValue), std::forward<Args>(args)...);
+ }
+
+ /**
+ * Destructs an object of type T in this union. After this call, this union will contain no
+ * object.
+ *
+ * This method MUST be called only when this union contains an object of type T.
+ *
+ * \param T type of object to be destructed. This must be one of the template parameters of
+ * the union class with the same cv-qualification, or void.
+ */
+ template<
+ typename T,
+ typename=typename std::enable_if<is_one_of<T, void, Ts...>::value>::type>
+ inline void del() {
+ _AUnion_impl::del(reinterpret_cast<T*>(&mValue));
+ }
+
+ /**
+ * Returns a const reference to the object of type T in this union.
+ *
+ * This method MUST be called only when this union contains an object of type T.
+ *
+ * \param T type of object to be returned. This must be one of the template parameters of
+ * the union class with the same cv-qualification.
+ */
+ template<
+ typename T,
+ typename=typename std::enable_if<is_one_of<T, Ts...>::value>::type>
+ inline const T &get() const {
+ return *reinterpret_cast<const T*>(&mValue);
+ }
+
+ /**
+ * Returns a reference to the object of type T in this union.
+ *
+ * This method MUST be called only when this union contains an object of type T.
+ *
+ * \param T type of object to be returned. This must be one of the template parameters of
+ * the union class with the same cv-qualification.
+ */
+ template<typename T>
+ inline T &get() {
+ return *reinterpret_cast<T*>(&mValue);
+ }
+};
+
+/**
+ * Helper utility class that copies an object of type T to a destination.
+ *
+ * T must be copy assignable or copy constructible.
+ *
+ * It provides:
+ *
+ * void assign(T*, const U&) // for copiable types - this leaves the source unchanged, hence const.
+ *
+ * \param T type of object to assign to
+ */
+template<
+ typename T,
+ bool=std::is_copy_assignable<T>::value>
+struct _AData_copier {
+ static_assert(std::is_copy_assignable<T>::value, "T must be copy assignable here");
+
+ /**
+ * Copies src to data without modifying data.
+ *
+ * \param data pointer to destination
+ * \param src source object
+ */
+ inline static void assign(T *data, const T &src) {
+ *data = src;
+ }
+
+ template<typename U>
+ using enable_if_T_is_same_as = typename std::enable_if<std::is_same<U, T>::value>::type;
+
+ /**
+ * Downcast specializations for sp<>, shared_ptr<> and weak_ptr<>
+ */
+ template<typename Tp, typename U, typename=enable_if_T_is_same_as<sp<Tp>>>
+ inline static void assign(sp<Tp> *data, const sp<U> &src) {
+ *data = static_cast<Tp*>(src.get());
+ }
+
+ template<typename Tp, typename U, typename=enable_if_T_is_same_as<wp<Tp>>>
+ inline static void assign(wp<Tp> *data, const wp<U> &src) {
+ sp<U> __tmp = src.promote();
+ *data = static_cast<Tp*>(__tmp.get());
+ }
+
+ template<typename Tp, typename U, typename=enable_if_T_is_same_as<sp<Tp>>>
+ inline static void assign(sp<Tp> *data, sp<U> &&src) {
+ sp<U> __tmp = std::move(src); // move src out as get cannot
+ *data = static_cast<Tp*>(__tmp.get());
+ }
+
+ template<typename Tp, typename U, typename=enable_if_T_is_same_as<std::shared_ptr<Tp>>>
+ inline static void assign(std::shared_ptr<Tp> *data, const std::shared_ptr<U> &src) {
+ *data = std::static_pointer_cast<Tp>(src);
+ }
+
+ template<typename Tp, typename U, typename=enable_if_T_is_same_as<std::shared_ptr<Tp>>>
+ inline static void assign(std::shared_ptr<Tp> *data, std::shared_ptr<U> &&src) {
+ std::shared_ptr<U> __tmp = std::move(src); // move src out as static_pointer_cast cannot
+ *data = std::static_pointer_cast<Tp>(__tmp);
+ }
+
+ template<typename Tp, typename U, typename=enable_if_T_is_same_as<std::weak_ptr<Tp>>>
+ inline static void assign(std::weak_ptr<Tp> *data, const std::weak_ptr<U> &src) {
+ *data = std::static_pointer_cast<Tp>(src.lock());
+ }
+
+ // shared_ptrs are implicitly convertible to weak_ptrs but not vice versa, but picking the
+ // first compatible type in Ts requires having shared_ptr types before weak_ptr types, so that
+ // they are stored as shared_ptrs.
+ /**
+ * Provide sensible error message if encountering shared_ptr/weak_ptr ambiguity. This method
+ * is not enough to detect this, only if someone is trying to find the shared_ptr.
+ */
+ template<typename Tp, typename U>
+ inline static void assign(std::shared_ptr<Tp> *, const std::weak_ptr<U> &) {
+ static_assert(std::is_same<Tp, void>::value,
+ "shared/weak pointer ambiguity. move shared ptr types before weak_ptrs");
+ }
+};
+
+/**
+ * Template specialization for non copy assignable, but copy constructible types.
+ *
+ * \todo Test this. No basic classes are copy constructible but not assignable.
+ *
+ */
+template<typename T>
+struct _AData_copier<T, false> {
+ static_assert(!std::is_copy_assignable<T>::value, "T must not be copy assignable here");
+ static_assert(std::is_copy_constructible<T>::value, "T must be copy constructible here");
+
+ inline static void copy(T *data, const T &src) {
+ data->~T();
+ new(data)T(src);
+ }
+};
+
+/**
+ * Helper utility class that moves an object of type T to a destination.
+ *
+ * T must be move assignable or move constructible.
+ *
+ * It provides multiple methods:
+ *
+ * void assign(T*, T&&)
+ *
+ * \param T type of object to assign
+ */
+template<
+ typename T,
+ bool=std::is_move_assignable<T>::value>
+struct _AData_mover {
+ static_assert(std::is_move_assignable<T>::value, "T must be move assignable here");
+
+ /**
+ * Moves src to data while likely modifying it.
+ *
+ * \param data pointer to destination
+ * \param src source object
+ */
+ inline static void assign(T *data, T &&src) {
+ *data = std::move(src);
+ }
+
+ template<typename U>
+ using enable_if_T_is_same_as = typename std::enable_if<std::is_same<U, T>::value>::type;
+
+ /**
+ * Downcast specializations for sp<>, shared_ptr<> and weak_ptr<>
+ */
+ template<typename Tp, typename U, typename=enable_if_T_is_same_as<sp<Tp>>>
+ inline static void assign(sp<Tp> *data, sp<U> &&src) {
+ sp<U> __tmp = std::move(src); // move src out as get cannot
+ *data = static_cast<Tp*>(__tmp.get());
+ }
+
+ template<typename Tp, typename U, typename=enable_if_T_is_same_as<std::shared_ptr<Tp>>>
+ inline static void assign(std::shared_ptr<Tp> *data, std::shared_ptr<U> &&src) {
+ std::shared_ptr<U> __tmp = std::move(src); // move src out as static_pointer_cast cannot
+ *data = std::static_pointer_cast<Tp>(__tmp);
+ }
+
+ template<typename Tp, typename Td, typename U, typename Ud,
+ typename=enable_if_T_is_same_as<std::unique_ptr<Tp, Td>>>
+ inline static void assign(std::unique_ptr<Tp, Td> *data, std::unique_ptr<U, Ud> &&src) {
+ *data = std::unique_ptr<Tp, Td>(static_cast<Tp*>(src.release()));
+ }
+
+ // shared_ptrs are implicitly convertible to weak_ptrs but not vice versa, but picking the
+ // first compatible type in Ts requires having shared_ptr types before weak_ptr types, so that
+ // they are stored as shared_ptrs.
+ /**
+ * Provide sensible error message if encountering shared_ptr/weak_ptr ambiguity. This method
+ * is not enough to detect this, only if someone is trying to remove the shared_ptr.
+ */
+ template<typename Tp, typename U>
+ inline static void assign(std::shared_ptr<Tp> *, std::weak_ptr<U> &&) {
+ static_assert(std::is_same<Tp, void>::value,
+ "shared/weak pointer ambiguity. move shared ptr types before weak_ptrs");
+ }
+
+ // unique_ptrs are implicitly convertible to shared_ptrs but not vice versa, but picking the
+ // first compatible type in Ts requires having unique_ptrs types before shared_ptrs types, so
+ // that they are stored as unique_ptrs.
+ /**
+ * Provide sensible error message if encountering shared_ptr/unique_ptr ambiguity. This method
+ * is not enough to detect this, only if someone is trying to remove the unique_ptr.
+ */
+ template<typename Tp, typename U>
+ inline static void assign(std::unique_ptr<Tp> *, std::shared_ptr<U> &&) {
+ static_assert(std::is_same<Tp, void>::value,
+ "unique/shared pointer ambiguity. move unique ptr types before shared_ptrs");
+ }
+};
+
+/**
+ * Template specialization for non move assignable, but move constructible types.
+ *
+ * \todo Test this. No basic classes are move constructible but not assignable.
+ *
+ */
+template<typename T>
+struct _AData_mover<T, false> {
+ static_assert(!std::is_move_assignable<T>::value, "T must not be move assignable here");
+ static_assert(std::is_move_constructible<T>::value, "T must be move constructible here");
+
+ inline static void assign(T *data, T &&src) {
+ data->~T();
+ new(data)T(std::move(src));
+ }
+};
+
+/**
+ * Helper template that deletes an object of a specific type (member) in an AUnion.
+ *
+ * \param Flagger type flagger class (see AData)
+ * \param U AUnion object in which the member should be deleted
+ * \param Ts types to consider for the member
+ */
+template<typename Flagger, typename U, typename ...Ts>
+struct _AData_deleter;
+
+/**
+ * Template specialization when there are still types to consider (T and rest)
+ */
+template<typename Flagger, typename U, typename T, typename ...Ts>
+struct _AData_deleter<Flagger, U, T, Ts...> {
+ static bool del(typename Flagger::type flags, U &data) {
+ if (Flagger::canDeleteAs(flags, Flagger::flagFor((T*)0))) {
+ data.template del<T>();
+ return true;
+ }
+ return _AData_deleter<Flagger, U, Ts...>::del(flags, data);
+ }
+};
+
+/**
+ * Template specialization when there are no more types to consider.
+ */
+template<typename Flagger, typename U>
+struct _AData_deleter<Flagger, U> {
+ inline static bool del(typename Flagger::type, U &) {
+ return false;
+ }
+};
+
+/**
+ * Container that can store an arbitrary object of a set of specified types.
+ *
+ * This struct is an outer class that contains various inner classes based on desired type
+ * strictness. The following inner classes are supported:
+ *
+ * AData<types...>::Basic - strict type support using uint32_t flag.
+ *
+ * AData<types...>::Strict<Flag> - strict type support using custom flag.
+ * AData<types...>::Relaxed<Flag, MaxSize, Align>
+ * - relaxed type support with compatible (usually derived) class support
+ * for pointer types with added size checking for minimal additional
+ * safety.
+ *
+ * AData<types...>::RelaxedBasic - relaxed type support using uint32_t flag.
+ *
+ * AData<types...>::Custom<flagger> - custom type support (flaggers determine the supported types
+ * and the base type to use for each)
+ *
+ */
+template<typename ...Ts>
+struct AData {
+private:
+ static_assert(are_unique<Ts...>::value, "types must be unique");
+
+ static constexpr size_t num_types = sizeof...(Ts); ///< number of types to support
+
+public:
+ /**
+ * Default (strict) type flagger provided.
+ *
+ * The default flagger simply returns the index of the type within Ts, or 0 for void.
+ *
+ * Type flaggers return a flag for a supported type.
+ *
+ * They must provide:
+ *
+ * - a flagFor(T*) method for supported types _and_ for T=void. T=void is used to mark that no
+ * object is stored in the container. For this, an arbitrary unique value may be returned.
+ * - a mask field that contains the flag mask.
+ * - a canDeleteAs(Flag, Flag) flag comparison method that checks if a type of a flag can be
+ * deleted as another type.
+ *
+ * \param Flag the underlying unsigned integral to use for the flags.
+ */
+ template<typename Flag>
+ struct flagger {
+ private:
+ static_assert(std::is_unsigned<Flag>::value, "Flag must be unsigned");
+ static_assert(std::is_integral<Flag>::value, "Flag must be an integral type");
+
+ static constexpr Flag count = num_types + 1;
+
+ public:
+ typedef Flag type; ///< flag type
+
+ static constexpr Flag mask = _Flagged_helper::minMask<Flag>(count); ///< flag mask
+
+ /**
+ * Return the stored type for T. This is itself.
+ */
+ template<typename T>
+ struct store {
+ typedef T as_type; ///< the base type that T is stored as
+ };
+
+ /**
+ * Constexpr method that returns if two flags are compatible for deletion.
+ *
+ * \param objectFlag flag for object to be deleted
+ * \param deleteFlag flag for type that object is to be deleted as
+ */
+ static constexpr bool canDeleteAs(Flag objectFlag, Flag deleteFlag) {
+ // default flagger requires strict type equality
+ return objectFlag == deleteFlag;
+ }
+
+ /**
+ * Constexpr method that returns the flag to use for a given type.
+ *
+ * Function overload for void*.
+ */
+ static constexpr Flag flagFor(void*) {
+ return 0u;
+ }
+
+ /**
+ * Constexpr method that returns the flag to use for a given supported type (T).
+ */
+ template<typename T, typename=typename std::enable_if<is_one_of<T, Ts...>::value>::type>
+ static constexpr Flag flagFor(T*) {
+ return find_first<T, Ts...>::index;
+ }
+ };
+
+ /**
+ * Relaxed flagger returns the index of the type within Ts. However, for pointers T* it returns
+ * the first type in Ts that T* can be converted into (this is normally a base type, but also
+ * works for sp<>, shared_ptr<> or unique_ptr<>). For a bit more strictness, the flag also
+ * contains the size of the class to avoid finding objects that were stored as a different
+ * derived class of the same base class.
+ *
+ * Flag is basically the index of the (base) type in Ts multiplied by the max size stored plus
+ * the size of the type (divided by alignment) for derived pointer types.
+ *
+ * \param MaxSize max supported size for derived class pointers
+ * \param Align alignment to assume for derived class pointers
+ */
+ template<typename Flag, size_t MaxSize=1024, size_t Align=4>
+ struct relaxed_flagger {
+ private:
+ static_assert(std::is_unsigned<Flag>::value, "Flag must be unsigned");
+ static_assert(std::is_integral<Flag>::value, "Flag must be an integral type");
+
+ static constexpr Flag count = num_types + 1;
+ static_assert(std::numeric_limits<Flag>::max() / count > (MaxSize / Align),
+ "not enough bits to fit into flag");
+
+ static constexpr Flag max_size_stored = MaxSize / Align + 1;
+
+        // T can be converted if its size is <= MaxSize and it can be converted to one of the Ts
+ template<typename T, size_t size>
+ using enable_if_can_be_converted = typename std::enable_if<
+ (size / Align < max_size_stored
+ && find_first_convertible_to<T, Ts...>::index)>::type;
+
+
+ template<typename W, typename T, typename=enable_if_can_be_converted<W, sizeof(T)>>
+ static constexpr Flag relaxedFlagFor(W*, T*) {
+ return find_first_convertible_to<W, Ts...>::index * max_size_stored
+ + (is_one_of<W, Ts...>::value ? 0 : (sizeof(T) / Align));
+ }
+
+ public:
+ typedef Flag type; ///< flag type
+
+ static constexpr Flag mask =
+ _Flagged_helper::minMask<Flag>(count * max_size_stored); ///< flag mask
+
+ /**
+ * Constexpr method that returns if two flags are compatible for deletion.
+ *
+ * \param objectFlag flag for object to be deleted
+ * \param deleteFlag flag for type that object is to be deleted as
+ */
+ static constexpr bool canDeleteAs(Flag objectFlag, Flag deleteFlag) {
+ // can delete if objects have the same base type
+ return
+ objectFlag / max_size_stored == deleteFlag / max_size_stored &&
+ (deleteFlag % max_size_stored) == 0;
+ }
+
+ /**
+ * Constexpr method that returns the flag to use for a given type.
+ *
+ * Function overload for void*.
+ */
+ static constexpr Flag flagFor(void*) {
+ return 0u;
+ }
+
+ /**
+ * Constexpr method that returns the flag to use for a given supported type (T).
+ *
+ * This is a member method to enable both overloading as well as template specialization.
+ */
+ template<typename T, typename=typename std::enable_if<is_one_of<T, Ts...>::value>::type>
+ static constexpr Flag flagFor(T*) {
+ return find_first<T, Ts...>::index * max_size_stored;
+ }
+
+ /**
+         * As a precaution, we only consider converting pointers to their base classes.
+ */
+
+ /**
+ * Template specialization for derived class pointers and managed pointers.
+ */
+ template<typename T>
+ static constexpr Flag flagFor(T**p) { return relaxedFlagFor(p, (T*)0); }
+ template<typename T>
+ static constexpr Flag flagFor(std::shared_ptr<T>*p) { return relaxedFlagFor(p, (T*)0); }
+ template<typename T>
+ static constexpr Flag flagFor(std::unique_ptr<T>*p) { return relaxedFlagFor(p, (T*)0); }
+ template<typename T>
+ static constexpr Flag flagFor(std::weak_ptr<T>*p) { return relaxedFlagFor(p, (T*)0); }
+ template<typename T>
+ static constexpr Flag flagFor(sp<T>*p) { return relaxedFlagFor(p, (T*)0); }
+ template<typename T>
+ static constexpr Flag flagFor(wp<T>*p) { return relaxedFlagFor(p, (T*)0); }
+
+ /**
+         * Type support template that provides the stored type for T.
+ * This is itself if it is one of Ts, or the first type in Ts that T is convertible to.
+ *
+ * NOTE: This template may provide a base class for an unsupported type. Support is
+ * determined by flagFor().
+ */
+ template<typename T>
+ struct store {
+ typedef typename std::conditional<
+ is_one_of<T, Ts...>::value,
+ T,
+ typename find_first_convertible_to<T, Ts...>::type>::type as_type;
+ };
+ };
+
+ /**
+ * Implementation of AData.
+ */
+ template<typename Flagger>
+ struct Custom : protected Flagged<AUnion<Ts...>, typename Flagger::type, Flagger::mask> {
+ using data_t = AUnion<Ts...>;
+ using base_t = Flagged<AUnion<Ts...>, typename Flagger::type, Flagger::mask>;
+
+ /**
+ * Constructor. Initializes this to a container that does not contain any object.
+ */
+ Custom() : base_t(Flagger::flagFor((void*)0)) { }
+
+ /**
+ * Removes the contained object, if any.
+ */
+ ~Custom() {
+ if (!this->clear()) {
+ __builtin_trap();
+ // std::cerr << "could not delete data of type " << this->flags() << std::endl;
+ }
+ }
+
+ /**
+ * Returns whether there is any object contained.
+ */
+ inline bool used() const {
+ return this->flags() != Flagger::flagFor((void*)0);
+ }
+
+ /**
+ * Removes the contained object, if any. Returns true if there are no objects contained,
+ * or false on any error (this is highly unexpected).
+ */
+ bool clear() {
+ if (this->used()) {
+ if (_AData_deleter<Flagger, data_t, Ts...>::del(this->flags(), this->get())) {
+ this->setFlags(Flagger::flagFor((void*)0));
+ return true;
+ }
+ return false;
+ }
+ return true;
+ }
+
+ template<typename T>
+ using is_supported_by_flagger =
+ typename std::enable_if<Flagger::flagFor((T*)0) != Flagger::flagFor((void*)0)>::type;
+
+ /**
+ * Checks if there is a copiable object of type T in this container. If there is, it copies
+ * that object into the provided address and returns true. Otherwise, it does nothing and
+ * returns false.
+ *
+ * This method normally requires a flag equality between the stored and retrieved types.
+ * However, it also allows retrieving the stored object as the stored type
+ * (usually base type).
+ *
+         * \param T type of object to be sought
+ * \param data address at which the object should be retrieved
+ *
+ * \return true if the object was retrieved. false if it was not.
+ */
+ template<
+ typename T,
+ typename=is_supported_by_flagger<T>>
+ bool find(T *data) const {
+ using B = typename Flagger::template store<T>::as_type;
+ if (this->flags() == Flagger::flagFor((T*)0) ||
+ Flagger::canDeleteAs(this->flags(), Flagger::flagFor((T*)0))) {
+ _AData_copier<T>::assign(data, this->get().template get<B>());
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * Checks if there is an object of type T in this container. If there is, it moves that
+ * object into the provided address and returns true. Otherwise, it does nothing and returns
+ * false.
+ *
+ * This method normally requires a flag equality between the stored and retrieved types.
+ * However, it also allows retrieving the stored object as the stored type
+ * (usually base type).
+ *
+         * \param T type of object to be sought
+ * \param data address at which the object should be retrieved.
+ *
+ * \return true if the object was retrieved. false if it was not.
+ */
+ template<
+ typename T,
+ typename=is_supported_by_flagger<T>>
+ bool remove(T *data) {
+ using B = typename Flagger::template store<T>::as_type;
+ if (this->flags() == Flagger::flagFor((T*)0) ||
+ Flagger::canDeleteAs(this->flags(), Flagger::flagFor((T*)0))) {
+ _AData_mover<T>::assign(data, std::move(this->get().template get<B>()));
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * Stores an object into this container by copying. If it was successful, returns true.
+ * Otherwise, (e.g. it could not destroy the already stored object) it returns false. This
+ * latter would be highly unexpected.
+ *
+ * \param T type of object to store
+ * \param data object to store
+ *
+ * \return true if the object was stored. false if it was not.
+ */
+ template<
+ typename T,
+ typename=is_supported_by_flagger<T>,
+ typename=typename std::enable_if<
+ std::is_copy_constructible<T>::value ||
+ (std::is_default_constructible<T>::value &&
+ std::is_copy_assignable<T>::value)>::type>
+ bool set(const T &data) {
+ using B = typename Flagger::template store<T>::as_type;
+
+ // if already contains an object of this type, simply assign
+ if (this->flags() == Flagger::flagFor((T*)0) && std::is_same<T, B>::value) {
+ _AData_copier<B>::assign(&this->get().template get<B>(), data);
+ return true;
+ } else if (this->used()) {
+ // destroy previous object
+ if (!this->clear()) {
+ return false;
+ }
+ }
+ this->get().template emplace<B>(data);
+ this->setFlags(Flagger::flagFor((T *)0));
+ return true;
+ }
+
+ /**
+ * Moves an object into this container. If it was successful, returns true. Otherwise,
+ * (e.g. it could not destroy the already stored object) it returns false. This latter
+ * would be highly unexpected.
+ *
+ * \param T type of object to store
+ * \param data object to store
+ *
+ * \return true if the object was stored. false if it was not.
+ */
+ template<
+ typename T,
+ typename=is_supported_by_flagger<T>>
+ bool set(T &&data) {
+ using B = typename Flagger::template store<T>::as_type;
+
+ // if already contains an object of this type, simply assign
+ if (this->flags() == Flagger::flagFor((T*)0) && std::is_same<T, B>::value) {
+ _AData_mover<B>::assign(&this->get().template get<B>(), std::forward<T&&>(data));
+ return true;
+ } else if (this->used()) {
+ // destroy previous object
+ if (!this->clear()) {
+ return false;
+ }
+ }
+ this->get().template emplace<B>(std::forward<T&&>(data));
+ this->setFlags(Flagger::flagFor((T *)0));
+ return true;
+ }
+ };
+
+ /**
+ * Basic AData using the default type flagger and requested flag type.
+ *
+ * \param Flag desired flag type to use. Must be an unsigned and std::integral type.
+ */
+ template<typename Flag>
+ using Strict = Custom<flagger<Flag>>;
+
+ /**
+ * Basic AData using the default type flagger and uint32_t flag.
+ */
+ using Basic = Strict<uint32_t>;
+
+ /**
+ * AData using the relaxed type flagger for max size and requested flag type.
+ *
+ * \param Flag desired flag type to use. Must be an unsigned and std::integral type.
+ */
+ template<typename Flag, size_t MaxSize = 1024, size_t Align = 4>
+ using Relaxed = Custom<relaxed_flagger<Flag, MaxSize, Align>>;
+
+ /**
+ * Basic AData using the relaxed type flagger and uint32_t flag.
+ */
+ using RelaxedBasic = Relaxed<uint32_t>;
+};
+
+} // namespace android
+
+#endif // STAGEFRIGHT_FOUNDATION_A_DATA_H_
+
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/ADebug.h b/media/libstagefright/foundation/include/media/stagefright/foundation/ADebug.h
index bc1acdc..b498c91 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/ADebug.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/ADebug.h
@@ -111,10 +111,30 @@
#define CHECK_GE(x,y) CHECK_OP(x,y,GE,>=)
#define CHECK_GT(x,y) CHECK_OP(x,y,GT,>)
-#define TRESPASS() \
+#define TRESPASS(...) \
LOG_ALWAYS_FATAL( \
__FILE__ ":" LITERAL_TO_STRING(__LINE__) \
- " Should not be here.");
+ " Should not be here. " __VA_ARGS__);
+
+#ifdef NDEBUG
+#define CHECK_DBG CHECK
+#define CHECK_EQ_DBG CHECK_EQ
+#define CHECK_NE_DBG CHECK_NE
+#define CHECK_LE_DBG CHECK_LE
+#define CHECK_LT_DBG CHECK_LT
+#define CHECK_GE_DBG CHECK_GE
+#define CHECK_GT_DBG CHECK_GT
+#define TRESPASS_DBG TRESPASS
+#else
+#define CHECK_DBG(condition)
+#define CHECK_EQ_DBG(x,y)
+#define CHECK_NE_DBG(x,y)
+#define CHECK_LE_DBG(x,y)
+#define CHECK_LT_DBG(x,y)
+#define CHECK_GE_DBG(x,y)
+#define CHECK_GT_DBG(x,y)
+#define TRESPASS_DBG(...)
+#endif
struct ADebug {
enum Level {
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h b/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h
index 782f8e6..8580eb5 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h
@@ -123,6 +123,9 @@
bool findBuffer(const char *name, sp<ABuffer> *buffer) const;
bool findMessage(const char *name, sp<AMessage> *obj) const;
+ // finds signed integer types cast to int64_t
+ bool findAsInt64(const char *name, int64_t *value) const;
+
// finds any numeric type cast to a float
bool findAsFloat(const char *name, float *value) const;
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/ColorUtils.h b/media/libstagefright/foundation/include/media/stagefright/foundation/ColorUtils.h
index 2368b82..b889a02 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/ColorUtils.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/ColorUtils.h
@@ -138,6 +138,12 @@
int32_t primaries, int32_t transfer, int32_t coeffs, bool fullRange,
ColorAspects &aspects);
+ // unpack a uint32_t to a full ColorAspects struct
+ static ColorAspects unpackToColorAspects(uint32_t packed);
+
+ // pack a full ColorAspects struct into a uint32_t
+ static uint32_t packToU32(const ColorAspects &aspects);
+
// updates Unspecified color aspects to their defaults based on the video size
static void setDefaultCodecColorAspectsIfNeeded(
ColorAspects &aspects, int32_t width, int32_t height);
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/FileDescriptor.h b/media/libstagefright/foundation/include/media/stagefright/foundation/FileDescriptor.h
new file mode 100644
index 0000000..7acf4b8
--- /dev/null
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/FileDescriptor.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef STAGEFRIGHT_FOUNDATION_FILE_DESCRIPTOR_H_
+#define STAGEFRIGHT_FOUNDATION_FILE_DESCRIPTOR_H_
+
+#include <memory>
+
+namespace android {
+
+/**
+ * FileDescriptor is a utility class for managing file descriptors in a scoped way.
+ *
+ * usage:
+ *
+ * status_t function(int fd) {
+ * FileDescriptor::Autoclose managedFd(fd);
+ * if (error_condition)
+ * return ERROR;
+ * next_function(managedFd.release());
+ * }
+ */
+struct FileDescriptor {
+ // created this class with minimal methods. more methods can be added here to manage
+ // a shared file descriptor object.
+
+ /**
+ * A locally scoped managed file descriptor object. This object is not shareable/copiable and
+ * is not thread safe.
+ */
+ struct Autoclose {
+ // created this class with minimal methods
+ /**
+ * Creates a locally scoped file descriptor holder object taking ownership of the passed in
+ * file descriptor.
+ */
+ Autoclose(int fd)
+ : mFd(fd) {
+
+ }
+
+ ~Autoclose() {
+ if (isValid()) {
+ ::close(mFd);
+ mFd = kInvalidFileDescriptor;
+ }
+ }
+
+ /**
+ * Releases the managed file descriptor from the holder. This invalidates the (remaining)
+ * file descriptor in this object.
+ */
+ int release() {
+ int managedFd = mFd;
+ mFd = kInvalidFileDescriptor;
+ return managedFd;
+ }
+
+ /**
+ * Checks whether the managed file descriptor is valid
+ */
+ bool isValid() const {
+ return mFd >= 0;
+ }
+
+ private:
+ // not yet needed
+
+ /**
+ * Returns the managed file descriptor from this object without releasing the ownership.
+ * The returned file descriptor has the same lifecycle as the managed file descriptor
+ * in this object. Therefore, care must be taken that it is not closed, and that this
+ * object keeps managing the returned file descriptor for the duration of its use.
+ */
+ int get() const {
+ return mFd;
+ }
+
+ private:
+ int mFd;
+
+ enum {
+ kInvalidFileDescriptor = -1,
+ };
+
+ DISALLOW_EVIL_CONSTRUCTORS(Autoclose);
+ };
+
+private:
+ std::shared_ptr<Autoclose> mSharedFd;
+};
+
+} // namespace android
+
+#endif // STAGEFRIGHT_FOUNDATION_FILE_DESCRIPTOR_H_
+
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/Flagged.h b/media/libstagefright/foundation/include/media/stagefright/foundation/Flagged.h
new file mode 100644
index 0000000..bf0afbf
--- /dev/null
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/Flagged.h
@@ -0,0 +1,513 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef STAGEFRIGHT_FOUNDATION_FLAGGED_H_
+#define STAGEFRIGHT_FOUNDATION_FLAGGED_H_
+
+#include <media/stagefright/foundation/TypeTraits.h>
+
+namespace android {
+
+/**
+ * Flagged<T, Flag> is basically a specialized std::pair<Flag, T> that automatically optimizes out
+ * the flag if the wrapped type T is already flagged and we can combine the outer and inner flags.
+ *
+ * Flags can be queried/manipulated via flags() an setFlags(Flags). The wrapped value can be
+ * accessed via get(). This template is meant to be inherited by other utility/wrapper classes
+ * that need to store integral information along with the value.
+ *
+ * Users must specify the used bits (MASK) in the flags. Flag getters and setters will enforce this
+ * mask. _Flagged_helper::minMask<Flag> is provided to easily calculate a mask for a max value.
+ *
+ * E.g. adding a safe flag can be achieved like this:
+ *
+ *
+ * enum SafeFlags : uint32_t {
+ * kUnsafe,
+ * kSafe,
+ * kSafeMask = _Flagged_helper::minMask(kSafe),
+ * };
+ * typedef Flagged<int32_t, SafeFlags, kSafeMask> safeInt32;
+ *
+ * safeInt32 a;
+ * a.setFlags(kSafe);
+ * a.get() = 15;
+ * EXPECT_EQ(a.flags(), kSafe);
+ * EXPECT_EQ(a.get(), 15);
+ *
+ *
+ * Flagged also supports lazy or calculated wrapping of already flagged types. Lazy wrapping is
+ * provided automatically (flags are automatically shared if possible, e.g. mask is shifted
+ * automatically to not overlap with used bits of the wrapped type's flags, and fall back to
+ * unshared version of the template.):
+ *
+ * enum OriginFlags : uint32_t {
+ * kUnknown,
+ * kConst,
+ * kCalculated,
+ * kComponent,
+ * kApplication,
+ * kFile,
+ * kBinder,
+ * kOriginMask = _Flagged_helper::minMask(kBinder),
+ * };
+ * typedef Flagged<safeInt32, OriginFlags, kOriginMask>
+ * trackedSafeInt32;
+ *
+ * static_assert(sizeof(trackedSafeInt32) == sizeof(safeInt32), "");
+ *
+ * trackedSafeInt32 b(kConst, kSafe, 1);
+ * EXPECT_EQ(b.flags(), kConst);
+ * EXPECT_EQ(b.get().flags(), kSafe);
+ * EXPECT_EQ(b.get().get(), 1);
+ * b.setFlags(kCalculated);
+ * b.get().setFlags(overflow ? kUnsafe : kSafe);
+ *
+ * One can also choose to share some flag-bits with the wrapped class:
+ *
+ * enum ValidatedFlags : uint32_t {
+ * kUnsafeV = kUnsafe,
+ * kSafeV = kSafe,
+ * kValidated = kSafe | 2,
+ * kSharedMaskV = kSafeMask,
+ * kValidatedMask = _Flagged_helper::minMask(kValidated),
+ * };
+ * typedef Flagged<safeInt32, ValidatedFlags, kValidatedMask, kSharedMaskV> validatedInt32;
+ *
+ * validatedInt32 v(kUnsafeV, kSafe, 10);
+ * EXPECT_EQ(v.flags(), kUnsafeV);
+ * EXPECT_EQ(v.get().flags(), kUnsafe); // !kUnsafeV overrides kSafe
+ * EXPECT_EQ(v.get().get(), 10);
+ * v.setFlags(kValidated);
+ * EXPECT_EQ(v.flags(), kValidated);
+ * EXPECT_EQ(v.get().flags(), kSafe);
+ * v.get().setFlags(kUnsafe);
+ * EXPECT_EQ(v.flags(), 2); // NOTE: sharing masks with enums allows strange situations to occur
+ */
+
+/**
+ * Helper class for Flagged support. Encapsulates common utilities used by all
+ * templated classes.
+ */
+struct _Flagged_helper {
+ /**
+ * Calculates the value with a given number of top-most bits set.
+ *
+ * This method may be called with a signed flag.
+ *
+ * \param num number of bits to set. This must be between 0 and the number of bits in Flag.
+ *
+ * \return the value where only the given number of top-most bits are set.
+ */
+ template<typename Flag>
+ static constexpr Flag topBits(int num) {
+ return Flag(num > 0 ?
+ ~((Flag(1) << (sizeof(Flag) * 8 - is_signed_integral<Flag>::value - num)) - 1) :
+ 0);
+ }
+
+ /**
+ * Calculates the minimum mask required to cover a value. Used with the maximum enum value for
+ * an unsigned flag.
+ *
+ * \param maxValue maximum value to cover
+     * \param shift DO NOT USE. used internally
+ *
+ * \return mask that can be used that covers the maximum value.
+ */
+ template<typename Flag>
+ static constexpr Flag minMask(Flag maxValue, int shift=sizeof(Flag) * 4) {
+ static_assert(is_unsigned_integral<Flag>::value,
+ "this method only makes sense for unsigned flags");
+ return shift ? minMask<Flag>(Flag(maxValue | (maxValue >> shift)), shift >> 1) : maxValue;
+ }
+
+ /**
+ * Returns a value left-shifted by an argument as a potential constexpr.
+ *
+ * This method helps around the C-language limitation, when left-shift of a negative value with
+ * even 0 cannot be a constexpr.
+ *
+ * \param value value to shift
+ * \param shift amount of shift
+ * \returns the shifted value as an integral type
+ */
+ template<typename Flag, typename IntFlag = typename underlying_integral_type<Flag>::type>
+ static constexpr IntFlag lshift(Flag value, int shift) {
+ return shift ? value << shift : value;
+ }
+
+private:
+
+ /**
+ * Determines whether mask can be combined with base-mask for a given left shift.
+ *
+ * \param mask desired mask
+ * \param baseMask mask used by T or 0 if T is not flagged by Flag
+ * \param sharedMask desired shared mask (if this is non-0, this must be mask & baseMask)
+ * \param shift desired left shift to be used for mask
+ * \param baseShift left shift used by T or 0 if T is not flagged by Flag
+ * \param effectiveMask effective mask used by T or 0 if T is not flagged by Flag
+ *
+ * \return bool whether mask can be combined with baseMask using the desired values.
+ */
+ template<typename Flag, typename IntFlag=typename underlying_integral_type<Flag>::type>
+ static constexpr bool canCombine(
+ Flag mask, IntFlag baseMask, Flag sharedMask, int shift,
+ int baseShift, IntFlag effectiveMask) {
+ return
+ // verify that shift is valid and mask can be shifted
+ shift >= 0 && (mask & topBits<Flag>(shift)) == 0 &&
+
+ // verify that base mask is part of effective mask (sanity check on arguments)
+ (baseMask & ~(effectiveMask >> baseShift)) == 0 &&
+
+ // if sharing masks, shift must be the base's shift.
+ // verify that shared mask is the overlap of base mask and mask
+ (sharedMask ?
+ ((sharedMask ^ (baseMask & mask)) == 0 &&
+ shift == baseShift) :
+
+
+ // otherwise, verify that there is no overlap between mask and base's effective mask
+ (mask & (effectiveMask >> shift)) == 0);
+ }
+
+
+ /**
+ * Calculates the minimum (left) shift required to combine a mask with the mask of an
+ * underlying type (T, also flagged by Flag).
+ *
+ * \param mask desired mask
+ * \param baseMask mask used by T or 0 if T is not flagged by Flag
+ * \param sharedMask desired shared mask (if this is non-0, this must be mask & baseMask)
+ * \param baseShift left shift used by T
+ * \param effectiveMask effective mask used by T
+ *
+ * \return a non-negative minimum left shift value if mask can be combined with baseMask,
+ * or -1 if the masks cannot be combined. -2 if the input is invalid.
+ */
+ template<typename Flag,
+ typename IntFlag = typename underlying_integral_type<Flag>::type>
+ static constexpr int getShift(
+ Flag mask, IntFlag baseMask, Flag sharedMask, int baseShift, IntFlag effectiveMask) {
+ return
+ // baseMask must be part of the effective mask
+ (baseMask & ~(effectiveMask >> baseShift)) ? -2 :
+
+ // if sharing masks, shift must be base's shift. verify that shared mask is part of
+ // base mask and mask, and that desired mask still fits with base's shift value
+ sharedMask ?
+ (canCombine(mask, baseMask, sharedMask, baseShift /* shift */,
+ baseShift, effectiveMask) ? baseShift : -1) :
+
+ // otherwise, see if 0-shift works
+ ((mask & effectiveMask) == 0) ? 0 :
+
+ // otherwise, verify that mask can be shifted up
+ ((mask & topBits<Flag>(1)) || (mask < 0)) ? -1 :
+
+ incShift(getShift(Flag(mask << 1), baseMask /* unused */, sharedMask /* 0 */,
+ baseShift /* unused */, effectiveMask));
+ }
+
+ /**
+ * Helper method that increments a non-negative (shift) value.
+ *
+ * This method is used to make it easier to create a constexpr for getShift.
+ *
+ * \param shift (shift) value to increment
+ *
+ * \return original shift if it was negative; otherwise, the shift incremented by one.
+ */
+ static constexpr int incShift(int shift) {
+ return shift + (shift >= 0);
+ }
+
+#ifdef FRIEND_TEST
+ FRIEND_TEST(FlaggedTest, _Flagged_helper_Test);
+#endif
+
+public:
+ /**
+ * Base class for all Flagged<T, Flag> classes.
+ *
+ * \note flagged types do not have a member variable for the mask used by the type. As such,
+     * they should not be cast to this base class.
+ *
+ * \todo can we replace this base class check with a static member check to remove possibility
+ * of cast?
+ */
+ template<typename Flag>
+ struct base {};
+
+ /**
+ * Type support utility that retrieves the mask of a class (T) if it is a type flagged by
+ * Flag (e.g. Flagged<T, Flag>).
+ *
+ * \note This retrieves 0 if T is a flagged class, that is not flagged by Flag or an equivalent
+ * underlying type.
+ *
+ * Generic implementation for a non-flagged class.
+ */
+ template<
+ typename T, typename Flag,
+ bool=std::is_base_of<base<typename underlying_integral_type<Flag>::type>, T>::value>
+ struct mask_of {
+ using IntFlag = typename underlying_integral_type<Flag>::type;
+ static constexpr IntFlag value = Flag(0); ///< mask of a potentially flagged class
+ static constexpr int shift = 0; ///<left shift of flags in a potentially flagged class
+ static constexpr IntFlag effective_value = IntFlag(0); ///<effective mask of flagged class
+ };
+
+ /**
+ * Type support utility that calculates the minimum (left) shift required to combine a mask
+ * with the mask of an underlying type T also flagged by Flag.
+ *
+ * \note if T is not flagged, not flagged by Flag, or the masks cannot be combined due to
+ * incorrect sharing or the flags not having enough bits, the minimum is -1.
+ *
+ * \param MASK desired mask
+ * \param SHARED_MASK desired shared mask (if this is non-0, T must be an type flagged by
+ * Flag with a mask that has exactly these bits common with MASK)
+ */
+ template<typename T, typename Flag, Flag MASK, Flag SHARED_MASK>
+ struct min_shift {
+ /// minimum (left) shift required, or -1 if masks cannot be combined
+ static constexpr int value =
+ getShift(MASK, mask_of<T, Flag>::value, SHARED_MASK,
+ mask_of<T, Flag>::shift, mask_of<T, Flag>::effective_value);
+ };
+
+ /**
+ * Type support utility that calculates whether the flags of T can be combined with MASK.
+ *
+ * \param MASK desired mask
+ * \param SHARED_MASK desired shared mask (if this is non-0, T MUST be an type flagged by
+ * Flag with a mask that has exactly these bits common with MASK)
+ */
+ template<
+ typename T, typename Flag, Flag MASK,
+ Flag SHARED_MASK=Flag(0),
+ int SHIFT=min_shift<T, Flag, MASK, SHARED_MASK>::value>
+ struct can_combine {
+ using IntFlag = typename underlying_integral_type<Flag>::type;
+ /// true if this mask can be combined with T's existing flag. false otherwise.
+ static constexpr bool value =
+ std::is_base_of<base<IntFlag>, T>::value
+ && canCombine(MASK, mask_of<T, Flag>::value, SHARED_MASK, SHIFT,
+ mask_of<T, Flag>::shift, mask_of<T, Flag>::effective_value);
+ };
+};
+
+/**
+ * Template specialization for the case when T is flagged by Flag or a compatible type.
+ */
+template<typename T, typename Flag>
+struct _Flagged_helper::mask_of<T, Flag, true> {
+ using IntType = typename underlying_integral_type<Flag>::type;
+ static constexpr IntType value = T::sFlagMask;
+ static constexpr int shift = T::sFlagShift;
+ static constexpr IntType effective_value = T::sEffectiveMask;
+};
+
+/**
+ * Main Flagged template that adds flags to an object of another type (in essence, creates a pair)
+ *
+ * Flag must be an integral type (enums are allowed).
+ *
+ * \note We could make SHARED_MASK be a boolean as it must be either 0 or MASK & base's mask, but we
+ * want it to be spelled out for safety.
+ *
+ * \param T type of object wrapped
+ * \param Flag type of flag
+ * \param MASK mask for the bits used in flag (before any shift)
+ * \param SHARED_MASK optional mask to be shared with T (if this is not zero, SHIFT must be 0, and
+ * it must equal to MASK & T's mask)
+ * \param SHIFT optional left shift for MASK to combine with T's mask (or -1, if masks should not
+ * be combined.)
+ */
+template<
+ typename T, typename Flag, Flag MASK, Flag SHARED_MASK=(Flag)0,
+ int SHIFT=_Flagged_helper::min_shift<T, Flag, MASK, SHARED_MASK>::value,
+ typename IntFlag=typename underlying_integral_type<Flag>::type,
+ bool=_Flagged_helper::can_combine<T, IntFlag, MASK, SHARED_MASK, SHIFT>::value>
+class Flagged : public _Flagged_helper::base<IntFlag> {
+ static_assert(SHARED_MASK == 0,
+ "shared mask can only be used with common flag types "
+ "and must be part of mask and mask of base type");
+ static_assert((_Flagged_helper::topBits<Flag>(SHIFT) & MASK) == 0, "SHIFT overflows MASK");
+
+ static constexpr Flag sFlagMask = MASK; ///< the mask
+ static constexpr int sFlagShift = SHIFT > 0 ? SHIFT : 0; ///< the left shift applied to flags
+
+ friend struct _Flagged_helper;
+#ifdef FRIEND_TEST
+ static constexpr bool sFlagCombined = false;
+ FRIEND_TEST(FlaggedTest, _Flagged_helper_Test);
+#endif
+
+ T mValue; ///< wrapped value
+ IntFlag mFlags; ///< flags
+
+protected:
+ /// The effective combined mask used by this class and any wrapped classes if the flags are
+ /// combined.
+ static constexpr IntFlag sEffectiveMask = _Flagged_helper::lshift(MASK, sFlagShift);
+
+ /**
+ * Helper method used by subsequent flagged wrappers to query flags. Returns the
+ * flags for a particular mask and left shift.
+ *
+ * \param mask bitmask to use
+ * \param shift left shifts to use
+ *
+ * \return the requested flags
+ */
+ inline constexpr IntFlag getFlagsHelper(IntFlag mask, int shift) const {
+ return (mFlags >> shift) & mask;
+ }
+
+ /**
+ * Helper method used by subsequent flagged wrappers to apply combined flags. Sets the flags
+     * in the bitmask using a particular left shift.
+ *
+ * \param mask bitmask to use
+ * \param shift left shifts to use
+ * \param flags flags to update (any flags within the bitmask are updated to their value in this
+ * argument)
+ */
+ inline void setFlagsHelper(IntFlag mask, int shift, IntFlag flags) {
+ mFlags = Flag((mFlags & ~(mask << shift)) | ((flags & mask) << shift));
+ }
+
+public:
+ /**
+ * Wrapper around base class constructor. These take the flags as their first
+ * argument and pass the rest of the arguments to the base class constructor.
+ *
+ * \param flags initial flags
+ */
+ template<typename ...Args>
+ constexpr Flagged(Flag flags, Args... args)
+ : mValue(std::forward<Args>(args)...),
+ mFlags(Flag(_Flagged_helper::lshift(flags & sFlagMask, sFlagShift))) { }
+
+ /** Gets the wrapped value as const. */
+ inline constexpr const T &get() const { return mValue; }
+
+ /** Gets the wrapped value. */
+ inline T &get() { return mValue; }
+
+ /** Gets the flags. */
+ constexpr Flag flags() const {
+ return Flag(getFlagsHelper(sFlagMask, sFlagShift));
+ }
+
+ /** Sets the flags. */
+ void setFlags(Flag flags) {
+ setFlagsHelper(sFlagMask, sFlagShift, flags);
+ }
+};
+
+/*
+ * TRICKY: we cannot implement the specialization as:
+ *
+ * class Flagged : base<Flag> {
+ * T value;
+ * };
+ *
+ * Because T also inherits from base<Flag> and this runs into a compiler bug where
+ * sizeof(Flagged) > sizeof(T).
+ *
+ * Instead, we must inherit directly from the wrapped class
+ *
+ */
+#if 0
+template<
+ typename T, typename Flag, Flag MASK, Flag SHARED_MASK, int SHIFT>
+class Flagged<T, Flag, MASK, SHARED_MASK, SHIFT, true> : public _Flagged_helper::base<Flag> {
+private:
+ T mValue;
+};
+#else
+/**
+ * Specialization for the case when T is derived from Flagged<U, Flag> and flags can be combined.
+ */
+template<
+ typename T, typename Flag, Flag MASK, Flag SHARED_MASK, int SHIFT, typename IntFlag>
+class Flagged<T, Flag, MASK, SHARED_MASK, SHIFT, IntFlag, true> : private T {
+ static_assert(is_integral_or_enum<Flag>::value, "flag must be integer or enum");
+
+ static_assert(SHARED_MASK == 0 || SHIFT == 0, "cannot overlap masks when using SHIFT");
+ static_assert((SHARED_MASK & ~MASK) == 0, "shared mask must be part of the mask");
+ static_assert((SHARED_MASK & ~T::sEffectiveMask) == 0,
+ "shared mask must be part of the base mask");
+ static_assert(SHARED_MASK == 0 || (~SHARED_MASK & (MASK & T::sEffectiveMask)) == 0,
+ "mask and base mask can only overlap in shared mask");
+
+ static constexpr Flag sFlagMask = MASK; ///< the mask
+ static constexpr int sFlagShift = SHIFT; ///< the left shift applied to the flags
+
+#ifdef FRIEND_TEST
+ const static bool sFlagCombined = true;
+ FRIEND_TEST(FlaggedTest, _Flagged_helper_Test);
+#endif
+
+protected:
+ /// The effective combined mask used by this class and any wrapped classes if the flags are
+ /// combined.
+ static constexpr IntFlag sEffectiveMask = Flag((MASK << SHIFT) | T::sEffectiveMask);
+ friend struct _Flagged_helper;
+
+public:
+ /**
+ * Wrapper around base class constructor. These take the flags as their first
+ * argument and pass the rest of the arguments to the base class constructor.
+ *
+ * \param flags initial flags
+ */
+ template<typename ...Args>
+ constexpr Flagged(Flag flags, Args... args)
+ : T(std::forward<Args>(args)...) {
+ // we construct the base class first and apply the flags afterwards as
+ // base class may not have a constructor that takes flags even if it is derived from
+ // Flagged<U, Flag>
+ setFlags(flags);
+ }
+
+ /** Gets the wrapped value as const. */
+ inline constexpr T &get() const { return *this; }
+
+ /** Gets the wrapped value. */
+ inline T &get() { return *this; }
+
+ /** Gets the flags. */
+ Flag constexpr flags() const {
+ return Flag(this->getFlagsHelper(sFlagMask, sFlagShift));
+ }
+
+ /** Sets the flags. */
+ void setFlags(Flag flags) {
+ this->setFlagsHelper(sFlagMask, sFlagShift, flags);
+ }
+};
+#endif
+
+} // namespace android
+
+#endif // STAGEFRIGHT_FOUNDATION_FLAGGED_H_
+
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/TypeTraits.h b/media/libstagefright/foundation/include/media/stagefright/foundation/TypeTraits.h
new file mode 100644
index 0000000..1250e9b
--- /dev/null
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/TypeTraits.h
@@ -0,0 +1,224 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef STAGEFRIGHT_FOUNDATION_TYPE_TRAITS_H_
+#define STAGEFRIGHT_FOUNDATION_TYPE_TRAITS_H_
+
+#include <type_traits>
+
+namespace android {
+
+/**
+ * std::is_signed, is_unsigned and is_integral does not consider enums even though the standard
+ * considers them integral. Create modified versions of these here. Also create a wrapper around
+ * std::underlying_type that does not require checking if the type is an enum.
+ */
+
+/**
+ * Type support utility class to check if a type is an integral type or an enum.
+ */
+template<typename T>
+struct is_integral_or_enum
+ : std::integral_constant<bool, std::is_integral<T>::value || std::is_enum<T>::value> { };
+
+/**
+ * Type support utility class to get the underlying std::is_integral supported type for a type.
+ * This returns the underlying type for enums, and the same type for types covered by
+ * std::is_integral.
+ *
+ * This is also used as a conditional to return an alternate type if the template param is not
+ * an integral or enum type (as in underlying_integral_type<T, TypeIfNotEnumOrIntegral>::type).
+ */
+template<typename T,
+ typename U=typename std::enable_if<is_integral_or_enum<T>::value>::type,
+ bool=std::is_enum<T>::value,
+ bool=std::is_integral<T>::value>
+struct underlying_integral_type {
+ static_assert(!std::is_enum<T>::value, "T should not be enum here");
+ static_assert(!std::is_integral<T>::value, "T should not be integral here");
+ typedef U type;
+};
+
+/** Specialization for enums. */
+template<typename T, typename U>
+struct underlying_integral_type<T, U, true, false> {
+ static_assert(std::is_enum<T>::value, "T should be enum here");
+ static_assert(!std::is_integral<T>::value, "T should not be integral here");
+ typedef typename std::underlying_type<T>::type type;
+};
+
+/** Specialization for non-enum std-integral types. */
+template<typename T, typename U>
+struct underlying_integral_type<T, U, false, true> {
+ static_assert(!std::is_enum<T>::value, "T should not be enum here");
+ static_assert(std::is_integral<T>::value, "T should be integral here");
+ typedef T type;
+};
+
+/**
+ * Type support utility class to check if the underlying integral type is signed.
+ */
+template<typename T>
+struct is_signed_integral
+ : std::integral_constant<bool, std::is_signed<
+ typename underlying_integral_type<T, unsigned>::type>::value> { };
+
+/**
+ * Type support utility class to check if the underlying integral type is unsigned.
+ */
+template<typename T>
+struct is_unsigned_integral
+ : std::integral_constant<bool, std::is_unsigned<
+ typename underlying_integral_type<T, signed>::type>::value> {
+};
+
+/**
+ * Type support relationship query template.
+ *
+ * If T occurs as one of the types in Us with the same const-volatile qualifications, provides the
+ * member constant |value| equal to true. Otherwise value is false.
+ */
+template<typename T, typename ...Us>
+struct is_one_of;
+
+/// \if 0
+/**
+ * Template specialization when first type matches the searched type.
+ */
+template<typename T, typename ...Us>
+struct is_one_of<T, T, Us...> : std::true_type {};
+
+/**
+ * Template specialization when first type does not match the searched type.
+ */
+template<typename T, typename U, typename ...Us>
+struct is_one_of<T, U, Us...> : is_one_of<T, Us...> {};
+
+/**
+ * Template specialization when there are no types to search.
+ */
+template<typename T>
+struct is_one_of<T> : std::false_type {};
+/// \endif
+
+/**
+ * Type support relationship query template.
+ *
+ * If all types in Us are unique, provides the member constant |value| equal to true.
+ * Otherwise value is false.
+ */
+template<typename ...Us>
+struct are_unique;
+
+/// \if 0
+/**
+ * Template specialization when there are no types.
+ */
+template<>
+struct are_unique<> : std::true_type {};
+
+/**
+ * Template specialization when there is at least one type to check.
+ */
+template<typename T, typename ...Us>
+struct are_unique<T, Us...>
+ : std::integral_constant<bool, are_unique<Us...>::value && !is_one_of<T, Us...>::value> {};
+/// \endif
+
+/// \if 0
+template<size_t Base, typename T, typename ...Us>
+struct _find_first_impl;
+
+/**
+ * Template specialization when there are no types to search.
+ */
+template<size_t Base, typename T>
+struct _find_first_impl<Base, T> : std::integral_constant<size_t, 0> {};
+
+/**
+ * Template specialization when T is the first type in Us.
+ */
+template<size_t Base, typename T, typename ...Us>
+struct _find_first_impl<Base, T, T, Us...> : std::integral_constant<size_t, Base> {};
+
+/**
+ * Template specialization when T is not the first type in Us.
+ */
+template<size_t Base, typename T, typename U, typename ...Us>
+struct _find_first_impl<Base, T, U, Us...>
+ : std::integral_constant<size_t, _find_first_impl<Base + 1, T, Us...>::value> {};
+
+/// \endif
+
+/**
+ * Type support relationship query template.
+ *
+ * If T occurs in Us, index is the 1-based left-most index of T in Us. Otherwise, index is 0.
+ */
+template<typename T, typename ...Us>
+struct find_first {
+ static constexpr size_t index = _find_first_impl<1, T, Us...>::value;
+};
+
+/// \if 0
+/**
+ * Helper class for find_first_convertible_to template.
+ *
+ * Adds a base index.
+ */
+template<size_t Base, typename T, typename ...Us>
+struct _find_first_convertible_to_helper;
+
+/**
+ * Template specialization for when there are more types to consider
+ */
+template<size_t Base, typename T, typename U, typename ...Us>
+struct _find_first_convertible_to_helper<Base, T, U, Us...> {
+ static constexpr size_t index =
+ std::is_convertible<T, U>::value ? Base :
+ _find_first_convertible_to_helper<Base + 1, T, Us...>::index;
+ typedef typename std::conditional<
+ std::is_convertible<T, U>::value, U,
+ typename _find_first_convertible_to_helper<Base + 1, T, Us...>::type>::type type;
+};
+
+/**
+ * Template specialization for when there are no more types to consider
+ */
+template<size_t Base, typename T>
+struct _find_first_convertible_to_helper<Base, T> {
+ static constexpr size_t index = 0;
+ typedef void type;
+};
+
+/// \endif
+
+/**
+ * Type support template that returns the type that T can be implicitly converted into, and its
+ * index, from a list of other types (Us).
+ *
+ * Returns index of 0 and type of void if there are no convertible types.
+ *
+ * \tparam T type that is converted
+ * \tparam Us types into which the conversion is considered
+ */
+template<typename T, typename ...Us>
+struct find_first_convertible_to : public _find_first_convertible_to_helper<1, T, Us...> { };
+
+} // namespace android
+
+#endif // STAGEFRIGHT_FOUNDATION_TYPE_TRAITS_H_
+
diff --git a/media/libstagefright/foundation/tests/AData_test.cpp b/media/libstagefright/foundation/tests/AData_test.cpp
new file mode 100644
index 0000000..f014c25
--- /dev/null
+++ b/media/libstagefright/foundation/tests/AData_test.cpp
@@ -0,0 +1,981 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "AData_test"
+
+#include <gtest/gtest.h>
+#include <utils/RefBase.h>
+//#include <utils/StrongPointer.h>
+
+#include <media/stagefright/foundation/AData.h>
+#include <media/stagefright/foundation/ABuffer.h>
+
+namespace android {
+
+class ADataTest : public ::testing::Test {
+};
+
+// ============ AUnion
+
+struct Events {
+ int dtor;
+ int ctor_empty;
+ int ctor_copy;
+};
+
+struct EventCounter : public RefBase {
+ EventCounter(int *counter, int magic=1234) : mCounter(counter), mMagic(magic) { }
+ virtual ~EventCounter() { ++*mCounter; mMagic = 0; }
+ int magic() const { return mMagic; }
+private:
+ int *mCounter;
+ int mMagic;
+};
+
+struct DerivedCounter : public EventCounter {
+ DerivedCounter(int *counter, int magic=1234) : EventCounter(counter, magic) { }
+};
+
+TEST_F(ADataTest, AUnion_Test) {
+ AUnion<int, const char *, char> u;
+ u.emplace<int>(4);
+ u.del<int>();
+ EXPECT_EQ(4, u.get<int>()); // verify that del<> is a no-op for trivial types, such as int.
+                          // specifically, verify that it does not clear the object memory
+
+ u.emplace<const char *>("hello");
+ EXPECT_STREQ("hello", u.get<const char *>());
+ u.del<const char *>();
+
+ // u.del<char *>();
+ // u.emplace<const int>(4);
+ u.emplace<void>();
+ u.del<void>();
+
+ u.emplace<int>(~0);
+ u.del<int>();
+ EXPECT_EQ(~0, u.get<int>());
+ u.emplace<char>(0x15);
+ // verify that rest of memory after char is cleared upon construction
+ EXPECT_EQ(0, memcmp((char *)(&u) + sizeof(char), "\0\0\0", 3));
+ EXPECT_EQ(0x15, u.get<char>());
+ u.del<char>();
+
+ AUnion<EventCounter, EventCounter *> d;
+ int destructions = 0;
+
+ d.emplace<EventCounter>(&destructions);
+ d.del<EventCounter>();
+ EXPECT_EQ(1, destructions);
+
+ EventCounter *ctr = new EventCounter(&destructions);
+ d.emplace<EventCounter *>(ctr);
+ d.del<EventCounter *>();
+ EXPECT_EQ(1, destructions);
+
+ delete ctr;
+ EXPECT_EQ(2, destructions);
+
+ AUnion<std::shared_ptr<EventCounter>, std::unique_ptr<EventCounter>> md;
+ md.emplace<std::shared_ptr<EventCounter>>(new EventCounter(&destructions));
+ std::shared_ptr<EventCounter> copy(md.get<std::shared_ptr<EventCounter>>());
+ std::weak_ptr<EventCounter> weak(copy);
+ EXPECT_EQ(2, destructions);
+
+ copy.reset();
+ EXPECT_EQ(2, destructions);
+ md.del<std::shared_ptr<EventCounter>>();
+ EXPECT_EQ(3, destructions);
+ EXPECT_TRUE(weak.expired());
+
+ md.emplace<std::unique_ptr<EventCounter>>(new EventCounter(&destructions));
+ EXPECT_EQ(3, destructions);
+
+ std::unique_ptr<EventCounter> unique = std::move(md.get<std::unique_ptr<EventCounter>>());
+ EXPECT_EQ(3, destructions);
+ EXPECT_FALSE((bool)md.get<std::unique_ptr<EventCounter>>());
+
+ md.del<std::unique_ptr<EventCounter>>();
+ EXPECT_EQ(3, destructions);
+ md.emplace<std::unique_ptr<EventCounter>>(std::move(unique));
+ EXPECT_TRUE((bool)md.get<std::unique_ptr<EventCounter>>());
+ EXPECT_EQ(3, destructions);
+
+ md.del<std::unique_ptr<EventCounter>>();
+ EXPECT_EQ(4, destructions);
+}
+
+TEST_F(ADataTest, AData_StaticTest) {
+ using namespace std;
+
+ static_assert(is_copy_assignable<shared_ptr<EventCounter>>::value, "");
+ static_assert(is_copy_constructible<shared_ptr<EventCounter>>::value, "");
+ static_assert(is_default_constructible<shared_ptr<EventCounter>>::value, "");
+
+ static_assert(is_copy_assignable<weak_ptr<DerivedCounter>>::value, "");
+ static_assert(is_copy_constructible<weak_ptr<DerivedCounter>>::value, "");
+ static_assert(is_default_constructible<weak_ptr<DerivedCounter>>::value, "");
+
+ static_assert(!is_copy_assignable<unique_ptr<DerivedCounter>>::value, "");
+ static_assert(!is_copy_constructible<unique_ptr<DerivedCounter>>::value, "");
+ static_assert(is_default_constructible<unique_ptr<DerivedCounter>>::value, "");
+
+ static_assert(is_copy_assignable<sp<EventCounter>>::value, "");
+ static_assert(is_copy_constructible<sp<EventCounter>>::value, "");
+ static_assert(is_default_constructible<sp<EventCounter>>::value, "");
+
+ static_assert(is_copy_assignable<wp<EventCounter>>::value, "");
+ static_assert(is_copy_constructible<wp<EventCounter>>::value, "");
+ static_assert(is_default_constructible<wp<EventCounter>>::value, "");
+
+ static_assert(is_convertible<shared_ptr<DerivedCounter>, shared_ptr<EventCounter>>::value, "");
+ static_assert(!is_convertible<shared_ptr<EventCounter>, shared_ptr<DerivedCounter>>::value, "");
+
+ static_assert(is_convertible<unique_ptr<DerivedCounter>, unique_ptr<EventCounter>>::value, "");
+ static_assert(!is_convertible<unique_ptr<EventCounter>, unique_ptr<DerivedCounter>>::value, "");
+
+ static_assert(is_convertible<unique_ptr<DerivedCounter>, shared_ptr<EventCounter>>::value, "");
+ static_assert(!is_convertible<shared_ptr<DerivedCounter>, unique_ptr<EventCounter>>::value, "");
+
+ static_assert(is_convertible<weak_ptr<DerivedCounter>, weak_ptr<EventCounter>>::value, "");
+ static_assert(!is_convertible<weak_ptr<EventCounter>, weak_ptr<DerivedCounter>>::value, "");
+
+ static_assert(is_convertible<shared_ptr<DerivedCounter>, weak_ptr<EventCounter>>::value, "");
+ static_assert(!is_convertible<weak_ptr<DerivedCounter>, shared_ptr<EventCounter>>::value, "");
+
+ static_assert(is_convertible<sp<EventCounter>, sp<RefBase>>::value, "");
+ static_assert(is_convertible<sp<RefBase>, sp<EventCounter>>::value, "YES");
+
+ static_assert(is_convertible<wp<EventCounter>, wp<RefBase>>::value, "");
+ static_assert(is_convertible<wp<RefBase>, wp<EventCounter>>::value, "YES");
+
+ static_assert(is_convertible<sp<EventCounter>, wp<RefBase>>::value, "");
+ static_assert(!is_convertible<wp<EventCounter>, sp<RefBase>>::value, "");
+}
+
+TEST_F(ADataTest, AData_SampleTest) {
+ AData<int, float>::Basic data;
+ int i = 1;
+ float f = 7.0f;
+
+ data.set(5);
+ EXPECT_TRUE(data.find(&i));
+ EXPECT_FALSE(data.find(&f));
+ EXPECT_EQ(i, 5);
+
+ data.set(6.0f);
+ EXPECT_FALSE(data.find(&i));
+ EXPECT_TRUE(data.find(&f));
+ EXPECT_EQ(f, 6.0f);
+
+ AData<int, sp<RefBase>>::RelaxedBasic objdata; // relaxed type support
+ sp<ABuffer> buf = new ABuffer(16), buf2;
+ sp<RefBase> obj;
+
+ objdata.set(buf);
+ EXPECT_TRUE(objdata.find(&buf2));
+ EXPECT_EQ(buf, buf2);
+ EXPECT_FALSE(objdata.find(&i));
+ EXPECT_TRUE(objdata.find(&obj));
+ EXPECT_TRUE(obj == buf);
+
+ obj = buf;
+ objdata.set(obj); // storing as sp<RefBase>
+ EXPECT_FALSE(objdata.find(&buf2)); // not stored as ABuffer(!)
+ EXPECT_TRUE(objdata.find(&obj));
+}
+
+struct SampleTypeFlagger {
+ typedef unsigned type;
+ enum Flags : type {
+ kEmpty = 100,
+ kInt,
+ kConstCharPtr,
+ kEventCounter,
+ kEventCounterPointer,
+ kEventCounterSharedPointer,
+ kEventCounterUniquePointer,
+ kEventCounterWeakPointer,
+ kEventCounterSP,
+ kEventCounterWP,
+ };
+ constexpr static type mask = ~Flags(0);
+ constexpr static type flagFor(void*) { return kEmpty; }
+ constexpr static type flagFor(int*) { return kInt; }
+ constexpr static type flagFor(const char**) { return kConstCharPtr; }
+ constexpr static type flagFor(EventCounter*) { return kEventCounter; }
+ constexpr static type flagFor(EventCounter**) { return kEventCounterPointer; }
+ constexpr static
+ type flagFor(std::shared_ptr<EventCounter>*) { return kEventCounterSharedPointer; }
+ constexpr static
+ type flagFor(std::unique_ptr<EventCounter>*) { return kEventCounterUniquePointer; }
+ constexpr static type flagFor(std::weak_ptr<EventCounter>*) { return kEventCounterWeakPointer; }
+ constexpr static type flagFor(sp<EventCounter>*) { return kEventCounterSP; }
+ constexpr static type flagFor(wp<EventCounter>*) { return kEventCounterWP; }
+ constexpr static bool canDeleteAs(type object, type del) { return del == object; }
+ template <typename T> struct store { typedef T as_type; };
+};
+
+TEST_F(ADataTest, AData_SimpleTest) {
+ int _int = 0;
+ const char *_constCharPtr = NULL;
+ AData<int, const char *>::Custom<SampleTypeFlagger> u;
+ EXPECT_FALSE(u.used());
+ EXPECT_FALSE(u.find<int>(&_int));
+ EXPECT_FALSE(u.find<const char *>(&_constCharPtr));
+
+ EXPECT_TRUE(u.set<int>(4));
+ EXPECT_TRUE(u.used());
+ EXPECT_TRUE(u.find<int>(&_int));
+ EXPECT_EQ(4, _int);
+ EXPECT_FALSE(u.find<const char *>(&_constCharPtr));
+ EXPECT_EQ(NULL, _constCharPtr);
+
+ EXPECT_TRUE(u.clear());
+ EXPECT_FALSE(u.used());
+ EXPECT_FALSE(u.find<int>(&_int));
+ EXPECT_FALSE(u.find<const char *>(&_constCharPtr));
+
+ EXPECT_TRUE(u.set<int>(5));
+ EXPECT_TRUE(u.set<int>(6));
+ EXPECT_TRUE(u.find<int>(&_int));
+ EXPECT_EQ(6, _int);
+
+ EXPECT_TRUE(u.set<const char *>("hello"));
+ EXPECT_TRUE(u.used());
+ EXPECT_FALSE(u.find<int>(&_int));
+ EXPECT_TRUE(u.find<const char *>(&_constCharPtr));
+ EXPECT_STREQ("hello", _constCharPtr);
+
+ EXPECT_TRUE(u.clear());
+ EXPECT_FALSE(u.used());
+ EXPECT_FALSE(u.find<int>(&_int));
+ EXPECT_FALSE(u.find<const char *>(&_constCharPtr));
+
+ EXPECT_TRUE(u.set<const char *>("world"));
+ EXPECT_TRUE(u.set<const char *>("!!"));
+ EXPECT_TRUE(u.used());
+ EXPECT_FALSE(u.find<int>(&_int));
+ EXPECT_TRUE(u.find<const char *>(&_constCharPtr));
+ EXPECT_STREQ("!!", _constCharPtr);
+
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_TRUE(u.find(&_constCharPtr));
+}
+
+void set(std::unique_ptr<int> &dst, std::unique_ptr<int> &&src) {
+ dst = std::move(src);
+}
+
+void set(std::unique_ptr<int> &dst, std::unique_ptr<int> &src) {
+ dst = std::move(src);
+}
+
+TEST_F(ADataTest, AData_CopyMoveTest) {
+ int destructions = 0;
+ int _int = 0;
+ std::shared_ptr<EventCounter> _shared;
+ std::unique_ptr<EventCounter> _unique;
+ std::weak_ptr<EventCounter> _weak;
+ const std::shared_ptr<EventCounter> _constShared(new EventCounter(&destructions));
+ const std::unique_ptr<EventCounter> _constUnique = nullptr;
+
+ AData<int, std::weak_ptr<EventCounter>, std::shared_ptr<EventCounter>,
+ std::unique_ptr<EventCounter>>::Basic u;
+
+ // test that data is empty
+ EXPECT_FALSE(u.used());
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.find(&_shared));
+ EXPECT_FALSE(u.remove(&_unique));
+ EXPECT_FALSE(u.find(&_weak));
+
+ // test that integer can be stored and read
+ EXPECT_TRUE(u.set<int>(1));
+ EXPECT_TRUE(u.used());
+ EXPECT_TRUE(u.find(&_int));
+ EXPECT_EQ(1, _int);
+ EXPECT_FALSE(u.find(&_shared));
+ EXPECT_FALSE(u.remove(&_unique));
+ EXPECT_FALSE(u.find(&_weak));
+
+ // test that movable type (unique_ptr) can be moved in and read out, and it moves
+ _unique = std::unique_ptr<EventCounter>(new EventCounter(&destructions, 123));
+ EXPECT_TRUE(u.set(std::move(_unique)));
+ EXPECT_FALSE((bool)_unique);
+ EXPECT_TRUE(u.used());
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.find(&_shared));
+ EXPECT_FALSE(u.find(&_weak));
+ EXPECT_TRUE(u.remove(&_unique));
+ EXPECT_TRUE((bool)_unique);
+ if (_unique) {
+ EXPECT_EQ(123, _unique->magic());
+ }
+
+ // the unique value should have been removed but still accessible as nullptr
+ EXPECT_TRUE(u.remove(&_unique));
+ EXPECT_FALSE((bool)_unique);
+ EXPECT_EQ(1, destructions);
+
+ // test that movable-only type (unique_ptr) can be stored without moving (and is still
+ // moved)
+ _unique = std::unique_ptr<EventCounter>(new EventCounter(&destructions, 321));
+ EXPECT_TRUE(u.set(std::move(_unique)));
+ EXPECT_FALSE((bool)_unique);
+ EXPECT_TRUE(u.set(std::unique_ptr<EventCounter>(new EventCounter(&destructions, 1234))));
+ EXPECT_EQ(2, destructions);
+ EXPECT_TRUE(u.remove(&_unique));
+ EXPECT_TRUE((bool)_unique);
+ if (_unique) {
+ EXPECT_EQ(1234, _unique->magic());
+ }
+ EXPECT_TRUE(u.set(std::move(_unique)));
+ EXPECT_EQ(2, destructions);
+ EXPECT_TRUE(u.clear());
+ EXPECT_EQ(3, destructions);
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.find(&_shared));
+ EXPECT_FALSE(u.remove(&_unique));
+ EXPECT_FALSE(u.find(&_weak));
+
+ // u.set(_constUnique);
+
+ // test that copiable & movable type (shared_ptr) is copied unless explicitly moved.
+ _shared = std::make_shared<EventCounter>(&destructions, 234);
+ EXPECT_EQ(1L, _shared.use_count());
+ EXPECT_TRUE(u.set(_shared));
+ EXPECT_TRUE((bool)_shared);
+ if (_shared) {
+ EXPECT_EQ(234, _shared->magic());
+ }
+
+ EXPECT_EQ(2L, _shared.use_count());
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.remove(&_unique));
+ EXPECT_FALSE(u.find(&_weak));
+ EXPECT_TRUE(u.find(&_shared));
+ EXPECT_EQ(2L, _shared.use_count());
+ EXPECT_TRUE((bool)_shared);
+ if (_shared) {
+ EXPECT_EQ(234, _shared->magic());
+ }
+
+ // explicitly move in shared_ptr
+ EXPECT_TRUE(u.set(std::move(_shared)));
+ EXPECT_EQ(0, _shared.use_count()); // shared should be nullptr
+ EXPECT_FALSE((bool)_shared);
+ EXPECT_TRUE(u.find(&_shared));
+ EXPECT_EQ(2L, _shared.use_count()); // now both u and _shared contains the object
+ EXPECT_TRUE((bool)_shared);
+ if (_shared) {
+ EXPECT_EQ(234, _shared->magic());
+ }
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.remove(&_unique));
+ EXPECT_FALSE(u.find(&_weak));
+ EXPECT_TRUE(u.find(&_shared));
+ EXPECT_EQ(2L, _shared.use_count()); // still both u and _shared contains the object
+
+ EXPECT_TRUE(u.clear());
+ EXPECT_TRUE(_shared.unique()); // now only _shared contains the object
+
+ EXPECT_TRUE(u.set(_constShared));
+ EXPECT_EQ(2L, _constShared.use_count()); // even though it is const, we can add a use count
+ EXPECT_TRUE(u.find(&_shared));
+ EXPECT_EQ(3L, _shared.use_count()); // now u, _shared and _constShared contains the const object
+ EXPECT_TRUE((bool)_shared);
+ if (_shared) {
+ EXPECT_EQ(1234, _shared->magic());
+ }
+
+ // test that weak pointer can be copied in (support for moving is from C++14 only)
+ _weak = _shared;
+ EXPECT_EQ(_weak.use_count(), _shared.use_count());
+ EXPECT_TRUE(u.set(_weak));
+
+ _weak.reset();
+ EXPECT_EQ(_weak.use_count(), 0);
+
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.remove(&_unique));
+ EXPECT_FALSE(u.find(&_shared));
+ EXPECT_TRUE(u.find(&_weak));
+ EXPECT_EQ(_weak.use_count(), _shared.use_count());
+ EXPECT_EQ(_weak.lock(), _shared);
+
+ // we can remove a weak pointer multiple times
+ _weak.reset();
+ EXPECT_TRUE(u.find(&_weak));
+ EXPECT_EQ(_weak.use_count(), _shared.use_count());
+ EXPECT_EQ(_weak.lock(), _shared);
+ EXPECT_TRUE(u.clear());
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.remove(&_unique));
+ EXPECT_FALSE(u.find(&_shared));
+ EXPECT_FALSE(u.find(&_weak));
+};
+
+TEST_F(ADataTest, AData_RelaxedCopyMoveTest) {
+ int destructions = 0;
+ int _int = 0;
+ std::shared_ptr<DerivedCounter> _shared;
+ std::unique_ptr<DerivedCounter> _unique, _unique2;
+ std::weak_ptr<DerivedCounter> _weak;
+ std::shared_ptr<EventCounter> _shared_base;
+ std::unique_ptr<EventCounter> _unique_base;
+ std::weak_ptr<EventCounter> _weak_base;
+ const std::shared_ptr<DerivedCounter> _constShared(new DerivedCounter(&destructions));
+ const std::unique_ptr<DerivedCounter> _constUnique = nullptr;
+
+ AData<int, std::unique_ptr<EventCounter>, std::shared_ptr<EventCounter>,
+ std::weak_ptr<EventCounter>>::RelaxedBasic u;
+
+ // test that data is empty
+ EXPECT_FALSE(u.used());
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.find(&_shared));
+ EXPECT_FALSE(u.remove(&_unique));
+ EXPECT_FALSE(u.find(&_weak));
+ EXPECT_FALSE(u.find(&_shared_base));
+ EXPECT_FALSE(u.remove(&_unique_base));
+ EXPECT_FALSE(u.find(&_weak_base));
+
+ // test that integer can be stored and read
+ EXPECT_TRUE(u.set<int>(1));
+ EXPECT_TRUE(u.used());
+ EXPECT_TRUE(u.find(&_int));
+ EXPECT_EQ(1, _int);
+ EXPECT_FALSE(u.find(&_shared));
+ EXPECT_FALSE(u.remove(&_unique));
+ EXPECT_FALSE(u.find(&_weak));
+ EXPECT_FALSE(u.find(&_shared_base));
+ EXPECT_FALSE(u.remove(&_unique_base));
+ EXPECT_FALSE(u.find(&_weak_base));
+
+ // test that movable type (unique_ptr) can be moved in and read out, and it moves
+ _unique = std::unique_ptr<DerivedCounter>(new DerivedCounter(&destructions, 123));
+ EXPECT_TRUE(u.set(std::move(_unique)));
+ EXPECT_FALSE((bool)_unique);
+ EXPECT_TRUE(u.used());
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.find(&_shared));
+ EXPECT_FALSE(u.find(&_weak));
+ EXPECT_TRUE(u.remove(&_unique));
+ EXPECT_TRUE((bool)_unique);
+ if (_unique) {
+ EXPECT_EQ(123, _unique->magic());
+ }
+
+ // the unique value should have been removed but still accessible as nullptr
+ EXPECT_TRUE(u.remove(&_unique));
+ EXPECT_FALSE((bool)_unique);
+ EXPECT_EQ(1, destructions);
+
+ EXPECT_FALSE(u.find(&_shared_base));
+ EXPECT_TRUE(u.remove(&_unique_base));
+ EXPECT_FALSE((bool)_unique_base);
+ EXPECT_FALSE(u.find(&_weak_base));
+
+ // test that movable-only type (unique_ptr) can be stored without moving (and is still
+ // moved)
+ _unique = std::unique_ptr<DerivedCounter>(new DerivedCounter(&destructions, 321));
+ EXPECT_TRUE(u.set(std::move(_unique)));
+ EXPECT_FALSE((bool)_unique);
+ EXPECT_TRUE(u.set(std::unique_ptr<DerivedCounter>(new DerivedCounter(&destructions, 1234))));
+ EXPECT_EQ(2, destructions);
+ EXPECT_TRUE(u.remove(&_unique));
+ EXPECT_TRUE((bool)_unique);
+ if (_unique) {
+ EXPECT_EQ(1234, _unique->magic());
+ }
+ EXPECT_TRUE(u.set(std::move(_unique)));
+ EXPECT_EQ(2, destructions);
+ EXPECT_TRUE(u.clear());
+ EXPECT_EQ(3, destructions);
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.find(&_shared));
+ EXPECT_FALSE(u.remove(&_unique));
+ EXPECT_FALSE(u.find(&_weak));
+ EXPECT_FALSE(u.find(&_shared_base));
+ EXPECT_FALSE(u.remove(&_unique_base));
+ EXPECT_FALSE(u.find(&_weak_base));
+
+ // test that unique pointer can be set and removed as base type (but removed as derived only
+ // if it was set as derived type)
+ _unique = std::unique_ptr<DerivedCounter>(new DerivedCounter(&destructions, 321));
+ EXPECT_TRUE(u.set(std::move(_unique)));
+ EXPECT_FALSE((bool)_unique);
+ EXPECT_TRUE(u.remove(&_unique_base));
+ EXPECT_TRUE((bool)_unique_base);
+ if (_unique_base) {
+ EXPECT_EQ(321, _unique_base->magic());
+ }
+ EXPECT_TRUE(u.remove(&_unique));
+ EXPECT_FALSE((bool)_unique);
+
+ EXPECT_TRUE(u.set(std::move(_unique_base)));
+ EXPECT_FALSE((bool)_unique_base);
+ EXPECT_FALSE(u.remove(&_unique));
+ EXPECT_FALSE((bool)_unique);
+ EXPECT_TRUE(u.remove(&_unique_base));
+ EXPECT_TRUE((bool)_unique_base);
+ if (_unique_base) {
+ EXPECT_EQ(321, _unique_base->magic());
+ }
+
+ EXPECT_EQ(3, destructions);
+ EXPECT_TRUE(u.remove(&_unique_base));
+ EXPECT_EQ(4, destructions);
+ EXPECT_FALSE((bool)_unique_base);
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.find(&_shared));
+ EXPECT_FALSE(u.find(&_shared_base));
+ EXPECT_FALSE(u.find(&_weak));
+ EXPECT_FALSE(u.find(&_weak_base));
+
+ // u.set(_constUnique);
+
+ // test that copiable & movable type (shared_ptr) is copied unless explicitly moved.
+ _shared = std::make_shared<DerivedCounter>(&destructions, 234);
+ EXPECT_EQ(1L, _shared.use_count());
+ EXPECT_TRUE(u.set(_shared));
+ EXPECT_TRUE((bool)_shared);
+ if (_shared) {
+ EXPECT_EQ(234, _shared->magic());
+ }
+
+ EXPECT_EQ(2L, _shared.use_count());
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.remove(&_unique));
+ EXPECT_FALSE(u.find(&_weak));
+ EXPECT_TRUE(u.find(&_shared));
+ EXPECT_FALSE(u.remove(&_unique_base));
+ EXPECT_FALSE(u.find(&_weak_base));
+ EXPECT_EQ(2L, _shared.use_count());
+ EXPECT_TRUE((bool)_shared);
+ if (_shared) {
+ EXPECT_EQ(234, _shared->magic());
+ }
+
+ // explicitly move in shared_ptr
+ EXPECT_TRUE(u.set(std::move(_shared)));
+ EXPECT_EQ(0, _shared.use_count()); // shared should be nullptr
+ EXPECT_FALSE((bool)_shared);
+ EXPECT_TRUE(u.find(&_shared));
+ EXPECT_EQ(2L, _shared.use_count()); // now both u and _shared contains the object
+ EXPECT_TRUE((bool)_shared);
+ if (_shared) {
+ EXPECT_EQ(234, _shared->magic());
+ }
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.remove(&_unique));
+ EXPECT_FALSE(u.find(&_weak));
+ EXPECT_FALSE(u.remove(&_unique_base));
+ EXPECT_FALSE(u.find(&_weak_base));
+ EXPECT_TRUE(u.find(&_shared));
+ EXPECT_EQ(2L, _shared.use_count()); // still both u and _shared contains the object
+
+ EXPECT_TRUE(u.clear());
+ EXPECT_TRUE(_shared.unique()); // now only _shared contains the object
+
+ EXPECT_TRUE(u.set(_constShared));
+ EXPECT_EQ(2L, _constShared.use_count()); // even though it is const, we can add a use count
+ EXPECT_TRUE(u.find(&_shared));
+ EXPECT_EQ(3L, _shared.use_count()); // now u, _shared and _constShared contains the const object
+ EXPECT_TRUE((bool)_shared);
+ if (_shared) {
+ EXPECT_EQ(1234, _shared->magic());
+ }
+
+ // test that shared pointer can be set and removed as base type (but removed as derived only
+ // if it was set as derived type)
+ EXPECT_TRUE(u.find(&_shared_base));
+ EXPECT_TRUE((bool)_shared_base);
+ if (_shared_base) {
+ EXPECT_EQ(1234, _shared_base->magic());
+ }
+ EXPECT_EQ(4L, _shared.use_count()); // now u, _shared, _constShared and _shared_base contains
+ // the const object
+ _shared.reset();
+ EXPECT_EQ(3L, _shared_base.use_count()); // now u, _constShared and _shared_base contains it
+ EXPECT_TRUE(u.clear());
+ EXPECT_EQ(2L, _shared_base.use_count()); // now _constShared and _shared_base contains it
+
+ EXPECT_TRUE(u.set(_shared_base)); // now u_ also contains it as base class
+ EXPECT_EQ(3L, _shared_base.use_count());
+ EXPECT_FALSE(u.find(&_shared)); // cannot get it as derived type
+ EXPECT_FALSE((bool)_shared);
+ _shared_base.reset();
+ EXPECT_TRUE(u.find(&_shared_base)); // can still get it as base type
+ EXPECT_TRUE((bool)_shared_base);
+ if (_shared_base) {
+ EXPECT_EQ(1234, _shared_base->magic());
+ }
+ _shared = std::static_pointer_cast<DerivedCounter>(_shared_base);
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.remove(&_unique));
+ EXPECT_FALSE(u.remove(&_unique_base));
+ EXPECT_FALSE(u.find(&_weak));
+ EXPECT_FALSE(u.find(&_weak_base));
+
+ // test that weak pointer can be copied in (support for moving is from C++14 only)
+ _weak = _shared;
+ EXPECT_EQ(_weak.use_count(), _shared.use_count());
+ EXPECT_TRUE(u.set(_weak));
+
+ _weak.reset();
+ EXPECT_EQ(_weak.use_count(), 0);
+
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.remove(&_unique));
+ EXPECT_FALSE(u.find(&_shared));
+ EXPECT_FALSE(u.remove(&_unique_base));
+ EXPECT_FALSE(u.find(&_shared_base));
+ EXPECT_TRUE(u.find(&_weak));
+ EXPECT_EQ(_weak.use_count(), _shared.use_count());
+ EXPECT_EQ(_weak.lock(), _shared);
+
+ // we can remove a weak pointer multiple times
+ _weak.reset();
+ EXPECT_TRUE(u.find(&_weak));
+ EXPECT_EQ(_weak.use_count(), _shared.use_count());
+ EXPECT_EQ(_weak.lock(), _shared);
+ EXPECT_TRUE(u.clear());
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.remove(&_unique));
+ EXPECT_FALSE(u.find(&_shared));
+ EXPECT_FALSE(u.find(&_weak));
+ EXPECT_FALSE(u.remove(&_unique_base));
+ EXPECT_FALSE(u.find(&_shared_base));
+ EXPECT_FALSE(u.find(&_weak_base));
+
+ // test that weak pointer can be set and removed as base type (but removed as derived only
+ // if it was set as derived type)
+ _weak = _shared;
+ EXPECT_TRUE(u.set(_weak));
+ EXPECT_TRUE(u.find(&_weak_base));
+ EXPECT_FALSE(_weak_base.expired());
+ if (!_weak_base.expired()) {
+ EXPECT_EQ(1234, _weak_base.lock()->magic());
+ }
+ // now _shared, _constShared and _shared_base contains the const object
+ EXPECT_EQ(3L, _weak.use_count());
+ _weak.reset();
+ EXPECT_EQ(3L, _weak_base.use_count()); // _weak did not hold a reference
+ _shared.reset();
+ EXPECT_EQ(2L, _weak_base.use_count()); // now u, _constShared and _shared_base contains it
+ EXPECT_TRUE(u.clear());
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.remove(&_unique));
+ EXPECT_FALSE(u.find(&_shared));
+ EXPECT_FALSE(u.find(&_weak));
+ EXPECT_FALSE(u.remove(&_unique_base));
+ EXPECT_FALSE(u.find(&_shared_base));
+ EXPECT_FALSE(u.find(&_weak_base));
+
+ EXPECT_TRUE(u.set(_weak_base)); // now u_ also contains it as base class
+ EXPECT_FALSE(u.find(&_weak)); // cannot get it as derived type
+ EXPECT_TRUE(_weak.expired());
+ _weak_base.reset();
+ EXPECT_TRUE(u.find(&_weak_base)); // can still get it as base type
+ EXPECT_FALSE(_weak_base.expired());
+ if (!_weak_base.expired()) {
+ EXPECT_EQ(1234, _weak_base.lock()->magic());
+ }
+};
+
+TEST_F(ADataTest, AData_AndroidSpTest) {
+ int destructions = 0;
+ int _int = 0;
+ sp<EventCounter> _shared;
+ wp<EventCounter> _weak;
+ const sp<EventCounter> _constShared(new EventCounter(&destructions));
+
+ AData<int, sp<EventCounter>, wp<EventCounter>>::Strict<uint8_t> u;
+
+ // test that data is empty
+ EXPECT_FALSE(u.used());
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.find(&_shared));
+ EXPECT_FALSE(u.find(&_weak));
+
+ // test that integer can be stored and read
+ EXPECT_TRUE(u.set<int>(1));
+ EXPECT_TRUE(u.used());
+ EXPECT_TRUE(u.find(&_int));
+ EXPECT_EQ(1, _int);
+ EXPECT_FALSE(u.find(&_shared));
+ EXPECT_FALSE(u.find(&_weak));
+
+ // test that copiable & movable type (shared_ptr) is copied unless explicitly moved.
+ _shared = new EventCounter(&destructions, 234);
+ _weak = _shared; // used for tracking #234
+
+ EXPECT_TRUE(u.set(_shared));
+ EXPECT_TRUE((bool)_shared.get());
+ if (_shared.get()) {
+ EXPECT_EQ(234, _shared->magic());
+ }
+
+ _shared.clear();
+ EXPECT_EQ(NULL, _shared.get());
+ EXPECT_NE(nullptr, _weak.promote().get()); // u still holds object
+
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.find(&_weak));
+ EXPECT_TRUE(u.find(&_shared)); // now u and _shared both hold object
+ EXPECT_TRUE((bool)_shared.get());
+ if (_shared.get()) {
+ EXPECT_EQ(234, _shared->magic());
+ }
+ // verify the find did not move out object
+ _shared.clear();
+ EXPECT_EQ(NULL, _shared.get());
+ EXPECT_NE(nullptr, _weak.promote().get()); // u still holds object
+ EXPECT_TRUE(u.find(&_shared)); // now u and _shared both hold object
+ if (_shared.get()) {
+ EXPECT_EQ(234, _shared->magic());
+ }
+
+ // verify that we can set object multiple times
+ EXPECT_TRUE(u.set(_shared));
+
+ // explicitly move in sp
+ EXPECT_TRUE(u.set(std::move(_shared)));
+ EXPECT_FALSE((bool)_shared.get()); // android also clears sp<> on move...
+ EXPECT_TRUE(u.find(&_shared)); // still can get it back
+ EXPECT_TRUE((bool)_shared.get());
+ if (_shared.get()) {
+ EXPECT_EQ(234, _shared->magic());
+ }
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.find(&_weak));
+
+ EXPECT_TRUE(u.used());
+ EXPECT_TRUE(u.clear()); // now only _shared contains the object
+ EXPECT_FALSE(u.used());
+
+ // we still hold a copy
+ EXPECT_TRUE((bool)_shared.get());
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.find(&_shared)); // _shared still contains the object
+
+ EXPECT_TRUE(u.set(_constShared));
+ EXPECT_TRUE(u.find(&_shared)); // now _shared contains _constShared
+ EXPECT_EQ(NULL, _weak.promote().get()); // original _shared is now lost
+
+ EXPECT_TRUE((bool)_shared.get());
+ if (_shared.get()) {
+ EXPECT_EQ(1234, _shared->magic());
+ }
+ EXPECT_TRUE(u.clear());
+
+ // test that wp can be copied in
+ _weak = _shared;
+ EXPECT_TRUE(u.set(_weak));
+
+ _weak.clear();
+
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.find(&_shared));
+ EXPECT_TRUE(u.find(&_weak));
+ EXPECT_EQ(_weak.promote(), _shared);
+
+ // we can remove a weak pointer multiple times
+ _weak.clear();
+ EXPECT_TRUE(u.find(&_weak));
+ EXPECT_EQ(_weak.promote(), _shared);
+ EXPECT_TRUE(u.clear());
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.find(&_shared));
+ EXPECT_FALSE(u.find(&_weak));
+};
+
+TEST_F(ADataTest, AData_RelaxedAndroidSpTest) {
+ int destructions = 0;
+ int _int = 0;
+ sp<EventCounter> _shared;
+ wp<EventCounter> _weak;
+ sp<RefBase> _shared_base;
+ wp<RefBase> _weak_base;
+ const sp<EventCounter> _constShared(new EventCounter(&destructions));
+
+ AData<int, sp<RefBase>, wp<RefBase>>::Relaxed<uint16_t> u;
+
+ // test that data is empty
+ EXPECT_FALSE(u.used());
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.find(&_shared));
+ EXPECT_FALSE(u.find(&_weak));
+ EXPECT_FALSE(u.find(&_shared_base));
+ EXPECT_FALSE(u.find(&_weak_base));
+
+ // test that integer can be stored and read
+ EXPECT_TRUE(u.set<int>(1));
+ EXPECT_TRUE(u.used());
+ EXPECT_TRUE(u.find(&_int));
+ EXPECT_EQ(1, _int);
+ EXPECT_FALSE(u.find(&_shared));
+ EXPECT_FALSE(u.find(&_weak));
+ EXPECT_FALSE(u.find(&_shared_base));
+ EXPECT_FALSE(u.find(&_weak_base));
+
+ // test that copiable & movable type (shared_ptr) is copied unless explicitly moved.
+ _shared = new EventCounter(&destructions, 234);
+ _weak = _shared; // used for tracking #234
+
+ EXPECT_TRUE(u.set(_shared));
+ EXPECT_TRUE((bool)_shared.get());
+ if (_shared.get()) {
+ EXPECT_EQ(234, _shared->magic());
+ }
+
+ _shared.clear();
+ EXPECT_EQ(NULL, _shared.get());
+ EXPECT_NE(nullptr, _weak.promote().get()); // u still holds object
+
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_TRUE(u.find(&_shared)); // now u and _shared both hold object
+ EXPECT_TRUE((bool)_shared.get());
+ if (_shared.get()) {
+ EXPECT_EQ(234, _shared->magic());
+ }
+ // verify the find did not move out object
+ _shared.clear();
+ EXPECT_EQ(NULL, _shared.get());
+ EXPECT_NE(nullptr, _weak.promote().get()); // u still holds object
+ EXPECT_TRUE(u.find(&_shared)); // now u and _shared both hold object
+ if (_shared.get()) {
+ EXPECT_EQ(234, _shared->magic());
+ }
+
+ // verify that we can set object multiple times
+ EXPECT_TRUE(u.set(_shared));
+
+ // explicitly move in sp
+ EXPECT_TRUE(u.set(std::move(_shared)));
+ EXPECT_FALSE((bool)_shared.get()); // android also clears sp<> on move...
+ EXPECT_TRUE(u.find(&_shared)); // still can get it back
+ EXPECT_TRUE((bool)_shared.get());
+ if (_shared.get()) {
+ EXPECT_EQ(234, _shared->magic());
+ }
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.find(&_weak));
+ EXPECT_FALSE(u.find(&_weak_base));
+
+ EXPECT_TRUE(u.used());
+ EXPECT_TRUE(u.clear()); // now only _shared contains the object
+ EXPECT_FALSE(u.used());
+
+ // we still hold a copy
+ EXPECT_TRUE((bool)_shared.get());
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.find(&_shared)); // _shared still contains the object
+
+ EXPECT_TRUE(u.set(_constShared));
+ EXPECT_TRUE(u.find(&_shared)); // now _shared contains _constShared
+ EXPECT_EQ(NULL, _weak.promote().get()); // original _shared is now lost
+
+ EXPECT_TRUE((bool)_shared.get());
+ if (_shared.get()) {
+ EXPECT_EQ(1234, _shared->magic());
+ }
+ EXPECT_TRUE(u.clear());
+
+ // test that shared pointer can be set and removed as base type (but removed as derived only
+ // if it was set as derived type)
+ EXPECT_TRUE(u.set(_constShared));
+ EXPECT_TRUE(u.find(&_shared_base));
+ EXPECT_TRUE((bool)_shared_base.get());
+ if (_shared_base.get()) {
+ EXPECT_EQ(1234, static_cast<EventCounter*>(_shared_base.get())->magic());
+ }
+ _shared.clear();
+ EXPECT_TRUE(u.clear());
+ EXPECT_TRUE((bool)_shared_base.get());
+ if (_shared_base.get()) {
+ EXPECT_EQ(1234, static_cast<EventCounter*>(_shared_base.get())->magic());
+ }
+
+ EXPECT_TRUE(u.set(_shared_base)); // now u contains it as base class
+ EXPECT_TRUE((bool)_shared_base.get());
+ EXPECT_FALSE(u.find(&_shared)); // cannot get it as derived type
+ EXPECT_FALSE((bool)_shared.get());
+ _shared_base.clear();
+ EXPECT_TRUE(u.find(&_shared_base)); // can still get it as base type
+ EXPECT_TRUE((bool)_shared_base.get());
+ if (_shared_base.get()) {
+ EXPECT_EQ(1234, static_cast<EventCounter*>(_shared_base.get())->magic());
+ }
+ _shared = static_cast<DerivedCounter*>(_shared_base.get());
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.find(&_weak));
+ EXPECT_FALSE(u.find(&_weak_base));
+
+ // test that wp can be copied in
+ _weak = _shared;
+ EXPECT_TRUE(u.set(_weak));
+
+ _weak.clear();
+
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.find(&_shared));
+ EXPECT_FALSE(u.find(&_shared_base));
+ EXPECT_TRUE(u.find(&_weak));
+ EXPECT_EQ(_weak.promote(), _shared);
+
+ // we can remove a weak pointer multiple times
+ _weak.clear();
+ EXPECT_TRUE(u.find(&_weak));
+ EXPECT_EQ(_weak.promote(), _shared);
+ EXPECT_TRUE(u.clear());
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.find(&_shared));
+ EXPECT_FALSE(u.find(&_weak));
+ EXPECT_FALSE(u.find(&_shared_base));
+ EXPECT_FALSE(u.find(&_weak_base));
+
+ // test that weak pointer can be set and removed as base type (but removed as derived only
+ // if it was set as derived type)
+ _weak = _shared;
+ EXPECT_TRUE(u.set(_weak));
+ EXPECT_TRUE(u.find(&_weak_base));
+ EXPECT_TRUE(_weak_base.promote().get() == _shared.get());
+
+ _weak.clear();
+ _shared.clear();
+ EXPECT_TRUE(u.clear());
+ EXPECT_FALSE(u.find(&_int));
+ EXPECT_FALSE(u.find(&_shared));
+ EXPECT_FALSE(u.find(&_weak));
+ EXPECT_FALSE(u.find(&_shared_base));
+ EXPECT_FALSE(u.find(&_weak_base));
+
+ EXPECT_TRUE(u.set(_weak_base)); // now u also contains it as base class
+ EXPECT_FALSE(u.find(&_weak)); // cannot get it as derived type
+ EXPECT_FALSE(_weak.promote().get());
+ _weak_base.clear();
+ EXPECT_TRUE(u.find(&_weak_base)); // can still get it as base type
+ EXPECT_TRUE(_weak_base.promote().get());
+ if (_weak_base.promote().get()) {
+ EXPECT_EQ(1234, static_cast<EventCounter*>(_weak_base.promote().get())->magic());
+ }
+};
+
+} // namespace android
diff --git a/media/libstagefright/foundation/tests/Android.mk b/media/libstagefright/foundation/tests/Android.mk
new file mode 100644
index 0000000..d741c6f
--- /dev/null
+++ b/media/libstagefright/foundation/tests/Android.mk
@@ -0,0 +1,35 @@
+# Build the unit tests.
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
+
+LOCAL_MODULE := sf_foundation_test
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_SRC_FILES := \
+ AData_test.cpp \
+ Flagged_test.cpp \
+ TypeTraits_test.cpp \
+ Utils_test.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+ libstagefright_foundation \
+ libutils \
+
+LOCAL_C_INCLUDES := \
+ frameworks/av/include \
+
+LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CLANG := true
+
+include $(BUILD_NATIVE_TEST)
+
+# Include subdirectory makefiles
+# ============================================================
+
+# If we're building with ONE_SHOT_MAKEFILE (mm, mmm), then what the framework
+# team really wants is to build the stuff defined by this makefile.
+ifeq (,$(ONE_SHOT_MAKEFILE))
+include $(call first-makefiles-under,$(LOCAL_PATH))
+endif
diff --git a/media/libstagefright/foundation/tests/Flagged_test.cpp b/media/libstagefright/foundation/tests/Flagged_test.cpp
new file mode 100644
index 0000000..3c906994
--- /dev/null
+++ b/media/libstagefright/foundation/tests/Flagged_test.cpp
@@ -0,0 +1,639 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "Flagged_test"
+
+#include <gtest/gtest.h>
+
+#include <media/stagefright/foundation/Flagged.h>
+
+namespace android {
+
+/**
+ * Helper template that can be used to print values in static_assert error messages.
+ *
+ * Use integers here.
+ */
+template<bool, int ...N>
+struct _print_as_warning { };
+
+template<int ...N>
+struct _print_as_warning<true, N...> : std::true_type { };
+
+#define static_assert_equals(a, b, msg) \
+static_assert(_print_as_warning<(a) == (b), a, b>::value, msg)
+
+class FlaggedTest : public ::testing::Test {
+protected:
+ // empty structs
+ struct A0 { };
+ struct A1 { };
+ struct A_A0 : public A0 { };
+
+ // simple struct
+ struct BB {
+ int32_t i;
+ uint32_t u;
+ };
+
+ // struct inheriting from A0
+ struct BB_A0 : public A0 {
+ int32_t i;
+ uint32_t u;
+ };
+
+ // struct inheriting from struct inheriting A0
+ struct BB_AA0 : public A_A0 {
+ int32_t i;
+ uint32_t u;
+ };
+
+ // struct that wraps
+ struct WBBA0 {
+ BB_A0 b;
+ };
+
+ struct WBBA0_A1 : public A1 {
+ BB_A0 b;
+ };
+
+ struct WBBA0_A0 : public A0 {
+ BB_A0 b;
+ };
+
+ struct WBB_A0 : public A0 {
+ BB b;
+ };
+
+ struct WBBA0_AA0 : public A_A0 {
+ BB_A0 b;
+ };
+
+ struct WBBAA0_A0 : public A0 {
+ BB_AA0 b;
+ };
+
+ struct WWBBA0_A0 : public A0 {
+ WBBA0 b;
+ };
+};
+
+/**
+ * This test is here to confirm the handling of wrapping classes that inherit from an interface
+ * while also inheriting from that same interface. While we no longer use this construct, we want
+ * to track if this defect is ever fixed.
+ */
+TEST_F(FlaggedTest, StaticSanityTests) {
+ static_assert(sizeof(A0) == 1, "");
+ static_assert(sizeof(A1) == 1, "");
+ static_assert(sizeof(A_A0) == 1, "");
+
+ static constexpr size_t size = sizeof(BB); // original [pair]
+
+ // inheriting from A0 does not increase size
+ static_assert(sizeof(BB_A0) == size, ""); // [pair]:A0
+ static_assert(sizeof(BB_AA0) == size, ""); // [pair]:[:A0]
+
+ // wrapping a class that inherits from A0 does not increase size
+ static_assert(sizeof(WBBA0) == size, ""); // [ [pair]:[:A0] ]
+
+ // wrapping a class that inherits from A0 while also inheriting from A1 does not increase size
+ static_assert(sizeof(WBBA0_A1) == size, ""); // [ [pair]:A0 ]:A1
+
+ // wrapping a class that inherits from A0 while also inheriting from A0 DOES increase size
+ EXPECT_GT(sizeof(WBBA0_A0), size); // [ [pair]:A0 ]:A0
+
+ // wrapping a class that does not inherit from A0 while inheriting from A0 does not increase
+ // size
+ static_assert(sizeof(WBB_A0) == size, ""); // [[pair]]:A0
+
+ // wrapping a class that inherits from A0 while also inheriting from a class that inherits
+ // from A0 does increase size
+ EXPECT_GT(sizeof(WBBA0_AA0), size); // [ [pair]:A0 ]:[:A0]
+
+ // wrapping a class that indirectly inherits from A0 while also inheriting from A0 does
+ // increase size
+ EXPECT_GT(sizeof(WBBAA0_A0), size); // [ [pair]:[:A0] ]:A0
+
+ // wrapping a class that wraps an A0-derived class, while also inheriting from A0, does increase size
+ EXPECT_GT(sizeof(WWBBA0_A0), size); // [ [pair]:A0 ]:A0
+}
+
+enum FLAG : int32_t {
+ kMask0 = 0x0FF,
+ kFlag0_A = 0x0AA,
+ kFlag0_B = 0x0BB,
+ kFlag0_C = 0x0CC,
+ kMask1 = 0xFF0,
+ kFlag1_A = 0xAA0,
+ kFlag1_B = 0xBB0,
+ kFlag1_C = 0xCC0,
+ kMaskCommon = 0x0F0,
+};
+
+TEST_F(FlaggedTest, BasicExample) {
+ enum SafeFlags : uint32_t {
+ kUnsafe,
+ kSafe,
+ kSafeMask = _Flagged_helper::minMask(kSafe),
+ };
+ typedef Flagged<int32_t, SafeFlags, kSafeMask> safeInt32;
+
+ safeInt32 a(kUnsafe);
+ a.setFlags(kSafe);
+ a.get() = 15;
+ EXPECT_EQ(a.flags(), kSafe);
+ EXPECT_EQ(a.get(), 15);
+
+ enum OriginFlags : uint32_t {
+ kUnknown,
+ kConst,
+ kCalculated,
+ kComponent,
+ kApplication,
+ kFile,
+ kBinder,
+ kOriginMask = _Flagged_helper::minMask(kBinder),
+ };
+ typedef Flagged<safeInt32, OriginFlags, kOriginMask>
+ trackedSafeInt32;
+
+ static_assert(sizeof(trackedSafeInt32) == sizeof(safeInt32), "");
+
+ trackedSafeInt32 b(kConst, kSafe, 1);
+ EXPECT_EQ(b.flags(), kConst);
+ EXPECT_EQ(b.get().flags(), kSafe);
+ EXPECT_EQ(b.get().get(), 1);
+ b.setFlags(kCalculated);
+ volatile bool overflow = true;
+ b.get().setFlags(overflow ? kUnsafe : kSafe);
+
+ enum ValidatedFlags : uint32_t {
+ kUnsafeV = kUnsafe,
+ kSafeV = kSafe,
+ kValidated = kSafe | 2,
+ kSharedMaskV = kSafeMask,
+ kValidatedMask = _Flagged_helper::minMask(kValidated),
+ };
+ typedef Flagged<safeInt32, ValidatedFlags, kValidatedMask, kSharedMaskV> validatedInt32;
+
+ validatedInt32 v(kUnsafeV, kSafe, 10);
+ EXPECT_EQ(v.flags(), kUnsafeV);
+ EXPECT_EQ(v.get().flags(), kUnsafe); // !kUnsafeV overrides kSafe
+ EXPECT_EQ(v.get().get(), 10);
+ v.setFlags(kValidated);
+ EXPECT_EQ(v.flags(), kValidated);
+ EXPECT_EQ(v.get().flags(), kSafe);
+ v.get().setFlags(kUnsafe);
+ EXPECT_EQ(v.flags(), 2); // NOTE: sharing masks with enums allows strange situations to occur
+}
+
+TEST_F(FlaggedTest, _Flagged_helper_Test) {
+ using helper = _Flagged_helper;
+
+ using i32 = int32_t;
+ using u32 = uint32_t;
+ using u8 = uint8_t;
+
+ // base2
+ static_assert(Flagged<i32, u32, 0u, 0u, 0>::sFlagMask == 0u, "");
+ static_assert(Flagged<i32, u32, 0u, 0u, 0>::sFlagShift == 0, "");
+ static_assert(Flagged<i32, u32, 0u, 0u, 0>::sEffectiveMask == 0u, "");
+
+ static_assert(Flagged<i32, u32, 0u, 0u, 10>::sFlagMask == 0u, "");
+ static_assert(Flagged<i32, u32, 0u, 0u, 10>::sFlagShift == 10, "");
+ static_assert(Flagged<i32, u32, 0u, 0u, 10>::sEffectiveMask == 0u, "");
+
+ static_assert(Flagged<i32, u32, 0u, 0u, -1>::sFlagMask == 0u, "");
+ static_assert(Flagged<i32, u32, 0u, 0u, -1>::sFlagShift == 0, "");
+ static_assert(Flagged<i32, u32, 0u, 0u, -1>::sEffectiveMask == 0u, "");
+
+ static_assert(Flagged<i32, u32, 99u, 0u, 0>::sFlagMask == 99u, "");
+ static_assert(Flagged<i32, u32, 99u, 0u, 0>::sFlagShift == 0, "");
+ static_assert(Flagged<i32, u32, 99u, 0u, 0>::sEffectiveMask == 99u, "");
+
+ static_assert(Flagged<i32, u32, 0x99u, 0u, 12>::sFlagMask == 0x99u, "");
+ static_assert(Flagged<i32, u32, 0x99u, 0u, 12>::sFlagShift == 12, "");
+ static_assert(Flagged<i32, u32, 0x99u, 0u, 12>::sEffectiveMask == 0x99000u, "");
+
+ static_assert(Flagged<i32, u32, 99u, 0u, -1>::sFlagMask == 99u, "");
+ static_assert(Flagged<i32, u32, 99u, 0u, -1>::sFlagShift == 0, "");
+ static_assert(Flagged<i32, u32, 99u, 0u, -1>::sEffectiveMask == 99u, "");
+
+ // mask_of<T, Flag>
+ // also Flagged<> no default
+ typedef Flagged<i32, u32, 0x800F /* mask */, 0 /* shared mask */, 0 /* shift */> i32_800f_0;
+ typedef Flagged<i32, u32, 0x800F /* mask */, 0 /* shared mask */, 4 /* shift */> i32_800f_4;
+ // this also tests that these types can be instantiated
+ static_assert(sizeof(i32_800f_0) >= sizeof(i32) + sizeof(u32),
+ "should be at least size of component types");
+ static_assert(sizeof(i32_800f_4) == sizeof(i32_800f_0), "regardless of shift");
+ static_assert(!i32_800f_0::sFlagCombined, "");
+ static_assert(!i32_800f_4::sFlagCombined, "");
+
+ static_assert(helper::mask_of<i32_800f_0, u32>::value == 0x800F, "incorrect mask");
+ static_assert(helper::mask_of<i32_800f_0, i32>::value == 0,
+ "mask should be 0 when types mismatch");
+ static_assert(helper::mask_of<i32_800f_0, u32>::effective_value == 0x800F, "incorrect mask");
+ static_assert(helper::mask_of<i32_800f_0, i32>::effective_value == 0,
+ "mask should be 0 when types mismatch");
+ static_assert(helper::mask_of<i32_800f_0, u32>::shift == 0, "incorrect shift");
+ static_assert(helper::mask_of<i32_800f_0, i32>::shift == 0,
+ "shift should be 0 when types mismatch");
+
+ static_assert(helper::mask_of<i32_800f_4, u32>::value == 0x800F, "incorrect mask");
+ static_assert(helper::mask_of<i32_800f_4, i32>::value == 0,
+ "mask should be 0 when types mismatch");
+ static_assert(helper::mask_of<i32_800f_4, u32>::effective_value == 0x800F0, "incorrect mask");
+ static_assert(helper::mask_of<i32_800f_4, i32>::effective_value == 0,
+ "mask should be 0 when types mismatch");
+ static_assert(helper::mask_of<i32_800f_4, u32>::shift == 4, "incorrect shift");
+ static_assert(helper::mask_of<i32_800f_4, i32>::shift == 0,
+ "shift should be 0 when types mismatch");
+ static_assert(helper::mask_of<i32, u32>::value == 0, "mask should be 0 if not masked");
+ static_assert(helper::mask_of<i32, i32>::value == 0, "mask should be 0 if not masked");
+
+ // lshift(value, n)
+ static_assert(helper::lshift(0U, 0) == 0U, "");
+ static_assert(helper::lshift(0U, 30) == 0U, "");
+ static_assert(helper::lshift(1U, 0) == 1U, "");
+ static_assert(helper::lshift(1U, 10) == 1024U, "");
+ static_assert(helper::lshift(10U, 10) == 10240U, "");
+ static_assert(helper::lshift(10, 10) == 10240, "");
+ static_assert(helper::lshift(-10, 0) == -10, "");
+ // static_assert(helper::lshift(-10, 10) == -10240, ""); // error: left shift of negative value
+
+ // minMask(maxValue)
+ static_assert(helper::minMask(0U) == 0U, "lowest 0 bits");
+ static_assert(helper::minMask(1U) == 1U, "lowest 1 bit");
+ static_assert(helper::minMask(2U) == 3U, "lowest 2 bits");
+ static_assert(helper::minMask(3U) == 3U, "lowest 2 bits");
+ static_assert(helper::minMask(4U) == 7U, "lowest 3 bits");
+ static_assert(helper::minMask(~0U) == ~0U, "all bits");
+ // static_assert(helper::minMask(10) == 0xF, "all bits"); // error: must be unsigned
+
+ // topBits(n)
+ static_assert(helper::topBits<u32>(0) == 0U, "top 0 bit");
+ static_assert(helper::topBits<u32>(1) == 0x80000000U, "top 1 bit");
+ static_assert(helper::topBits<u32>(2) == 0xC0000000U, "top 2 bits");
+ static_assert(helper::topBits<u32>(12) == 0xFFF00000U, "top 12 bits");
+ static_assert(helper::topBits<u32>(32) == 0xFFFFFFFFU, "all bits");
+ // static_assert(helper::topBits<u32>(33) == 0xFFFFFFFFU, ""); // should OVERFLOW
+
+ static_assert(helper::topBits<u8>(0) == 0U, "top 0 bit");
+ static_assert(helper::topBits<u8>(1) == 0x80U, "top 1 bit");
+ static_assert(helper::topBits<u8>(2) == 0xC0U, "top 2 bit");
+ static_assert(helper::topBits<u8>(8) == 0xFFU, "all bits");
+ // static_assert(helper::topBits<u8>(9) == 0xFFU, ""); // should OVERFLOW
+
+ // getShift(mask, base, shared, base-shift, base-effective)
+ static_assert(helper::getShift(0u, 0u, 0u, 0, 0u) == 0, "no flag require no shift");
+ static_assert(helper::getShift(0u, 0u, 1u, 0, 0u) == -1,
+ "shared must be within mask and base mask");
+ static_assert(helper::getShift(0u, 1u, 1u, 0, 1u) == -1, "shared must be within mask");
+ static_assert(helper::getShift(0u, 1u, 0u, 0, 1u) == 0,
+ "no flags require no shift even with base mask");
+ static_assert(helper::getShift(0u, 1u, 0u, 1, 2u) == 0,
+ "no flags require no shift even with shifted base mask");
+ static_assert(helper::getShift(1u, 0u, 0u, 0, 0u) == 0, "no base mask requires no shift");
+ static_assert(helper::getShift(1u, 1u, 0u, 0, 1u) == 1,
+ "overlapping mask and basemask requires shift");
+ static_assert(helper::getShift(1u, 1u, 0u, 0, 1u) == 1,
+ "overlapping mask and basemask requires shift");
+ static_assert(helper::getShift(1u, 1u, 1u, 0, 1u) == 0,
+ "shared mask requires using base shift");
+ static_assert(helper::getShift(1u, 1u, 1u, 1, 2u) == 1,
+ "shared mask requires using base shift");
+ static_assert(helper::getShift(3u, 5u, 1u, 0, 5u) == 0,
+ "mask and basemask that overlap only in shared region requires no shift");
+ static_assert(helper::getShift(3u, 7u, 1u, 0, 7u) == -1,
+ "mask and basemask must not overlap in more than shared region");
+ static_assert(helper::getShift(1u, 0u, 1u, 0, 0u) == -1, "shared must be within base mask");
+
+ static_assert(helper::getShift(0u, 1u, 0u, 1, 1u) == -2, "effective mask must cover base mask");
+ static_assert(helper::getShift(0u, 5u, 0u, 1, 2u) == -2, "effective mask must cover base mask");
+ static_assert(helper::getShift(0u, 5u, 0u, 1, 10u) == 0, "");
+ static_assert(helper::getShift(0u, 5u, 0u, 1, 31u) == 0,
+ "effective mask can be larger than base mask");
+
+ static_assert(helper::getShift(0x800Fu, 0x800Fu, 0x800Fu, 0, 0x800Fu) == 0,
+ "(0x800F << 0) & 0x800F == 0x800F");
+ static_assert(helper::getShift(0x800Fu, 0x800Fu, 0x800Fu, 16, 0x800F0000u) == 16,
+ "(0x800F << 0) & 0x800F == 0x800F");
+ static_assert(helper::getShift(0x1800Fu, 0x800Fu, 0x800Fu, 0, 0x800Fu) == 0,
+ "(0x1800F << 0) & 0x800F == 0x800F");
+ static_assert(helper::getShift(0x1800Fu, 0x800Fu, 0x800Fu, 16, 0x800F0000u) == -1,
+ "(0x1800F << 16) overflows");
+
+ // verify that when not sharing masks, effective mask makes the difference
+ static_assert(helper::getShift(0x800Fu, 0u, 0u, 0, 0x800Fu) == 4,
+ "(0x800F << 4) & 0x800F == 0");
+ static_assert(helper::getShift(0x800Fu, 0x2u, 0u, 0, 0x8002u) == 2,
+ "(0x800F << 2) & 0x8002 == 0");
+ static_assert(helper::getShift(0x800Fu, 0x1u, 0u, 15, 0x8001u) == 1,
+ "(0x800F << 1) & 0x8001 == 0");
+ static_assert(helper::getShift(0x800Fu, 0x800Fu, 0u, 16, 0x800F0000u) == 0,
+ "0x800F & 0x800F0000 == 0");
+ static_assert(helper::getShift(0x800Fu, 0x800F8000u, 0u, 0, 0x800F8000u) == 5,
+ "(0x800F << 5) & 0x800F8000 == 0");
+ static_assert(helper::getShift(0x800Fu, 0xF0000u, 0u, 0, 0x800F8000u) == 5,
+ "(0x800F << 5) & 0x800F8000 == 0");
+ static_assert(helper::getShift(0x800Fu, 0x1Fu, 0u, 15, 0x800F8000u) == 5,
+ "(0x800F << 5) & 0x800F8000 == 0");
+ static_assert(helper::getShift(0xFFu, 0x80808080u, 0u, 0, 0x80808080u) == -1,
+ "0xFF always overlaps with 0x80808080");
+ static_assert(helper::getShift(0xFFu, 0x10001000u, 0u, 3, 0x80808080u) == -1,
+ "0xFF always overlaps with 0x80808080");
+ static_assert(helper::getShift(0xFFu, 0x80808040u, 0u, 0, 0x80808040u) == 7,
+ "(0xFF << 7) & 0x 80808040 == 0");
+
+ // verify min_shift (mask must be positive or no shift can be required)
+ static_assert(helper::getShift(0xFF, 0x40808040, 0, 0, 0x40808040) == 7, "");
+ static_assert(helper::getShift((i32)0x800000FF, 0x40808040, 0, 0, 0x40808040) == -1, "");
+ static_assert(helper::getShift(0x100000FF, 0x40808040, 0, 0, 0x40808040) == -1, "");
+ static_assert(helper::getShift(0xFF, (i32)0x80808040, 0, 0, (i32)0x80808040) == 7, "");
+ static_assert(helper::getShift((i32)0x80007F80, 0x40808040, 0, 0, 0x40808040) == 0, "");
+
+ // shared mask can also be negative (but no shift can be required)
+ static_assert(helper::getShift((i32)0x80007F80, (i32)0xC0808040, (i32)0x80000000,
+ 0, (i32)0xC0808040) == 0, "");
+ static_assert(helper::getShift((i32)0x80007F80, (i32)0xC0808040, (i32)0xC0000000,
+ 0, (i32)0xC0808040) == -1, "");
+ static_assert(helper::getShift((i32)0x80007F80, (i32)0x60404020, (i32)0x60000000,
+ 1, (i32)0xC0808040) == -1, "");
+
+ // min_shift
+ typedef Flagged<i32, u32, 0u> i32_0_0;
+ typedef Flagged<i32, u32, 1u> i32_1_0;
+ typedef Flagged<i32, u32, 1u, 0u, 1> i32_1_1;
+
+ // this is a wrapper over getShift, so same test cases apply when T is flagged
+ static_assert(helper::min_shift<i32_0_0, u32, 0u, 0u>::value == 0, "");
+ static_assert(helper::min_shift<i32_0_0, u32, 0u, 1u>::value == -1, "");
+ static_assert(helper::min_shift<i32_1_0, u32, 0u, 1u>::value == -1, "");
+ static_assert(helper::min_shift<i32_1_0, u32, 0u, 0u>::value == 0, "");
+ static_assert(helper::min_shift<i32_0_0, u32, 1u, 0u>::value == 0, "");
+ static_assert(helper::min_shift<i32_1_0, u32, 1u, 0u>::value == 1, "");
+ static_assert(helper::min_shift<i32_1_0, u32, 1u, 1u>::value == 0, "");
+ static_assert(helper::min_shift<i32_1_1, u32, 1u, 1u>::value == 1, "");
+ static_assert(helper::min_shift<i32_1_1, u32, 3u, 0u>::value == 2, "");
+ static_assert(helper::min_shift<Flagged<i32, u32, 5u>, u32, 3u, 1u>::value == 0, "");
+ static_assert(helper::min_shift<Flagged<i32, u32, 7u>, u32, 3u, 1u>::value == -1, "");
+ static_assert(helper::min_shift<i32_0_0, u32, 1u, 1u>::value == -1, "");
+
+ static_assert(helper::min_shift<i32_800f_0, u32, 0x800Fu, 0u>::value == 4, "");
+ static_assert(helper::min_shift<i32_800f_4, u32, 0x1800Fu, 0x800Fu>::value == 4, "");
+ static_assert(helper::min_shift<i32_800f_4, u32, 0x800Fu, 0u>::value == 0, "");
+ static_assert(helper::min_shift<Flagged<i32, u32, 0x8002u>, u32, 0x800Fu, 0u>::value == 2, "");
+ static_assert(helper::min_shift<Flagged<i32, u32, 0x8001u>, u32, 0x800Fu, 0u>::value == 1, "");
+ static_assert(
+ helper::min_shift<Flagged<i32, u32, 0x800Fu, 0u, 16>, u32, 0x800Fu, 0u>::value == 0, "");
+ static_assert(
+ helper::min_shift<Flagged<i32, u32, 0x800F8000u>, u32, 0x800Fu, 0u>::value == 5, "");
+ static_assert(
+ helper::min_shift<Flagged<i32, u32, 0x80808080u>, u32, 0xFFu, 0u>::value == -1, "");
+ static_assert(
+ helper::min_shift<Flagged<i32, u32, 0x80808040u>, u32, 0xFFu, 0u>::value == 7, "");
+
+ // for min_shift, non-tagged type behaves as if having base mask of 0
+ static_assert(helper::min_shift<i32, u32, 0u, 0u>::value == 0, "");
+ static_assert(helper::min_shift<u32, u32, 0u, 0u>::value == 0, "");
+ static_assert(helper::min_shift<i32, u32, 0u, 0u>::value == 0, "");
+ static_assert(helper::min_shift<i32, u32, 0u, 1u>::value == -1, "");
+ static_assert(helper::min_shift<i32, u32, 1u, 0u>::value == 0, "");
+ static_assert(helper::min_shift<i32, u32, 1u, 1u>::value == -1, "");
+
+ // verify min_shift (mask must be positive or no shift can be required)
+ static_assert(helper::min_shift<Flagged<i32, i32, 0x40808040>, i32, 0xFF, 0>::value == 7, "");
+ static_assert(helper::min_shift<Flagged<i32, i32, 0x40808040>,
+ i32, (i32)0x800000FF, 0>::value == -1, "");
+ static_assert(helper::min_shift<Flagged<i32, i32, 0x40808040>,
+ i32, 0x100000FF, 0>::value == -1, "");
+ static_assert(helper::min_shift<Flagged<i32, i32, (i32)0x80808040>,
+ i32, 0xFF, 0>::value == 7, "");
+ static_assert(helper::min_shift<Flagged<i32, i32, 0x40808040>,
+ i32, (i32)0x80007F80, 0>::value == 0, "");
+
+ static_assert(helper::min_shift<Flagged<i32, i32, (i32)0x80808040>,
+ i32, (i32)0x80007F80, (i32)0x80000000>::value == 0, "");
+ static_assert(helper::min_shift<Flagged<i32, i32, (i32)0xC0808040>,
+ i32, (i32)0x80007F80, (i32)0xC0000000>::value == -1, "");
+ // note: cannot create a flagged type with signed flag and shift
+ // static_assert(helper::min_shift<Flagged<i32, i32, (i32)0x60404020, 0, 1>,
+ // i32, (i32)0x40003FC0, (i32)0x40000000>::value == -1, "");
+
+ typedef Flagged<i32, u32, 0x800F /* mask */, 0 /* shared mask */, 16 /* shift */> i32_800f_16;
+ static_assert_equals(sizeof(i32_800f_16), sizeof(i32_800f_0), "");
+ // shifted mask overflows!
+ // typedef Flagged<i32, u32, 0x800F /* mask */, 0 /* shared mask */, 17 /* shift */> i32_800f_17;
+ // static_assert(sizeof(i32_800f_17) == sizeof(i32_800f_0), "");
+ typedef Flagged<i32, i32, 0x800F /* mask */, 0 /* shared mask */, 15 /* shift */> i32_800f_15i;
+ static_assert_equals(sizeof(i32_800f_15i), sizeof(i32_800f_0), "");
+ // shifted mask overflows!
+ // typedef Flagged<i32, i32, 0x800F /* mask */, 0 /* shared mask */, 16 /* shift */> i32_800f_16i;
+ // static_assert(sizeof(i32_800f_16i) == sizeof(i32_800f_0), "");
+
+ // canCombine(mask, base, shared, shift, base-shift, base-effective)
+ static_assert(helper::canCombine(0u, 0u, 0u, 0, 0, 0u), "using no mask is valid");
+ static_assert(helper::canCombine(0u, 0u, 0u, 0, 0, 0u), "");
+ static_assert(helper::canCombine(0u, 0u, 0u, 4, 0, 0u), "");
+ static_assert(!helper::canCombine(0u, 0u, 1u, 0, 0, 0u),
+ "shared mask must be the overlap of masks");
+ static_assert(helper::canCombine(1u, 0u, 0u, 0, 0, 0u), "");
+ static_assert(helper::canCombine(1u, 0u, 0u, 4, 0, 0u), "");
+ static_assert(helper::canCombine(3u, 5u, 1u, 0, 0, 5u), "");
+ static_assert(!helper::canCombine(3u, 3u, 3u, 1, 0, 3u), "shift must match when sharing mask");
+ static_assert(helper::canCombine(3u, 3u, 3u, 1, 1, 6u), "");
+ static_assert(!helper::canCombine(3u, 3u, 3u, 1, 2, 12u), "shift must match when sharing mask");
+ static_assert(!helper::canCombine(3u, 7u, 1u, 0, 0, 7u), "");
+ static_assert(!helper::canCombine(1u, 0u, 1u, 0, 0, 0u), "");
+
+ static_assert(!helper::canCombine(0u, 1u, 1u, 0, 0, 1u),
+ "shared mask must be the overlap of masks");
+ static_assert(helper::canCombine(0u, 1u, 0u, 0, 0, 1u), "");
+ static_assert(helper::canCombine(0u, 1u, 0u, 4, 0, 1u), "");
+ static_assert(helper::canCombine(1u, 1u, 0u, 1, 0, 1u), "");
+ static_assert(!helper::canCombine(1u, 1u, 0u, 0, 0, 1u), "");
+ static_assert(helper::canCombine(1u, 1u, 0u, 1, 0, 1u), "");
+ static_assert(helper::canCombine(1u, 1u, 1u, 0, 0, 1u), "");
+ static_assert(!helper::canCombine(1u, 1u, 1u, 1, 0, 1u), "shift must match when sharing mask");
+
+ static_assert(helper::canCombine(0x800Fu, 0x800Fu, 0u, 4, 0, 0x800Fu), "");
+ static_assert(!helper::canCombine(0x800Fu, 0x800Fu, 0u, 1, 0, 0x800Fu), "");
+ static_assert(helper::canCombine(0x800Fu, 0x8002u, 0u, 2, 0, 0x8002u), "");
+ static_assert(helper::canCombine(0x800Fu, 0x8001u, 0u, 1, 0, 0x8001u), "");
+ static_assert(helper::canCombine(0x800Fu, 0x800Fu, 0u, 0, 16, 0x800F0000u), "");
+ static_assert(helper::canCombine(0x800Fu, 0x800Fu, 0x800Fu, 16, 16, 0x800F0000u), "");
+ static_assert(!helper::canCombine(0x1800Fu, 0x800Fu, 0u, 0, 16, 0x800F0000u), "");
+ static_assert(!helper::canCombine(0x1800Fu, 0x800Fu, 0x800Fu, 16, 16, 0x800F0000u), "");
+ static_assert(helper::canCombine(0x800Fu, 0x800F8000u, 0u, 8, 0, 0x800F8000u), "");
+ static_assert(!helper::canCombine(0xFFu, 0x80808080u, 0u, -1, 0, 0x80808080u), "");
+ static_assert(helper::canCombine(0xFFu, 0x80808040u, 0u, 7, 0, 0x80808040u), "");
+ static_assert(helper::canCombine(0xFFu, 0x8000u, 0u, 7, 0, 0x80808040u), "");
+ static_assert(helper::canCombine(0xFFu, 0x101u, 0u, 7, 15, 0x80808040u), "");
+
+ // can combine signed-flagged types only if mask is positive or no shift is required
+ static_assert(!helper::canCombine(0xFF, 0x40808040, 0, 0, 0, 0x40808040), "");
+ static_assert(helper::canCombine(0xFF, 0x40808040, 0, 7, 0, 0x40808040), "");
+ static_assert(!helper::canCombine((i32)0x800000FF, 0x40808040, 0, 0, 0, 0x40808040), "");
+ static_assert(!helper::canCombine((i32)0x800000FF, 0x40808040, 0, 7, 0, 0x40808040), "");
+ static_assert(!helper::canCombine(0x100000FF, 0x40808040, 0, 0, 0, 0x40808040), "");
+ static_assert(!helper::canCombine(0x100000FF, 0x40808040, 0, 7, 0, 0x40808040), "");
+ static_assert(!helper::canCombine(0xFF, (i32)0x80808040, 0, 0, 0, (i32)0x80808040), "");
+ static_assert(helper::canCombine(0xFF, (i32)0x80808040, 0, 7, 0, (i32)0x80808040), "");
+ static_assert(helper::canCombine((i32)0x80007F80, 0x40808040, 0, 0, 0, 0x40808040), "");
+
+ static_assert(helper::canCombine((i32)0x80007F80, (i32)0x80808040, (i32)0x80000000, 0, 0, (i32)0x80808040), "");
+ static_assert(!helper::canCombine((i32)0xC0007F80, (i32)0x80808040, (i32)0xC0000000, 0, 0, (i32)0x80808040), "");
+ static_assert(!helper::canCombine((i32)0x80007F80, (i32)0x80808040, (i32)0x80000000, 1, 0, (i32)0x80808040), "");
+ static_assert(!helper::canCombine((i32)0xC0007F80, (i32)0x80808040, (i32)0xC0000000, 1, 0, (i32)0x80808040), "");
+
+    // can_combine<T, Flag, MASK, [SHARED_MASK], [SHIFT]>
+ static_assert(helper::can_combine<i32_0_0, u32, 0u>::value, "");
+ static_assert(helper::can_combine<i32_0_0, u32, 0u, 0u>::value, "");
+ static_assert(helper::can_combine<i32_0_0, u32, 0u, 0u, 4>::value, "");
+ static_assert(!helper::can_combine<i32_0_0, u32, 0u, 1u>::value, "");
+ static_assert(helper::can_combine<i32_0_0, u32, 1u, 0u>::value, "");
+ static_assert(helper::can_combine<i32_0_0, u32, 1u, 0u, 4>::value, "");
+ static_assert(!helper::can_combine<i32_0_0, u32, 1u, 1u>::value, "");
+
+ static_assert(!helper::can_combine<i32_1_0, u32, 0u, 1u>::value, "");
+ static_assert(helper::can_combine<i32_1_0, u32, 0u, 0u>::value, "");
+ static_assert(helper::can_combine<i32_1_0, u32, 0u, 0u, 4>::value, "");
+ static_assert(helper::can_combine<i32_1_0, u32, 1u, 0u>::value, "");
+ static_assert(!helper::can_combine<i32_1_0, u32, 1u, 0u, 0>::value, "");
+ static_assert(helper::can_combine<i32_1_0, u32, 1u, 0u, 1>::value, "");
+ static_assert(helper::can_combine<i32_1_0, u32, 1u, 1u>::value, "");
+ static_assert(helper::can_combine<i32_1_0, u32, 1u, 1u, 0>::value, "");
+ static_assert(!helper::can_combine<i32_1_0, u32, 1u, 1u, 1>::value,
+ "shouldn't be able to use SHIFT with SHARED_MASK");
+
+ static_assert(helper::can_combine<i32_800f_0, u32, 0x800Fu, 0u, 4>::value, "");
+ static_assert(!helper::can_combine<i32_800f_0, u32, 0x800Fu, 0u, 1>::value, "");
+ static_assert(helper::can_combine<i32_800f_0, u32, 0x800Fu, 0u>::value, "");
+ static_assert(helper::can_combine<Flagged<i32, u32, 0x8002u>, u32, 0x800Fu, 0u>::value, "");
+ static_assert(helper::can_combine<Flagged<i32, u32, 0x8001u>, u32, 0x800Fu, 0u>::value, "");
+ static_assert(helper::can_combine<Flagged<i32, u32, 0x800F0000u>, u32, 0x800Fu, 0u>::value, "");
+ static_assert(helper::can_combine<Flagged<i32, u32, 0x800F8000u>, u32, 0x800Fu, 0u>::value, "");
+ static_assert(!helper::can_combine<Flagged<i32, u32, 0x80808080u>, u32, 0xFFu, 0u>::value, "");
+ static_assert(helper::can_combine<Flagged<i32, u32, 0x80808040u>, u32, 0xFFu, 0u>::value, "");
+
+ // can combine signed-flagged types only if mask is positive or no shift is required
+ static_assert(helper::can_combine<Flagged<i32, i32, 0x40808040>, i32, 0xFF, 0>::value, "");
+ static_assert(!helper::can_combine<Flagged<i32, i32, 0x40808040>,
+ i32, (i32)0x800000FF, 0>::value, "");
+ static_assert(!helper::can_combine<Flagged<i32, i32, 0x40808040>,
+ i32, 0x100000FF, 0>::value, "");
+ static_assert(helper::can_combine<Flagged<i32, i32, (i32)0x80808040>, i32, 0xFF, 0>::value, "");
+ static_assert(helper::can_combine<Flagged<i32, i32, 0x40808040>,
+ i32, (i32)0x80007F80, 0>::value, "");
+
+ static_assert(helper::can_combine<Flagged<i32, i32, (i32)0x80808040>,
+ i32, (i32)0x80007F80, (i32)0x80000000>::value, "");
+ static_assert(!helper::can_combine<Flagged<i32, i32, (i32)0xC0808040>,
+ i32, (i32)0x80007F80, (i32)0xC0000000>::value, "");
+
+ static_assert(helper::min_shift<Flagged<i32, FLAG, (FLAG)0x80808040>,
+ FLAG, (FLAG)0x80007F80, (FLAG)0x80000000>::value == 0, "");
+ static_assert(helper::can_combine<Flagged<i32, FLAG, (FLAG)0x80808040>,
+ FLAG, (FLAG)0x80007F80, (FLAG)0x80000000>::value, "");
+
+ // cannot combine non-tagged types
+ static_assert(!helper::can_combine<i32, u32, 0u, 0u>::value, "");
+ static_assert(!helper::can_combine<u32, u32, 0u, 0u>::value, "");
+ static_assert(!helper::can_combine<i32, u32, 0u, 0u>::value, "");
+ static_assert(!helper::can_combine<i32, u32, 0u, 1u>::value, "");
+ static_assert(!helper::can_combine<i32, u32, 1u, 0u>::value, "");
+ static_assert(!helper::can_combine<i32, u32, 1u, 1u>::value, "");
+
+ typedef Flagged<i32_800f_0, u32, 0x800F /* mask */, 0 /* shared mask */> i32_800f_800f;
+ static_assert(i32_800f_800f::sFlagMask == 0x800F, "");
+ static_assert(i32_800f_800f::sFlagShift == 4, "");
+ static_assert(i32_800f_800f::sEffectiveMask == 0x880FF, "");
+ static_assert(!i32_800f_0::sFlagCombined, "");
+ static_assert(!i32_800f_4::sFlagCombined, "");
+
+ static_assert(i32_800f_800f::sFlagCombined, "");
+ static_assert_equals(sizeof(i32_800f_800f), sizeof(i32_800f_0), "");
+
+ typedef Flagged<i32_800f_0, u32, 0x1FFFF /* mask */> i32_800f_1ffff;
+ static_assert(i32_800f_1ffff::sFlagMask == 0x1FFFF, "");
+ static_assert(i32_800f_1ffff::sFlagShift == 0, "");
+ static_assert(i32_800f_1ffff::sEffectiveMask == 0x1FFFF, "");
+ static_assert(!i32_800f_1ffff::sFlagCombined, "");
+
+ // operational tests
+ i32_800f_800f val(0x8000, 0x1234, 56);
+ EXPECT_EQ(val.get().get(), 56);
+ EXPECT_EQ(val.flags(), 0x8000u);
+ EXPECT_EQ(val.get().flags(), 0x1234u & 0x800F);
+ val.setFlags(0x12345);
+ EXPECT_EQ(val.flags(), 0x12345u & 0x800F);
+ EXPECT_EQ(val.get().flags(), 0x1234u & 0x800F);
+ val.get().setFlags(0x54321);
+ EXPECT_EQ(val.flags(), 0x12345u & 0x800F);
+ EXPECT_EQ(val.get().flags(), 0x54321u & 0x800F);
+ EXPECT_EQ(val.get().get(), 56);
+
+ typedef Flagged<i32_800f_4, u32, 0x800F /* mask */, 0 /* shared mask */> i32_800f_800f_B;
+ static_assert(i32_800f_800f_B::sFlagMask == 0x800F, "");
+ static_assert(i32_800f_800f_B::sFlagShift == 0, "");
+ static_assert(i32_800f_800f_B::sEffectiveMask == 0x880FF, "");
+
+ i32_800f_800f_B valB(0x8000, 0x1234, -987);
+ EXPECT_EQ(valB.get().get(), -987);
+ EXPECT_EQ(valB.flags(), 0x8000u);
+ EXPECT_EQ(valB.get().flags(), 0x1234u & 0x800F);
+ valB.setFlags(0x12345);
+ EXPECT_EQ(valB.flags(), 0x12345u & 0x800F);
+ EXPECT_EQ(valB.get().flags(), 0x1234u & 0x800F);
+ valB.get().setFlags(0x5C321);
+ EXPECT_EQ(valB.flags(), 0x12345u & 0x800F);
+ EXPECT_EQ(valB.get().flags(), 0x5C321u & 0x800F);
+ EXPECT_EQ(valB.get().get(), -987);
+
+ typedef Flagged<Flagged<i32, u32, 0xFF>, u32, 0xFF0, 0xF0> i32_ff_ff0;
+ i32_ff_ff0 valC(0xABCD, 0x1234, 101);
+ EXPECT_EQ(valC.get().get(), 101);
+ EXPECT_EQ(valC.flags(), 0xBC0u);
+ EXPECT_EQ(valC.get().flags(), 0xC4u);
+ valC.setFlags(0x12345);
+ EXPECT_EQ(valC.flags(), 0x340u);
+ EXPECT_EQ(valC.get().flags(), 0x44u);
+ valC.get().setFlags(0x54321);
+ EXPECT_EQ(valC.flags(), 0x320u);
+ EXPECT_EQ(valC.get().flags(), 0x21u);
+ EXPECT_EQ(valC.get().get(), 101);
+
+ // when combining flags (with no shift), it should work with signed flags
+ typedef Flagged<Flagged<i32, FLAG, kMask0>, FLAG, kMask1, kMaskCommon> i32_F_ff_ff0;
+ static_assert(i32_F_ff_ff0::sFlagCombined, "flags should be combined");
+
+ i32_F_ff_ff0 valD(kFlag1_A, kFlag0_A, 1023);
+ EXPECT_EQ(valD.get().get(), 1023);
+ EXPECT_EQ(valD.flags(), kFlag1_A);
+ EXPECT_EQ(valD.get().flags(), kFlag0_A);
+ valD.setFlags(kFlag1_B);
+ EXPECT_EQ(valD.flags(), kFlag1_B);
+ EXPECT_EQ(valD.get().flags(), FLAG(0x0BA));
+ valD.get().setFlags(kFlag0_C);
+ EXPECT_EQ(valD.flags(), FLAG(0xBC0));
+ EXPECT_EQ(valD.get().flags(), kFlag0_C);
+ EXPECT_EQ(valD.get().get(), 1023);
+}
+
+} // namespace android
diff --git a/media/libstagefright/foundation/tests/TypeTraits_test.cpp b/media/libstagefright/foundation/tests/TypeTraits_test.cpp
new file mode 100644
index 0000000..1e2049d
--- /dev/null
+++ b/media/libstagefright/foundation/tests/TypeTraits_test.cpp
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "TypeTraits_test"
+
+#include <gtest/gtest.h>
+
+#include <media/stagefright/foundation/TypeTraits.h>
+
+namespace android {
+
+class TypeTraitsTest : public ::testing::Test {
+protected:
+ enum A { };
+ enum UA : uint32_t { };
+ enum IA : int32_t { };
+};
+
+// =========== basic sanity tests for type-support templates
+TEST_F(TypeTraitsTest, StaticTests) {
+
+ // ============ is_integral_or_enum
+
+ static_assert(!std::is_integral<A>::value, "enums should not be integral");
+ static_assert(!std::is_integral<UA>::value, "enums should not be integral");
+ static_assert(!std::is_integral<IA>::value, "enums should not be integral");
+ static_assert(is_integral_or_enum<A>::value, "enums should be integral_or_enum");
+ static_assert(is_integral_or_enum<UA>::value, "enums should be integral_or_enum");
+ static_assert(is_integral_or_enum<IA>::value, "enums should be integral_or_enum");
+ static_assert(is_integral_or_enum<int>::value, "ints should be integral_or_enum");
+ static_assert(is_integral_or_enum<unsigned>::value, "unsigned ints should be integral_or_enum");
+ static_assert(!is_integral_or_enum<float>::value, "floats should not be integral_or_enum");
+
+ // ============ is_unsigned_integral
+
+ static_assert(!std::is_unsigned<UA>::value,
+ "unsigned enums should not be unsigned");
+ static_assert(!std::is_unsigned<IA>::value,
+ "unsigned enums should not be unsigned");
+ static_assert(std::is_unsigned<typename std::underlying_type<UA>::type>::value,
+ "underlying type of unsigned enums should be unsigned");
+ static_assert(!std::is_unsigned<typename std::underlying_type<IA>::type>::value,
+ "underlying type of unsigned enums should be unsigned");
+ static_assert(is_unsigned_integral<UA>::value,
+ "unsigned enums should be unsigned_integral");
+ static_assert(!is_unsigned_integral<IA>::value,
+ "signed enums should not be unsigned_integral");
+ static_assert(is_unsigned_integral<unsigned>::value,
+ "unsigned ints should be unsigned_integral");
+ static_assert(!is_unsigned_integral<int>::value,
+ "ints should not be unsigned_integral");
+ static_assert(!is_unsigned_integral<float>::value,
+ "floats should not be unsigned_integral");
+
+ // ============ is_signed_integral
+
+ static_assert(!std::is_signed<UA>::value,
+ "unsigned enums should not be signed");
+ static_assert(!std::is_signed<IA>::value,
+ "unsigned enums should not be signed");
+ static_assert(!std::is_signed<typename std::underlying_type<UA>::type>::value,
+ "underlying type of unsigned enums should be signed");
+ static_assert(std::is_signed<typename std::underlying_type<IA>::type>::value,
+ "underlying type of unsigned enums should be signed");
+ static_assert(!is_signed_integral<UA>::value,
+ "unsigned enums should not be signed_integral");
+ static_assert(is_signed_integral<IA>::value,
+ "signed enums should be signed_integral");
+ static_assert(!is_signed_integral<unsigned>::value,
+ "unsigned ints should not be signed_integral");
+ static_assert(is_signed_integral<int>::value,
+ "ints should be signed_integral");
+ static_assert(!is_signed_integral<float>::value,
+ "floats should not be signed_integral");
+
+ // ============ underlying_integral_type
+
+ static_assert(std::is_same<uint64_t, typename underlying_integral_type<uint64_t>::type>::value,
+ "underlying integral type of uint64_t should be uint64_t");
+ static_assert(std::is_same<uint32_t, typename underlying_integral_type<UA>::type>::value,
+ "underlying integral type of uint32_t based enums should be uint32_t");
+ static_assert(std::is_same<int64_t, typename underlying_integral_type<int64_t>::type>::value,
+ "underlying integral type of int64_t should be int64_t");
+ static_assert(std::is_same<int32_t, typename underlying_integral_type<IA>::type>::value,
+ "underlying integral type of int32_t based enums should be int32_t");
+ //typedef underlying_integral_type<float>::type no_type;
+ static_assert(std::is_same<void, typename underlying_integral_type<float, void>::type>::value,
+ "underlying integral type of float cannot be specified");
+
+ // ============ is_one_of
+
+ static_assert(!is_one_of<int>::value, "int shouldn't be one of {}");
+ static_assert(!is_one_of<int, unsigned>::value, "int shouldn't be one of {unsigned}");
+ static_assert(!is_one_of<int, unsigned, float>::value,
+ "int shouldn't be one of {unsigned, float}");
+ static_assert(is_one_of<int, int>::value, "int should be one of {int}");
+ static_assert(is_one_of<int, int, float>::value, "int should be one of {int, float}");
+ static_assert(is_one_of<int, float, int>::value, "int should be one of {float, int}");
+ static_assert(is_one_of<int, float, int, unsigned>::value,
+ "int should be one of {float, int, unsigned}");
+ static_assert(is_one_of<int, float, unsigned, int>::value,
+ "int should be one of {float, unsigned, int}");
+ static_assert(!is_one_of<int, int&>::value, "int shouldn't be one of {int&}");
+
+ // ============ are_unique
+
+ static_assert(are_unique<>::value, "{} should be unique");
+ static_assert(are_unique<int>::value, "{int} should be unique");
+ static_assert(are_unique<int, float>::value, "{int, float} should be unique");
+ static_assert(!are_unique<int, int>::value, "{int, int} shouldn't be unique");
+ static_assert(!are_unique<int, float, int>::value, "{int, float, int} shouldn't be unique");
+ static_assert(!are_unique<float, int, int>::value, "{float, int, int} shouldn't be unique");
+ static_assert(!are_unique<int, int, float>::value, "{int, int, float} shouldn't be unique");
+
+ // ============ find_first
+
+ static_assert(find_first<int>::index == 0, "int is not in {}");
+ static_assert(find_first<int, unsigned>::index == 0, "int is not in {unsigned}");
+ static_assert(find_first<int, unsigned, float>::index == 0, "int is not in {unsigned, float}");
+ static_assert(find_first<int, int>::index == 1, "int is 1st in {int}");
+ static_assert(find_first<int, int, float>::index == 1, "int is 1st in {int, float}");
+ static_assert(find_first<int, float, int>::index == 2, "int is 2nd in {float, int}");
+ static_assert(find_first<int, float, int, unsigned>::index == 2,
+ "int is 2nd in {float, int, unsigned}");
+ static_assert(find_first<int, float, int, unsigned>::index == 2,
+ "int is 2nd and 3rd in {float, int, int, unsigned}");
+ static_assert(find_first<int, float, unsigned, int>::index == 3,
+ "int is 3rd in {float, unsigned, int}");
+ static_assert(find_first<int, int&>::index == 0, "int is not in {int&}");
+
+ // ============ find_first_convertible_to
+
+ static_assert(find_first_convertible_to<int>::index == 0, "int is not convertible to {}");
+ static_assert(find_first_convertible_to<int, unsigned*>::index == 0,
+ "int is not convertible to {unsigned*}");
+ static_assert(find_first_convertible_to<int, unsigned*, float&>::index == 0,
+ "int is not convertible to {unsigned, float&}");
+ static_assert(find_first_convertible_to<int, int>::index == 1, "int is convertible to {int}");
+ static_assert(find_first_convertible_to<int, unsigned, int>::index == 1,
+ "int is convertible to 1st of {unsigned, int}");
+ static_assert(find_first_convertible_to<int, int&, float>::index == 2,
+ "int is convertible to 2nd of {int&, float}");
+ static_assert(find_first_convertible_to<float, float*, int, unsigned>::index == 2,
+ "float is convertible to 2nd of {float*, int, unsigned}");
+ static_assert(find_first_convertible_to<float, void, float[1], int>::index == 3,
+ "int is 3rd convertible to {void, float[], int}");
+ static_assert(find_first_convertible_to<int&, const int&>::index == 1,
+ "int& is convertible to {const int&}");
+ static_assert(find_first_convertible_to<const int&, int&>::index == 0,
+ "const int& is not convertible to {int&}");
+}
+
+} // namespace android
diff --git a/media/libstagefright/foundation/tests/Utils_test.cpp b/media/libstagefright/foundation/tests/Utils_test.cpp
new file mode 100644
index 0000000..0439d5c
--- /dev/null
+++ b/media/libstagefright/foundation/tests/Utils_test.cpp
@@ -0,0 +1,280 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "Utils_test"
+
+#include <gtest/gtest.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AStringUtils.h>
+#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/Utils.h> // for FOURCC
+
+namespace android {
+
+class UtilsTest : public ::testing::Test {
+};
+
+TEST_F(UtilsTest, TestStringUtils) {
+ ASSERT_EQ(AStringUtils::Compare("Audio", "AudioExt", 5, false), 0);
+ ASSERT_EQ(AStringUtils::Compare("Audio", "audiOExt", 5, true), 0);
+ ASSERT_NE(AStringUtils::Compare("Audio", "audioExt", 5, false), 0);
+ ASSERT_NE(AStringUtils::Compare("Audio", "AudiOExt", 5, false), 0);
+
+ ASSERT_LT(AStringUtils::Compare("Audio", "AudioExt", 7, false), 0);
+ ASSERT_LT(AStringUtils::Compare("Audio", "audiOExt", 7, true), 0);
+
+ ASSERT_GT(AStringUtils::Compare("AudioExt", "Audio", 7, false), 0);
+ ASSERT_GT(AStringUtils::Compare("audiOext", "Audio", 7, true), 0);
+
+ ASSERT_LT(AStringUtils::Compare("Audio", "Video", 5, false), 0);
+ ASSERT_LT(AStringUtils::Compare("Audio1", "Audio2", 6, false), 0);
+ ASSERT_LT(AStringUtils::Compare("audio", "VIDEO", 5, true), 0);
+ ASSERT_LT(AStringUtils::Compare("audio1", "AUDIO2", 6, true), 0);
+
+ ASSERT_GT(AStringUtils::Compare("Video", "Audio", 5, false), 0);
+ ASSERT_GT(AStringUtils::Compare("Audio2", "Audio1", 6, false), 0);
+ ASSERT_GT(AStringUtils::Compare("VIDEO", "audio", 5, true), 0);
+ ASSERT_GT(AStringUtils::Compare("AUDIO2", "audio1", 6, true), 0);
+
+ ASSERT_TRUE(AStringUtils::MatchesGlob("AudioA", 5, "AudioB", 5, false));
+ ASSERT_FALSE(AStringUtils::MatchesGlob("AudioA", 6, "AudioA", 5, false));
+ ASSERT_FALSE(AStringUtils::MatchesGlob("AudioA", 5, "AudioA", 6, false));
+ ASSERT_FALSE(AStringUtils::MatchesGlob("AudioA", 5, "audiOB", 5, false));
+ ASSERT_TRUE(AStringUtils::MatchesGlob("AudioA", 5, "audiOB", 5, true));
+ ASSERT_FALSE(AStringUtils::MatchesGlob("AudioA", 6, "AudioA", 5, true));
+ ASSERT_FALSE(AStringUtils::MatchesGlob("AudioA", 5, "AudioA", 6, true));
+
+ ASSERT_TRUE(AStringUtils::MatchesGlob("*1", 1, "String8", 6, true));
+ ASSERT_TRUE(AStringUtils::MatchesGlob("*1", 1, "String8", 6, false));
+ ASSERT_TRUE(AStringUtils::MatchesGlob("*1", 1, "String8", 0, true));
+ ASSERT_TRUE(AStringUtils::MatchesGlob("*1", 1, "String8", 0, false));
+
+ ASSERT_TRUE(AStringUtils::MatchesGlob("*ring1", 5, "String8", 6, false));
+ ASSERT_TRUE(AStringUtils::MatchesGlob("*ring2", 5, "STRING8", 6, true));
+ ASSERT_FALSE(AStringUtils::MatchesGlob("*ring4", 5, "StRing8", 6, false));
+ ASSERT_FALSE(AStringUtils::MatchesGlob("*ring5", 5, "StrinG8", 6, false));
+ ASSERT_FALSE(AStringUtils::MatchesGlob("*ring8", 5, "String8", 7, false));
+ ASSERT_FALSE(AStringUtils::MatchesGlob("*ring8", 5, "String8", 7, true));
+
+ ASSERT_TRUE(AStringUtils::MatchesGlob("Str*1", 4, "String8", 6, false));
+ ASSERT_TRUE(AStringUtils::MatchesGlob("Str*2", 4, "STRING8", 6, true));
+ ASSERT_FALSE(AStringUtils::MatchesGlob("Str*3", 4, "string8", 6, false));
+ ASSERT_FALSE(AStringUtils::MatchesGlob("Str*4", 4, "StRing8", 6, false));
+ ASSERT_FALSE(AStringUtils::MatchesGlob("Str*5", 4, "AString8", 7, false));
+ ASSERT_FALSE(AStringUtils::MatchesGlob("Str*6", 4, "AString8", 7, true));
+
+ ASSERT_TRUE(AStringUtils::MatchesGlob("Str*ng1", 6, "String8", 6, false));
+ ASSERT_FALSE(AStringUtils::MatchesGlob("Str*ng2", 6, "string8", 6, false));
+ ASSERT_FALSE(AStringUtils::MatchesGlob("Str*ng3", 6, "StRing8", 6, false));
+ ASSERT_FALSE(AStringUtils::MatchesGlob("Str*ng4", 6, "StriNg8", 6, false));
+ ASSERT_FALSE(AStringUtils::MatchesGlob("Str*ng5", 6, "StrinG8", 6, false));
+ ASSERT_TRUE(AStringUtils::MatchesGlob("Str*ng6", 6, "STRING8", 6, true));
+ ASSERT_FALSE(AStringUtils::MatchesGlob("Str*ng8", 6, "AString8", 7, false));
+ ASSERT_FALSE(AStringUtils::MatchesGlob("Str*ng1", 6, "String16", 7, false));
+ ASSERT_TRUE(AStringUtils::MatchesGlob("Str*ing9", 7, "String8", 6, false));
+ ASSERT_FALSE(AStringUtils::MatchesGlob("Str*ringA", 8, "String8", 6, false));
+ ASSERT_FALSE(AStringUtils::MatchesGlob("Str*ng8", 6, "AString8", 7, true));
+ ASSERT_FALSE(AStringUtils::MatchesGlob("Str*ng1", 6, "String16", 7, true));
+ ASSERT_TRUE(AStringUtils::MatchesGlob("Str*ing9", 7, "STRING8", 6, true));
+ ASSERT_FALSE(AStringUtils::MatchesGlob("Str*ringA", 8, "String8", 6, true));
+
+ ASSERT_TRUE(AStringUtils::MatchesGlob("*str*str1", 8, "bestrestroom", 9, false));
+ ASSERT_TRUE(AStringUtils::MatchesGlob("*str*str1", 8, "bestrestrestroom", 13, false));
+ ASSERT_FALSE(AStringUtils::MatchesGlob("*str*stro", 8, "bestrestrestroom", 14, false));
+ ASSERT_TRUE(AStringUtils::MatchesGlob("*str*str*1", 9, "bestrestrestroom", 14, false));
+ ASSERT_TRUE(AStringUtils::MatchesGlob("*str*str1", 8, "beSTReSTRoom", 9, true));
+ ASSERT_TRUE(AStringUtils::MatchesGlob("*str*str1", 8, "beSTRestreSTRoom", 13, true));
+ ASSERT_FALSE(AStringUtils::MatchesGlob("*str*stro", 8, "bestreSTReSTRoom", 14, true));
+ ASSERT_TRUE(AStringUtils::MatchesGlob("*str*str*1", 9, "bestreSTReSTRoom", 14, true));
+}
+
+TEST_F(UtilsTest, TestDebug) {
+#define LVL(x) (ADebug::Level)(x)
+ ASSERT_EQ(ADebug::GetLevelFromSettingsString("video", "", LVL(5)), LVL(5));
+ ASSERT_EQ(ADebug::GetLevelFromSettingsString("video", " \t \n ", LVL(2)), LVL(2));
+ ASSERT_EQ(ADebug::GetLevelFromSettingsString("video", "3", LVL(5)), LVL(3));
+ ASSERT_EQ(ADebug::GetLevelFromSettingsString("video", "3:*deo", LVL(5)), LVL(3));
+ ASSERT_EQ(ADebug::GetLevelFromSettingsString(
+ "video", "\t\n 3 \t\n:\t\n video \t\n", LVL(5)), LVL(3));
+ ASSERT_EQ(ADebug::GetLevelFromSettingsString("video", "3:*deo,2:vid*", LVL(5)), LVL(2));
+ ASSERT_EQ(ADebug::GetLevelFromSettingsString(
+ "avideo", "\t\n 3 \t\n:\t\n avideo \t\n,\t\n 2 \t\n:\t\n video \t\n", LVL(5)), LVL(3));
+ ASSERT_EQ(ADebug::GetLevelFromSettingsString(
+ "audio.omx", "4:*omx,3:*d*o*,2:audio*", LVL(5)), LVL(2));
+ ASSERT_EQ(ADebug::GetLevelFromSettingsString(
+ "video.omx", "4:*omx,3:*d*o*,2:audio*", LVL(5)), LVL(3));
+ ASSERT_EQ(ADebug::GetLevelFromSettingsString("video", "4:*omx,3:*d*o*,2:audio*", LVL(5)), LVL(3));
+ ASSERT_EQ(ADebug::GetLevelFromSettingsString("omx", "4:*omx,3:*d*o*,2:audio*", LVL(5)), LVL(4));
+#undef LVL
+}
+
+TEST_F(UtilsTest, TestFourCC) {
+ ASSERT_EQ(FOURCC('s', 't', 'm' , 'u'), 'stmu');
+}
+
+TEST_F(UtilsTest, TestMathTemplates) {
+ ASSERT_EQ(divRound(-10, -4), 3);
+ ASSERT_EQ(divRound(-11, -4), 3);
+ ASSERT_EQ(divRound(-12, -4), 3);
+ ASSERT_EQ(divRound(-13, -4), 3);
+ ASSERT_EQ(divRound(-14, -4), 4);
+
+ ASSERT_EQ(divRound(10, -4), -3);
+ ASSERT_EQ(divRound(11, -4), -3);
+ ASSERT_EQ(divRound(12, -4), -3);
+ ASSERT_EQ(divRound(13, -4), -3);
+ ASSERT_EQ(divRound(14, -4), -4);
+
+ ASSERT_EQ(divRound(-10, 4), -3);
+ ASSERT_EQ(divRound(-11, 4), -3);
+ ASSERT_EQ(divRound(-12, 4), -3);
+ ASSERT_EQ(divRound(-13, 4), -3);
+ ASSERT_EQ(divRound(-14, 4), -4);
+
+ ASSERT_EQ(divRound(10, 4), 3);
+ ASSERT_EQ(divRound(11, 4), 3);
+ ASSERT_EQ(divRound(12, 4), 3);
+ ASSERT_EQ(divRound(13, 4), 3);
+ ASSERT_EQ(divRound(14, 4), 4);
+
+ ASSERT_EQ(divUp(-11, -4), 3);
+ ASSERT_EQ(divUp(-12, -4), 3);
+ ASSERT_EQ(divUp(-13, -4), 4);
+
+ ASSERT_EQ(divUp(11, -4), -2);
+ ASSERT_EQ(divUp(12, -4), -3);
+ ASSERT_EQ(divUp(13, -4), -3);
+
+ ASSERT_EQ(divUp(-11, 4), -2);
+ ASSERT_EQ(divUp(-12, 4), -3);
+ ASSERT_EQ(divUp(-13, 4), -3);
+
+ ASSERT_EQ(divUp(11, 4), 3);
+ ASSERT_EQ(divUp(12, 4), 3);
+ ASSERT_EQ(divUp(13, 4), 4);
+
+ ASSERT_EQ(align(11, 4), 12);
+ ASSERT_EQ(align(12, 4), 12);
+ ASSERT_EQ(align(13, 4), 16);
+ ASSERT_EQ(align(11, 8), 16);
+ ASSERT_EQ(align(11, 2), 12);
+ ASSERT_EQ(align(11, 1), 11);
+
+ ASSERT_EQ(abs(5L), 5L);
+ ASSERT_EQ(abs(-25), 25);
+
+ ASSERT_EQ(min(5.6f, 6.0f), 5.6f);
+ ASSERT_EQ(min(6.0f, 5.6f), 5.6f);
+ ASSERT_EQ(min(-4.3, 8.6), -4.3);
+ ASSERT_EQ(min(8.6, -4.3), -4.3);
+
+ ASSERT_EQ(max(5.6f, 6.0f), 6.0f);
+ ASSERT_EQ(max(6.0f, 5.6f), 6.0f);
+ ASSERT_EQ(max(-4.3, 8.6), 8.6);
+ ASSERT_EQ(max(8.6, -4.3), 8.6);
+
+ ASSERT_FALSE(isInRange(-43, 86u, -44));
+ ASSERT_TRUE(isInRange(-43, 87u, -43));
+ ASSERT_TRUE(isInRange(-43, 88u, -1));
+ ASSERT_TRUE(isInRange(-43, 89u, 0));
+ ASSERT_TRUE(isInRange(-43, 90u, 46));
+ ASSERT_FALSE(isInRange(-43, 91u, 48));
+ ASSERT_FALSE(isInRange(-43, 92u, 50));
+
+ ASSERT_FALSE(isInRange(43, 86u, 42));
+ ASSERT_TRUE(isInRange(43, 87u, 43));
+ ASSERT_TRUE(isInRange(43, 88u, 44));
+ ASSERT_TRUE(isInRange(43, 89u, 131));
+ ASSERT_FALSE(isInRange(43, 90u, 133));
+ ASSERT_FALSE(isInRange(43, 91u, 135));
+
+ ASSERT_FALSE(isInRange(43u, 86u, 42u));
+ ASSERT_TRUE(isInRange(43u, 85u, 43u));
+ ASSERT_TRUE(isInRange(43u, 84u, 44u));
+ ASSERT_TRUE(isInRange(43u, 83u, 125u));
+ ASSERT_FALSE(isInRange(43u, 82u, 125u));
+ ASSERT_FALSE(isInRange(43u, 81u, 125u));
+
+ ASSERT_FALSE(isInRange(-43, ~0u, 43));
+ ASSERT_FALSE(isInRange(-43, ~0u, 44));
+ ASSERT_FALSE(isInRange(-43, ~0u, ~0));
+ ASSERT_FALSE(isInRange(-43, ~0u, 41));
+ ASSERT_FALSE(isInRange(-43, ~0u, 40));
+
+ ASSERT_FALSE(isInRange(43u, ~0u, 43u));
+ ASSERT_FALSE(isInRange(43u, ~0u, 41u));
+ ASSERT_FALSE(isInRange(43u, ~0u, 40u));
+ ASSERT_FALSE(isInRange(43u, ~0u, ~0u));
+
+ ASSERT_FALSE(isInRange(-43, 86u, -44, 0u));
+ ASSERT_FALSE(isInRange(-43, 86u, -44, 1u));
+ ASSERT_FALSE(isInRange(-43, 86u, -44, 2u));
+ ASSERT_FALSE(isInRange(-43, 86u, -44, ~0u));
+ ASSERT_TRUE(isInRange(-43, 87u, -43, 0u));
+ ASSERT_TRUE(isInRange(-43, 87u, -43, 1u));
+ ASSERT_TRUE(isInRange(-43, 87u, -43, 86u));
+ ASSERT_TRUE(isInRange(-43, 87u, -43, 87u));
+ ASSERT_FALSE(isInRange(-43, 87u, -43, 88u));
+ ASSERT_FALSE(isInRange(-43, 87u, -43, ~0u));
+ ASSERT_TRUE(isInRange(-43, 88u, -1, 0u));
+ ASSERT_TRUE(isInRange(-43, 88u, -1, 45u));
+ ASSERT_TRUE(isInRange(-43, 88u, -1, 46u));
+ ASSERT_FALSE(isInRange(-43, 88u, -1, 47u));
+ ASSERT_FALSE(isInRange(-43, 88u, -1, ~3u));
+ ASSERT_TRUE(isInRange(-43, 90u, 46, 0u));
+ ASSERT_TRUE(isInRange(-43, 90u, 46, 1u));
+ ASSERT_FALSE(isInRange(-43, 90u, 46, 2u));
+ ASSERT_FALSE(isInRange(-43, 91u, 48, 0u));
+ ASSERT_FALSE(isInRange(-43, 91u, 48, 2u));
+ ASSERT_FALSE(isInRange(-43, 91u, 48, ~6u));
+ ASSERT_FALSE(isInRange(-43, 92u, 50, 0u));
+ ASSERT_FALSE(isInRange(-43, 92u, 50, 1u));
+
+ ASSERT_FALSE(isInRange(43u, 86u, 42u, 0u));
+ ASSERT_FALSE(isInRange(43u, 86u, 42u, 1u));
+ ASSERT_FALSE(isInRange(43u, 86u, 42u, 2u));
+ ASSERT_FALSE(isInRange(43u, 86u, 42u, ~0u));
+ ASSERT_TRUE(isInRange(43u, 87u, 43u, 0u));
+ ASSERT_TRUE(isInRange(43u, 87u, 43u, 1u));
+ ASSERT_TRUE(isInRange(43u, 87u, 43u, 86u));
+ ASSERT_TRUE(isInRange(43u, 87u, 43u, 87u));
+ ASSERT_FALSE(isInRange(43u, 87u, 43u, 88u));
+ ASSERT_FALSE(isInRange(43u, 87u, 43u, ~0u));
+ ASSERT_TRUE(isInRange(43u, 88u, 60u, 0u));
+ ASSERT_TRUE(isInRange(43u, 88u, 60u, 70u));
+ ASSERT_TRUE(isInRange(43u, 88u, 60u, 71u));
+ ASSERT_FALSE(isInRange(43u, 88u, 60u, 72u));
+ ASSERT_FALSE(isInRange(43u, 88u, 60u, ~3u));
+ ASSERT_TRUE(isInRange(43u, 90u, 132u, 0u));
+ ASSERT_TRUE(isInRange(43u, 90u, 132u, 1u));
+ ASSERT_FALSE(isInRange(43u, 90u, 132u, 2u));
+ ASSERT_FALSE(isInRange(43u, 91u, 134u, 0u));
+ ASSERT_FALSE(isInRange(43u, 91u, 134u, 2u));
+ ASSERT_FALSE(isInRange(43u, 91u, 134u, ~6u));
+ ASSERT_FALSE(isInRange(43u, 92u, 136u, 0u));
+ ASSERT_FALSE(isInRange(43u, 92u, 136u, 1u));
+
+ ASSERT_EQ(periodicError(124, 100), 24);
+ ASSERT_EQ(periodicError(288, 100), 12);
+ ASSERT_EQ(periodicError(-345, 100), 45);
+ ASSERT_EQ(periodicError(-493, 100), 7);
+ ASSERT_EQ(periodicError(-550, 100), 50);
+ ASSERT_EQ(periodicError(-600, 100), 0);
+}
+
+} // namespace android
diff --git a/media/libstagefright/http/Android.bp b/media/libstagefright/http/Android.bp
index 71a8750..5d90b0a 100644
--- a/media/libstagefright/http/Android.bp
+++ b/media/libstagefright/http/Android.bp
@@ -29,6 +29,10 @@
misc_undefined: [
"signed-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
product_variables: {
diff --git a/media/libstagefright/httplive/Android.bp b/media/libstagefright/httplive/Android.bp
index 9cab226..e415334 100644
--- a/media/libstagefright/httplive/Android.bp
+++ b/media/libstagefright/httplive/Android.bp
@@ -24,6 +24,10 @@
"signed-integer-overflow",
"unsigned-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
shared_libs: [
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 8c88fe9..143fd59 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -49,11 +49,6 @@
const int64_t LiveSession::kUpSwitchMarginUs = 5000000ll;
const int64_t LiveSession::kResumeThresholdUs = 100000ll;
-// Buffer Prepare/Ready/Underflow Marks
-const int64_t LiveSession::kReadyMarkUs = 5000000ll;
-const int64_t LiveSession::kPrepareMarkUs = 1500000ll;
-const int64_t LiveSession::kUnderflowMarkUs = 1000000ll;
-
struct LiveSession::BandwidthEstimator : public RefBase {
BandwidthEstimator();
@@ -495,6 +490,13 @@
return new HTTPDownloader(mHTTPService, mExtraHeaders);
}
+void LiveSession::setBufferingSettings(
+ const BufferingSettings &buffering) {
+ sp<AMessage> msg = new AMessage(kWhatSetBufferingSettings, this);
+ writeToAMessage(msg, buffering);
+ msg->post();
+}
+
void LiveSession::connectAsync(
const char *url, const KeyedVector<String8, String8> *headers) {
sp<AMessage> msg = new AMessage(kWhatConnect, this);
@@ -518,9 +520,10 @@
return err;
}
-status_t LiveSession::seekTo(int64_t timeUs) {
+status_t LiveSession::seekTo(int64_t timeUs, MediaPlayerSeekMode mode) {
sp<AMessage> msg = new AMessage(kWhatSeek, this);
msg->setInt64("timeUs", timeUs);
+ msg->setInt32("mode", mode);
sp<AMessage> response;
status_t err = msg->postAndAwaitResponse(&response);
@@ -619,6 +622,12 @@
void LiveSession::onMessageReceived(const sp<AMessage> &msg) {
switch (msg->what()) {
+ case kWhatSetBufferingSettings:
+ {
+ readFromAMessage(msg, &mBufferingSettings);
+ break;
+ }
+
case kWhatConnect:
{
onConnect(msg);
@@ -829,7 +838,10 @@
// If switching up, require a cushion bigger than kUnderflowMark
// to avoid buffering immediately after the switch.
// (If we don't have that cushion we'd rather cancel and try again.)
- int64_t delayUs = switchUp ? (kUnderflowMarkUs + 1000000ll) : 0;
+ int64_t delayUs =
+ switchUp ?
+ (mBufferingSettings.mRebufferingWatermarkLowMs * 1000ll + 1000000ll)
+ : 0;
bool needResumeUntil = false;
sp<AMessage> stopParams = msg;
if (checkSwitchProgress(stopParams, delayUs, &needResumeUntil)) {
@@ -1441,8 +1453,11 @@
void LiveSession::onSeek(const sp<AMessage> &msg) {
int64_t timeUs;
+ int32_t mode;
CHECK(msg->findInt64("timeUs", &timeUs));
- changeConfiguration(timeUs);
+ CHECK(msg->findInt32("mode", &mode));
+ // TODO: add "mode" to changeConfiguration.
+ changeConfiguration(timeUs/* , (MediaPlayerSeekMode)mode */);
}
status_t LiveSession::getDuration(int64_t *durationUs) const {
@@ -2185,13 +2200,16 @@
}
++activeCount;
- int64_t readyMark = mInPreparationPhase ? kPrepareMarkUs : kReadyMarkUs;
- if (bufferedDurationUs > readyMark
+ int64_t readyMarkUs =
+ (mInPreparationPhase ?
+ mBufferingSettings.mInitialWatermarkMs :
+ mBufferingSettings.mRebufferingWatermarkHighMs) * 1000ll;
+ if (bufferedDurationUs > readyMarkUs
|| mPacketSources[i]->isFinished(0)) {
++readyCount;
}
if (!mPacketSources[i]->isFinished(0)) {
- if (bufferedDurationUs < kUnderflowMarkUs) {
+ if (bufferedDurationUs < mBufferingSettings.mRebufferingWatermarkLowMs * 1000ll) {
++underflowCount;
}
if (bufferedDurationUs > mUpSwitchMark) {
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index 65a824e..abf8cf0 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -18,6 +18,7 @@
#define LIVE_SESSION_H_
+#include <media/BufferingSettings.h>
#include <media/stagefright/foundation/AHandler.h>
#include <media/mediaplayer.h>
@@ -72,6 +73,8 @@
uint32_t flags,
const sp<IMediaHTTPService> &httpService);
+ void setBufferingSettings(const BufferingSettings &buffering);
+
int64_t calculateMediaTimeUs(int64_t firstTimeUs, int64_t timeUs, int32_t discontinuitySeq);
status_t dequeueAccessUnit(StreamType stream, sp<ABuffer> *accessUnit);
@@ -86,7 +89,7 @@
status_t disconnect();
// Blocks until seek is complete.
- status_t seekTo(int64_t timeUs);
+ status_t seekTo(int64_t timeUs, MediaPlayerSeekMode mode);
status_t getDuration(int64_t *durationUs) const;
size_t getTrackCount() const;
@@ -129,6 +132,7 @@
kWhatChangeConfiguration2 = 'chC2',
kWhatChangeConfiguration3 = 'chC3',
kWhatPollBuffering = 'poll',
+ kWhatSetBufferingSettings = 'sBuS',
};
// Bandwidth Switch Mark Defaults
@@ -138,9 +142,7 @@
static const int64_t kResumeThresholdUs;
// Buffer Prepare/Ready/Underflow Marks
- static const int64_t kReadyMarkUs;
- static const int64_t kPrepareMarkUs;
- static const int64_t kUnderflowMarkUs;
+ BufferingSettings mBufferingSettings;
struct BandwidthEstimator;
struct BandwidthItem {
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 7ad7fee..00cf142 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -26,6 +26,7 @@
#include "include/avc_utils.h"
#include "include/ID3.h"
#include "mpeg2ts/AnotherPacketSource.h"
+#include "mpeg2ts/HlsSampleDecryptor.h"
#include <media/stagefright/foundation/ABitReader.h>
#include <media/stagefright/foundation/ABuffer.h>
@@ -36,7 +37,6 @@
#include <ctype.h>
#include <inttypes.h>
-#include <openssl/aes.h>
#define FLOGV(fmt, ...) ALOGV("[fetcher-%d] " fmt, mFetcherID, ##__VA_ARGS__)
#define FSLOGV(stream, fmt, ...) ALOGV("[fetcher-%d] [%s] " fmt, mFetcherID, \
@@ -167,11 +167,15 @@
mFirstPTSValid(false),
mFirstTimeUs(-1ll),
mVideoBuffer(new AnotherPacketSource(NULL)),
+ mSampleAesKeyItemChanged(false),
mThresholdRatio(-1.0f),
mDownloadState(new DownloadState()),
mHasMetadata(false) {
memset(mPlaylistHash, 0, sizeof(mPlaylistHash));
mHTTPDownloader = mSession->getHTTPDownloader();
+
+ memset(mKeyData, 0, sizeof(mKeyData));
+ memset(mAESInitVec, 0, sizeof(mAESInitVec));
}
PlaylistFetcher::~PlaylistFetcher() {
@@ -306,6 +310,15 @@
}
}
+ // TODO: Revise this when we add support for KEYFORMAT
+ // If method has changed (e.g., -> NONE); sufficient to check at the segment boundary
+ if (mSampleAesKeyItem != NULL && first && found && method != "SAMPLE-AES") {
+ ALOGI("decryptBuffer: resetting mSampleAesKeyItem(%p) with method %s",
+ mSampleAesKeyItem.get(), method.c_str());
+ mSampleAesKeyItem = NULL;
+ mSampleAesKeyItemChanged = true;
+ }
+
if (!found) {
method = "NONE";
}
@@ -313,6 +326,8 @@
if (method == "NONE") {
return OK;
+ } else if (method == "SAMPLE-AES") {
+ ALOGV("decryptBuffer: Non-Widevine SAMPLE-AES is supported now.");
} else if (!(method == "AES-128")) {
ALOGE("Unsupported cipher method '%s'", method.c_str());
return ERROR_UNSUPPORTED;
@@ -345,6 +360,79 @@
mAESKeyForURI.add(keyURI, key);
}
+ if (first) {
+ // If decrypting the first block in a file, read the iv from the manifest
+ // or derive the iv from the file's sequence number.
+
+ unsigned char AESInitVec[AES_BLOCK_SIZE];
+ AString iv;
+ if (itemMeta->findString("cipher-iv", &iv)) {
+ if ((!iv.startsWith("0x") && !iv.startsWith("0X"))
+ || iv.size() > 16 * 2 + 2) {
+ ALOGE("malformed cipher IV '%s'.", iv.c_str());
+ return ERROR_MALFORMED;
+ }
+
+ while (iv.size() < 16 * 2 + 2) {
+ iv.insert("0", 1, 2);
+ }
+
+ memset(AESInitVec, 0, sizeof(AESInitVec));
+ for (size_t i = 0; i < 16; ++i) {
+ char c1 = tolower(iv.c_str()[2 + 2 * i]);
+ char c2 = tolower(iv.c_str()[3 + 2 * i]);
+ if (!isxdigit(c1) || !isxdigit(c2)) {
+ ALOGE("malformed cipher IV '%s'.", iv.c_str());
+ return ERROR_MALFORMED;
+ }
+ uint8_t nibble1 = isdigit(c1) ? c1 - '0' : c1 - 'a' + 10;
+ uint8_t nibble2 = isdigit(c2) ? c2 - '0' : c2 - 'a' + 10;
+
+ AESInitVec[i] = nibble1 << 4 | nibble2;
+ }
+ } else {
+ memset(AESInitVec, 0, sizeof(AESInitVec));
+ AESInitVec[15] = mSeqNumber & 0xff;
+ AESInitVec[14] = (mSeqNumber >> 8) & 0xff;
+ AESInitVec[13] = (mSeqNumber >> 16) & 0xff;
+ AESInitVec[12] = (mSeqNumber >> 24) & 0xff;
+ }
+
+ bool newKey = memcmp(mKeyData, key->data(), AES_BLOCK_SIZE) != 0;
+ bool newInitVec = memcmp(mAESInitVec, AESInitVec, AES_BLOCK_SIZE) != 0;
+ bool newSampleAesKeyItem = newKey || newInitVec;
+ ALOGV("decryptBuffer: SAMPLE-AES newKeyItem %d/%d (Key %d initVec %d)",
+ mSampleAesKeyItemChanged, newSampleAesKeyItem, newKey, newInitVec);
+
+ if (newSampleAesKeyItem) {
+ memcpy(mKeyData, key->data(), AES_BLOCK_SIZE);
+ memcpy(mAESInitVec, AESInitVec, AES_BLOCK_SIZE);
+
+ if (method == "SAMPLE-AES") {
+ mSampleAesKeyItemChanged = true;
+
+ sp<ABuffer> keyDataBuffer = ABuffer::CreateAsCopy(mKeyData, sizeof(mKeyData));
+ sp<ABuffer> initVecBuffer = ABuffer::CreateAsCopy(mAESInitVec, sizeof(mAESInitVec));
+
+ // always allocating a new one rather than updating the old message
+ // lower layer might still have a reference to the old message
+ mSampleAesKeyItem = new AMessage();
+ mSampleAesKeyItem->setBuffer("keyData", keyDataBuffer);
+ mSampleAesKeyItem->setBuffer("initVec", initVecBuffer);
+
+ ALOGV("decryptBuffer: New SampleAesKeyItem: Key: %s IV: %s",
+ HlsSampleDecryptor::aesBlockToStr(mKeyData).c_str(),
+ HlsSampleDecryptor::aesBlockToStr(mAESInitVec).c_str());
+ } // SAMPLE-AES
+ } // newSampleAesKeyItem
+ } // first
+
+ if (method == "SAMPLE-AES") {
+ ALOGV("decryptBuffer: skipping full-seg decrypt for SAMPLE-AES");
+ return OK;
+ }
+
+
AES_KEY aes_key;
if (AES_set_decrypt_key(key->data(), 128, &aes_key) != 0) {
ALOGE("failed to set AES decryption key.");
@@ -361,44 +449,6 @@
return ERROR_MALFORMED;
}
- if (first) {
- // If decrypting the first block in a file, read the iv from the manifest
- // or derive the iv from the file's sequence number.
-
- AString iv;
- if (itemMeta->findString("cipher-iv", &iv)) {
- if ((!iv.startsWith("0x") && !iv.startsWith("0X"))
- || iv.size() > 16 * 2 + 2) {
- ALOGE("malformed cipher IV '%s'.", iv.c_str());
- return ERROR_MALFORMED;
- }
-
- while (iv.size() < 16 * 2 + 2) {
- iv.insert("0", 1, 2);
- }
-
- memset(mAESInitVec, 0, sizeof(mAESInitVec));
- for (size_t i = 0; i < 16; ++i) {
- char c1 = tolower(iv.c_str()[2 + 2 * i]);
- char c2 = tolower(iv.c_str()[3 + 2 * i]);
- if (!isxdigit(c1) || !isxdigit(c2)) {
- ALOGE("malformed cipher IV '%s'.", iv.c_str());
- return ERROR_MALFORMED;
- }
- uint8_t nibble1 = isdigit(c1) ? c1 - '0' : c1 - 'a' + 10;
- uint8_t nibble2 = isdigit(c2) ? c2 - '0' : c2 - 'a' + 10;
-
- mAESInitVec[i] = nibble1 << 4 | nibble2;
- }
- } else {
- memset(mAESInitVec, 0, sizeof(mAESInitVec));
- mAESInitVec[15] = mSeqNumber & 0xff;
- mAESInitVec[14] = (mSeqNumber >> 8) & 0xff;
- mAESInitVec[13] = (mSeqNumber >> 16) & 0xff;
- mAESInitVec[12] = (mSeqNumber >> 24) & 0xff;
- }
- }
-
AES_cbc_encrypt(
buffer->data(), buffer->data(), buffer->size(),
&aes_key, mAESInitVec, AES_DECRYPT);
@@ -409,7 +459,7 @@
status_t PlaylistFetcher::checkDecryptPadding(const sp<ABuffer> &buffer) {
AString method;
CHECK(buffer->meta()->findString("cipher-method", &method));
- if (method == "NONE") {
+ if (method == "NONE" || method == "SAMPLE-AES") {
return OK;
}
@@ -956,6 +1006,38 @@
return false;
}
+void PlaylistFetcher::initSeqNumberForLiveStream(
+ int32_t &firstSeqNumberInPlaylist,
+ int32_t &lastSeqNumberInPlaylist) {
+ // start at least 3 target durations from the end.
+ int64_t timeFromEnd = 0;
+ size_t index = mPlaylist->size();
+ sp<AMessage> itemMeta;
+ int64_t itemDurationUs;
+ int32_t targetDuration;
+ if (mPlaylist->meta()->findInt32("target-duration", &targetDuration)) {
+ do {
+ --index;
+ if (!mPlaylist->itemAt(index, NULL /* uri */, &itemMeta)
+ || !itemMeta->findInt64("durationUs", &itemDurationUs)) {
+ ALOGW("item or itemDurationUs missing");
+ mSeqNumber = lastSeqNumberInPlaylist - 3;
+ break;
+ }
+
+ timeFromEnd += itemDurationUs;
+ mSeqNumber = firstSeqNumberInPlaylist + index;
+ } while (timeFromEnd < targetDuration * 3E6 && index > 0);
+ } else {
+ ALOGW("target-duration missing");
+ mSeqNumber = lastSeqNumberInPlaylist - 3;
+ }
+
+ if (mSeqNumber < firstSeqNumberInPlaylist) {
+ mSeqNumber = firstSeqNumberInPlaylist;
+ }
+}
+
bool PlaylistFetcher::initDownloadState(
AString &uri,
sp<AMessage> &itemMeta,
@@ -982,11 +1064,8 @@
if (mSegmentStartTimeUs < 0) {
if (!mPlaylist->isComplete() && !mPlaylist->isEvent()) {
- // If this is a live session, start 3 segments from the end on connect
- mSeqNumber = lastSeqNumberInPlaylist - 3;
- if (mSeqNumber < firstSeqNumberInPlaylist) {
- mSeqNumber = firstSeqNumberInPlaylist;
- }
+ // this is a live session
+ initSeqNumberForLiveStream(firstSeqNumberInPlaylist, lastSeqNumberInPlaylist);
} else {
// When seeking mSegmentStartTimeUs is unavailable (< 0), we
// use mStartTimeUs (client supplied timestamp) to determine both start segment
@@ -1627,6 +1706,11 @@
mNextPTSTimeUs = -1ll;
}
+ if (mSampleAesKeyItemChanged) {
+ mTSParser->signalNewSampleAesKey(mSampleAesKeyItem);
+ mSampleAesKeyItemChanged = false;
+ }
+
size_t offset = 0;
while (offset + 188 <= buffer->size()) {
status_t err = mTSParser->feedTSPacket(buffer->data() + offset, 188);
@@ -2009,10 +2093,24 @@
}
}
+ sp<HlsSampleDecryptor> sampleDecryptor = NULL;
+ if (mSampleAesKeyItem != NULL) {
+ ALOGV("extractAndQueueAccessUnits[%d] SampleAesKeyItem: Key: %s IV: %s",
+ mSeqNumber,
+ HlsSampleDecryptor::aesBlockToStr(mKeyData).c_str(),
+ HlsSampleDecryptor::aesBlockToStr(mAESInitVec).c_str());
+
+ sampleDecryptor = new HlsSampleDecryptor(mSampleAesKeyItem);
+ }
+
+ int frameId = 0;
+
size_t offset = 0;
while (offset < buffer->size()) {
const uint8_t *adtsHeader = buffer->data() + offset;
CHECK_LT(offset + 5, buffer->size());
+ // non-const pointer for decryption if needed
+ uint8_t *adtsFrame = buffer->data() + offset;
unsigned aac_frame_length =
((adtsHeader[3] & 3) << 11)
@@ -2070,6 +2168,18 @@
}
}
+ if (sampleDecryptor != NULL) {
+ bool protection_absent = (adtsHeader[1] & 0x1);
+ size_t headerSize = protection_absent ? 7 : 9;
+ if (frameId == 0) {
+ ALOGV("extractAndQueueAAC[%d] protection_absent %d (%02x) headerSize %zu",
+ mSeqNumber, protection_absent, adtsHeader[1], headerSize);
+ }
+
+ sampleDecryptor->processAAC(headerSize, adtsFrame, aac_frame_length);
+ }
+ frameId++;
+
sp<ABuffer> unit = new ABuffer(aac_frame_length);
memcpy(unit->data(), adtsHeader, aac_frame_length);
diff --git a/media/libstagefright/httplive/PlaylistFetcher.h b/media/libstagefright/httplive/PlaylistFetcher.h
index c8ca457..d7db54a 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.h
+++ b/media/libstagefright/httplive/PlaylistFetcher.h
@@ -19,6 +19,7 @@
#define PLAYLIST_FETCHER_H_
#include <media/stagefright/foundation/AHandler.h>
+#include <openssl/aes.h>
#include "mpeg2ts/ATSParser.h"
#include "LiveSession.h"
@@ -175,7 +176,10 @@
// Stores the initialization vector to decrypt the next block of cipher text, which can
// either be derived from the sequence number, read from the manifest, or copied from
// the last block of cipher text (cipher-block chaining).
- unsigned char mAESInitVec[16];
+ unsigned char mAESInitVec[AES_BLOCK_SIZE];
+ unsigned char mKeyData[AES_BLOCK_SIZE];
+ bool mSampleAesKeyItemChanged;
+ sp<AMessage> mSampleAesKeyItem;
Mutex mThresholdLock;
float mThresholdRatio;
@@ -218,6 +222,9 @@
void onStop(const sp<AMessage> &msg);
void onMonitorQueue();
void onDownloadNext();
+ void initSeqNumberForLiveStream(
+ int32_t &firstSeqNumberInPlaylist,
+ int32_t &lastSeqNumberInPlaylist);
bool initDownloadState(
AString &uri,
sp<AMessage> &itemMeta,
diff --git a/media/libstagefright/id3/Android.bp b/media/libstagefright/id3/Android.bp
index b2aedec..30008d9 100644
--- a/media/libstagefright/id3/Android.bp
+++ b/media/libstagefright/id3/Android.bp
@@ -11,6 +11,10 @@
misc_undefined: [
"signed-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
shared_libs: ["libmedia"],
@@ -38,4 +42,12 @@
],
static_libs: ["libstagefright_id3"],
+
+ sanitize: {
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
+ },
+
}
diff --git a/media/libstagefright/id3/ID3.cpp b/media/libstagefright/id3/ID3.cpp
index a0eb630..8d1ad66 100644
--- a/media/libstagefright/id3/ID3.cpp
+++ b/media/libstagefright/id3/ID3.cpp
@@ -379,7 +379,7 @@
flags &= ~1;
}
- if (flags & 2) {
+ if ((flags & 2) && (dataSize >= 2)) {
// This file has "unsynchronization", so we have to replace occurrences
// of 0xff 0x00 with just 0xff in order to get the real data.
@@ -395,11 +395,15 @@
mData[writeOffset++] = mData[readOffset++];
}
// move the remaining data following this frame
- memmove(&mData[writeOffset], &mData[readOffset], oldSize - readOffset);
+ if (readOffset <= oldSize) {
+ memmove(&mData[writeOffset], &mData[readOffset], oldSize - readOffset);
+ } else {
+ ALOGE("b/34618607 (%zu %zu %zu %zu)", readOffset, writeOffset, oldSize, mSize);
+ android_errorWriteLog(0x534e4554, "34618607");
+ }
- flags &= ~2;
}
-
+ flags &= ~2;
if (flags != prevFlags || iTunesHack) {
WriteSyncsafeInteger(&mData[offset + 4], dataSize);
mData[offset + 8] = flags >> 8;
@@ -517,7 +521,7 @@
if (mOffset == 126 || mOffset == 127) {
// Special treatment for the track number and genre.
char tmp[16];
- sprintf(tmp, "%d", (int)*frameData);
+ snprintf(tmp, sizeof(tmp), "%d", (int)*frameData);
id->setTo(tmp);
return;
diff --git a/media/libstagefright/id3/testid3.cpp b/media/libstagefright/id3/testid3.cpp
index b2f4188..442a3ff 100644
--- a/media/libstagefright/id3/testid3.cpp
+++ b/media/libstagefright/id3/testid3.cpp
@@ -154,8 +154,6 @@
int main(int argc, char **argv) {
android::ProcessState::self()->startThreadPool();
- DataSource::RegisterDefaultSniffers();
-
for (int i = 1; i < argc; ++i) {
scan(argv[i]);
}
diff --git a/media/libstagefright/include/ACodecBufferChannel.h b/media/libstagefright/include/ACodecBufferChannel.h
new file mode 100644
index 0000000..0da2e81
--- /dev/null
+++ b/media/libstagefright/include/ACodecBufferChannel.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef A_BUFFER_CHANNEL_H_
+
+#define A_BUFFER_CHANNEL_H_
+
+#include <map>
+#include <memory>
+#include <mutex>
+#include <vector>
+
+#include <media/openmax/OMX_Types.h>
+#include <media/stagefright/CodecBase.h>
+#include <media/ICrypto.h>
+#include <media/IOMX.h>
+
+namespace android {
+
+/**
+ * BufferChannelBase implementation for ACodec.
+ */
+class ACodecBufferChannel : public BufferChannelBase {
+public:
+ struct BufferAndId {
+ sp<MediaCodecBuffer> mBuffer;
+ IOMX::buffer_id mBufferId;
+ };
+
+ struct BufferInfo {
+ BufferInfo(
+ const sp<MediaCodecBuffer> &buffer,
+ IOMX::buffer_id bufferId,
+ const sp<IMemory> &sharedEncryptedBuffer);
+
+ BufferInfo() = delete;
+
+ // Buffer facing MediaCodec and its clients.
+ const sp<MediaCodecBuffer> mClientBuffer;
+ // Buffer facing CodecBase.
+ const sp<MediaCodecBuffer> mCodecBuffer;
+ // OMX buffer ID.
+ const IOMX::buffer_id mBufferId;
+ // Encrypted buffer in case of secure input.
+ const sp<IMemory> mSharedEncryptedBuffer;
+ };
+
+ ACodecBufferChannel(
+ const sp<AMessage> &inputBufferFilled, const sp<AMessage> &outputBufferDrained);
+ virtual ~ACodecBufferChannel();
+
+ // BufferChannelBase interface
+ virtual status_t queueInputBuffer(const sp<MediaCodecBuffer> &buffer) override;
+ virtual status_t queueSecureInputBuffer(
+ const sp<MediaCodecBuffer> &buffer,
+ bool secure,
+ const uint8_t *key,
+ const uint8_t *iv,
+ CryptoPlugin::Mode mode,
+ CryptoPlugin::Pattern pattern,
+ const CryptoPlugin::SubSample *subSamples,
+ size_t numSubSamples,
+ AString *errorDetailMsg) override;
+ virtual status_t renderOutputBuffer(
+ const sp<MediaCodecBuffer> &buffer, int64_t timestampNs) override;
+ virtual status_t discardBuffer(const sp<MediaCodecBuffer> &buffer) override;
+ virtual void getInputBufferArray(Vector<sp<MediaCodecBuffer>> *array) override;
+ virtual void getOutputBufferArray(Vector<sp<MediaCodecBuffer>> *array) override;
+
+ // Methods below are interface for ACodec to use.
+
+ /**
+ * Set input buffer array.
+ *
+ * @param array Newly allocated buffers. Empty if buffers are
+ * deallocated.
+ */
+ void setInputBufferArray(const std::vector<BufferAndId> &array);
+ /**
+ * Set output buffer array.
+ *
+ * @param array Newly allocated buffers. Empty if buffers are
+ * deallocated.
+ */
+ void setOutputBufferArray(const std::vector<BufferAndId> &array);
+ /**
+ * Request MediaCodec to fill the specified input buffer.
+ *
+ * @param bufferId ID of the buffer, assigned by underlying component.
+ */
+ void fillThisBuffer(IOMX::buffer_id bufferID);
+ /**
+ * Request MediaCodec to drain the specified output buffer.
+ *
+ * @param bufferId ID of the buffer, assigned by underlying component.
+ * @param omxFlags flags associated with this buffer (e.g. EOS).
+ */
+ void drainThisBuffer(IOMX::buffer_id bufferID, OMX_U32 omxFlags);
+
+private:
+ const sp<AMessage> mInputBufferFilled;
+ const sp<AMessage> mOutputBufferDrained;
+
+ sp<MemoryDealer> mDealer;
+ sp<IMemory> mDecryptDestination;
+ int32_t mHeapSeqNum;
+
+ // These should only be accessed via std::atomic_* functions.
+ //
+ // Note on thread safety: since the vector and BufferInfo are const, it's
+ // safe to read them at any thread once the shared_ptr object is atomically
+ // obtained. Inside BufferInfo, mBufferId and mSharedEncryptedBuffer are
+ // immutable objects. We write internal states of mClient/CodecBuffer when
+ // the caller has given up the reference, so that access is also safe.
+ std::shared_ptr<const std::vector<const BufferInfo>> mInputBuffers;
+ std::shared_ptr<const std::vector<const BufferInfo>> mOutputBuffers;
+
+ sp<MemoryDealer> makeMemoryDealer(size_t heapSize);
+
+ bool hasCryptoOrDescrambler() {
+ return mCrypto != NULL || mDescrambler != NULL;
+ }
+};
+
+} // namespace android
+
+#endif // A_BUFFER_CHANNEL_H_
diff --git a/media/libstagefright/include/CallbackDataSource.h b/media/libstagefright/include/CallbackDataSource.h
index 80cd1f7..0d775e6 100644
--- a/media/libstagefright/include/CallbackDataSource.h
+++ b/media/libstagefright/include/CallbackDataSource.h
@@ -42,6 +42,7 @@
return mName;
}
virtual sp<DecryptHandle> DrmInitialization(const char *mime = NULL);
+ virtual sp<IDataSource> getIDataSource() const;
private:
sp<IDataSource> mIDataSource;
@@ -70,6 +71,7 @@
return mName;
}
virtual sp<DecryptHandle> DrmInitialization(const char *mime = NULL);
+ virtual sp<IDataSource> getIDataSource() const;
private:
// 2kb comes from experimenting with the time-to-first-frame from a MediaPlayer
diff --git a/media/libstagefright/include/DRMExtractor.h b/media/libstagefright/include/DRMExtractor.h
deleted file mode 100644
index 3dc7df8..0000000
--- a/media/libstagefright/include/DRMExtractor.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef DRM_EXTRACTOR_H_
-
-#define DRM_EXTRACTOR_H_
-
-#include <media/IMediaSource.h>
-#include <media/stagefright/MediaExtractor.h>
-#include <drm/DrmManagerClient.h>
-
-namespace android {
-
-struct AMessage;
-class DataSource;
-class SampleTable;
-class String8;
-class DecryptHandle;
-
-class DRMExtractor : public MediaExtractor {
-public:
- DRMExtractor(const sp<DataSource> &source, const char *mime);
-
- virtual size_t countTracks();
- virtual sp<IMediaSource> getTrack(size_t index);
- virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
- virtual sp<MetaData> getMetaData();
- virtual const char * name() { return "DRMExtractor"; }
-
-protected:
- virtual ~DRMExtractor();
-
-private:
- sp<DataSource> mDataSource;
-
- sp<IMediaExtractor> mOriginalExtractor;
- sp<DecryptHandle> mDecryptHandle;
- DrmManagerClient* mDrmManagerClient;
-
- DRMExtractor(const DRMExtractor &);
- DRMExtractor &operator=(const DRMExtractor &);
-};
-
-bool SniffDRM(
- const sp<DataSource> &source, String8 *mimeType, float *confidence,
- sp<AMessage> *);
-
-} // namespace android
-
-#endif // DRM_EXTRACTOR_H_
-
diff --git a/media/libstagefright/include/DataConverter.h b/media/libstagefright/include/DataConverter.h
index 8d67921..60ebad1 100644
--- a/media/libstagefright/include/DataConverter.h
+++ b/media/libstagefright/include/DataConverter.h
@@ -24,18 +24,18 @@
namespace android {
-struct ABuffer;
+class MediaCodecBuffer;
// DataConverter base class, defaults to memcpy
struct DataConverter : public RefBase {
virtual size_t sourceSize(size_t targetSize); // will clamp to SIZE_MAX
virtual size_t targetSize(size_t sourceSize); // will clamp to SIZE_MAX
- status_t convert(const sp<ABuffer> &source, sp<ABuffer> &target);
+ status_t convert(const sp<MediaCodecBuffer> &source, sp<MediaCodecBuffer> &target);
virtual ~DataConverter();
protected:
- virtual status_t safeConvert(const sp<ABuffer> &source, sp<ABuffer> &target);
+ virtual status_t safeConvert(const sp<MediaCodecBuffer> &source, sp<MediaCodecBuffer> &target);
};
// SampleConverterBase uses a ratio to calculate the source and target sizes
@@ -45,7 +45,7 @@
virtual size_t targetSize(size_t sourceSize);
protected:
- virtual status_t safeConvert(const sp<ABuffer> &source, sp<ABuffer> &target) = 0;
+ virtual status_t safeConvert(const sp<MediaCodecBuffer> &source, sp<MediaCodecBuffer> &target) = 0;
// sourceSize = sourceSampleSize / targetSampleSize * targetSize
SampleConverterBase(uint32_t sourceSampleSize, uint32_t targetSampleSize)
@@ -61,7 +61,7 @@
static AudioConverter *Create(AudioEncoding source, AudioEncoding target);
protected:
- virtual status_t safeConvert(const sp<ABuffer> &src, sp<ABuffer> &tgt);
+ virtual status_t safeConvert(const sp<MediaCodecBuffer> &source, sp<MediaCodecBuffer> &target);
private:
AudioConverter(
diff --git a/media/libstagefright/include/MPEG2TSExtractor.h b/media/libstagefright/include/MPEG2TSExtractor.h
index 93e9a4b..2a75298 100644
--- a/media/libstagefright/include/MPEG2TSExtractor.h
+++ b/media/libstagefright/include/MPEG2TSExtractor.h
@@ -45,6 +45,8 @@
virtual sp<MetaData> getMetaData();
+ virtual status_t setMediaCas(const sp<ICas> &cas) override;
+
virtual uint32_t flags() const;
virtual const char * name() { return "MPEG2TSExtractor"; }
@@ -70,7 +72,10 @@
off64_t mOffset;
+ static bool isScrambledFormat(const sp<MetaData> &format);
+
void init();
+ void addSource(const sp<AnotherPacketSource> &impl);
// Try to feed more data from source to parser.
// |isInit| means this function is called inside init(). This is a signal to
// save SyncEvent so that init() can add SyncPoint after it updates |mSourceImpls|.
@@ -89,6 +94,8 @@
// Add a SynPoint derived from |event|.
void addSyncPoint_l(const ATSParser::SyncEvent &event);
+ status_t estimateDurationsFromTimesUsAtEnd();
+
DISALLOW_EVIL_CONSTRUCTORS(MPEG2TSExtractor);
};
diff --git a/media/libstagefright/include/MPEG4Extractor.h b/media/libstagefright/include/MPEG4Extractor.h
index 89ad137..f847119 100644
--- a/media/libstagefright/include/MPEG4Extractor.h
+++ b/media/libstagefright/include/MPEG4Extractor.h
@@ -66,6 +66,8 @@
protected:
virtual ~MPEG4Extractor();
+ virtual void populateMetrics();
+
private:
struct PsshInfo {
@@ -140,6 +142,9 @@
Track *findTrackByMimePrefix(const char *mimePrefix);
+ status_t parseAC3SampleEntry(off64_t offset);
+ status_t parseAC3SpecificBox(off64_t offset, uint16_t sampleRate);
+
MPEG4Extractor(const MPEG4Extractor &);
MPEG4Extractor &operator=(const MPEG4Extractor &);
};
diff --git a/media/libstagefright/include/OMX.h b/media/libstagefright/include/OMX.h
index 6c073f0..4af3d39 100644
--- a/media/libstagefright/include/OMX.h
+++ b/media/libstagefright/include/OMX.h
@@ -20,6 +20,8 @@
#include <media/IOMX.h>
#include <utils/threads.h>
#include <utils/KeyedVector.h>
+#include <media/vndk/xmlparser/1.0/MediaCodecsXmlParser.h>
+#include "OmxNodeOwner.h"
namespace android {
@@ -27,162 +29,34 @@
struct OMXNodeInstance;
class OMX : public BnOMX,
+ public OmxNodeOwner,
public IBinder::DeathRecipient {
public:
OMX();
- virtual bool livesLocally(node_id node, pid_t pid);
-
virtual status_t listNodes(List<ComponentInfo> *list);
virtual status_t allocateNode(
const char *name, const sp<IOMXObserver> &observer,
- sp<IBinder> *nodeBinder,
- node_id *node);
-
- virtual status_t freeNode(node_id node);
-
- virtual status_t sendCommand(
- node_id node, OMX_COMMANDTYPE cmd, OMX_S32 param);
-
- virtual status_t getParameter(
- node_id node, OMX_INDEXTYPE index,
- void *params, size_t size);
-
- virtual status_t setParameter(
- node_id node, OMX_INDEXTYPE index,
- const void *params, size_t size);
-
- virtual status_t getConfig(
- node_id node, OMX_INDEXTYPE index,
- void *params, size_t size);
-
- virtual status_t setConfig(
- node_id node, OMX_INDEXTYPE index,
- const void *params, size_t size);
-
- virtual status_t getState(
- node_id node, OMX_STATETYPE* state);
-
- virtual status_t enableNativeBuffers(
- node_id node, OMX_U32 port_index, OMX_BOOL graphic, OMX_BOOL enable);
-
- virtual status_t getGraphicBufferUsage(
- node_id node, OMX_U32 port_index, OMX_U32* usage);
-
- virtual status_t storeMetaDataInBuffers(
- node_id node, OMX_U32 port_index, OMX_BOOL enable, MetadataBufferType *type);
-
- virtual status_t prepareForAdaptivePlayback(
- node_id node, OMX_U32 portIndex, OMX_BOOL enable,
- OMX_U32 max_frame_width, OMX_U32 max_frame_height);
-
- virtual status_t configureVideoTunnelMode(
- node_id node, OMX_U32 portIndex, OMX_BOOL tunneled,
- OMX_U32 audioHwSync, native_handle_t **sidebandHandle);
-
- virtual status_t useBuffer(
- node_id node, OMX_U32 port_index, const sp<IMemory> ¶ms,
- buffer_id *buffer, OMX_U32 allottedSize);
-
- virtual status_t useGraphicBuffer(
- node_id node, OMX_U32 port_index,
- const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer);
-
- virtual status_t updateGraphicBufferInMeta(
- node_id node, OMX_U32 port_index,
- const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer);
-
- virtual status_t updateNativeHandleInMeta(
- node_id node, OMX_U32 port_index,
- const sp<NativeHandle> &nativeHandle, buffer_id buffer);
+ sp<IOMXNode> *omxNode);
virtual status_t createInputSurface(
- node_id node, OMX_U32 port_index, android_dataspace dataSpace,
sp<IGraphicBufferProducer> *bufferProducer,
- MetadataBufferType *type);
-
- virtual status_t createPersistentInputSurface(
- sp<IGraphicBufferProducer> *bufferProducer,
- sp<IGraphicBufferConsumer> *bufferConsumer);
-
- virtual status_t setInputSurface(
- node_id node, OMX_U32 port_index,
- const sp<IGraphicBufferConsumer> &bufferConsumer,
- MetadataBufferType *type);
-
- virtual status_t signalEndOfInputStream(node_id node);
-
- virtual status_t allocateSecureBuffer(
- node_id node, OMX_U32 port_index, size_t size,
- buffer_id *buffer, void **buffer_data, sp<NativeHandle> *native_handle);
-
- virtual status_t allocateBufferWithBackup(
- node_id node, OMX_U32 port_index, const sp<IMemory> ¶ms,
- buffer_id *buffer, OMX_U32 allottedSize);
-
- virtual status_t freeBuffer(
- node_id node, OMX_U32 port_index, buffer_id buffer);
-
- virtual status_t fillBuffer(node_id node, buffer_id buffer, int fenceFd);
-
- virtual status_t emptyBuffer(
- node_id node,
- buffer_id buffer,
- OMX_U32 range_offset, OMX_U32 range_length,
- OMX_U32 flags, OMX_TICKS timestamp, int fenceFd);
-
- virtual status_t getExtensionIndex(
- node_id node,
- const char *parameter_name,
- OMX_INDEXTYPE *index);
-
- virtual status_t setInternalOption(
- node_id node,
- OMX_U32 port_index,
- InternalOptionType type,
- const void *data,
- size_t size);
+ sp<IGraphicBufferSource> *bufferSource);
virtual void binderDied(const wp<IBinder> &the_late_who);
- virtual bool isSecure(IOMX::node_id node);
-
- OMX_ERRORTYPE OnEvent(
- node_id node,
- OMX_IN OMX_EVENTTYPE eEvent,
- OMX_IN OMX_U32 nData1,
- OMX_IN OMX_U32 nData2,
- OMX_IN OMX_PTR pEventData);
-
- OMX_ERRORTYPE OnEmptyBufferDone(
- node_id node, buffer_id buffer, OMX_IN OMX_BUFFERHEADERTYPE *pBuffer, int fenceFd);
-
- OMX_ERRORTYPE OnFillBufferDone(
- node_id node, buffer_id buffer, OMX_IN OMX_BUFFERHEADERTYPE *pBuffer, int fenceFd);
-
- void invalidateNodeID(node_id node);
+ virtual status_t freeNode(const sp<OMXNodeInstance>& instance);
protected:
virtual ~OMX();
private:
- struct CallbackDispatcherThread;
- struct CallbackDispatcher;
-
Mutex mLock;
OMXMaster *mMaster;
- size_t mNodeCounter;
+ MediaCodecsXmlParser mParser;
- KeyedVector<wp<IBinder>, OMXNodeInstance *> mLiveNodes;
- KeyedVector<node_id, OMXNodeInstance *> mNodeIDToInstance;
- KeyedVector<node_id, sp<CallbackDispatcher> > mDispatchers;
-
- node_id makeNodeID_l(OMXNodeInstance *instance);
- OMXNodeInstance *findInstance(node_id node);
- sp<CallbackDispatcher> findDispatcher(node_id node);
-
- void invalidateNodeID_l(node_id node);
+ KeyedVector<wp<IBinder>, sp<OMXNodeInstance> > mLiveNodes;
OMX(const OMX &);
OMX &operator=(const OMX &);
diff --git a/media/libstagefright/include/OMXNodeInstance.h b/media/libstagefright/include/OMXNodeInstance.h
index 6411267..8e08d15 100644
--- a/media/libstagefright/include/OMXNodeInstance.h
+++ b/media/libstagefright/include/OMXNodeInstance.h
@@ -18,29 +18,35 @@
#define OMX_NODE_INSTANCE_H_
-#include "OMX.h"
+#include <atomic>
+#include <media/IOMX.h>
#include <utils/RefBase.h>
-#include <utils/SortedVector.h>
#include <utils/threads.h>
+#include <utils/KeyedVector.h>
+#include <utils/SortedVector.h>
+#include "OmxNodeOwner.h"
+
+#include <android/hidl/memory/1.0/IMemory.h>
namespace android {
-
+class GraphicBuffer;
+class IOMXBufferSource;
class IOMXObserver;
struct OMXMaster;
-class GraphicBufferSource;
+class OMXBuffer;
+typedef hidl::memory::V1_0::IMemory IHidlMemory;
-struct OMXNodeInstance {
+struct OMXNodeInstance : public BnOMXNode {
OMXNodeInstance(
- OMX *owner, const sp<IOMXObserver> &observer, const char *name);
+ OmxNodeOwner *owner, const sp<IOMXObserver> &observer, const char *name);
- void setHandle(OMX::node_id node_id, OMX_HANDLETYPE handle);
+ void setHandle(OMX_HANDLETYPE handle);
- OMX *owner();
+ OMX_HANDLETYPE handle();
sp<IOMXObserver> observer();
- OMX::node_id nodeID();
- status_t freeNode(OMXMaster *master);
+ status_t freeNode() override;
status_t sendCommand(OMX_COMMANDTYPE cmd, OMX_S32 param);
status_t getParameter(OMX_INDEXTYPE index, void *params, size_t size);
@@ -51,15 +57,10 @@
status_t getConfig(OMX_INDEXTYPE index, void *params, size_t size);
status_t setConfig(OMX_INDEXTYPE index, const void *params, size_t size);
- status_t getState(OMX_STATETYPE* state);
-
- status_t enableNativeBuffers(OMX_U32 portIndex, OMX_BOOL graphic, OMX_BOOL enable);
+ status_t setPortMode(OMX_U32 port_index, IOMX::PortMode mode);
status_t getGraphicBufferUsage(OMX_U32 portIndex, OMX_U32* usage);
- status_t storeMetaDataInBuffers(
- OMX_U32 portIndex, OMX_BOOL enable, MetadataBufferType *type);
-
status_t prepareForAdaptivePlayback(
OMX_U32 portIndex, OMX_BOOL enable,
OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight);
@@ -68,113 +69,80 @@
OMX_U32 portIndex, OMX_BOOL tunneled,
OMX_U32 audioHwSync, native_handle_t **sidebandHandle);
- status_t useBuffer(
- OMX_U32 portIndex, const sp<IMemory> ¶ms,
- OMX::buffer_id *buffer, OMX_U32 allottedSize);
-
- status_t useGraphicBuffer(
- OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
- OMX::buffer_id *buffer);
-
- status_t updateGraphicBufferInMeta(
- OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
- OMX::buffer_id buffer);
-
- status_t updateNativeHandleInMeta(
- OMX_U32 portIndex, const sp<NativeHandle> &nativeHandle,
- OMX::buffer_id buffer);
-
- status_t createInputSurface(
- OMX_U32 portIndex, android_dataspace dataSpace,
- sp<IGraphicBufferProducer> *bufferProducer,
- MetadataBufferType *type);
-
- static status_t createPersistentInputSurface(
- sp<IGraphicBufferProducer> *bufferProducer,
- sp<IGraphicBufferConsumer> *bufferConsumer);
-
status_t setInputSurface(
- OMX_U32 portIndex, const sp<IGraphicBufferConsumer> &bufferConsumer,
- MetadataBufferType *type);
-
- status_t signalEndOfInputStream();
-
- void signalEvent(OMX_EVENTTYPE event, OMX_U32 arg1, OMX_U32 arg2);
+ const sp<IOMXBufferSource> &bufferSource);
status_t allocateSecureBuffer(
- OMX_U32 portIndex, size_t size, OMX::buffer_id *buffer,
+ OMX_U32 portIndex, size_t size, IOMX::buffer_id *buffer,
void **buffer_data, sp<NativeHandle> *native_handle);
- status_t allocateBufferWithBackup(
- OMX_U32 portIndex, const sp<IMemory> ¶ms,
- OMX::buffer_id *buffer, OMX_U32 allottedSize);
+ status_t useBuffer(
+ OMX_U32 portIndex, const OMXBuffer &omxBuf, buffer_id *buffer);
- status_t freeBuffer(OMX_U32 portIndex, OMX::buffer_id buffer);
+ status_t freeBuffer(
+ OMX_U32 portIndex, buffer_id buffer);
- status_t fillBuffer(OMX::buffer_id buffer, int fenceFd);
+ status_t fillBuffer(
+ buffer_id buffer, const OMXBuffer &omxBuf, int fenceFd = -1);
status_t emptyBuffer(
- OMX::buffer_id buffer,
- OMX_U32 rangeOffset, OMX_U32 rangeLength,
- OMX_U32 flags, OMX_TICKS timestamp, int fenceFd);
-
- status_t emptyGraphicBuffer(
- OMX_BUFFERHEADERTYPE *header, const sp<GraphicBuffer> &buffer,
- OMX_U32 flags, OMX_TICKS timestamp, int fenceFd);
+ buffer_id buffer, const OMXBuffer &omxBuf,
+ OMX_U32 flags, OMX_TICKS timestamp, int fenceFd = -1);
status_t getExtensionIndex(
const char *parameterName, OMX_INDEXTYPE *index);
- status_t setInternalOption(
- OMX_U32 portIndex,
- IOMX::InternalOptionType type,
- const void *data,
- size_t size);
+ status_t setQuirks(OMX_U32 quirks);
bool isSecure() const {
return mIsSecure;
}
+ status_t dispatchMessage(const omx_message &msg) override;
+
// handles messages and removes them from the list
void onMessages(std::list<omx_message> &messages);
- void onMessage(const omx_message &msg);
- void onObserverDied(OMXMaster *master);
- void onGetHandleFailed();
+ void onObserverDied();
void onEvent(OMX_EVENTTYPE event, OMX_U32 arg1, OMX_U32 arg2);
static OMX_CALLBACKTYPE kCallbacks;
private:
+ struct CallbackDispatcherThread;
+ struct CallbackDispatcher;
+
Mutex mLock;
- OMX *mOwner;
- OMX::node_id mNodeID;
+ OmxNodeOwner *mOwner;
OMX_HANDLETYPE mHandle;
sp<IOMXObserver> mObserver;
- bool mDying;
+ sp<CallbackDispatcher> mDispatcher;
+ std::atomic_bool mDying;
bool mSailed; // configuration is set (no more meta-mode changes)
bool mQueriedProhibitedExtensions;
SortedVector<OMX_INDEXTYPE> mProhibitedExtensions;
bool mIsSecure;
+ uint32_t mQuirks;
- // Lock only covers mGraphicBufferSource. We can't always use mLock
- // because of rare instances where we'd end up locking it recursively.
- Mutex mGraphicBufferSourceLock;
- // Access this through getGraphicBufferSource().
- sp<GraphicBufferSource> mGraphicBufferSource;
-
+ // Lock only covers mOMXBufferSource and mOMXOutputListener. We can't always
+ // use mLock because of rare instances where we'd end up locking it recursively.
+ Mutex mOMXBufferSourceLock;
+ // Access these through getBufferSource().
+ sp<IOMXBufferSource> mOMXBufferSource;
struct ActiveBuffer {
OMX_U32 mPortIndex;
- OMX::buffer_id mID;
+ IOMX::buffer_id mID;
};
Vector<ActiveBuffer> mActiveBuffers;
// for buffer ptr to buffer id translation
Mutex mBufferIDLock;
uint32_t mBufferIDCount;
- KeyedVector<OMX::buffer_id, OMX_BUFFERHEADERTYPE *> mBufferIDToBufferHeader;
- KeyedVector<OMX_BUFFERHEADERTYPE *, OMX::buffer_id> mBufferHeaderToBufferID;
+ KeyedVector<IOMX::buffer_id, OMX_BUFFERHEADERTYPE *> mBufferIDToBufferHeader;
+ KeyedVector<OMX_BUFFERHEADERTYPE *, IOMX::buffer_id> mBufferHeaderToBufferID;
+ bool mLegacyAdaptiveExperiment;
+ IOMX::PortMode mPortMode[2];
// metadata and secure buffer type tracking
MetadataBufferType mMetadataType[2];
enum SecureBufferType {
@@ -184,6 +152,14 @@
};
SecureBufferType mSecureBufferType[2];
+ // Following are OMX parameters managed by us (instead of the component)
+ // OMX_IndexParamMaxFrameDurationForBitrateControl
+ KeyedVector<int64_t, int64_t> mOriginalTimeUs;
+ bool mRestorePtsFailed;
+ int64_t mMaxTimestampGapUs;
+ int64_t mPrevOriginalTimeUs;
+ int64_t mPrevModifiedTimeUs;
+
// For debug support
char *mName;
int DEBUG;
@@ -198,21 +174,51 @@
~OMXNodeInstance();
- void addActiveBuffer(OMX_U32 portIndex, OMX::buffer_id id);
- void removeActiveBuffer(OMX_U32 portIndex, OMX::buffer_id id);
+ void addActiveBuffer(OMX_U32 portIndex, IOMX::buffer_id id);
+ void removeActiveBuffer(OMX_U32 portIndex, IOMX::buffer_id id);
void freeActiveBuffers();
// For buffer id management
- OMX::buffer_id makeBufferID(OMX_BUFFERHEADERTYPE *bufferHeader);
- OMX_BUFFERHEADERTYPE *findBufferHeader(OMX::buffer_id buffer, OMX_U32 portIndex);
- OMX::buffer_id findBufferID(OMX_BUFFERHEADERTYPE *bufferHeader);
- void invalidateBufferID(OMX::buffer_id buffer);
+ IOMX::buffer_id makeBufferID(OMX_BUFFERHEADERTYPE *bufferHeader);
+ OMX_BUFFERHEADERTYPE *findBufferHeader(IOMX::buffer_id buffer, OMX_U32 portIndex);
+ IOMX::buffer_id findBufferID(OMX_BUFFERHEADERTYPE *bufferHeader);
+ void invalidateBufferID(IOMX::buffer_id buffer);
bool isProhibitedIndex_l(OMX_INDEXTYPE index);
+ status_t useBuffer_l(
+ OMX_U32 portIndex, const sp<IMemory> ¶ms,
+ const sp<IHidlMemory> &hParams, IOMX::buffer_id *buffer);
+
+ status_t useGraphicBuffer_l(
+ OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
+ IOMX::buffer_id *buffer);
+
+ status_t useGraphicBufferWithMetadata_l(
+ OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
+ IOMX::buffer_id *buffer);
+
status_t useGraphicBuffer2_l(
OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
- OMX::buffer_id *buffer);
+ IOMX::buffer_id *buffer);
+
+ status_t emptyBuffer_l(
+ IOMX::buffer_id buffer,
+ OMX_U32 rangeOffset, OMX_U32 rangeLength,
+ OMX_U32 flags, OMX_TICKS timestamp, int fenceFd);
+
+ status_t emptyGraphicBuffer_l(
+ IOMX::buffer_id buffer, const sp<GraphicBuffer> &graphicBuffer,
+ OMX_U32 flags, OMX_TICKS timestamp, int fenceFd);
+
+ status_t emptyNativeHandleBuffer_l(
+ IOMX::buffer_id buffer, const sp<NativeHandle> &nativeHandle,
+ OMX_U32 flags, OMX_TICKS timestamp, int fenceFd);
+
+ status_t emptyBuffer_l(
+ OMX_BUFFERHEADERTYPE *header,
+ OMX_U32 flags, OMX_TICKS timestamp, intptr_t debugAddr, int fenceFd);
+
static OMX_ERRORTYPE OnEvent(
OMX_IN OMX_HANDLETYPE hComponent,
OMX_IN OMX_PTR pAppData,
@@ -231,6 +237,9 @@
OMX_IN OMX_PTR pAppData,
OMX_IN OMX_BUFFERHEADERTYPE *pBuffer);
+ status_t enableNativeBuffers_l(
+ OMX_U32 portIndex, OMX_BOOL graphic, OMX_BOOL enable);
+
status_t storeMetaDataInBuffers_l(
OMX_U32 portIndex, OMX_BOOL enable, MetadataBufferType *type);
@@ -243,10 +252,6 @@
int retrieveFenceFromMeta_l(
OMX_BUFFERHEADERTYPE *header, OMX_U32 portIndex);
- status_t emptyBuffer_l(
- OMX_BUFFERHEADERTYPE *header,
- OMX_U32 flags, OMX_TICKS timestamp, intptr_t debugAddr, int fenceFd);
-
// Updates the graphic buffer handle in the metadata buffer for |buffer| and |header| to
// |graphicBuffer|'s handle. If |updateCodecBuffer| is true, the update will happen in
// the actual codec buffer (use this if not using emptyBuffer (with no _l) later to
@@ -254,18 +259,27 @@
// buffer.)
status_t updateGraphicBufferInMeta_l(
OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
- OMX::buffer_id buffer, OMX_BUFFERHEADERTYPE *header, bool updateCodecBuffer);
+ IOMX::buffer_id buffer, OMX_BUFFERHEADERTYPE *header);
- status_t createGraphicBufferSource(
- OMX_U32 portIndex, const sp<IGraphicBufferConsumer> &consumer /* nullable */,
- MetadataBufferType *type);
- sp<GraphicBufferSource> getGraphicBufferSource();
- void setGraphicBufferSource(const sp<GraphicBufferSource> &bufferSource);
+ status_t updateNativeHandleInMeta_l(
+ OMX_U32 portIndex, const sp<NativeHandle> &nativeHandle,
+ IOMX::buffer_id buffer, OMX_BUFFERHEADERTYPE *header);
+
+ sp<IOMXBufferSource> getBufferSource();
+ void setBufferSource(const sp<IOMXBufferSource> &bufferSource);
+ // Called when omx_message::FILL_BUFFER_DONE is received. (Currently the
+ // buffer source will fix timestamp in the header if needed.)
+ void codecBufferFilled(omx_message &msg);
// Handles |msg|, and may modify it. Returns true iff completely handled it and
// |msg| does not need to be sent to the event listener.
bool handleMessage(omx_message &msg);
+ bool handleDataSpaceChanged(omx_message &msg);
+
+ status_t setMaxPtsGapUs(const void *params, size_t size);
+ int64_t getCodecTimestamp(OMX_TICKS timestamp);
+
OMXNodeInstance(const OMXNodeInstance &);
OMXNodeInstance &operator=(const OMXNodeInstance &);
};
diff --git a/media/libstagefright/include/OmxNodeOwner.h b/media/libstagefright/include/OmxNodeOwner.h
new file mode 100644
index 0000000..64ec7f7
--- /dev/null
+++ b/media/libstagefright/include/OmxNodeOwner.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OMX_NODE_OWNER_H_
+
+#define OMX_NODE_OWNER_H_
+
+namespace android {
+
+struct OMXNodeInstance;
+
+/**
+ * This struct is needed to separate OMX from OMXNodeInstance.
+ *
+ * TODO: This might not be needed after Treble transition is complete.
+ */
+struct OmxNodeOwner {
+ virtual status_t freeNode(const sp<OMXNodeInstance> &instance) = 0;
+ virtual ~OmxNodeOwner() {}
+};
+
+}
+
+#endif // OMX_NODE_OWNER_H_
diff --git a/media/libstagefright/include/SecureBuffer.h b/media/libstagefright/include/SecureBuffer.h
new file mode 100644
index 0000000..cf7933a
--- /dev/null
+++ b/media/libstagefright/include/SecureBuffer.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SECURE_BUFFER_H_
+
+#define SECURE_BUFFER_H_
+
+#include <media/ICrypto.h>
+#include <media/MediaCodecBuffer.h>
+
+namespace android {
+
+class NativeHandle;
+
+/**
+ * Secure MediaCodecBuffer implementation.
+ *
+ * For classes outside of MediaCodec, this buffer is an opaque buffer only with
+ * the size information. For decryption, it exposes underlying handle/pointer
+ * and its type, which can be fed to ICrypto::decrypt().
+ */
+class SecureBuffer : public MediaCodecBuffer {
+public:
+ SecureBuffer(const sp<AMessage> &format, const void *ptr, size_t size);
+ SecureBuffer(const sp<AMessage> &format, const sp<NativeHandle> &handle, size_t size);
+
+ virtual ~SecureBuffer() = default;
+
+ void *getDestinationPointer();
+ ICrypto::DestinationType getDestinationType();
+
+private:
+ SecureBuffer() = delete;
+
+ const void *mPointer;
+ const sp<NativeHandle> mHandle;
+};
+
+} // namespace android
+
+#endif // SECURE_BUFFER_H_
diff --git a/media/libstagefright/include/SharedMemoryBuffer.h b/media/libstagefright/include/SharedMemoryBuffer.h
new file mode 100644
index 0000000..92df68a
--- /dev/null
+++ b/media/libstagefright/include/SharedMemoryBuffer.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SHARED_MEMORY_BUFFER_H_
+
+#define SHARED_MEMORY_BUFFER_H_
+
+#include <media/MediaCodecBuffer.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+
+namespace android {
+
+struct AMessage;
+class IMemory;
+
+/**
+ * MediaCodecBuffer implementation based on IMemory.
+ */
+class SharedMemoryBuffer : public MediaCodecBuffer {
+public:
+ typedef ::android::hidl::memory::V1_0::IMemory TMemory;
+ SharedMemoryBuffer(const sp<AMessage> &format, const sp<IMemory> &mem);
+ SharedMemoryBuffer(const sp<AMessage> &format, const sp<TMemory> &mem);
+
+ virtual ~SharedMemoryBuffer() = default;
+
+private:
+ SharedMemoryBuffer() = delete;
+
+ const sp<IMemory> mMemory;
+ const sp<TMemory> mTMemory;
+};
+
+} // namespace android
+
+#endif // SHARED_MEMORY_BUFFER_H_
diff --git a/media/libstagefright/include/SimpleSoftOMXComponent.h b/media/libstagefright/include/SimpleSoftOMXComponent.h
index 591b38e..1d1f2bd 100644
--- a/media/libstagefright/include/SimpleSoftOMXComponent.h
+++ b/media/libstagefright/include/SimpleSoftOMXComponent.h
@@ -29,6 +29,11 @@
struct ALooper;
+struct CodecProfileLevel {
+ OMX_U32 mProfile;
+ OMX_U32 mLevel;
+};
+
struct SimpleSoftOMXComponent : public SoftOMXComponent {
SimpleSoftOMXComponent(
const char *name,
diff --git a/media/libstagefright/include/WVMExtractor.h b/media/libstagefright/include/WVMExtractor.h
deleted file mode 100644
index 65cb99a..0000000
--- a/media/libstagefright/include/WVMExtractor.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef WVM_EXTRACTOR_H_
-
-#define WVM_EXTRACTOR_H_
-
-#include <media/stagefright/MediaExtractor.h>
-#include <utils/Errors.h>
-
-namespace android {
-
-struct AMessage;
-class String8;
-class DataSource;
-
-class WVMLoadableExtractor : public MediaExtractor {
-public:
- WVMLoadableExtractor() {}
- virtual ~WVMLoadableExtractor() {}
-
- virtual int64_t getCachedDurationUs(status_t *finalStatus) = 0;
- virtual status_t getError() = 0;
- virtual status_t getEstimatedBandwidthKbps(int32_t *kbps) = 0;
- virtual void setAdaptiveStreamingMode(bool adaptive) = 0;
- virtual void setCryptoPluginMode(bool cryptoPluginMode) = 0;
- virtual void setError(status_t err) = 0;
- virtual void setUID(uid_t uid) = 0;
-};
-
-class WVMExtractor : public MediaExtractor {
-public:
- explicit WVMExtractor(const sp<DataSource> &source);
-
- virtual size_t countTracks();
- virtual sp<IMediaSource> getTrack(size_t index);
- virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
- virtual sp<MetaData> getMetaData();
- virtual void setUID(uid_t uid);
-
- // Return the amount of data cached from the current
- // playback positiion (in us).
- // While more data is still being fetched *finalStatus == OK,
- // Once fetching is completed (no more data available), *finalStatus != OK
- // If fetching completed normally (i.e. reached EOS instead of IO error)
- // *finalStatus == ERROR_END_OF_STREAM
- int64_t getCachedDurationUs(status_t *finalStatus);
-
- // Return the current estimated bandwidth
- status_t getEstimatedBandwidthKbps(int32_t *kbps);
-
- // Set to use adaptive streaming mode by the WV component.
- // If adaptive == true, adaptive streaming mode will be used.
- // Default mode is non-adaptive streaming mode.
- // Should set to use adaptive streaming mode only if widevine:// protocol
- // is used.
- void setAdaptiveStreamingMode(bool adaptive);
-
- // setCryptoPluginMode(true) to select crypto plugin mode.
- // In this mode, the extractor returns encrypted data for use
- // with the MediaCodec model, which handles the decryption in the
- // codec.
- void setCryptoPluginMode(bool cryptoPluginMode);
-
- static bool getVendorLibHandle();
-
- status_t getError();
-
- void setError(status_t err);
-
-protected:
- virtual ~WVMExtractor();
-
-private:
- sp<DataSource> mDataSource;
- sp<WVMLoadableExtractor> mImpl;
-
- WVMExtractor(const WVMExtractor &);
- WVMExtractor &operator=(const WVMExtractor &);
-};
-
-bool SniffWVM(
- const sp<DataSource> &source, String8 *mimeType, float *confidence,
- sp<AMessage> *);
-
-} // namespace android
-
-#endif // DRM_EXTRACTOR_H_
-
diff --git a/media/libstagefright/include/avc_utils.h b/media/libstagefright/include/avc_utils.h
index b2ef360..d05906a 100644
--- a/media/libstagefright/include/avc_utils.h
+++ b/media/libstagefright/include/avc_utils.h
@@ -18,6 +18,7 @@
#define AVC_UTILS_H_
+#include <media/MediaCodecBuffer.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <utils/Errors.h>
@@ -84,6 +85,7 @@
sp<MetaData> MakeAVCCodecSpecificData(const sp<ABuffer> &accessUnit);
bool IsIDR(const sp<ABuffer> &accessUnit);
+bool IsIDR(const sp<MediaCodecBuffer> &accessUnit);
bool IsAVCReferenceFrame(const sp<ABuffer> &accessUnit);
uint32_t FindAVCLayerId(const uint8_t *data, size_t size);
diff --git a/media/libstagefright/include/media/stagefright/ACodec.h b/media/libstagefright/include/media/stagefright/ACodec.h
index 3379d1a..22b8657 100644
--- a/media/libstagefright/include/media/stagefright/ACodec.h
+++ b/media/libstagefright/include/media/stagefright/ACodec.h
@@ -29,23 +29,42 @@
#include <media/stagefright/SkipCutBuffer.h>
#include <utils/NativeHandle.h>
#include <OMX_Audio.h>
+#include <hardware/gralloc.h>
#define TRACK_BUFFER_TIMING 0
namespace android {
struct ABuffer;
+class ACodecBufferChannel;
+class MediaCodecBuffer;
class MemoryDealer;
struct DescribeColorFormat2Params;
struct DataConverter;
+// Treble shared memory
+namespace hidl {
+namespace allocator {
+namespace V1_0 {
+struct IAllocator;
+} // V1_0
+} // allocator
+namespace memory {
+namespace V1_0 {
+struct IMemory;
+} // V1_0
+} // memory
+} // hidl
+
+typedef hidl::allocator::V1_0::IAllocator TAllocator;
+typedef hidl::memory::V1_0::IMemory TMemory;
+
struct ACodec : public AHierarchicalStateMachine, public CodecBase {
ACodec();
- virtual void setNotificationMessage(const sp<AMessage> &msg);
-
void initiateSetup(const sp<AMessage> &msg);
+ virtual std::shared_ptr<BufferChannelBase> getBufferChannel() override;
virtual void initiateAllocateComponent(const sp<AMessage> &msg);
virtual void initiateConfigureComponent(const sp<AMessage> &msg);
virtual void initiateCreateInputSurface();
@@ -71,29 +90,6 @@
handleMessage(msg);
}
- struct PortDescription : public CodecBase::PortDescription {
- size_t countBuffers();
- IOMX::buffer_id bufferIDAt(size_t index) const;
- sp<ABuffer> bufferAt(size_t index) const;
- sp<NativeHandle> handleAt(size_t index) const;
- sp<RefBase> memRefAt(size_t index) const;
-
- private:
- friend struct ACodec;
-
- Vector<IOMX::buffer_id> mBufferIDs;
- Vector<sp<ABuffer> > mBuffers;
- Vector<sp<NativeHandle> > mHandles;
- Vector<sp<RefBase> > mMemRefs;
-
- PortDescription();
- void addBuffer(
- IOMX::buffer_id id, const sp<ABuffer> &buffer,
- const sp<NativeHandle> &handle, const sp<RefBase> &memRef);
-
- DISALLOW_EVIL_CONSTRUCTORS(PortDescription);
- };
-
// Returns 0 if configuration is not supported. NOTE: this is treated by
// some OMX components as auto level, and by others as invalid level.
static int /* OMX_VIDEO_AVCLEVELTYPE */ getAVCLevelFor(
@@ -108,6 +104,11 @@
static status_t getOMXChannelMapping(size_t numChannels, OMX_AUDIO_CHANNELTYPE map[]);
+ // Save the flag.
+ void setTrebleFlag(bool trebleFlag);
+ // Return the saved flag.
+ bool getTrebleFlag() const;
+
protected:
virtual ~ACodec();
@@ -149,6 +150,7 @@
kWhatSubmitOutputMetadataBufferIfEOS = 'subm',
kWhatOMXDied = 'OMXd',
kWhatReleaseCodecInstance = 'relC',
+ kWhatForceStateTransition = 'fstt',
};
enum {
@@ -186,13 +188,14 @@
Status mStatus;
unsigned mDequeuedAt;
- sp<ABuffer> mData; // the client's buffer; if not using data conversion, this is the
- // codec buffer; otherwise, it is allocated separately
- sp<RefBase> mMemRef; // and a reference to the IMemory, so it does not go away
- sp<ABuffer> mCodecData; // the codec's buffer
- sp<RefBase> mCodecRef; // and a reference to the IMemory
+ sp<MediaCodecBuffer> mData; // the client's buffer; if not using data conversion, this is
+ // the codec buffer; otherwise, it is allocated separately
+ sp<RefBase> mMemRef; // and a reference to the IMemory, so it does not go away
+ sp<MediaCodecBuffer> mCodecData; // the codec's buffer
+ sp<RefBase> mCodecRef; // and a reference to the IMemory
+
sp<GraphicBuffer> mGraphicBuffer;
- sp<NativeHandle> mNativeHandle;
+ bool mNewGraphicBuffer;
int mFenceFd;
FrameRenderTracker::Info *mRenderInfo;
@@ -222,8 +225,6 @@
KeyedVector<int64_t, BufferStats> mBufferStats;
#endif
- sp<AMessage> mNotify;
-
sp<UninitializedState> mUninitializedState;
sp<LoadedState> mLoadedState;
sp<LoadedToIdleState> mLoadedToIdleState;
@@ -238,10 +239,11 @@
AString mComponentName;
uint32_t mFlags;
- uint32_t mQuirks;
sp<IOMX> mOMX;
- sp<IBinder> mNodeBinder;
- IOMX::node_id mNode;
+ sp<IOMXNode> mOMXNode;
+ int32_t mNodeGeneration;
+ bool mTrebleFlag;
+ sp<TAllocator> mAllocator[2];
sp<MemoryDealer> mDealer[2];
bool mUsingNativeWindow;
@@ -283,33 +285,42 @@
bool mChannelMaskPresent;
int32_t mChannelMask;
unsigned mDequeueCounter;
- MetadataBufferType mInputMetadataType;
- MetadataBufferType mOutputMetadataType;
- bool mLegacyAdaptiveExperiment;
+ IOMX::PortMode mPortMode[2];
int32_t mMetadataBuffersToSubmit;
size_t mNumUndequeuedBuffers;
sp<DataConverter> mConverter[2];
+ sp<IGraphicBufferSource> mGraphicBufferSource;
int64_t mRepeatFrameDelayUs;
int64_t mMaxPtsGapUs;
float mMaxFps;
-
- int64_t mTimePerFrameUs;
- int64_t mTimePerCaptureUs;
-
+ double mFps;
+ double mCaptureFps;
bool mCreateInputBuffersSuspended;
+ uint32_t mLatency;
bool mTunneled;
OMX_INDEXTYPE mDescribeColorAspectsIndex;
OMX_INDEXTYPE mDescribeHDRStaticInfoIndex;
+ std::shared_ptr<ACodecBufferChannel> mBufferChannel;
+
+ int32_t mStateGeneration;
+
+ enum {
+ kExtensionsUnchecked,
+ kExtensionsNone,
+ kExtensionsExist,
+ } mVendorExtensionsStatus;
+
status_t setCyclicIntraMacroblockRefresh(const sp<AMessage> &msg, int32_t mode);
status_t allocateBuffersOnPort(OMX_U32 portIndex);
status_t freeBuffersOnPort(OMX_U32 portIndex);
status_t freeBuffer(OMX_U32 portIndex, size_t i);
status_t handleSetSurface(const sp<Surface> &surface);
+ status_t setPortMode(int32_t portIndex, IOMX::PortMode mode);
status_t setupNativeWindowSizeFormatAndUsage(
ANativeWindow *nativeWindow /* nonnull */, int *finalUsage /* nonnull */,
bool reconnect);
@@ -326,17 +337,19 @@
BufferInfo *dequeueBufferFromNativeWindow();
inline bool storingMetadataInDecodedBuffers() {
- return mOutputMetadataType >= 0 && !mIsEncoder;
+ return (mPortMode[kPortIndexOutput] == IOMX::kPortModeDynamicANWBuffer) && !mIsEncoder;
}
- inline bool usingMetadataOnEncoderOutput() {
- return mOutputMetadataType >= 0 && mIsEncoder;
+ inline bool usingSecureBufferOnEncoderOutput() {
+ return (mPortMode[kPortIndexOutput] == IOMX::kPortModePresetSecureBuffer) && mIsEncoder;
}
BufferInfo *findBufferByID(
uint32_t portIndex, IOMX::buffer_id bufferID,
ssize_t *index = NULL);
+ status_t fillBuffer(BufferInfo *info);
+
status_t setComponentRole(bool isEncoder, const char *mime);
status_t configureCodec(const char *mime, const sp<AMessage> &msg);
@@ -474,6 +487,8 @@
AudioEncoding encoding = kAudioEncodingPcm16bit);
status_t setPriority(int32_t priority);
+ status_t setLatency(uint32_t latency);
+ status_t getLatency(uint32_t *latency);
status_t setOperatingRate(float rateFloat, bool isVideo);
status_t getIntraRefreshPeriod(uint32_t *intraRefreshPeriod);
status_t setIntraRefreshPeriod(uint32_t intraRefreshPeriod, bool inConfigure);
@@ -496,11 +511,10 @@
status_t configureBitrate(
int32_t bitrate, OMX_VIDEO_CONTROLRATETYPE bitrateMode);
+ void configureEncoderLatency(const sp<AMessage> &msg);
status_t setupErrorCorrectionParameters();
- status_t initNativeWindow();
-
// Returns true iff all buffers on the given port have status
// OWNED_BY_US or OWNED_BY_NATIVE_WINDOW.
bool allYourBuffersAreBelongToUs(OMX_U32 portIndex);
@@ -548,9 +562,19 @@
status_t requestIDRFrame();
status_t setParameters(const sp<AMessage> ¶ms);
+ // set vendor extension parameters specified in params that are supported by the codec
+ status_t setVendorParameters(const sp<AMessage> ¶ms);
+
+ // get vendor extension parameters supported by the codec for a specific port and add it to
+ // |format|
+ status_t getVendorParameters(OMX_U32 portIndex, sp<AMessage> &format);
+
// Send EOS on input stream.
void onSignalEndOfInputStream();
+ // Force EXEC->IDLE->LOADED shutdown sequence if not stale.
+ void forceStateTransition(int generation);
+
DISALLOW_EVIL_CONSTRUCTORS(ACodec);
};
diff --git a/media/libstagefright/include/media/stagefright/AudioSource.h b/media/libstagefright/include/media/stagefright/AudioSource.h
index 2ec89a4..f20c2cd 100644
--- a/media/libstagefright/include/media/stagefright/AudioSource.h
+++ b/media/libstagefright/include/media/stagefright/AudioSource.h
@@ -89,6 +89,8 @@
int64_t mPrevSampleTimeUs;
int64_t mInitialReadTimeUs;
int64_t mNumFramesReceived;
+ int64_t mNumFramesSkipped;
+ int64_t mNumFramesLost;
int64_t mNumClientOwnedBuffers;
List<MediaBuffer * > mBuffersReceived;
diff --git a/media/libstagefright/include/media/stagefright/CameraSource.h b/media/libstagefright/include/media/stagefright/CameraSource.h
index c604f2d..aa56d27 100644
--- a/media/libstagefright/include/media/stagefright/CameraSource.h
+++ b/media/libstagefright/include/media/stagefright/CameraSource.h
@@ -18,6 +18,7 @@
#define CAMERA_SOURCE_H_
+#include <deque>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaSource.h>
#include <camera/android/hardware/ICamera.h>
@@ -141,6 +142,9 @@
const sp<IMemory> &data);
virtual void recordingFrameHandleCallbackTimestamp(int64_t timestampUs,
native_handle_t* handle);
+ virtual void recordingFrameHandleCallbackTimestampBatch(
+ const std::vector<int64_t>& timestampsUs,
+ const std::vector<native_handle_t*>& handles);
private:
sp<CameraSource> mSource;
@@ -213,6 +217,8 @@
virtual status_t startCameraRecording();
virtual void releaseRecordingFrame(const sp<IMemory>& frame);
virtual void releaseRecordingFrameHandle(native_handle_t* handle);
+ // stagefright recorder not using this for now
+ virtual void releaseRecordingFrameHandleBatch(const std::vector<native_handle_t*>& handles);
// Returns true if need to skip the current frame.
// Called from dataCallbackTimestamp.
@@ -227,6 +233,10 @@
virtual void recordingFrameHandleCallbackTimestamp(int64_t timestampUs,
native_handle_t* handle);
+ virtual void recordingFrameHandleCallbackTimestampBatch(
+ const std::vector<int64_t>& timestampsUs,
+ const std::vector<native_handle_t*>& handles);
+
// Process a buffer item received in BufferQueueListener.
virtual void processBufferQueueFrame(BufferItem& buffer);
@@ -271,6 +281,13 @@
KeyedVector<ANativeWindowBuffer*, BufferItem> mReceivedBufferItemMap;
sp<BufferQueueListener> mBufferQueueListener;
+ Mutex mBatchLock; // protecting access to mInflightXXXXX members below
+ // Start of members protected by mBatchLock
+ std::deque<uint32_t> mInflightBatchSizes;
+ std::vector<native_handle_t*> mInflightReturnedHandles;
+ std::vector<const sp<IMemory>> mInflightReturnedMemorys;
+ // End of members protected by mBatchLock
+
void releaseQueuedFrames();
void releaseOneRecordingFrame(const sp<IMemory>& frame);
void createVideoBufferMemoryHeap(size_t size, uint32_t bufferCount);
diff --git a/media/libstagefright/include/media/stagefright/CameraSourceTimeLapse.h b/media/libstagefright/include/media/stagefright/CameraSourceTimeLapse.h
index 871c1d9..b066f9a 100644
--- a/media/libstagefright/include/media/stagefright/CameraSourceTimeLapse.h
+++ b/media/libstagefright/include/media/stagefright/CameraSourceTimeLapse.h
@@ -147,12 +147,23 @@
// In the video camera case calls skipFrameAndModifyTimeStamp() to modify
// timestamp and set mSkipCurrentFrame.
- // Then it calls the base CameraSource::recordingFrameHandleCallbackTimestamp()
+ // Then it calls the base CameraSource::recordingFrameHandleCallbackTimestamp() or
+ // CameraSource::recordingFrameHandleCallbackTimestampBatch()
// This will be called in VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA mode when
// the metadata is VideoNativeHandleMetadata.
virtual void recordingFrameHandleCallbackTimestamp(int64_t timestampUs,
native_handle_t* handle);
+ // In the video camera case calls skipFrameAndModifyTimeStamp() to modify
+ // timestamp and set mSkipCurrentFrame.
+ // Then it calls the base CameraSource::recordingFrameHandleCallbackTimestamp() or
+ // CameraSource::recordingFrameHandleCallbackTimestampBatch()
+ // This will be called in VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA mode when
+ // the metadata is VideoNativeHandleMetadata.
+ virtual void recordingFrameHandleCallbackTimestampBatch(
+ const std::vector<int64_t>& timestampsUs,
+ const std::vector<native_handle_t*>& handles);
+
// Process a buffer item received in CameraSource::BufferQueueListener.
// This will be called in VIDEO_BUFFER_MODE_BUFFER_QUEUE mode.
virtual void processBufferQueueFrame(BufferItem& buffer);
diff --git a/media/libstagefright/include/media/stagefright/CodecBase.h b/media/libstagefright/include/media/stagefright/CodecBase.h
index e057075..0dd77ba 100644
--- a/media/libstagefright/include/media/stagefright/CodecBase.h
+++ b/media/libstagefright/include/media/stagefright/CodecBase.h
@@ -18,12 +18,16 @@
#define CODEC_BASE_H_
+#include <memory>
+
#include <stdint.h>
#define STRINGIFY_ENUMS
+#include <media/ICrypto.h>
#include <media/IOMX.h>
#include <media/MediaCodecInfo.h>
+#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/foundation/AHandler.h>
#include <media/stagefright/foundation/ColorUtils.h>
#include <media/hardware/HardwareAPI.h>
@@ -31,35 +35,168 @@
#include <utils/NativeHandle.h>
#include <system/graphics.h>
+#include <android/media/IDescrambler.h>
namespace android {
-
-struct ABuffer;
+using namespace media;
+class BufferChannelBase;
+struct BufferProducerWrapper;
+class MediaCodecBuffer;
struct PersistentSurface;
+struct RenderedFrameInfo;
+class Surface;
struct CodecBase : public AHandler, /* static */ ColorUtils {
- enum {
- kWhatFillThisBuffer = 'fill',
- kWhatDrainThisBuffer = 'drai',
- kWhatEOS = 'eos ',
- kWhatShutdownCompleted = 'scom',
- kWhatFlushCompleted = 'fcom',
- kWhatOutputFormatChanged = 'outC',
- kWhatError = 'erro',
- kWhatComponentAllocated = 'cAll',
- kWhatComponentConfigured = 'cCon',
- kWhatInputSurfaceCreated = 'isfc',
- kWhatInputSurfaceAccepted = 'isfa',
- kWhatSignaledInputEOS = 'seos',
- kWhatBuffersAllocated = 'allc',
- kWhatOutputFramesRendered = 'outR',
+ /**
+ * This interface defines events firing from CodecBase back to MediaCodec.
+ * All methods must not block.
+ */
+ class CodecCallback {
+ public:
+ virtual ~CodecCallback() = default;
+
+ /**
+ * Notify MediaCodec for seeing an output EOS.
+ *
+ * @param err the underlying cause of the EOS. If the value is neither
+ * OK nor ERROR_END_OF_STREAM, the EOS is declared
+ * prematurely for that error.
+ */
+ virtual void onEos(status_t err) = 0;
+ /**
+ * Notify MediaCodec that start operation is complete.
+ */
+ virtual void onStartCompleted() = 0;
+ /**
+ * Notify MediaCodec that stop operation is complete.
+ */
+ virtual void onStopCompleted() = 0;
+ /**
+ * Notify MediaCodec that release operation is complete.
+ */
+ virtual void onReleaseCompleted() = 0;
+ /**
+ * Notify MediaCodec that flush operation is complete.
+ */
+ virtual void onFlushCompleted() = 0;
+ /**
+ * Notify MediaCodec that an error is occurred.
+ *
+ * @param err an error code for the occurred error.
+ * @param actionCode an action code for severity of the error.
+ */
+ virtual void onError(status_t err, enum ActionCode actionCode) = 0;
+ /**
+ * Notify MediaCodec that the underlying component is allocated.
+ *
+ * @param componentName the unique name of the component specified in
+ * MediaCodecList.
+ */
+ virtual void onComponentAllocated(const char *componentName) = 0;
+ /**
+ * Notify MediaCodec that the underlying component is configured.
+ *
+ * @param inputFormat an input format at configure time.
+ * @param outputFormat an output format at configure time.
+ */
+ virtual void onComponentConfigured(
+ const sp<AMessage> &inputFormat, const sp<AMessage> &outputFormat) = 0;
+ /**
+ * Notify MediaCodec that the input surface is created.
+ *
+ * @param inputFormat an input format at surface creation. Formats
+ * could change from the previous state as a result
+ * of creating a surface.
+ * @param outputFormat an output format at surface creation.
+ * @param inputSurface the created surface.
+ */
+ virtual void onInputSurfaceCreated(
+ const sp<AMessage> &inputFormat,
+ const sp<AMessage> &outputFormat,
+ const sp<BufferProducerWrapper> &inputSurface) = 0;
+ /**
+ * Notify MediaCodec that the input surface creation is failed.
+ *
+ * @param err an error code of the cause.
+ */
+ virtual void onInputSurfaceCreationFailed(status_t err) = 0;
+ /**
+ * Notify MediaCodec that the component accepted the provided input
+ * surface.
+ *
+ * @param inputFormat an input format at surface assignment. Formats
+ * could change from the previous state as a result
+ * of assigning a surface.
+ * @param outputFormat an output format at surface assignment.
+ */
+ virtual void onInputSurfaceAccepted(
+ const sp<AMessage> &inputFormat,
+ const sp<AMessage> &outputFormat) = 0;
+ /**
+ * Notify MediaCodec that the component declined the provided input
+ * surface.
+ *
+ * @param err an error code of the cause.
+ */
+ virtual void onInputSurfaceDeclined(status_t err) = 0;
+ /**
+ * Noitfy MediaCodec that the requested input EOS is sent to the input
+ * surface.
+ *
+ * @param err an error code returned from the surface. If there is no
+ * input surface, the value is INVALID_OPERATION.
+ */
+ virtual void onSignaledInputEOS(status_t err) = 0;
+ /**
+ * Notify MediaCodec that output frames are rendered with information on
+ * those frames.
+ *
+ * @param done a list of rendered frames.
+ */
+ virtual void onOutputFramesRendered(const std::list<RenderedFrameInfo> &done) = 0;
+ /**
+ * Notify MediaCodec that output buffers are changed.
+ */
+ virtual void onOutputBuffersChanged() = 0;
};
+ /**
+ * This interface defines events firing from BufferChannelBase back to MediaCodec.
+ * All methods must not block.
+ */
+ class BufferCallback {
+ public:
+ virtual ~BufferCallback() = default;
+
+ /**
+ * Notify MediaCodec that an input buffer is available with given index.
+ * When BufferChannelBase::getInputBufferArray() is not called,
+ * BufferChannelBase may report different buffers with the same index if
+ * MediaCodec already queued/discarded the buffer. After calling
+ * BufferChannelBase::getInputBufferArray(), the buffer and index match the
+ * returned array.
+ */
+ virtual void onInputBufferAvailable(
+ size_t index, const sp<MediaCodecBuffer> &buffer) = 0;
+ /**
+ * Notify MediaCodec that an output buffer is available with given index.
+ * When BufferChannelBase::getOutputBufferArray() is not called,
+ * BufferChannelBase may report different buffers with the same index if
+ * MediaCodec already queued/discarded the buffer. After calling
+ * BufferChannelBase::getOutputBufferArray(), the buffer and index match the
+ * returned array.
+ */
+ virtual void onOutputBufferAvailable(
+ size_t index, const sp<MediaCodecBuffer> &buffer) = 0;
+ };
enum {
kMaxCodecBufferSize = 8192 * 4096 * 4, // 8K RGBA
};
- virtual void setNotificationMessage(const sp<AMessage> &msg) = 0;
+ inline void setCallback(std::unique_ptr<CodecCallback> &&callback) {
+ mCallback = std::move(callback);
+ }
+ virtual std::shared_ptr<BufferChannelBase> getBufferChannel() = 0;
virtual void initiateAllocateComponent(const sp<AMessage> &msg) = 0;
virtual void initiateConfigureComponent(const sp<AMessage> &msg) = 0;
@@ -85,33 +222,109 @@
virtual void signalSetParameters(const sp<AMessage> &msg) = 0;
virtual void signalEndOfInputStream() = 0;
- struct PortDescription : public RefBase {
- virtual size_t countBuffers() = 0;
- virtual IOMX::buffer_id bufferIDAt(size_t index) const = 0;
- virtual sp<ABuffer> bufferAt(size_t index) const = 0;
- virtual sp<NativeHandle> handleAt(size_t /*index*/) const { return NULL; };
- virtual sp<RefBase> memRefAt(size_t /*index*/) const { return NULL; }
-
- protected:
- PortDescription();
- virtual ~PortDescription();
-
- private:
- DISALLOW_EVIL_CONSTRUCTORS(PortDescription);
- };
-
/*
* Codec-related defines
*/
protected:
- CodecBase();
- virtual ~CodecBase();
+ CodecBase() = default;
+ virtual ~CodecBase() = default;
+
+ std::unique_ptr<CodecCallback> mCallback;
private:
DISALLOW_EVIL_CONSTRUCTORS(CodecBase);
};
+/**
+ * A channel between MediaCodec and CodecBase object which manages buffer
+ * passing. Only MediaCodec is expected to call these methods, and
+ * underlying CodecBase implementation should define its own interface
+ * separately for itself.
+ *
+ * Concurrency assumptions:
+ *
+ * 1) Clients may access the object at multiple threads concurrently.
+ * 2) All methods do not call underlying CodecBase object while holding a lock.
+ * 3) Code inside critical section executes within 1ms.
+ */
+class BufferChannelBase {
+public:
+ virtual ~BufferChannelBase() = default;
+
+ inline void setCallback(std::unique_ptr<CodecBase::BufferCallback> &&callback) {
+ mCallback = std::move(callback);
+ }
+
+ inline void setCrypto(const sp<ICrypto> &crypto) {
+ mCrypto = crypto;
+ }
+
+ inline void setDescrambler(const sp<IDescrambler> &descrambler) {
+ mDescrambler = descrambler;
+ }
+
+ /**
+ * Queue an input buffer into the buffer channel.
+ *
+ * @return OK if successful;
+ * -ENOENT if the buffer is not known (TODO: this should be
+ * handled gracefully in the future, here and below).
+ */
+ virtual status_t queueInputBuffer(const sp<MediaCodecBuffer> &buffer) = 0;
+ /**
+ * Queue a secure input buffer into the buffer channel.
+ *
+ * @return OK if successful;
+ * -ENOENT if the buffer is not known;
+ * -ENOSYS if mCrypto is not set so that decryption is not
+ * possible;
+ * other errors if decryption failed.
+ */
+ virtual status_t queueSecureInputBuffer(
+ const sp<MediaCodecBuffer> &buffer,
+ bool secure,
+ const uint8_t *key,
+ const uint8_t *iv,
+ CryptoPlugin::Mode mode,
+ CryptoPlugin::Pattern pattern,
+ const CryptoPlugin::SubSample *subSamples,
+ size_t numSubSamples,
+ AString *errorDetailMsg) = 0;
+ /**
+ * Request buffer rendering at specified time.
+ *
+ * @param timestampNs nanosecond timestamp for rendering time.
+ * @return OK if successful;
+ * -ENOENT if the buffer is not known.
+ */
+ virtual status_t renderOutputBuffer(
+ const sp<MediaCodecBuffer> &buffer, int64_t timestampNs) = 0;
+ /**
+ * Discard a buffer to the underlying CodecBase object.
+ *
+ * TODO: remove once this operation can be handled by just clearing the
+ * reference.
+ *
+ * @return OK if successful;
+ * -ENOENT if the buffer is not known.
+ */
+ virtual status_t discardBuffer(const sp<MediaCodecBuffer> &buffer) = 0;
+ /**
+ * Clear and fill array with input buffers.
+ */
+ virtual void getInputBufferArray(Vector<sp<MediaCodecBuffer>> *array) = 0;
+ /**
+ * Clear and fill array with output buffers.
+ */
+ virtual void getOutputBufferArray(Vector<sp<MediaCodecBuffer>> *array) = 0;
+
+protected:
+ std::unique_ptr<CodecBase::BufferCallback> mCallback;
+ sp<ICrypto> mCrypto;
+ sp<IDescrambler> mDescrambler;
+};
+
} // namespace android
#endif // CODEC_BASE_H_
diff --git a/media/libstagefright/include/media/stagefright/DataSource.h b/media/libstagefright/include/media/stagefright/DataSource.h
index ca21719..63eccea 100644
--- a/media/libstagefright/include/media/stagefright/DataSource.h
+++ b/media/libstagefright/include/media/stagefright/DataSource.h
@@ -44,6 +44,7 @@
kStreamedFromLocalHost = 2,
kIsCachingDataSource = 4,
kIsHTTPBasedSource = 8,
+ kIsLocalFileSource = 16,
};
static sp<DataSource> CreateFromURI(
@@ -55,6 +56,7 @@
static sp<DataSource> CreateMediaHTTP(const sp<IMediaHTTPService> &httpService);
static sp<DataSource> CreateFromIDataSource(const sp<IDataSource> &source);
+ static sp<DataSource> CreateFromFd(int fd, int64_t offset, int64_t length);
DataSource() {}
@@ -102,17 +104,6 @@
////////////////////////////////////////////////////////////////////////////
- bool sniff(String8 *mimeType, float *confidence, sp<AMessage> *meta);
-
- // The sniffer can optionally fill in "meta" with an AMessage containing
- // a dictionary of values that helps the corresponding extractor initialize
- // its state without duplicating effort already exerted by the sniffer.
- typedef bool (*SnifferFunc)(
- const sp<DataSource> &source, String8 *mimeType,
- float *confidence, sp<AMessage> *meta);
-
- static void RegisterDefaultSniffers();
-
// for DRM
virtual sp<DecryptHandle> DrmInitialization(const char * /*mime*/ = NULL) {
return NULL;
@@ -127,16 +118,16 @@
virtual void close() {};
+ // creates an IDataSource wrapper to the DataSource.
+ virtual sp<IDataSource> asIDataSource();
+
+ // returns a pointer to IDataSource if it is wrapped.
+ virtual sp<IDataSource> getIDataSource() const;
+
protected:
virtual ~DataSource() {}
private:
- static Mutex gSnifferMutex;
- static List<SnifferFunc> gSniffers;
- static bool gSniffersRegistered;
-
- static void RegisterSniffer_l(SnifferFunc func);
-
DataSource(const DataSource &);
DataSource &operator=(const DataSource &);
};
diff --git a/media/libstagefright/include/media/stagefright/FileSource.h b/media/libstagefright/include/media/stagefright/FileSource.h
index b6349e0..7267e9a 100644
--- a/media/libstagefright/include/media/stagefright/FileSource.h
+++ b/media/libstagefright/include/media/stagefright/FileSource.h
@@ -39,6 +39,10 @@
virtual status_t getSize(off64_t *size);
+ virtual uint32_t flags() {
+ return kIsLocalFileSource;
+ }
+
virtual sp<DecryptHandle> DrmInitialization(const char *mime);
virtual void getDrmInfo(sp<DecryptHandle> &handle, DrmManagerClient **client);
@@ -47,6 +51,8 @@
return mName;
}
+ static bool requiresDrm(int fd, int64_t offset, int64_t length, const char *mime);
+
protected:
virtual ~FileSource();
diff --git a/media/libstagefright/include/media/stagefright/FrameRenderTracker.h b/media/libstagefright/include/media/stagefright/FrameRenderTracker.h
index 327d260..6cbf85d 100644
--- a/media/libstagefright/include/media/stagefright/FrameRenderTracker.h
+++ b/media/libstagefright/include/media/stagefright/FrameRenderTracker.h
@@ -32,58 +32,61 @@
class Fence;
class GraphicBuffer;
+// Tracks the render information about a frame. Frames go through several states while
+// the render information is tracked:
+//
+// 1. queued frame: mMediaTime and mGraphicBuffer are set for the frame. mFence is the
+// queue fence (read fence). mIndex is negative, and mRenderTimeNs is invalid.
+// Key characteristics: mFence is not NULL and mIndex is negative.
+//
+// 2. dequeued frame: mFence is updated with the dequeue fence (write fence). mIndex is set.
+// Key characteristics: mFence is not NULL and mIndex is non-negative. mRenderTime is still
+// invalid.
+//
+// 3. rendered frame or frame: mFence is cleared, mRenderTimeNs is set.
+// Key characteristics: mFence is NULL.
+//
+struct RenderedFrameInfo {
+ // set by client during onFrameQueued or onFrameRendered
+ int64_t getMediaTimeUs() const { return mMediaTimeUs; }
+
+ // -1 if frame is not yet rendered
+ nsecs_t getRenderTimeNs() const { return mRenderTimeNs; }
+
+ // set by client during updateRenderInfoForDequeuedBuffer; -1 otherwise
+ ssize_t getIndex() const { return mIndex; }
+
+ // creates information for a queued frame
+ RenderedFrameInfo(int64_t mediaTimeUs, const sp<GraphicBuffer> &graphicBuffer,
+ const sp<Fence> &fence)
+ : mMediaTimeUs(mediaTimeUs),
+ mRenderTimeNs(-1),
+ mIndex(-1),
+ mGraphicBuffer(graphicBuffer),
+ mFence(fence) {
+ }
+
+ // creates information for a frame rendered on a tunneled surface
+ RenderedFrameInfo(int64_t mediaTimeUs, nsecs_t renderTimeNs)
+ : mMediaTimeUs(mediaTimeUs),
+ mRenderTimeNs(renderTimeNs),
+ mIndex(-1),
+ mGraphicBuffer(NULL),
+ mFence(NULL) {
+ }
+
+private:
+ int64_t mMediaTimeUs;
+ nsecs_t mRenderTimeNs;
+ ssize_t mIndex; // to be used by client
+ sp<GraphicBuffer> mGraphicBuffer;
+ sp<Fence> mFence;
+
+ friend struct FrameRenderTracker;
+};
+
struct FrameRenderTracker {
- // Tracks the render information about a frame. Frames go through several states while
- // the render information is tracked:
- //
- // 1. queued frame: mMediaTime and mGraphicBuffer are set for the frame. mFence is the
- // queue fence (read fence). mIndex is negative, and mRenderTimeNs is invalid.
- // Key characteristics: mFence is not NULL and mIndex is negative.
- //
- // 2. dequeued frame: mFence is updated with the dequeue fence (write fence). mIndex is set.
- // Key characteristics: mFence is not NULL and mIndex is non-negative. mRenderTime is still
- // invalid.
- //
- // 3. rendered frame or frame: mFence is cleared, mRenderTimeNs is set.
- // Key characteristics: mFence is NULL.
- //
- struct Info {
- // set by client during onFrameQueued or onFrameRendered
- int64_t getMediaTimeUs() const { return mMediaTimeUs; }
-
- // -1 if frame is not yet rendered
- nsecs_t getRenderTimeNs() const { return mRenderTimeNs; }
-
- // set by client during updateRenderInfoForDequeuedBuffer; -1 otherwise
- ssize_t getIndex() const { return mIndex; }
-
- // creates information for a queued frame
- Info(int64_t mediaTimeUs, const sp<GraphicBuffer> &graphicBuffer, const sp<Fence> &fence)
- : mMediaTimeUs(mediaTimeUs),
- mRenderTimeNs(-1),
- mIndex(-1),
- mGraphicBuffer(graphicBuffer),
- mFence(fence) {
- }
-
- // creates information for a frame rendered on a tunneled surface
- Info(int64_t mediaTimeUs, nsecs_t renderTimeNs)
- : mMediaTimeUs(mediaTimeUs),
- mRenderTimeNs(renderTimeNs),
- mIndex(-1),
- mGraphicBuffer(NULL),
- mFence(NULL) {
- }
-
- private:
- int64_t mMediaTimeUs;
- nsecs_t mRenderTimeNs;
- ssize_t mIndex; // to be used by client
- sp<GraphicBuffer> mGraphicBuffer;
- sp<Fence> mFence;
-
- friend struct FrameRenderTracker;
- };
+ typedef RenderedFrameInfo Info;
FrameRenderTracker();
diff --git a/media/libstagefright/include/media/stagefright/MPEG4Writer.h b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
index a8ba095..1c4827f 100644
--- a/media/libstagefright/include/media/stagefright/MPEG4Writer.h
+++ b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
@@ -24,6 +24,8 @@
#include <media/stagefright/MediaWriter.h>
#include <utils/List.h>
#include <utils/threads.h>
+#include <media/stagefright/foundation/AHandlerReflector.h>
+#include <media/stagefright/foundation/ALooper.h>
namespace android {
@@ -36,9 +38,8 @@
MPEG4Writer(int fd);
// Limitations
- // 1. No more than 2 tracks can be added
- // 2. Only video or audio source can be added
- // 3. No more than one video and/or one audio source can be added.
+ // No more than one video and/or one audio source can be added, but
+ // multiple metadata sources can be added.
virtual status_t addSource(const sp<IMediaSource> &source);
// Returns INVALID_OPERATION if there is no source or track.
@@ -66,16 +67,25 @@
status_t setGeoData(int latitudex10000, int longitudex10000);
status_t setCaptureRate(float captureFps);
status_t setTemporalLayerCount(uint32_t layerCount);
+ void notifyApproachingLimit();
virtual void setStartTimeOffsetMs(int ms) { mStartTimeOffsetMs = ms; }
virtual int32_t getStartTimeOffsetMs() const { return mStartTimeOffsetMs; }
+ virtual status_t setNextFd(int fd);
protected:
virtual ~MPEG4Writer();
private:
class Track;
+ friend struct AHandlerReflector<MPEG4Writer>;
+
+ enum {
+ kWhatSwitch = 'swch',
+ };
int mFd;
+ int mNextFd;
+ sp<MetaData> mStartMeta;
status_t mInitCheck;
bool mIsRealTimeRecording;
bool mUse4ByteNalLength;
@@ -84,6 +94,7 @@
bool mPaused;
bool mStarted; // Writer thread + track threads started successfully
bool mWriterThreadStarted; // Only writer thread started successfully
+ bool mSendNotify;
off64_t mOffset;
off_t mMdatOffset;
uint8_t *mMoovBoxBuffer;
@@ -100,6 +111,10 @@
int mLongitudex10000;
bool mAreGeoTagsAvailable;
int32_t mStartTimeOffsetMs;
+ bool mSwitchPending;
+
+ sp<ALooper> mLooper;
+ sp<AHandlerReflector<MPEG4Writer> > mReflector;
Mutex mLock;
@@ -185,6 +200,8 @@
void lock();
void unlock();
+ void initInternal(int fd);
+
// Acquire lock before calling these methods
off64_t addSample_l(MediaBuffer *buffer);
off64_t addLengthPrefixedSample_l(MediaBuffer *buffer);
@@ -193,6 +210,7 @@
bool exceedsFileSizeLimit();
bool use32BitFileOffset() const;
bool exceedsFileDurationLimit();
+ bool approachingFileSizeLimit();
bool isFileStreamable() const;
void trackProgressStatus(size_t trackId, int64_t timeUs, status_t err = OK);
void writeCompositionMatrix(int32_t degrees);
@@ -203,6 +221,7 @@
void writeGeoDataBox();
void writeLatitude(int degreex10000);
void writeLongitude(int degreex10000);
+ void finishCurrentSession();
void addDeviceMeta();
void writeHdlr();
@@ -211,10 +230,13 @@
void writeMetaBox();
void sendSessionSummary();
void release();
- status_t reset();
+ status_t switchFd();
+ status_t reset(bool stopSource = true);
static uint32_t getMpeg4Time();
+ void onMessageReceived(const sp<AMessage> &msg);
+
MPEG4Writer(const MPEG4Writer &);
MPEG4Writer &operator=(const MPEG4Writer &);
};
diff --git a/media/libstagefright/include/media/stagefright/MediaCodec.h b/media/libstagefright/include/media/stagefright/MediaCodec.h
index d18aad8..4140266 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodec.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodec.h
@@ -18,10 +18,14 @@
#define MEDIA_CODEC_H_
+#include <memory>
+#include <vector>
+
#include <gui/IGraphicBufferProducer.h>
#include <media/hardware/CryptoAPI.h>
#include <media/MediaCodecInfo.h>
#include <media/MediaResource.h>
+#include <media/MediaAnalyticsItem.h>
#include <media/stagefright/foundation/AHandler.h>
#include <media/stagefright/FrameRenderTracker.h>
#include <utils/Vector.h>
@@ -32,16 +36,21 @@
struct AMessage;
struct AReplyToken;
struct AString;
+class BufferChannelBase;
struct CodecBase;
class IBatteryStats;
struct ICrypto;
+class MediaCodecBuffer;
class IMemory;
-class MemoryDealer;
class IResourceManagerClient;
class IResourceManagerService;
struct PersistentSurface;
class SoftwareRenderer;
class Surface;
+namespace media {
+class IDescrambler;
+};
+using namespace media;
struct MediaCodec : public AHandler {
enum ConfigureFlags {
@@ -63,14 +72,15 @@
};
static const pid_t kNoPid = -1;
+ static const uid_t kNoUid = -1;
static sp<MediaCodec> CreateByType(
const sp<ALooper> &looper, const AString &mime, bool encoder, status_t *err = NULL,
- pid_t pid = kNoPid);
+ pid_t pid = kNoPid, uid_t uid = kNoUid);
static sp<MediaCodec> CreateByComponentName(
const sp<ALooper> &looper, const AString &name, status_t *err = NULL,
- pid_t pid = kNoPid);
+ pid_t pid = kNoPid, uid_t uid = kNoUid);
static sp<PersistentSurface> CreatePersistentInputSurface();
@@ -85,6 +95,15 @@
const sp<ICrypto> &crypto,
uint32_t flags);
+ status_t configure(
+ const sp<AMessage> &format,
+ const sp<Surface> &nativeWindow,
+ const sp<ICrypto> &crypto,
+ const sp<IDescrambler> &descrambler,
+ uint32_t flags);
+
+ status_t releaseCrypto();
+
status_t setCallback(const sp<AMessage> &callback);
status_t setOnFrameRenderedNotification(const sp<AMessage> ¬ify);
@@ -149,14 +168,12 @@
status_t getOutputFormat(sp<AMessage> *format) const;
status_t getInputFormat(sp<AMessage> *format) const;
- status_t getWidevineLegacyBuffers(Vector<sp<ABuffer> > *buffers) const;
+ status_t getInputBuffers(Vector<sp<MediaCodecBuffer> > *buffers) const;
+ status_t getOutputBuffers(Vector<sp<MediaCodecBuffer> > *buffers) const;
- status_t getInputBuffers(Vector<sp<ABuffer> > *buffers) const;
- status_t getOutputBuffers(Vector<sp<ABuffer> > *buffers) const;
-
- status_t getOutputBuffer(size_t index, sp<ABuffer> *buffer);
+ status_t getOutputBuffer(size_t index, sp<MediaCodecBuffer> *buffer);
status_t getOutputFormat(size_t index, sp<AMessage> *format);
- status_t getInputBuffer(size_t index, sp<ABuffer> *buffer);
+ status_t getInputBuffer(size_t index, sp<MediaCodecBuffer> *buffer);
status_t setSurface(const sp<Surface> &nativeWindow);
@@ -169,6 +186,8 @@
status_t getName(AString *componentName) const;
+ status_t getMetrics(MediaAnalyticsItem * &reply);
+
status_t setParameters(const sp<AMessage> ¶ms);
// Create a MediaCodec notification message from a list of rendered or dropped render infos
@@ -233,6 +252,7 @@
kWhatSetParameters = 'setP',
kWhatSetCallback = 'setC',
kWhatSetNotification = 'setN',
+ kWhatDrmReleaseCrypto = 'rDrm',
};
enum {
@@ -245,21 +265,16 @@
kFlagIsSecure = 64,
kFlagSawMediaServerDie = 128,
kFlagIsEncoder = 256,
- kFlagGatherCodecSpecificData = 512,
+ // 512 skipped
kFlagIsAsync = 1024,
kFlagIsComponentAllocated = 2048,
kFlagPushBlankBuffersOnShutdown = 4096,
};
struct BufferInfo {
- uint32_t mBufferID;
- sp<ABuffer> mData;
- sp<NativeHandle> mNativeHandle;
- sp<RefBase> mMemRef;
- sp<ABuffer> mEncryptedData;
- sp<IMemory> mSharedEncryptedBuffer;
- sp<AMessage> mNotify;
- sp<AMessage> mFormat;
+ BufferInfo();
+
+ sp<MediaCodecBuffer> mData;
bool mOwnedByClient;
};
@@ -288,6 +303,7 @@
};
State mState;
+ uid_t mUid;
bool mReleasedByResourceManager;
sp<ALooper> mLooper;
sp<ALooper> mCodecLooper;
@@ -299,11 +315,12 @@
sp<Surface> mSurface;
SoftwareRenderer *mSoftRenderer;
+ MediaAnalyticsItem *mAnalyticsItem;
+
sp<AMessage> mOutputFormat;
sp<AMessage> mInputFormat;
sp<AMessage> mCallback;
sp<AMessage> mOnFrameRenderedNotification;
- sp<MemoryDealer> mDealer;
sp<IResourceManagerClient> mResourceManagerClient;
sp<ResourceManagerServiceProxy> mResourceManagerService;
@@ -329,7 +346,7 @@
Mutex mBufferLock;
List<size_t> mAvailPortBuffers[2];
- Vector<BufferInfo> mPortBuffers[2];
+ std::vector<BufferInfo> mPortBuffers[2];
int32_t mDequeueInputTimeoutGeneration;
sp<AReplyToken> mDequeueInputReplyID;
@@ -339,6 +356,8 @@
sp<ICrypto> mCrypto;
+ sp<IDescrambler> mDescrambler;
+
List<sp<ABuffer> > mCSD;
sp<AMessage> mActivityNotify;
@@ -346,7 +365,9 @@
bool mHaveInputSurface;
bool mHavePendingInputBuffers;
- MediaCodec(const sp<ALooper> &looper, pid_t pid);
+ std::shared_ptr<BufferChannelBase> mBufferChannel;
+
+ MediaCodec(const sp<ALooper> &looper, pid_t pid, uid_t uid);
static sp<CodecBase> GetCodecBase(const AString &name, bool nameIsType = false);
@@ -367,7 +388,7 @@
status_t getBufferAndFormat(
size_t portIndex, size_t index,
- sp<ABuffer> *buffer, sp<AMessage> *format);
+ sp<MediaCodecBuffer> *buffer, sp<AMessage> *format);
bool handleDequeueInputBuffer(const sp<AReplyToken> &replyID, bool newRequest = false);
bool handleDequeueOutputBuffer(const sp<AReplyToken> &replyID, bool newRequest = false);
@@ -380,6 +401,10 @@
status_t connectToSurface(const sp<Surface> &surface);
status_t disconnectFromSurface();
+ bool hasCryptoOrDescrambler() {
+ return mCrypto != NULL || mDescrambler != NULL;
+ }
+
void postActivityNotificationIfPossible();
void onInputBufferAvailable();
@@ -389,7 +414,7 @@
status_t onSetParameters(const sp<AMessage> ¶ms);
- status_t amendOutputFormatWithCodecSpecificData(const sp<ABuffer> &buffer);
+ status_t amendOutputFormatWithCodecSpecificData(const sp<MediaCodecBuffer> &buffer);
void updateBatteryStat();
bool isExecuting() const;
@@ -411,6 +436,8 @@
mStickyError = err;
}
+ void onReleaseCrypto(const sp<AMessage>& msg);
+
DISALLOW_EVIL_CONSTRUCTORS(MediaCodec);
};
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecList.h b/media/libstagefright/include/media/stagefright/MediaCodecList.h
index 44dbde0..430bc16 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecList.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecList.h
@@ -115,7 +115,6 @@
Vector<sp<MediaCodecInfo> > mCodecInfos;
sp<MediaCodecInfo> mCurrentInfo;
- sp<IOMX> mOMX;
MediaCodecList();
~MediaCodecList();
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecSource.h b/media/libstagefright/include/media/stagefright/MediaCodecSource.h
index 18b1955..02ba227 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecSource.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecSource.h
@@ -21,8 +21,7 @@
#include <media/stagefright/foundation/AHandlerReflector.h>
#include <media/stagefright/foundation/Mutexed.h>
#include <media/stagefright/MediaSource.h>
-
-#include <gui/IGraphicBufferConsumer.h>
+#include <media/stagefright/PersistentSurface.h>
namespace android {
@@ -44,7 +43,7 @@
const sp<ALooper> &looper,
const sp<AMessage> &format,
const sp<MediaSource> &source,
- const sp<IGraphicBufferConsumer> &consumer = NULL,
+ const sp<PersistentSurface> &persistentSurface = NULL,
uint32_t flags = 0);
bool isVideo() const { return mIsVideo; }
@@ -55,7 +54,8 @@
// MediaSource
virtual status_t start(MetaData *params = NULL);
virtual status_t stop();
- virtual status_t pause();
+ virtual status_t pause() { return pause(NULL); }
+ virtual status_t pause(MetaData *params);
virtual sp<MetaData> getFormat();
virtual status_t read(
MediaBuffer **buffer,
@@ -67,6 +67,12 @@
// for AHandlerReflector
void onMessageReceived(const sp<AMessage> &msg);
+ // Set GraphicBufferSource stop time. GraphicBufferSource will stop
+ // after receiving a buffer with timestamp larger or equal than stopTimeUs.
+ // All the buffers with timestamp larger or equal to stopTimeUs will be
+ // discarded. stopTimeUs uses SYSTEM_TIME_MONOTONIC time base.
+ status_t setStopStimeUs(int64_t stopTimeUs);
+
protected:
virtual ~MediaCodecSource();
@@ -80,6 +86,7 @@
kWhatStop,
kWhatPause,
kWhatSetInputBufferTimeOffset,
+ kWhatSetStopTimeOffset,
kWhatGetFirstSampleSystemTimeUs,
kWhatStopStalled,
};
@@ -88,17 +95,27 @@
const sp<ALooper> &looper,
const sp<AMessage> &outputFormat,
const sp<MediaSource> &source,
- const sp<IGraphicBufferConsumer> &consumer,
+ const sp<PersistentSurface> &persistentSurface,
uint32_t flags = 0);
status_t onStart(MetaData *params);
- void onPause();
+
+ // Pause the source at pauseStartTimeUs. For non-surface input,
+ // buffers will be dropped immediately. For surface input, buffers
+ // with timestamp smaller than pauseStartTimeUs will still be encoded.
+ // Buffers with timestamp larger or queal to pauseStartTimeUs will be
+ // dropped. pauseStartTimeUs uses SYSTEM_TIME_MONOTONIC time base.
+ void onPause(int64_t pauseStartTimeUs);
+
status_t init();
status_t initEncoder();
void releaseEncoder();
status_t feedEncoderInputBuffers();
- void suspend();
- void resume(int64_t skipFramesBeforeUs = -1ll);
+ // Resume GraphicBufferSource at resumeStartTimeUs. Buffers
+ // from GraphicBufferSource with timestamp larger or equal to
+ // resumeStartTimeUs will be encoded. resumeStartTimeUs uses
+ // SYSTEM_TIME_MONOTONIC time base.
+ void resume(int64_t resumeStartTimeUs = -1ll);
void signalEOS(status_t err = ERROR_END_OF_STREAM);
bool reachedEOS();
status_t postSynchronouslyAndReturnError(const sp<AMessage> &msg);
@@ -121,7 +138,7 @@
int32_t mEncoderDataSpace;
sp<AMessage> mEncoderActivityNotify;
sp<IGraphicBufferProducer> mGraphicBufferProducer;
- sp<IGraphicBufferConsumer> mGraphicBufferConsumer;
+ sp<PersistentSurface> mPersistentSurface;
List<MediaBuffer *> mInputBufferQueue;
List<size_t> mAvailEncoderInputIndices;
List<int64_t> mDecodingTimeQueue; // decoding time (us) for video
diff --git a/media/libstagefright/include/media/stagefright/MediaExtractor.h b/media/libstagefright/include/media/stagefright/MediaExtractor.h
index f383e72..a856b2b 100644
--- a/media/libstagefright/include/media/stagefright/MediaExtractor.h
+++ b/media/libstagefright/include/media/stagefright/MediaExtractor.h
@@ -20,9 +20,13 @@
#include <media/IMediaExtractor.h>
#include <media/IMediaSource.h>
+#include <media/MediaAnalyticsItem.h>
namespace android {
-
+namespace media {
+class ICas;
+};
+using namespace media;
class DataSource;
struct MediaSource;
class MetaData;
@@ -47,6 +51,8 @@
// returns an empty metadata object.
virtual sp<MetaData> getMetaData();
+ status_t getMetrics(Parcel *reply);
+
enum Flags {
CAN_SEEK_BACKWARD = 1, // the "seek 10secs back button"
CAN_SEEK_FORWARD = 2, // the "seek 10secs forward button"
@@ -59,26 +65,44 @@
virtual uint32_t flags() const;
// for DRM
- void setDrmFlag(bool flag) {
- mIsDrm = flag;
- };
- bool getDrmFlag() {
- return mIsDrm;
- }
virtual char* getDrmTrackInfo(size_t /*trackID*/, int * /*len*/) {
return NULL;
}
virtual void setUID(uid_t /*uid*/) {
}
+ virtual status_t setMediaCas(const sp<ICas>& /*cas*/) override {
+ return INVALID_OPERATION;
+ }
virtual const char * name() { return "<unspecified>"; }
protected:
MediaExtractor();
- virtual ~MediaExtractor() {}
+ virtual ~MediaExtractor();
+
+ MediaAnalyticsItem *mAnalyticsItem;
+
+ virtual void populateMetrics();
private:
- bool mIsDrm;
+
+ typedef bool (*SnifferFunc)(
+ const sp<DataSource> &source, String8 *mimeType,
+ float *confidence, sp<AMessage> *meta);
+
+ static Mutex gSnifferMutex;
+ static List<SnifferFunc> gSniffers;
+ static bool gSniffersRegistered;
+
+ // The sniffer can optionally fill in "meta" with an AMessage containing
+ // a dictionary of values that helps the corresponding extractor initialize
+ // its state without duplicating effort already exerted by the sniffer.
+ static void RegisterSniffer_l(SnifferFunc func);
+
+ static bool sniff(const sp<DataSource> &source,
+ String8 *mimeType, float *confidence, sp<AMessage> *meta);
+
+ static void RegisterDefaultSniffers();
MediaExtractor(const MediaExtractor &);
MediaExtractor &operator=(const MediaExtractor &);
diff --git a/media/libstagefright/include/media/stagefright/MediaFilter.h b/media/libstagefright/include/media/stagefright/MediaFilter.h
index 5725f88..0c10d11 100644
--- a/media/libstagefright/include/media/stagefright/MediaFilter.h
+++ b/media/libstagefright/include/media/stagefright/MediaFilter.h
@@ -21,7 +21,7 @@
namespace android {
-struct ABuffer;
+class ACodecBufferChannel;
struct GraphicBufferListener;
class MemoryDealer;
struct SimpleFilter;
@@ -29,8 +29,7 @@
struct MediaFilter : public CodecBase {
MediaFilter();
- virtual void setNotificationMessage(const sp<AMessage> &msg);
-
+ virtual std::shared_ptr<BufferChannelBase> getBufferChannel() override;
virtual void initiateAllocateComponent(const sp<AMessage> &msg);
virtual void initiateConfigureComponent(const sp<AMessage> &msg);
virtual void initiateCreateInputSurface();
@@ -48,25 +47,6 @@
virtual void onMessageReceived(const sp<AMessage> &msg);
- struct PortDescription : public CodecBase::PortDescription {
- virtual size_t countBuffers();
- virtual IOMX::buffer_id bufferIDAt(size_t index) const;
- virtual sp<ABuffer> bufferAt(size_t index) const;
-
- protected:
- PortDescription();
-
- private:
- friend struct MediaFilter;
-
- Vector<IOMX::buffer_id> mBufferIDs;
- Vector<sp<ABuffer> > mBuffers;
-
- void addBuffer(IOMX::buffer_id id, const sp<ABuffer> &buffer);
-
- DISALLOW_EVIL_CONSTRUCTORS(PortDescription);
- };
-
protected:
virtual ~MediaFilter();
@@ -82,7 +62,7 @@
int32_t mOutputFlags;
Status mStatus;
- sp<ABuffer> mData;
+ sp<MediaCodecBuffer> mData;
};
enum State {
@@ -121,7 +101,6 @@
int32_t mColorFormatIn, mColorFormatOut;
size_t mMaxInputSize, mMaxOutputSize;
int32_t mGeneration;
- sp<AMessage> mNotify;
sp<AMessage> mInputFormat;
sp<AMessage> mOutputFormat;
@@ -134,6 +113,8 @@
sp<SimpleFilter> mFilter;
sp<GraphicBufferListener> mGraphicBufferListener;
+ std::shared_ptr<ACodecBufferChannel> mBufferChannel;
+
// helper functions
void signalProcessBuffers();
void signalError(status_t error);
@@ -145,7 +126,6 @@
void postFillThisBuffer(BufferInfo *info);
void postDrainThisBuffer(BufferInfo *info);
void postEOS();
- void sendFormatChange();
void requestFillEmptyInput();
void processBuffers();
diff --git a/media/libstagefright/include/media/stagefright/MediaMuxer.h b/media/libstagefright/include/media/stagefright/MediaMuxer.h
index fa855a8..63c3ca5 100644
--- a/media/libstagefright/include/media/stagefright/MediaMuxer.h
+++ b/media/libstagefright/include/media/stagefright/MediaMuxer.h
@@ -45,8 +45,9 @@
// Please update media/java/android/media/MediaMuxer.java if the
// OutputFormat is updated.
enum OutputFormat {
- OUTPUT_FORMAT_MPEG_4 = 0,
- OUTPUT_FORMAT_WEBM = 1,
+ OUTPUT_FORMAT_MPEG_4 = 0,
+ OUTPUT_FORMAT_WEBM = 1,
+ OUTPUT_FORMAT_THREE_GPP = 2,
OUTPUT_FORMAT_LIST_END // must be last - used to validate format type
};
diff --git a/media/libstagefright/include/media/stagefright/MediaWriter.h b/media/libstagefright/include/media/stagefright/MediaWriter.h
index 2b19523..cd4af4d 100644
--- a/media/libstagefright/include/media/stagefright/MediaWriter.h
+++ b/media/libstagefright/include/media/stagefright/MediaWriter.h
@@ -50,6 +50,7 @@
virtual void setStartTimeOffsetMs(int /*ms*/) {}
virtual int32_t getStartTimeOffsetMs() const { return 0; }
+ virtual status_t setNextFd(int /*fd*/) { return INVALID_OPERATION; }
protected:
virtual ~MediaWriter() {}
diff --git a/media/libstagefright/include/media/stagefright/MetaData.h b/media/libstagefright/include/media/stagefright/MetaData.h
index 6ba7b32..7afd22d 100644
--- a/media/libstagefright/include/media/stagefright/MetaData.h
+++ b/media/libstagefright/include/media/stagefright/MetaData.h
@@ -176,6 +176,8 @@
kKeyCryptoDefaultIVSize = 'cryS', // int32_t
kKeyPssh = 'pssh', // raw data
+ kKeyCASystemID = 'caid', // int32_t
+ kKeyCASessionID = 'seid', // raw data
// Please see MediaFormat.KEY_IS_AUTOSELECT.
kKeyTrackIsAutoselect = 'auto', // bool (int32_t)
diff --git a/media/libstagefright/include/media/stagefright/NuMediaExtractor.h b/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
index dd31447..3e3cc17 100644
--- a/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
+++ b/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
@@ -28,6 +28,10 @@
#include <utils/Vector.h>
namespace android {
+namespace media {
+class ICas;
+}
+using namespace media;
struct ABuffer;
struct AMessage;
@@ -60,6 +64,8 @@
status_t setDataSource(const sp<DataSource> &datasource);
+ status_t setMediaCas(const sp<ICas> &cas);
+
size_t countTracks() const;
status_t getTrackFormat(size_t index, sp<AMessage> *format, uint32_t flags = 0) const;
@@ -78,6 +84,7 @@
status_t getSampleTrackIndex(size_t *trackIndex);
status_t getSampleTime(int64_t *sampleTimeUs);
status_t getSampleMeta(sp<MetaData> *sampleMeta);
+ status_t getMetrics(Parcel *reply);
bool getCachedDuration(int64_t *durationUs, bool *eos) const;
@@ -108,7 +115,7 @@
sp<DataSource> mDataSource;
sp<IMediaExtractor> mImpl;
- bool mIsWidevineExtractor;
+ sp<ICas> mCas;
Vector<TrackInfo> mSelectedTracks;
int64_t mTotalBitrate; // in bits/sec
diff --git a/media/libstagefright/include/media/stagefright/OMXClient.h b/media/libstagefright/include/media/stagefright/OMXClient.h
index 2f14d06..203a181 100644
--- a/media/libstagefright/include/media/stagefright/OMXClient.h
+++ b/media/libstagefright/include/media/stagefright/OMXClient.h
@@ -18,15 +18,19 @@
#define OMX_CLIENT_H_
-#include <media/IOMX.h>
-
namespace android {
+class IOMX;
+
class OMXClient {
public:
OMXClient();
status_t connect();
+ status_t connect(bool* trebleFlag);
+
+ status_t connectLegacy();
+ status_t connectTreble();
void disconnect();
sp<IOMX> interface() {
diff --git a/media/libstagefright/include/media/stagefright/PersistentSurface.h b/media/libstagefright/include/media/stagefright/PersistentSurface.h
index a35b9f1..d8b75a2 100644
--- a/media/libstagefright/include/media/stagefright/PersistentSurface.h
+++ b/media/libstagefright/include/media/stagefright/PersistentSurface.h
@@ -19,29 +19,46 @@
#define PERSISTENT_SURFACE_H_
#include <gui/IGraphicBufferProducer.h>
-#include <gui/IGraphicBufferConsumer.h>
+#include <android/IGraphicBufferSource.h>
#include <media/stagefright/foundation/ABase.h>
+#include <binder/Parcel.h>
namespace android {
struct PersistentSurface : public RefBase {
+ PersistentSurface() {}
+
PersistentSurface(
const sp<IGraphicBufferProducer>& bufferProducer,
- const sp<IGraphicBufferConsumer>& bufferConsumer) :
+ const sp<IGraphicBufferSource>& bufferSource) :
mBufferProducer(bufferProducer),
- mBufferConsumer(bufferConsumer) { }
+ mBufferSource(bufferSource) { }
sp<IGraphicBufferProducer> getBufferProducer() const {
return mBufferProducer;
}
- sp<IGraphicBufferConsumer> getBufferConsumer() const {
- return mBufferConsumer;
+ sp<IGraphicBufferSource> getBufferSource() const {
+ return mBufferSource;
+ }
+
+ status_t writeToParcel(Parcel *parcel) const {
+ parcel->writeStrongBinder(IInterface::asBinder(mBufferProducer));
+ parcel->writeStrongBinder(IInterface::asBinder(mBufferSource));
+ return NO_ERROR;
+ }
+
+ status_t readFromParcel(const Parcel *parcel) {
+ mBufferProducer = interface_cast<IGraphicBufferProducer>(
+ parcel->readStrongBinder());
+ mBufferSource = interface_cast<IGraphicBufferSource>(
+ parcel->readStrongBinder());
+ return NO_ERROR;
}
private:
- const sp<IGraphicBufferProducer> mBufferProducer;
- const sp<IGraphicBufferConsumer> mBufferConsumer;
+ sp<IGraphicBufferProducer> mBufferProducer;
+ sp<IGraphicBufferSource> mBufferSource;
DISALLOW_EVIL_CONSTRUCTORS(PersistentSurface);
};
diff --git a/media/libstagefright/include/media/stagefright/RemoteDataSource.h b/media/libstagefright/include/media/stagefright/RemoteDataSource.h
new file mode 100644
index 0000000..c91ddfc
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/RemoteDataSource.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef REMOTE_DATA_SOURCE_H_
+#define REMOTE_DATA_SOURCE_H_
+
+#include <binder/IMemory.h>
+#include <binder/MemoryDealer.h>
+#include <media/IDataSource.h>
+#include <media/stagefright/DataSource.h>
+
+namespace android {
+
+// Originally in MediaExtractor.cpp
+class RemoteDataSource : public BnDataSource {
+public:
+ static sp<IDataSource> wrap(const sp<DataSource> &source) {
+ if (source.get() == nullptr) {
+ return nullptr;
+ }
+ if (source->getIDataSource().get() != nullptr) {
+ return source->getIDataSource();
+ }
+ return new RemoteDataSource(source);
+ }
+
+ virtual ~RemoteDataSource() {
+ close();
+ }
+ virtual sp<IMemory> getIMemory() {
+ return mMemory;
+ }
+ virtual ssize_t readAt(off64_t offset, size_t size) {
+ ALOGV("readAt(%lld, %zu)", (long long)offset, size);
+ if (size > kBufferSize) {
+ size = kBufferSize;
+ }
+ return mSource->readAt(offset, mMemory->pointer(), size);
+ }
+ virtual status_t getSize(off64_t *size) {
+ return mSource->getSize(size);
+ }
+ virtual void close() {
+ // Protect strong pointer assignments. This also can be called from the binder
+ // clean-up procedure which is running on a separate thread.
+ Mutex::Autolock lock(mCloseLock);
+ mSource = nullptr;
+ mMemory = nullptr;
+ }
+ virtual uint32_t getFlags() {
+ return mSource->flags();
+ }
+ virtual String8 toString() {
+ return mName;
+ }
+ virtual sp<DecryptHandle> DrmInitialization(const char *mime) {
+ return mSource->DrmInitialization(mime);
+ }
+
+private:
+ enum {
+ kBufferSize = 64 * 1024,
+ };
+
+ sp<IMemory> mMemory;
+ sp<DataSource> mSource;
+ String8 mName;
+ Mutex mCloseLock;
+
+ explicit RemoteDataSource(const sp<DataSource> &source) {
+ mSource = source;
+ sp<MemoryDealer> memoryDealer = new MemoryDealer(kBufferSize, "RemoteDataSource");
+ mMemory = memoryDealer->allocate(kBufferSize);
+ if (mMemory.get() == nullptr) {
+ ALOGE("Failed to allocate memory!");
+ }
+ mName = String8::format("RemoteDataSource(%s)", mSource->toString().string());
+ }
+
+ DISALLOW_EVIL_CONSTRUCTORS(RemoteDataSource);
+};
+
+} // namespace android
+
+#endif // REMOTE_DATA_SOURCE_H_
diff --git a/media/libstagefright/include/media/stagefright/SkipCutBuffer.h b/media/libstagefright/include/media/stagefright/SkipCutBuffer.h
index 61f9949..0fb5690 100644
--- a/media/libstagefright/include/media/stagefright/SkipCutBuffer.h
+++ b/media/libstagefright/include/media/stagefright/SkipCutBuffer.h
@@ -18,6 +18,7 @@
#define SKIP_CUT_BUFFER_H_
+#include <media/MediaCodecBuffer.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/foundation/ABuffer.h>
@@ -39,6 +40,7 @@
// After this, the caller should continue processing the buffer as usual.
void submit(MediaBuffer *buffer);
void submit(const sp<ABuffer>& buffer); // same as above, but with an ABuffer
+ void submit(const sp<MediaCodecBuffer>& buffer); // same as above, but with a MediaCodecBuffer
void clear();
size_t size(); // how many bytes are currently stored in the buffer
@@ -48,6 +50,8 @@
private:
void write(const char *src, size_t num);
size_t read(char *dst, size_t num);
+ template <typename T>
+ void submitInternal(const sp<T>& buffer);
int32_t mSkip;
int32_t mFrontPadding;
int32_t mBackPadding;
diff --git a/media/libstagefright/include/media/stagefright/SurfaceMediaSource.h b/media/libstagefright/include/media/stagefright/SurfaceMediaSource.h
index 2c11e84..d38c337 100644
--- a/media/libstagefright/include/media/stagefright/SurfaceMediaSource.h
+++ b/media/libstagefright/include/media/stagefright/SurfaceMediaSource.h
@@ -32,7 +32,6 @@
namespace android {
// ----------------------------------------------------------------------------
-class IGraphicBufferAlloc;
class String8;
class GraphicBuffer;
diff --git a/media/libstagefright/include/media/stagefright/SurfaceUtils.h b/media/libstagefright/include/media/stagefright/SurfaceUtils.h
index 13d580c..a7747c7 100644
--- a/media/libstagefright/include/media/stagefright/SurfaceUtils.h
+++ b/media/libstagefright/include/media/stagefright/SurfaceUtils.h
@@ -33,6 +33,8 @@
ANativeWindow *nativeWindow /* nonnull */,
int width, int height, int format, int rotation, int usage, bool reconnect);
status_t pushBlankBuffersToNativeWindow(ANativeWindow *nativeWindow /* nonnull */);
+status_t nativeWindowConnect(ANativeWindow *surface, const char *reason);
+status_t nativeWindowDisconnect(ANativeWindow *surface, const char *reason);
} // namespace android
diff --git a/media/libstagefright/include/media/stagefright/Utils.h b/media/libstagefright/include/media/stagefright/Utils.h
index 8eff914..88a416a 100644
--- a/media/libstagefright/include/media/stagefright/Utils.h
+++ b/media/libstagefright/include/media/stagefright/Utils.h
@@ -23,6 +23,7 @@
#include <utils/Errors.h>
#include <utils/RefBase.h>
#include <system/audio.h>
+#include <media/BufferingSettings.h>
#include <media/MediaPlayerInterface.h>
namespace android {
@@ -90,6 +91,9 @@
void readFromAMessage(
const sp<AMessage> &msg, AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */);
+void writeToAMessage(const sp<AMessage> &msg, const BufferingSettings &buffering);
+void readFromAMessage(const sp<AMessage> &msg, BufferingSettings *buffering /* nonnull */);
+
AString nameForFd(int fd);
} // namespace android
diff --git a/media/libstagefright/matroska/Android.bp b/media/libstagefright/matroska/Android.bp
index bbfd06c..a5891c3 100644
--- a/media/libstagefright/matroska/Android.bp
+++ b/media/libstagefright/matroska/Android.bp
@@ -20,6 +20,10 @@
"signed-integer-overflow",
"unsigned-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
shared_libs: ["libmedia"],
diff --git a/media/libstagefright/matroska/MatroskaExtractor.cpp b/media/libstagefright/matroska/MatroskaExtractor.cpp
index 8e82486..bea4ced 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.cpp
+++ b/media/libstagefright/matroska/MatroskaExtractor.cpp
@@ -139,6 +139,7 @@
enum Type {
AVC,
AAC,
+ HEVC,
OTHER
};
@@ -147,7 +148,7 @@
Type mType;
bool mIsAudio;
BlockIterator mBlockIter;
- ssize_t mNALSizeLen; // for type AVC
+ ssize_t mNALSizeLen; // for type AVC or HEVC
List<MediaBuffer *> mPendingFrames;
@@ -243,6 +244,19 @@
} else {
ALOGE("No mNALSizeLen");
}
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)) {
+ mType = HEVC;
+
+ uint32_t dummy;
+ const uint8_t *hvcc;
+ size_t hvccSize;
+ if (meta->findData(kKeyHVCC, &dummy, (const void **)&hvcc, &hvccSize)
+ && hvccSize >= 22u) {
+ mNALSizeLen = 1 + (hvcc[14+7] & 3);
+ ALOGV("mNALSizeLen = %zu", mNALSizeLen);
+ } else {
+ ALOGE("No mNALSizeLen");
+ }
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
mType = AAC;
}
@@ -377,6 +391,16 @@
*actualFrameTimeUs = -1ll;
+ if (seekTimeUs > INT64_MAX / 1000ll ||
+ seekTimeUs < INT64_MIN / 1000ll ||
+ (mExtractor->mSeekPreRollNs > 0 &&
+ (seekTimeUs * 1000ll) < INT64_MIN + mExtractor->mSeekPreRollNs) ||
+ (mExtractor->mSeekPreRollNs < 0 &&
+ (seekTimeUs * 1000ll) > INT64_MAX + mExtractor->mSeekPreRollNs)) {
+ ALOGE("cannot seek to %lld", (long long) seekTimeUs);
+ return;
+ }
+
const int64_t seekTimeNs = seekTimeUs * 1000ll - mExtractor->mSeekPreRollNs;
mkvparser::Segment* const pSegment = mExtractor->mSegment;
@@ -605,16 +629,27 @@
int64_t timeUs = mBlockIter.blockTimeUs();
for (int i = 0; i < block->GetFrameCount(); ++i) {
+ MatroskaExtractor::TrackInfo *trackInfo = &mExtractor->mTracks.editItemAt(mTrackIndex);
const mkvparser::Block::Frame &frame = block->GetFrame(i);
+ size_t len = frame.len;
+ if (SIZE_MAX - len < trackInfo->mHeaderLen) {
+ return ERROR_MALFORMED;
+ }
- MediaBuffer *mbuf = new MediaBuffer(frame.len);
+ len += trackInfo->mHeaderLen;
+ MediaBuffer *mbuf = new MediaBuffer(len);
+ uint8_t *data = static_cast<uint8_t *>(mbuf->data());
+ if (trackInfo->mHeader) {
+ memcpy(data, trackInfo->mHeader, trackInfo->mHeaderLen);
+ }
+
mbuf->meta_data()->setInt64(kKeyTime, timeUs);
mbuf->meta_data()->setInt32(kKeyIsSyncFrame, block->IsKey());
- status_t err = frame.Read(mExtractor->mReader, static_cast<uint8_t *>(mbuf->data()));
+ status_t err = frame.Read(mExtractor->mReader, data + trackInfo->mHeaderLen);
if (err == OK
&& mExtractor->mIsWebm
- && mExtractor->mTracks.itemAt(mTrackIndex).mEncrypted) {
+ && trackInfo->mEncrypted) {
err = setWebmBlockCryptoInfo(mbuf);
}
@@ -670,7 +705,7 @@
MediaBuffer *frame = *mPendingFrames.begin();
mPendingFrames.erase(mPendingFrames.begin());
- if (mType != AVC || mNALSizeLen == 0) {
+ if ((mType != AVC && mType != HEVC) || mNALSizeLen == 0) {
if (targetSampleTimeUs >= 0ll) {
frame->meta_data()->setInt64(
kKeyTargetTime, targetSampleTimeUs);
@@ -1164,6 +1199,42 @@
}
}
+status_t MatroskaExtractor::initTrackInfo(
+ const mkvparser::Track *track, const sp<MetaData> &meta, TrackInfo *trackInfo) {
+ trackInfo->mTrackNum = track->GetNumber();
+ trackInfo->mMeta = meta;
+ trackInfo->mExtractor = this;
+ trackInfo->mEncrypted = false;
+ trackInfo->mHeader = NULL;
+ trackInfo->mHeaderLen = 0;
+
+ for(size_t i = 0; i < track->GetContentEncodingCount(); i++) {
+ const mkvparser::ContentEncoding *encoding = track->GetContentEncodingByIndex(i);
+ for(size_t j = 0; j < encoding->GetEncryptionCount(); j++) {
+ const mkvparser::ContentEncoding::ContentEncryption *encryption;
+ encryption = encoding->GetEncryptionByIndex(j);
+ trackInfo->mMeta->setData(kKeyCryptoKey, 0, encryption->key_id, encryption->key_id_len);
+ trackInfo->mEncrypted = true;
+ break;
+ }
+
+ for(size_t j = 0; j < encoding->GetCompressionCount(); j++) {
+ const mkvparser::ContentEncoding::ContentCompression *compression;
+ compression = encoding->GetCompressionByIndex(j);
+ ALOGV("compression algo %llu settings_len %lld",
+ compression->algo, compression->settings_len);
+ if (compression->algo == 3
+ && compression->settings
+ && compression->settings_len > 0) {
+ trackInfo->mHeader = compression->settings;
+ trackInfo->mHeaderLen = compression->settings_len;
+ }
+ }
+ }
+
+ return OK;
+}
+
void MatroskaExtractor::addTracks() {
const mkvparser::Tracks *tracks = mSegment->GetTracks();
@@ -1204,6 +1275,14 @@
if (!strcmp("V_MPEG4/ISO/AVC", codecID)) {
meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
meta->setData(kKeyAVCC, 0, codecPrivate, codecPrivateSize);
+ } else if (!strcmp("V_MPEGH/ISO/HEVC", codecID)) {
+ meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_HEVC);
+ if (codecPrivateSize > 0) {
+ meta->setData(kKeyHVCC, kTypeHVCC, codecPrivate, codecPrivateSize);
+ } else {
+ ALOGW("HEVC is detected, but does not have configuration.");
+ continue;
+ }
} else if (!strcmp("V_MPEG4/ISO/ASP", codecID)) {
if (codecPrivateSize > 0) {
meta->setCString(
@@ -1231,8 +1310,51 @@
continue;
}
- meta->setInt32(kKeyWidth, vtrack->GetWidth());
- meta->setInt32(kKeyHeight, vtrack->GetHeight());
+ const long long width = vtrack->GetWidth();
+ const long long height = vtrack->GetHeight();
+ if (width <= 0 || width > INT32_MAX) {
+ ALOGW("track width exceeds int32_t, %lld", width);
+ continue;
+ }
+ if (height <= 0 || height > INT32_MAX) {
+ ALOGW("track height exceeds int32_t, %lld", height);
+ continue;
+ }
+ meta->setInt32(kKeyWidth, (int32_t)width);
+ meta->setInt32(kKeyHeight, (int32_t)height);
+
+ // setting display width/height is optional
+ const long long displayUnit = vtrack->GetDisplayUnit();
+ const long long displayWidth = vtrack->GetDisplayWidth();
+ const long long displayHeight = vtrack->GetDisplayHeight();
+ if (displayWidth > 0 && displayWidth <= INT32_MAX
+ && displayHeight > 0 && displayHeight <= INT32_MAX) {
+ switch (displayUnit) {
+ case 0: // pixels
+ meta->setInt32(kKeyDisplayWidth, (int32_t)displayWidth);
+ meta->setInt32(kKeyDisplayHeight, (int32_t)displayHeight);
+ break;
+ case 1: // centimeters
+ case 2: // inches
+ case 3: // aspect ratio
+ {
+ // Physical layout size is treated the same as aspect ratio.
+ // Note: displayWidth and displayHeight are never zero as they are
+ // checked in the if above.
+ const long long computedWidth =
+ std::max(width, height * displayWidth / displayHeight);
+ const long long computedHeight =
+ std::max(height, width * displayHeight / displayWidth);
+ if (computedWidth <= INT32_MAX && computedHeight <= INT32_MAX) {
+ meta->setInt32(kKeyDisplayWidth, (int32_t)computedWidth);
+ meta->setInt32(kKeyDisplayHeight, (int32_t)computedHeight);
+ }
+ break;
+ }
+ default: // unknown display units, perhaps future version of spec.
+ break;
+ }
+ }
getColorInformation(vtrack, meta);
@@ -1288,21 +1410,7 @@
mTracks.push();
size_t n = mTracks.size() - 1;
TrackInfo *trackInfo = &mTracks.editItemAt(n);
- trackInfo->mTrackNum = track->GetNumber();
- trackInfo->mMeta = meta;
- trackInfo->mExtractor = this;
-
- trackInfo->mEncrypted = false;
- for(size_t i = 0; i < track->GetContentEncodingCount() && !trackInfo->mEncrypted; i++) {
- const mkvparser::ContentEncoding *encoding = track->GetContentEncodingByIndex(i);
- for(size_t j = 0; j < encoding->GetEncryptionCount(); j++) {
- const mkvparser::ContentEncoding::ContentEncryption *encryption;
- encryption = encoding->GetEncryptionByIndex(j);
- meta->setData(kKeyCryptoKey, 0, encryption->key_id, encryption->key_id_len);
- trackInfo->mEncrypted = true;
- break;
- }
- }
+ initTrackInfo(track, meta, trackInfo);
if (!strcmp("V_MPEG4/ISO/AVC", codecID) && codecPrivateSize == 0) {
// Attempt to recover from AVC track without codec private data
diff --git a/media/libstagefright/matroska/MatroskaExtractor.h b/media/libstagefright/matroska/MatroskaExtractor.h
index 588bd39c..19775ce 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.h
+++ b/media/libstagefright/matroska/MatroskaExtractor.h
@@ -63,6 +63,12 @@
const MatroskaExtractor *mExtractor;
Vector<const mkvparser::CuePoint*> mCuePoints;
+ // mHeader points to memory managed by mkvparser;
+ // mHeader would be deleted when mSegment is deleted
+ // in ~MatroskaExtractor.
+ unsigned char *mHeader;
+ size_t mHeaderLen;
+
const mkvparser::Track* getTrack() const;
const mkvparser::CuePoint::TrackPosition *find(long long timeNs) const;
};
@@ -79,6 +85,7 @@
int64_t mSeekPreRollNs;
status_t synthesizeAVCC(TrackInfo *trackInfo, size_t index);
+ status_t initTrackInfo(const mkvparser::Track *track, const sp<MetaData> &meta, TrackInfo *trackInfo);
void addTracks();
void findThumbnails();
void getColorInformation(const mkvparser::VideoTrack *vtrack, sp<MetaData> &meta);
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 844479e..31edb21 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -17,13 +17,14 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "ATSParser"
#include <utils/Log.h>
-
#include "ATSParser.h"
-
#include "AnotherPacketSource.h"
+#include "CasManager.h"
#include "ESQueue.h"
#include "include/avc_utils.h"
+#include <android/media/IDescrambler.h>
+#include <binder/MemoryDealer.h>
#include <media/stagefright/foundation/ABitReader.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -40,6 +41,8 @@
#include <inttypes.h>
namespace android {
+using binder::Status;
+using MediaDescrambler::DescrambleInfo;
// I want the expression "y" evaluated even if verbose logging is off.
#define MY_LOGV(x, y) \
@@ -60,6 +63,8 @@
bool parsePID(
unsigned pid, unsigned continuity_counter,
unsigned payload_unit_start_indicator,
+ unsigned transport_scrambling_control,
+ unsigned random_access_indicator,
ABitReader *br, status_t *err, SyncEvent *event);
void signalDiscontinuity(
@@ -90,10 +95,23 @@
return mParser->mFlags;
}
+ sp<CasManager> casManager() const {
+ return mParser->mCasManager;
+ }
+
+ uint64_t firstPTS() const {
+ return mFirstPTS;
+ }
+
+ void updateCasSessions();
+
+ void signalNewSampleAesKey(const sp<AMessage> &keyItem);
+
private:
struct StreamInfo {
unsigned mType;
unsigned mPID;
+ int32_t mCASystemId;
};
ATSParser *mParser;
@@ -103,9 +121,12 @@
bool mFirstPTSValid;
uint64_t mFirstPTS;
int64_t mLastRecoveredPTS;
+ sp<AMessage> mSampleAesKeyItem;
status_t parseProgramMap(ABitReader *br);
int64_t recoverPTS(uint64_t PTS_33bit);
+ bool findCADescriptor(
+ ABitReader *br, unsigned infoLength, CADescriptor *caDescriptor);
bool switchPIDs(const Vector<StreamInfo> &infos);
DISALLOW_EVIL_CONSTRUCTORS(Program);
@@ -115,18 +136,26 @@
Stream(Program *program,
unsigned elementaryPID,
unsigned streamType,
- unsigned PCR_PID);
+ unsigned PCR_PID,
+ int32_t CA_system_ID);
unsigned type() const { return mStreamType; }
unsigned pid() const { return mElementaryPID; }
void setPID(unsigned pid) { mElementaryPID = pid; }
+ void setCasInfo(
+ int32_t systemId,
+ const sp<IDescrambler> &descrambler,
+ const std::vector<uint8_t> &sessionId);
+
// Parse the payload and set event when PES with a sync frame is detected.
// This method knows when a PES starts; so record mPesStartOffsets in that
// case.
status_t parse(
unsigned continuity_counter,
unsigned payload_unit_start_indicator,
+ unsigned transport_scrambling_control,
+ unsigned random_access_indicator,
ABitReader *br,
SyncEvent *event);
@@ -135,16 +164,24 @@
void signalEOS(status_t finalResult);
+ SourceType getSourceType();
sp<MediaSource> getSource(SourceType type);
bool isAudio() const;
bool isVideo() const;
bool isMeta() const;
+ void signalNewSampleAesKey(const sp<AMessage> &keyItem);
+
protected:
virtual ~Stream();
private:
+ struct SubSampleInfo {
+ size_t subSampleSize;
+ unsigned transport_scrambling_mode;
+ unsigned random_access_indicator;
+ };
Program *mProgram;
unsigned mElementaryPID;
unsigned mStreamType;
@@ -161,10 +198,28 @@
ElementaryStreamQueue *mQueue;
+ bool mScrambled;
+ bool mSampleEncrypted;
+ sp<AMessage> mSampleAesKeyItem;
+ sp<IMemory> mMem;
+ sp<MemoryDealer> mDealer;
+ sp<ABuffer> mDescrambledBuffer;
+ List<SubSampleInfo> mSubSamples;
+ sp<IDescrambler> mDescrambler;
+
// Flush accumulated payload if necessary --- i.e. at EOS or at the start of
// another payload. event is set if the flushed payload is PES with a sync
// frame.
status_t flush(SyncEvent *event);
+
+ // Flush accumulated payload for scrambled streams if necessary --- i.e. at
+ // EOS or at the start of another payload. event is set if the flushed
+ // payload is PES with a sync frame.
+ status_t flushScrambled(SyncEvent *event);
+
+ // Check if a PES packet is scrambled at PES level.
+ uint32_t getPesScramblingControl(ABitReader *br, int32_t *pesOffset);
+
// Strip and parse PES headers and pass remaining payload into onPayload
// with parsed metadata. event is set if the PES contains a sync frame.
status_t parsePES(ABitReader *br, SyncEvent *event);
@@ -174,7 +229,13 @@
// and timestamp of the packet.
void onPayloadData(
unsigned PTS_DTS_flags, uint64_t PTS, uint64_t DTS,
- const uint8_t *data, size_t size, SyncEvent *event);
+ unsigned PES_scrambling_control,
+ const uint8_t *data, size_t size,
+ int32_t payloadOffset, SyncEvent *event);
+
+ // Ensure internal buffers can hold specified size, and will re-allocate
+ // as needed.
+ void ensureBufferCapacity(size_t size);
DISALLOW_EVIL_CONSTRUCTORS(Stream);
};
@@ -208,11 +269,12 @@
: mHasReturnedData(false), mOffset(offset), mTimeUs(0) {}
void ATSParser::SyncEvent::init(off64_t offset, const sp<MediaSource> &source,
- int64_t timeUs) {
+ int64_t timeUs, SourceType type) {
mHasReturnedData = true;
mOffset = offset;
mMediaSource = source;
mTimeUs = timeUs;
+ mType = type;
}
void ATSParser::SyncEvent::reset() {
@@ -248,6 +310,8 @@
bool ATSParser::Program::parsePID(
unsigned pid, unsigned continuity_counter,
unsigned payload_unit_start_indicator,
+ unsigned transport_scrambling_control,
+ unsigned random_access_indicator,
ABitReader *br, status_t *err, SyncEvent *event) {
*err = OK;
@@ -257,7 +321,11 @@
}
*err = mStreams.editValueAt(index)->parse(
- continuity_counter, payload_unit_start_indicator, br, event);
+ continuity_counter,
+ payload_unit_start_indicator,
+ transport_scrambling_control,
+ random_access_indicator,
+ br, event);
return true;
}
@@ -353,6 +421,38 @@
return success;
}
+bool ATSParser::Program::findCADescriptor(
+ ABitReader *br, unsigned infoLength,
+ ATSParser::CADescriptor *caDescriptor) {
+ bool found = false;
+ while (infoLength > 2) {
+ unsigned descriptor_tag = br->getBits(8);
+ ALOGV(" tag = 0x%02x", descriptor_tag);
+
+ unsigned descriptor_length = br->getBits(8);
+ ALOGV(" len = %u", descriptor_length);
+
+ infoLength -= 2;
+ if (descriptor_length > infoLength) {
+ break;
+ }
+ if (descriptor_tag == 9 && descriptor_length >= 4) {
+ found = true;
+ caDescriptor->mSystemID = br->getBits(16);
+ caDescriptor->mPID = br->getBits(16) & 0x1fff;
+ infoLength -= 4;
+ caDescriptor->mPrivateData.assign(
+ br->data(), br->data() + descriptor_length - 4);
+ break;
+ } else {
+ infoLength -= descriptor_length;
+ br->skipBits(descriptor_length * 8);
+ }
+ }
+ br->skipBits(infoLength * 8);
+ return found;
+}
+
status_t ATSParser::Program::parseProgramMap(ABitReader *br) {
unsigned table_id = br->getBits(8);
ALOGV(" table_id = %u", table_id);
@@ -389,7 +489,13 @@
unsigned program_info_length = br->getBits(12);
ALOGV(" program_info_length = %u", program_info_length);
- br->skipBits(program_info_length * 8); // skip descriptors
+ // descriptors
+ CADescriptor programCA;
+ bool hasProgramCA = findCADescriptor(br, program_info_length, &programCA);
+ if (hasProgramCA && !mParser->mCasManager->addProgram(
+ mProgramNumber, programCA)) {
+ return ERROR_MALFORMED;
+ }
Vector<StreamInfo> infos;
@@ -413,28 +519,17 @@
unsigned ES_info_length = br->getBits(12);
ALOGV(" ES_info_length = %u", ES_info_length);
-#if 0
- br->skipBits(ES_info_length * 8); // skip descriptors
-#else
- unsigned info_bytes_remaining = ES_info_length;
- while (info_bytes_remaining >= 2) {
- MY_LOGV(" tag = 0x%02x", br->getBits(8));
-
- unsigned descLength = br->getBits(8);
- ALOGV(" len = %u", descLength);
-
- if (info_bytes_remaining < descLength) {
- return ERROR_MALFORMED;
- }
- br->skipBits(descLength * 8);
-
- info_bytes_remaining -= descLength + 2;
+ CADescriptor streamCA;
+ bool hasStreamCA = findCADescriptor(br, ES_info_length, &streamCA);
+ if (hasStreamCA && !mParser->mCasManager->addStream(
+ mProgramNumber, elementaryPID, streamCA)) {
+ return ERROR_MALFORMED;
}
-#endif
-
StreamInfo info;
info.mType = streamType;
info.mPID = elementaryPID;
+ info.mCASystemId = hasProgramCA ? programCA.mSystemID :
+ hasStreamCA ? streamCA.mSystemID : -1;
infos.push(info);
infoBytesRemaining -= 5 + ES_info_length;
@@ -484,19 +579,33 @@
}
}
+ bool isAddingScrambledStream = false;
for (size_t i = 0; i < infos.size(); ++i) {
StreamInfo &info = infos.editItemAt(i);
+ if (mParser->mCasManager->isCAPid(info.mPID)) {
+ // skip CA streams (EMM/ECM)
+ continue;
+ }
ssize_t index = mStreams.indexOfKey(info.mPID);
if (index < 0) {
sp<Stream> stream = new Stream(
- this, info.mPID, info.mType, PCR_PID);
+ this, info.mPID, info.mType, PCR_PID, info.mCASystemId);
+ if (mSampleAesKeyItem != NULL) {
+ stream->signalNewSampleAesKey(mSampleAesKeyItem);
+ }
+
+ isAddingScrambledStream |= info.mCASystemId >= 0;
mStreams.add(info.mPID, stream);
}
}
+ if (isAddingScrambledStream) {
+ ALOGI("Receiving scrambled streams without descrambler!");
+ return ERROR_DRM_DECRYPT_UNIT_NOT_INITIALIZED;
+ }
return OK;
}
@@ -580,13 +689,28 @@
return timeUs;
}
+void ATSParser::Program::updateCasSessions() {
+ for (size_t i = 0; i < mStreams.size(); ++i) {
+ sp<Stream> &stream = mStreams.editValueAt(i);
+ sp<IDescrambler> descrambler;
+ std::vector<uint8_t> sessionId;
+ int32_t systemId;
+ if (mParser->mCasManager->getCasInfo(mProgramNumber, stream->pid(),
+ &systemId, &descrambler, &sessionId)) {
+ stream->setCasInfo(systemId, descrambler, sessionId);
+ }
+ }
+}
+
////////////////////////////////////////////////////////////////////////////////
+static const size_t kInitialStreamBufferSize = 192 * 1024;
ATSParser::Stream::Stream(
Program *program,
unsigned elementaryPID,
unsigned streamType,
- unsigned PCR_PID)
+ unsigned PCR_PID,
+ int32_t CA_system_ID)
: mProgram(program),
mElementaryPID(elementaryPID),
mStreamType(streamType),
@@ -595,54 +719,86 @@
mPayloadStarted(false),
mEOSReached(false),
mPrevPTS(0),
- mQueue(NULL) {
+ mQueue(NULL),
+ mScrambled(CA_system_ID >= 0) {
+
+ mSampleEncrypted =
+ mStreamType == STREAMTYPE_H264_ENCRYPTED ||
+ mStreamType == STREAMTYPE_AAC_ENCRYPTED ||
+ mStreamType == STREAMTYPE_AC3_ENCRYPTED;
+
+ ALOGV("new stream PID 0x%02x, type 0x%02x, scrambled %d, SampleEncrypted: %d",
+ elementaryPID, streamType, mScrambled, mSampleEncrypted);
+
+ uint32_t flags =
+ (isVideo() && mScrambled) ? ElementaryStreamQueue::kFlag_ScrambledData :
+ (mSampleEncrypted) ? ElementaryStreamQueue::kFlag_SampleEncryptedData :
+ 0;
+
+ ElementaryStreamQueue::Mode mode = ElementaryStreamQueue::INVALID;
+
switch (mStreamType) {
case STREAMTYPE_H264:
- mQueue = new ElementaryStreamQueue(
- ElementaryStreamQueue::H264,
- (mProgram->parserFlags() & ALIGNED_VIDEO_DATA)
- ? ElementaryStreamQueue::kFlag_AlignedData : 0);
+ case STREAMTYPE_H264_ENCRYPTED:
+ mode = ElementaryStreamQueue::H264;
+ flags |= (mProgram->parserFlags() & ALIGNED_VIDEO_DATA) ?
+ ElementaryStreamQueue::kFlag_AlignedData : 0;
break;
+
case STREAMTYPE_MPEG2_AUDIO_ADTS:
- mQueue = new ElementaryStreamQueue(ElementaryStreamQueue::AAC);
+ case STREAMTYPE_AAC_ENCRYPTED:
+ mode = ElementaryStreamQueue::AAC;
break;
+
case STREAMTYPE_MPEG1_AUDIO:
case STREAMTYPE_MPEG2_AUDIO:
- mQueue = new ElementaryStreamQueue(
- ElementaryStreamQueue::MPEG_AUDIO);
+ mode = ElementaryStreamQueue::MPEG_AUDIO;
break;
case STREAMTYPE_MPEG1_VIDEO:
case STREAMTYPE_MPEG2_VIDEO:
- mQueue = new ElementaryStreamQueue(
- ElementaryStreamQueue::MPEG_VIDEO);
+ mode = ElementaryStreamQueue::MPEG_VIDEO;
break;
case STREAMTYPE_MPEG4_VIDEO:
- mQueue = new ElementaryStreamQueue(
- ElementaryStreamQueue::MPEG4_VIDEO);
+ mode = ElementaryStreamQueue::MPEG4_VIDEO;
break;
case STREAMTYPE_LPCM_AC3:
case STREAMTYPE_AC3:
- mQueue = new ElementaryStreamQueue(
- ElementaryStreamQueue::AC3);
+ case STREAMTYPE_AC3_ENCRYPTED:
+ mode = ElementaryStreamQueue::AC3;
break;
case STREAMTYPE_METADATA:
- mQueue = new ElementaryStreamQueue(
- ElementaryStreamQueue::METADATA);
+ mode = ElementaryStreamQueue::METADATA;
break;
default:
- break;
+ ALOGE("stream PID 0x%02x has invalid stream type 0x%02x",
+ elementaryPID, streamType);
+ return;
}
- ALOGV("new stream PID 0x%02x, type 0x%02x", elementaryPID, streamType);
+ mQueue = new ElementaryStreamQueue(mode, flags);
if (mQueue != NULL) {
- mBuffer = new ABuffer(192 * 1024);
- mBuffer->setRange(0, 0);
+ if (mSampleAesKeyItem != NULL) {
+ mQueue->signalNewSampleAesKey(mSampleAesKeyItem);
+ }
+
+ ensureBufferCapacity(kInitialStreamBufferSize);
+
+ if (mScrambled && (isAudio() || isVideo())) {
+ // Set initial format to scrambled
+ sp<MetaData> meta = new MetaData();
+ meta->setCString(kKeyMIMEType,
+ isAudio() ? MEDIA_MIMETYPE_AUDIO_SCRAMBLED
+ : MEDIA_MIMETYPE_VIDEO_SCRAMBLED);
+ // for MediaExtractor.CasInfo
+ meta->setInt32(kKeyCASystemID, CA_system_ID);
+ mSource = new AnotherPacketSource(meta);
+ }
}
}
@@ -651,10 +807,57 @@
mQueue = NULL;
}
+void ATSParser::Stream::ensureBufferCapacity(size_t neededSize) {
+ if (mBuffer != NULL && mBuffer->capacity() >= neededSize) {
+ return;
+ }
+
+ ALOGV("ensureBufferCapacity: current size %zu, new size %zu, scrambled %d",
+ mBuffer == NULL ? 0 : mBuffer->capacity(), neededSize, mScrambled);
+
+ sp<ABuffer> newBuffer, newScrambledBuffer;
+ sp<IMemory> newMem;
+ sp<MemoryDealer> newDealer;
+ if (mScrambled) {
+ size_t alignment = MemoryDealer::getAllocationAlignment();
+ neededSize = (neededSize + (alignment - 1)) & ~(alignment - 1);
+ // Align to multiples of 64K.
+ neededSize = (neededSize + 65535) & ~65535;
+ newDealer = new MemoryDealer(neededSize, "ATSParser");
+ newMem = newDealer->allocate(neededSize);
+ newScrambledBuffer = new ABuffer(newMem->pointer(), newMem->size());
+
+ if (mDescrambledBuffer != NULL) {
+ memcpy(newScrambledBuffer->data(),
+ mDescrambledBuffer->data(), mDescrambledBuffer->size());
+ newScrambledBuffer->setRange(0, mDescrambledBuffer->size());
+ } else {
+ newScrambledBuffer->setRange(0, 0);
+ }
+ mMem = newMem;
+ mDealer = newDealer;
+ mDescrambledBuffer = newScrambledBuffer;
+ } else {
+ // Align to multiples of 64K.
+ neededSize = (neededSize + 65535) & ~65535;
+ }
+
+ newBuffer = new ABuffer(neededSize);
+ if (mBuffer != NULL) {
+ memcpy(newBuffer->data(), mBuffer->data(), mBuffer->size());
+ newBuffer->setRange(0, mBuffer->size());
+ } else {
+ newBuffer->setRange(0, 0);
+ }
+ mBuffer = newBuffer;
+}
+
status_t ATSParser::Stream::parse(
unsigned continuity_counter,
- unsigned payload_unit_start_indicator, ABitReader *br,
- SyncEvent *event) {
+ unsigned payload_unit_start_indicator,
+ unsigned transport_scrambling_control,
+ unsigned random_access_indicator,
+ ABitReader *br, SyncEvent *event) {
if (mQueue == NULL) {
return OK;
}
@@ -666,6 +869,7 @@
mPayloadStarted = false;
mPesStartOffsets.clear();
mBuffer->setRange(0, 0);
+ mSubSamples.clear();
mExpectedContinuityCounter = -1;
#if 0
@@ -719,27 +923,23 @@
}
size_t neededSize = mBuffer->size() + payloadSizeBits / 8;
- if (mBuffer->capacity() < neededSize) {
- // Increment in multiples of 64K.
- neededSize = (neededSize + 65535) & ~65535;
-
- ALOGI("resizing buffer to %zu bytes", neededSize);
-
- sp<ABuffer> newBuffer = new ABuffer(neededSize);
- memcpy(newBuffer->data(), mBuffer->data(), mBuffer->size());
- newBuffer->setRange(0, mBuffer->size());
- mBuffer = newBuffer;
- }
+ ensureBufferCapacity(neededSize);
memcpy(mBuffer->data() + mBuffer->size(), br->data(), payloadSizeBits / 8);
mBuffer->setRange(0, mBuffer->size() + payloadSizeBits / 8);
+ if (mScrambled) {
+ mSubSamples.push_back({payloadSizeBits / 8,
+ transport_scrambling_control, random_access_indicator});
+ }
+
return OK;
}
bool ATSParser::Stream::isVideo() const {
switch (mStreamType) {
case STREAMTYPE_H264:
+ case STREAMTYPE_H264_ENCRYPTED:
case STREAMTYPE_MPEG1_VIDEO:
case STREAMTYPE_MPEG2_VIDEO:
case STREAMTYPE_MPEG4_VIDEO:
@@ -757,6 +957,8 @@
case STREAMTYPE_MPEG2_AUDIO_ADTS:
case STREAMTYPE_LPCM_AC3:
case STREAMTYPE_AC3:
+ case STREAMTYPE_AAC_ENCRYPTED:
+ case STREAMTYPE_AC3_ENCRYPTED:
return true;
default:
@@ -783,6 +985,7 @@
mPesStartOffsets.clear();
mEOSReached = false;
mBuffer->setRange(0, 0);
+ mSubSamples.clear();
bool clearFormat = false;
if (isAudio()) {
@@ -811,7 +1014,15 @@
}
if (mSource != NULL) {
- mSource->queueDiscontinuity(type, extra, true);
+ sp<MetaData> meta = mSource->getFormat();
+ const char* mime;
+ if (clearFormat && meta != NULL && meta->findCString(kKeyMIMEType, &mime)
+ && (!strncasecmp(mime, MEDIA_MIMETYPE_AUDIO_SCRAMBLED, 15)
+ || !strncasecmp(mime, MEDIA_MIMETYPE_VIDEO_SCRAMBLED, 15))){
+ mSource->clear();
+ } else {
+ mSource->queueDiscontinuity(type, extra, true);
+ }
}
}
@@ -824,6 +1035,8 @@
}
status_t ATSParser::Stream::parsePES(ABitReader *br, SyncEvent *event) {
+ const uint8_t *basePtr = br->data();
+
unsigned packet_startcode_prefix = br->getBits(24);
ALOGV("packet_startcode_prefix = 0x%08x", packet_startcode_prefix);
@@ -853,7 +1066,9 @@
return ERROR_MALFORMED;
}
- MY_LOGV("PES_scrambling_control = %u", br->getBits(2));
+ unsigned PES_scrambling_control = br->getBits(2);
+ ALOGV("PES_scrambling_control = %u", PES_scrambling_control);
+
MY_LOGV("PES_priority = %u", br->getBits(1));
MY_LOGV("data_alignment_indicator = %u", br->getBits(1));
MY_LOGV("copyright = %u", br->getBits(1));
@@ -986,6 +1201,7 @@
br->skipBits(optional_bytes_remaining * 8);
// ES data follows.
+ int32_t pesOffset = br->data() - basePtr;
if (PES_packet_length != 0) {
if (PES_packet_length < PES_header_data_length + 3) {
@@ -1003,21 +1219,26 @@
return ERROR_MALFORMED;
}
+ ALOGV("There's %u bytes of payload, PES_packet_length=%u, offset=%d",
+ dataLength, PES_packet_length, pesOffset);
+
onPayloadData(
- PTS_DTS_flags, PTS, DTS, br->data(), dataLength, event);
+ PTS_DTS_flags, PTS, DTS, PES_scrambling_control,
+ br->data(), dataLength, pesOffset, event);
br->skipBits(dataLength * 8);
} else {
onPayloadData(
- PTS_DTS_flags, PTS, DTS,
- br->data(), br->numBitsLeft() / 8, event);
+ PTS_DTS_flags, PTS, DTS, PES_scrambling_control,
+ br->data(), br->numBitsLeft() / 8, pesOffset, event);
size_t payloadSizeBits = br->numBitsLeft();
if (payloadSizeBits % 8 != 0u) {
return ERROR_MALFORMED;
}
- ALOGV("There's %zu bytes of payload.", payloadSizeBits / 8);
+ ALOGV("There's %zu bytes of payload, offset=%d",
+ payloadSizeBits / 8, pesOffset);
}
} else if (stream_id == 0xbe) { // padding_stream
if (PES_packet_length == 0u) {
@@ -1034,6 +1255,200 @@
return OK;
}
+uint32_t ATSParser::Stream::getPesScramblingControl(
+ ABitReader *br, int32_t *pesOffset) {
+ unsigned packet_startcode_prefix = br->getBits(24);
+
+ ALOGV("packet_startcode_prefix = 0x%08x", packet_startcode_prefix);
+
+ if (packet_startcode_prefix != 1) {
+ ALOGV("unit does not start with startcode.");
+ return 0;
+ }
+
+ if (br->numBitsLeft() < 48) {
+ return 0;
+ }
+
+ unsigned stream_id = br->getBits(8);
+ ALOGV("stream_id = 0x%02x", stream_id);
+
+ br->skipBits(16); // PES_packet_length
+
+ if (stream_id != 0xbc // program_stream_map
+ && stream_id != 0xbe // padding_stream
+ && stream_id != 0xbf // private_stream_2
+ && stream_id != 0xf0 // ECM
+ && stream_id != 0xf1 // EMM
+ && stream_id != 0xff // program_stream_directory
+ && stream_id != 0xf2 // DSMCC
+ && stream_id != 0xf8) { // H.222.1 type E
+ if (br->getBits(2) != 2u) {
+ return 0;
+ }
+
+ unsigned PES_scrambling_control = br->getBits(2);
+ ALOGV("PES_scrambling_control = %u", PES_scrambling_control);
+
+ if (PES_scrambling_control == 0) {
+ return 0;
+ }
+
+ br->skipBits(12); // don't care
+
+ unsigned PES_header_data_length = br->getBits(8);
+ ALOGV("PES_header_data_length = %u", PES_header_data_length);
+
+ if (PES_header_data_length * 8 > br->numBitsLeft()) {
+ return 0;
+ }
+
+ *pesOffset = 9 + PES_header_data_length;
+ ALOGD("found PES_scrambling_control=%d, PES offset=%d",
+ PES_scrambling_control, *pesOffset);
+ return PES_scrambling_control;
+ }
+
+ return 0;
+}
+
+status_t ATSParser::Stream::flushScrambled(SyncEvent *event) {
+ if (mDescrambler == NULL) {
+ ALOGE("received scrambled packets without descrambler!");
+ return UNKNOWN_ERROR;
+ }
+
+ if (mDescrambledBuffer == NULL || mMem == NULL) {
+ ALOGE("received scrambled packets without shared memory!");
+
+ return UNKNOWN_ERROR;
+ }
+
+ int32_t pesOffset = 0;
+ int32_t descrambleSubSamples = 0, descrambleBytes = 0;
+ uint32_t tsScramblingControl = 0, pesScramblingControl = 0;
+
+ // First, go over subsamples to find TS-level scrambling key id, and
+ // calculate how many subsample we need to descramble (assuming we don't
+ // have PES-level scrambling).
+ for (auto it = mSubSamples.begin(); it != mSubSamples.end(); it++) {
+ if (it->transport_scrambling_mode != 0) {
+ // TODO: handle keyId change, use the first non-zero keyId for now.
+ if (tsScramblingControl == 0) {
+ tsScramblingControl = it->transport_scrambling_mode;
+ }
+ }
+ if (tsScramblingControl == 0 || descrambleSubSamples == 0
+ || !mQueue->isScrambled()) {
+ descrambleSubSamples++;
+ descrambleBytes += it->subSampleSize;
+ }
+ }
+ // If not scrambled at TS-level, check PES-level scrambling
+ if (tsScramblingControl == 0) {
+ ABitReader br(mBuffer->data(), mBuffer->size());
+ pesScramblingControl = getPesScramblingControl(&br, &pesOffset);
+ // If not scrambled at PES-level either, or scrambled at PES-level but
+ // requires output to remain scrambled, we don't need to descramble
+ // anything.
+ if (pesScramblingControl == 0 || mQueue->isScrambled()) {
+ descrambleSubSamples = 0;
+ descrambleBytes = 0;
+ }
+ }
+
+ uint32_t sctrl = tsScramblingControl != 0 ?
+ tsScramblingControl : pesScramblingControl;
+
+ // Perform the 1st pass descrambling if needed
+ if (descrambleBytes > 0) {
+ memcpy(mDescrambledBuffer->data(), mBuffer->data(), descrambleBytes);
+ mDescrambledBuffer->setRange(0, descrambleBytes);
+
+ sp<ABuffer> subSamples = new ABuffer(
+ sizeof(DescramblerPlugin::SubSample) * descrambleSubSamples);
+
+ DescrambleInfo info;
+ info.dstType = DescrambleInfo::kDestinationTypeVmPointer;
+ info.scramblingControl = (DescramblerPlugin::ScramblingControl)sctrl;
+ info.numSubSamples = descrambleSubSamples;
+ info.subSamples = (DescramblerPlugin::SubSample *)subSamples->data();
+ info.srcMem = mMem;
+ info.srcOffset = 0;
+ info.dstPtr = NULL; // in-place descrambling into srcMem
+ info.dstOffset = 0;
+
+ int32_t i = 0;
+ for (auto it = mSubSamples.begin();
+ it != mSubSamples.end() && i < descrambleSubSamples; it++, i++) {
+ if (it->transport_scrambling_mode != 0 || pesScramblingControl != 0) {
+ info.subSamples[i].mNumBytesOfClearData = 0;
+ info.subSamples[i].mNumBytesOfEncryptedData = it->subSampleSize;
+ } else {
+ info.subSamples[i].mNumBytesOfClearData = it->subSampleSize;
+ info.subSamples[i].mNumBytesOfEncryptedData = 0;
+ }
+ }
+ // If scrambled at PES-level, PES header should be skipped
+ if (pesScramblingControl != 0) {
+ info.srcOffset = info.dstOffset = pesOffset;
+ info.subSamples[0].mNumBytesOfEncryptedData -= pesOffset;
+ }
+
+ int32_t result;
+ Status status = mDescrambler->descramble(info, &result);
+
+ if (!status.isOk()) {
+ ALOGE("[stream %d] descramble failed, exceptionCode=%d",
+ mElementaryPID, status.exceptionCode());
+ return UNKNOWN_ERROR;
+ }
+
+ ALOGV("[stream %d] descramble succeeded, %d bytes",
+ mElementaryPID, result);
+ memcpy(mBuffer->data(), mDescrambledBuffer->data(), descrambleBytes);
+ }
+
+ if (mQueue->isScrambled()) {
+ // Queue subSample info for scrambled queue
+ sp<ABuffer> clearSizesBuffer = new ABuffer(mSubSamples.size() * 4);
+ sp<ABuffer> encSizesBuffer = new ABuffer(mSubSamples.size() * 4);
+ int32_t *clearSizePtr = (int32_t*)clearSizesBuffer->data();
+ int32_t *encSizePtr = (int32_t*)encSizesBuffer->data();
+ int32_t isSync = 0;
+ int32_t i = 0;
+ for (auto it = mSubSamples.begin();
+ it != mSubSamples.end(); it++, i++) {
+ if ((it->transport_scrambling_mode == 0
+ && pesScramblingControl == 0)
+ || i < descrambleSubSamples) {
+ clearSizePtr[i] = it->subSampleSize;
+ encSizePtr[i] = 0;
+ } else {
+ clearSizePtr[i] = 0;
+ encSizePtr[i] = it->subSampleSize;
+ }
+ isSync |= it->random_access_indicator;
+ }
+ // Pass the original TS subsample size now. The PES header adjust
+ // will be applied when the scrambled AU is dequeued.
+ mQueue->appendScrambledData(
+ mBuffer->data(), mBuffer->size(), sctrl,
+ isSync, clearSizesBuffer, encSizesBuffer);
+ }
+
+ ABitReader br(mBuffer->data(), mBuffer->size());
+ status_t err = parsePES(&br, event);
+
+ if (err != OK) {
+ ALOGE("[stream %d] failed to parse descrambled PES, err=%d",
+ mElementaryPID, err);
+ }
+
+ return err;
+}
+
+
status_t ATSParser::Stream::flush(SyncEvent *event) {
if (mBuffer == NULL || mBuffer->size() == 0) {
return OK;
@@ -1041,9 +1456,14 @@
ALOGV("flushing stream 0x%04x size = %zu", mElementaryPID, mBuffer->size());
- ABitReader br(mBuffer->data(), mBuffer->size());
-
- status_t err = parsePES(&br, event);
+ status_t err = OK;
+ if (mScrambled) {
+ err = flushScrambled(event);
+ mSubSamples.clear();
+ } else {
+ ABitReader br(mBuffer->data(), mBuffer->size());
+ err = parsePES(&br, event);
+ }
mBuffer->setRange(0, 0);
@@ -1052,7 +1472,9 @@
void ATSParser::Stream::onPayloadData(
unsigned PTS_DTS_flags, uint64_t PTS, uint64_t /* DTS */,
- const uint8_t *data, size_t size, SyncEvent *event) {
+ unsigned PES_scrambling_control,
+ const uint8_t *data, size_t size,
+ int32_t payloadOffset, SyncEvent *event) {
#if 0
ALOGI("payload streamType 0x%02x, PTS = 0x%016llx, dPTS = %lld",
mStreamType,
@@ -1061,14 +1483,15 @@
mPrevPTS = PTS;
#endif
- ALOGV("onPayloadData mStreamType=0x%02x", mStreamType);
+ ALOGV("onPayloadData mStreamType=0x%02x size: %zu", mStreamType, size);
int64_t timeUs = 0ll; // no presentation timestamp available.
if (PTS_DTS_flags == 2 || PTS_DTS_flags == 3) {
timeUs = mProgram->convertPTSToTimestamp(PTS);
}
- status_t err = mQueue->appendData(data, size, timeUs);
+ status_t err = mQueue->appendData(
+ data, size, timeUs, payloadOffset, PES_scrambling_control);
if (mEOSReached) {
mQueue->signalEOS();
@@ -1090,12 +1513,16 @@
const char *mime;
if (meta->findCString(kKeyMIMEType, &mime)
- && !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)
- && !IsIDR(accessUnit)) {
- continue;
+ && !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
+ int32_t sync = 0;
+ if (!accessUnit->meta()->findInt32("isSync", &sync) || !sync) {
+ continue;
+ }
}
mSource = new AnotherPacketSource(meta);
mSource->queueAccessUnit(accessUnit);
+ ALOGV("onPayloadData: created AnotherPacketSource PID 0x%08x of type 0x%02x",
+ mElementaryPID, mStreamType);
}
} else if (mQueue->getFormat() != NULL) {
// After a discontinuity we invalidate the queue's format
@@ -1121,13 +1548,24 @@
int64_t timeUs;
if (accessUnit->meta()->findInt64("timeUs", &timeUs)) {
found = true;
- event->init(pesStartOffset, mSource, timeUs);
+ event->init(pesStartOffset, mSource, timeUs, getSourceType());
}
}
}
}
}
+ATSParser::SourceType ATSParser::Stream::getSourceType() {
+ if (isVideo()) {
+ return VIDEO;
+ } else if (isAudio()) {
+ return AUDIO;
+ } else if (isMeta()) {
+ return META;
+ }
+ return NUM_SOURCE_TYPES;
+}
+
sp<MediaSource> ATSParser::Stream::getSource(SourceType type) {
switch (type) {
case VIDEO:
@@ -1161,6 +1599,18 @@
return NULL;
}
+void ATSParser::Stream::setCasInfo(
+ int32_t systemId, const sp<IDescrambler> &descrambler,
+ const std::vector<uint8_t> &sessionId) {
+ if (mSource != NULL && mDescrambler == NULL && descrambler != NULL) {
+ signalDiscontinuity(DISCONTINUITY_FORMAT_ONLY, NULL);
+ mDescrambler = descrambler;
+ if (mQueue->isScrambled()) {
+ mQueue->setCasInfo(systemId, sessionId);
+ }
+ }
+}
+
////////////////////////////////////////////////////////////////////////////////
ATSParser::ATSParser(uint32_t flags)
@@ -1172,6 +1622,7 @@
mNumTSPacketsParsed(0),
mNumPCRs(0) {
mPSISections.add(0 /* PID */, new PSISection);
+ mCasManager = new CasManager();
}
ATSParser::~ATSParser() {
@@ -1188,6 +1639,17 @@
return parseTS(&br, event);
}
+status_t ATSParser::setMediaCas(const sp<ICas> &cas) {
+ status_t err = mCasManager->setMediaCas(cas);
+ if (err != OK) {
+ return err;
+ }
+ for (size_t i = 0; i < mPrograms.size(); ++i) {
+ mPrograms.editItemAt(i)->updateCasSessions();
+ }
+ return OK;
+}
+
void ATSParser::signalDiscontinuity(
DiscontinuityType type, const sp<AMessage> &extra) {
int64_t mediaTimeUs;
@@ -1299,6 +1761,9 @@
if (!found) {
mPrograms.push(
new Program(this, program_number, programMapPID, mLastRecoveredPTS));
+ if (mSampleAesKeyItem != NULL) {
+ mPrograms.top()->signalNewSampleAesKey(mSampleAesKeyItem);
+ }
}
if (mPSISections.indexOfKey(programMapPID) < 0) {
@@ -1314,6 +1779,8 @@
ABitReader *br, unsigned PID,
unsigned continuity_counter,
unsigned payload_unit_start_indicator,
+ unsigned transport_scrambling_control,
+ unsigned random_access_indicator,
SyncEvent *event) {
ssize_t sectionIndex = mPSISections.indexOfKey(PID);
@@ -1385,7 +1852,10 @@
for (size_t i = 0; i < mPrograms.size(); ++i) {
status_t err;
if (mPrograms.editItemAt(i)->parsePID(
- PID, continuity_counter, payload_unit_start_indicator,
+ PID, continuity_counter,
+ payload_unit_start_indicator,
+ transport_scrambling_control,
+ random_access_indicator,
br, &err, event)) {
if (err != OK) {
return err;
@@ -1397,13 +1867,19 @@
}
if (!handled) {
+ handled = mCasManager->parsePID(br, PID);
+ }
+
+ if (!handled) {
ALOGV("PID 0x%04x not handled.", PID);
}
return OK;
}
-status_t ATSParser::parseAdaptationField(ABitReader *br, unsigned PID) {
+status_t ATSParser::parseAdaptationField(
+ ABitReader *br, unsigned PID, unsigned *random_access_indicator) {
+ *random_access_indicator = 0;
unsigned adaptation_field_length = br->getBits(8);
if (adaptation_field_length > 0) {
@@ -1418,7 +1894,16 @@
ALOGV("PID 0x%04x: discontinuity_indicator = 1 (!!!)", PID);
}
- br->skipBits(2);
+ *random_access_indicator = br->getBits(1);
+ if (*random_access_indicator) {
+ ALOGV("PID 0x%04x: random_access_indicator = 1", PID);
+ }
+
+ unsigned elementary_stream_priority_indicator = br->getBits(1);
+ if (elementary_stream_priority_indicator) {
+ ALOGV("PID 0x%04x: elementary_stream_priority_indicator = 1", PID);
+ }
+
unsigned PCR_flag = br->getBits(1);
size_t numBitsRead = 4;
@@ -1484,7 +1969,8 @@
unsigned PID = br->getBits(13);
ALOGV("PID = 0x%04x", PID);
- MY_LOGV("transport_scrambling_control = %u", br->getBits(2));
+ unsigned transport_scrambling_control = br->getBits(2);
+ ALOGV("transport_scrambling_control = %u", transport_scrambling_control);
unsigned adaptation_field_control = br->getBits(2);
ALOGV("adaptation_field_control = %u", adaptation_field_control);
@@ -1496,13 +1982,17 @@
status_t err = OK;
+ unsigned random_access_indicator = 0;
if (adaptation_field_control == 2 || adaptation_field_control == 3) {
- err = parseAdaptationField(br, PID);
+ err = parseAdaptationField(br, PID, &random_access_indicator);
}
if (err == OK) {
if (adaptation_field_control == 1 || adaptation_field_control == 3) {
err = parsePID(br, PID, continuity_counter,
- payload_unit_start_indicator, event);
+ payload_unit_start_indicator,
+ transport_scrambling_control,
+ random_access_indicator,
+ event);
}
}
@@ -1565,6 +2055,16 @@
return mPrograms.editItemAt(0)->PTSTimeDeltaEstablished();
}
+int64_t ATSParser::getFirstPTSTimeUs() {
+ for (size_t i = 0; i < mPrograms.size(); ++i) {
+ sp<ATSParser::Program> program = mPrograms.itemAt(i);
+ if (program->PTSTimeDeltaEstablished()) {
+ return (program->firstPTS() * 100) / 9;
+ }
+ }
+ return -1;
+}
+
__attribute__((no_sanitize("integer")))
void ATSParser::updatePCR(
unsigned /* PID */, uint64_t PCR, uint64_t byteOffsetFromStart) {
@@ -1762,4 +2262,40 @@
ALOGV("crc: %08x\n", crc);
return (crc == 0);
}
+
+// SAMPLE_AES key handling
+// TODO: Merge these to their respective class after Widevine-HLS
+void ATSParser::signalNewSampleAesKey(const sp<AMessage> &keyItem) {
+ ALOGD("signalNewSampleAesKey: %p", keyItem.get());
+
+ mSampleAesKeyItem = keyItem;
+
+ // a NULL key item will propagate to existing ElementaryStreamQueues
+ for (size_t i = 0; i < mPrograms.size(); ++i) {
+ mPrograms[i]->signalNewSampleAesKey(keyItem);
+ }
+}
+
+void ATSParser::Program::signalNewSampleAesKey(const sp<AMessage> &keyItem) {
+ ALOGD("Program::signalNewSampleAesKey: %p", keyItem.get());
+
+ mSampleAesKeyItem = keyItem;
+
+ // a NULL key item will propagate to existing ElementaryStreamQueues
+ for (size_t i = 0; i < mStreams.size(); ++i) {
+ mStreams[i]->signalNewSampleAesKey(keyItem);
+ }
+}
+
+void ATSParser::Stream::signalNewSampleAesKey(const sp<AMessage> &keyItem) {
+ ALOGD("Stream::signalNewSampleAesKey: 0x%04x size = %zu keyItem: %p",
+ mElementaryPID, mBuffer->size(), keyItem.get());
+
+ // a NULL key item will propagate to existing ElementaryStreamQueues
+ mSampleAesKeyItem = keyItem;
+
+ flush(NULL);
+ mQueue->signalNewSampleAesKey(keyItem);
+}
+
} // namespace android
diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/ATSParser.h
index 2b166f0..374e011 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.h
+++ b/media/libstagefright/mpeg2ts/ATSParser.h
@@ -26,11 +26,17 @@
#include <utils/KeyedVector.h>
#include <utils/Vector.h>
#include <utils/RefBase.h>
+#include <vector>
namespace android {
-
+namespace media {
+class ICas;
+class IDescrambler;
+};
+using namespace media;
class ABitReader;
struct ABuffer;
+struct AnotherPacketSource;
struct ATSParser : public RefBase {
enum DiscontinuityType {
@@ -62,18 +68,26 @@
ALIGNED_VIDEO_DATA = 2,
};
+ enum SourceType {
+ VIDEO = 0,
+ AUDIO = 1,
+ META = 2,
+ NUM_SOURCE_TYPES = 3
+ };
+
// Event is used to signal sync point event at feedTSPacket().
struct SyncEvent {
explicit SyncEvent(off64_t offset);
void init(off64_t offset, const sp<MediaSource> &source,
- int64_t timeUs);
+ int64_t timeUs, SourceType type);
bool hasReturnedData() const { return mHasReturnedData; }
void reset();
off64_t getOffset() const { return mOffset; }
const sp<MediaSource> &getMediaSource() const { return mMediaSource; }
int64_t getTimeUs() const { return mTimeUs; }
+ SourceType getType() const { return mType; }
private:
bool mHasReturnedData;
@@ -87,10 +101,13 @@
sp<MediaSource> mMediaSource;
/* The timestamp of the sync frame. */
int64_t mTimeUs;
+ SourceType mType;
};
explicit ATSParser(uint32_t flags = 0);
+ status_t setMediaCas(const sp<ICas> &cas);
+
// Feed a TS packet into the parser. uninitialized event with the start
// offset of this TS packet goes in, and if the parser detects PES with
// a sync frame, the event will be initiailzed with the start offset of the
@@ -107,17 +124,15 @@
void signalEOS(status_t finalResult);
- enum SourceType {
- VIDEO = 0,
- AUDIO = 1,
- META = 2,
- NUM_SOURCE_TYPES = 3
- };
sp<MediaSource> getSource(SourceType type);
bool hasSource(SourceType type) const;
bool PTSTimeDeltaEstablished();
+ int64_t getFirstPTSTimeUs();
+
+ void signalNewSampleAesKey(const sp<AMessage> &keyItem);
+
enum {
// From ISO/IEC 13818-1: 2000 (E), Table 2-29
STREAMTYPE_RESERVED = 0x00,
@@ -136,6 +151,11 @@
// Stream type 0x83 is non-standard,
// it could be LPCM or TrueHD AC3
STREAMTYPE_LPCM_AC3 = 0x83,
+
+ //Sample Encrypted types
+ STREAMTYPE_H264_ENCRYPTED = 0xDB,
+ STREAMTYPE_AAC_ENCRYPTED = 0xCF,
+ STREAMTYPE_AC3_ENCRYPTED = 0xC1,
};
protected:
@@ -145,6 +165,14 @@
struct Program;
struct Stream;
struct PSISection;
+ struct CasManager;
+ struct CADescriptor {
+ int32_t mSystemID;
+ unsigned mPID;
+ std::vector<uint8_t> mPrivateData;
+ };
+
+ sp<CasManager> mCasManager;
uint32_t mFlags;
Vector<sp<Program> > mPrograms;
@@ -160,6 +188,8 @@
size_t mNumTSPacketsParsed;
+ sp<AMessage> mSampleAesKeyItem;
+
void parseProgramAssociationTable(ABitReader *br);
void parseProgramMap(ABitReader *br);
// Parse PES packet where br is pointing to. If the PES contains a sync
@@ -176,9 +206,13 @@
ABitReader *br, unsigned PID,
unsigned continuity_counter,
unsigned payload_unit_start_indicator,
+ unsigned transport_scrambling_control,
+ unsigned random_access_indicator,
SyncEvent *event);
- status_t parseAdaptationField(ABitReader *br, unsigned PID);
+ status_t parseAdaptationField(
+ ABitReader *br, unsigned PID, unsigned *random_access_indicator);
+
// see feedTSPacket().
status_t parseTS(ABitReader *br, SyncEvent *event);
diff --git a/media/libstagefright/mpeg2ts/Android.bp b/media/libstagefright/mpeg2ts/Android.bp
index ea3b1a6..96eb5bf 100644
--- a/media/libstagefright/mpeg2ts/Android.bp
+++ b/media/libstagefright/mpeg2ts/Android.bp
@@ -4,7 +4,9 @@
srcs: [
"AnotherPacketSource.cpp",
"ATSParser.cpp",
+ "CasManager.cpp",
"ESQueue.cpp",
+ "HlsSampleDecryptor.cpp",
"MPEG2PSExtractor.cpp",
"MPEG2TSExtractor.cpp",
],
@@ -24,7 +26,14 @@
"unsigned-integer-overflow",
"signed-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
- shared_libs: ["libmedia"],
+ shared_libs: [
+ "libcrypto",
+ "libmedia",
+ ],
}
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
index 4fcf7b5..433b1fc 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
@@ -44,6 +44,7 @@
mEnabled(true),
mFormat(NULL),
mLastQueuedTimeUs(0),
+ mEstimatedBufferDurationUs(-1),
mEOSResult(OK),
mLatestEnqueuedMeta(NULL),
mLatestDequeuedMeta(NULL) {
@@ -203,25 +204,53 @@
}
MediaBuffer *mediaBuffer = new MediaBuffer(buffer);
+ sp<MetaData> bufmeta = mediaBuffer->meta_data();
- mediaBuffer->meta_data()->setInt64(kKeyTime, timeUs);
+ bufmeta->setInt64(kKeyTime, timeUs);
int32_t isSync;
if (buffer->meta()->findInt32("isSync", &isSync)) {
- mediaBuffer->meta_data()->setInt32(kKeyIsSyncFrame, isSync);
+ bufmeta->setInt32(kKeyIsSyncFrame, isSync);
}
sp<ABuffer> sei;
if (buffer->meta()->findBuffer("sei", &sei) && sei != NULL) {
- mediaBuffer->meta_data()->setData(kKeySEI, 0, sei->data(), sei->size());
+ bufmeta->setData(kKeySEI, 0, sei->data(), sei->size());
}
sp<ABuffer> mpegUserData;
if (buffer->meta()->findBuffer("mpegUserData", &mpegUserData) && mpegUserData != NULL) {
- mediaBuffer->meta_data()->setData(
+ bufmeta->setData(
kKeyMpegUserData, 0, mpegUserData->data(), mpegUserData->size());
}
+ int32_t cryptoMode;
+ if (buffer->meta()->findInt32("cryptoMode", &cryptoMode)) {
+ int32_t cryptoKey;
+ sp<ABuffer> clearBytesBuffer, encBytesBuffer;
+
+ CHECK(buffer->meta()->findInt32("cryptoKey", &cryptoKey));
+ CHECK(buffer->meta()->findBuffer("clearBytes", &clearBytesBuffer)
+ && clearBytesBuffer != NULL);
+ CHECK(buffer->meta()->findBuffer("encBytes", &encBytesBuffer)
+ && encBytesBuffer != NULL);
+
+ bufmeta->setInt32(kKeyCryptoMode, cryptoMode);
+
+ uint8_t array[16] = {0};
+ bufmeta->setData(kKeyCryptoIV, 0, array, 16);
+
+ array[0] = (uint8_t) (cryptoKey & 0xff);
+ bufmeta->setData(kKeyCryptoKey, 0, array, 16);
+
+ bufmeta->setData(kKeyPlainSizes, 0,
+ clearBytesBuffer->data(), clearBytesBuffer->size());
+
+ bufmeta->setData(kKeyEncryptedSizes, 0,
+ encBytesBuffer->data(), encBytesBuffer->size());
+ }
+
+
*out = mediaBuffer;
return OK;
}
@@ -309,6 +338,8 @@
mFormat = NULL;
mLatestEnqueuedMeta = NULL;
+
+ mEstimatedBufferDurationUs = -1;
}
void AnotherPacketSource::queueDiscontinuity(
@@ -431,6 +462,31 @@
return durationUs;
}
+int64_t AnotherPacketSource::getEstimatedBufferDurationUs() {
+ Mutex::Autolock autoLock(mLock);
+ if (mEstimatedBufferDurationUs >= 0) {
+ return mEstimatedBufferDurationUs;
+ }
+
+ SortedVector<int64_t> maxTimesUs;
+ List<sp<ABuffer> >::iterator it;
+ int64_t t1 = 0, t2 = 0;
+ for (it = mBuffers.begin(); it != mBuffers.end(); ++it) {
+ int64_t timeUs = 0;
+ const sp<ABuffer> &buffer = *it;
+ if (!buffer->meta()->findInt64("timeUs", &timeUs)) {
+ continue;
+ }
+ maxTimesUs.add(timeUs);
+ while (maxTimesUs.size() > 2) {
+ maxTimesUs.removeAt(0);
+ t1 = maxTimesUs.itemAt(0);
+ t2 = maxTimesUs.itemAt(1);
+ }
+ }
+ return mEstimatedBufferDurationUs = t2 - t1;
+}
+
status_t AnotherPacketSource::nextBufferTime(int64_t *timeUs) {
*timeUs = 0;
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.h b/media/libstagefright/mpeg2ts/AnotherPacketSource.h
index dd6849e..b0890d7 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.h
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.h
@@ -57,6 +57,9 @@
// presentation timestamps since the last discontinuity (if any).
int64_t getBufferedDurationUs(status_t *finalResult);
+ // Returns the difference between the two largest timestamps queued
+ int64_t getEstimatedBufferDurationUs();
+
status_t nextBufferTime(int64_t *timeUs);
void queueAccessUnit(const sp<ABuffer> &buffer);
@@ -113,6 +116,7 @@
bool mEnabled;
sp<MetaData> mFormat;
int64_t mLastQueuedTimeUs;
+ int64_t mEstimatedBufferDurationUs;
List<sp<ABuffer> > mBuffers;
status_t mEOSResult;
sp<AMessage> mLatestEnqueuedMeta;
diff --git a/media/libstagefright/mpeg2ts/CasManager.cpp b/media/libstagefright/mpeg2ts/CasManager.cpp
new file mode 100644
index 0000000..047b1b3
--- /dev/null
+++ b/media/libstagefright/mpeg2ts/CasManager.cpp
@@ -0,0 +1,328 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "CasManager"
+#include "CasManager.h"
+
+#include <android/media/ICas.h>
+#include <android/media/IDescrambler.h>
+#include <android/media/IMediaCasService.h>
+#include <binder/IServiceManager.h>
+#include <media/stagefright/foundation/ABitReader.h>
+#include <utils/Log.h>
+
+namespace android {
+using binder::Status;
+
+struct ATSParser::CasManager::ProgramCasManager : public RefBase {
+ ProgramCasManager(unsigned programNumber, const CADescriptor &descriptor);
+ ProgramCasManager(unsigned programNumber);
+
+ bool addStream(unsigned elementaryPID, const CADescriptor &descriptor);
+
+ status_t setMediaCas(const sp<ICas> &cas, PidToSessionMap &sessionMap);
+
+ bool getCasSession(unsigned elementaryPID,
+ sp<IDescrambler> *descrambler, std::vector<uint8_t> *sessionId) const;
+
+ void closeAllSessions(const sp<ICas>& cas);
+
+private:
+ struct CasSession {
+ CasSession() {}
+ CasSession(const CADescriptor &descriptor) :
+ mCADescriptor(descriptor) {}
+
+ CADescriptor mCADescriptor;
+ std::vector<uint8_t> mSessionId;
+ sp<IDescrambler> mDescrambler;
+ };
+ status_t initSession(
+ const sp<ICas>& cas,
+ PidToSessionMap &sessionMap,
+ CasSession *session);
+ void closeSession(const sp<ICas>& cas, const CasSession &casSession);
+
+ unsigned mProgramNumber;
+ bool mHasProgramCas;
+ CasSession mProgramCas;
+ KeyedVector<unsigned, CasSession> mStreamPidToCasMap;
+};
+
+ATSParser::CasManager::ProgramCasManager::ProgramCasManager(
+ unsigned programNumber, const CADescriptor &descriptor) :
+ mProgramNumber(programNumber),
+ mHasProgramCas(true),
+ mProgramCas(descriptor) {}
+
+ATSParser::CasManager::ProgramCasManager::ProgramCasManager(
+ unsigned programNumber) :
+ mProgramNumber(programNumber),
+ mHasProgramCas(false) {}
+
+bool ATSParser::CasManager::ProgramCasManager::addStream(
+ unsigned elementaryPID, const CADescriptor &descriptor) {
+ ssize_t index = mStreamPidToCasMap.indexOfKey(elementaryPID);
+ if (index >= 0) {
+ return false;
+ }
+ ALOGV("addStream: program=%d, elementaryPID=%d, CA_system_ID=0x%x",
+ mProgramNumber, elementaryPID, descriptor.mSystemID);
+ mStreamPidToCasMap.add(elementaryPID, CasSession(descriptor));
+ return true;
+}
+
+status_t ATSParser::CasManager::ProgramCasManager::setMediaCas(
+ const sp<ICas> &cas, PidToSessionMap &sessionMap) {
+ if (mHasProgramCas) {
+ return initSession(cas, sessionMap, &mProgramCas);
+ }
+ // TODO: share session among streams that have identical CA_descriptors.
+ // For now, we open one session for each stream that has CA_descriptor.
+ for (size_t index = 0; index < mStreamPidToCasMap.size(); index++) {
+ status_t err = initSession(
+ cas, sessionMap, &mStreamPidToCasMap.editValueAt(index));
+ if (err != OK) {
+ return err;
+ }
+ }
+ return OK;
+}
+
+bool ATSParser::CasManager::ProgramCasManager::getCasSession(
+ unsigned elementaryPID, sp<IDescrambler> *descrambler,
+ std::vector<uint8_t> *sessionId) const {
+ if (mHasProgramCas) {
+ *descrambler = mProgramCas.mDescrambler;
+ *sessionId = mProgramCas.mSessionId;
+ return true;
+ }
+ ssize_t index = mStreamPidToCasMap.indexOfKey(elementaryPID);
+ if (index < 0) {
+ return false;
+ }
+
+ *descrambler = mStreamPidToCasMap[index].mDescrambler;
+ *sessionId = mStreamPidToCasMap[index].mSessionId;
+ return true;
+}
+
+status_t ATSParser::CasManager::ProgramCasManager::initSession(
+ const sp<ICas>& cas,
+ PidToSessionMap &sessionMap,
+ CasSession *session) {
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<IBinder> casServiceBinder = sm->getService(String16("media.cas"));
+ sp<IMediaCasService> casService =
+ interface_cast<IMediaCasService>(casServiceBinder);
+
+ if (casService == NULL) {
+ ALOGE("Cannot obtain IMediaCasService");
+ return NO_INIT;
+ }
+
+ sp<IDescrambler> descrambler;
+ std::vector<uint8_t> sessionId;
+ const CADescriptor &descriptor = session->mCADescriptor;
+
+ Status status = cas->openSession(&sessionId);
+ if (!status.isOk()) {
+ ALOGE("Failed to open session: exception=%d, error=%d",
+ status.exceptionCode(), status.serviceSpecificErrorCode());
+ goto l_fail;
+ }
+
+ status = cas->setSessionPrivateData(sessionId, descriptor.mPrivateData);
+ if (!status.isOk()) {
+ ALOGE("Failed to set private data: exception=%d, error=%d",
+ status.exceptionCode(), status.serviceSpecificErrorCode());
+ goto l_fail;
+ }
+
+ status = casService->createDescrambler(descriptor.mSystemID, &descrambler);
+ if (!status.isOk() || descrambler == NULL) {
+ ALOGE("Failed to create descrambler: exception=%d, error=%d",
+ status.exceptionCode(), status.serviceSpecificErrorCode());
+ goto l_fail;
+ }
+
+ status = descrambler->setMediaCasSession(sessionId);
+ if (!status.isOk()) {
+ ALOGE("Failed to init descrambler: exception=%d, error=%d",
+ status.exceptionCode(), status.serviceSpecificErrorCode());
+ goto l_fail;
+ }
+
+ session->mSessionId = sessionId;
+ session->mDescrambler = descrambler;
+ sessionMap.add(descriptor.mPID, sessionId);
+
+ return OK;
+
+l_fail:
+ if (!sessionId.empty()) {
+ cas->closeSession(sessionId);
+ }
+ if (descrambler != NULL) {
+ descrambler->release();
+ }
+ return NO_INIT;
+}
+
+void ATSParser::CasManager::ProgramCasManager::closeSession(
+ const sp<ICas>& cas, const CasSession &casSession) {
+ if (casSession.mDescrambler != NULL) {
+ casSession.mDescrambler->release();
+ }
+ if (!casSession.mSessionId.empty()) {
+ cas->closeSession(casSession.mSessionId);
+ }
+}
+
+void ATSParser::CasManager::ProgramCasManager::closeAllSessions(
+ const sp<ICas>& cas) {
+ if (mHasProgramCas) {
+ closeSession(cas, mProgramCas);
+ }
+ for (size_t index = 0; index < mStreamPidToCasMap.size(); index++) {
+ closeSession(cas, mStreamPidToCasMap.editValueAt(index));
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+ATSParser::CasManager::CasManager() : mSystemId(-1) {}
+
+ATSParser::CasManager::~CasManager() {
+ // Explicitly close the sessions opened by us, since the CAS object is owned
+ // by the app and may not go away after the parser is destroyed, and the app
+ // may not have information about the sessions.
+ if (mICas != NULL) {
+ for (size_t index = 0; index < mProgramCasMap.size(); index++) {
+ mProgramCasMap.editValueAt(index)->closeAllSessions(mICas);
+ }
+ }
+}
+
+bool ATSParser::CasManager::setSystemId(int32_t CA_system_ID) {
+ if (mSystemId == -1) {
+ // Verify the CA_system_ID is within range on the first program
+ if (CA_system_ID < 0 || CA_system_ID > 0xffff) {
+ ALOGE("Invalid CA_system_id: %d", CA_system_ID);
+ return false;
+ }
+ mSystemId = CA_system_ID;
+ } else if (mSystemId != CA_system_ID) {
+ // All sessions need to be under the same CA system
+ ALOGE("Multiple CA systems not allowed: %d vs %d",
+ mSystemId, CA_system_ID);
+ return false;
+ }
+ return true;
+}
+
+status_t ATSParser::CasManager::setMediaCas(const sp<ICas> &cas) {
+ if (cas == NULL) {
+ ALOGE("setMediaCas: received NULL object");
+ return BAD_VALUE;
+ }
+ if (mICas != NULL) {
+ ALOGW("setMediaCas: already set");
+ return ALREADY_EXISTS;
+ }
+ for (size_t index = 0; index < mProgramCasMap.size(); index++) {
+ status_t err;
+ if ((err = mProgramCasMap.editValueAt(
+ index)->setMediaCas(cas, mCAPidToSessionIdMap)) != OK) {
+ return err;
+ }
+ }
+ mICas = cas;
+ return OK;
+}
+
+bool ATSParser::CasManager::addProgram(
+ unsigned programNumber, const CADescriptor &descriptor) {
+ if (!setSystemId(descriptor.mSystemID)) {
+ return false;
+ }
+
+ ssize_t index = mProgramCasMap.indexOfKey(programNumber);
+ if (index < 0) {
+ ALOGV("addProgram: programNumber=%d, CA_system_ID=0x%x",
+ programNumber, descriptor.mSystemID);
+ mProgramCasMap.add(programNumber,
+ new ProgramCasManager(programNumber, descriptor));
+ mCAPidSet.insert(descriptor.mPID);
+ }
+ return true;
+}
+
+bool ATSParser::CasManager::addStream(
+ unsigned programNumber, unsigned elementaryPID,
+ const CADescriptor &descriptor) {
+ if (!setSystemId(descriptor.mSystemID)) {
+ return false;
+ }
+
+ ssize_t index = mProgramCasMap.indexOfKey(programNumber);
+ sp<ProgramCasManager> programCasManager;
+ if (index < 0) {
+ ALOGV("addProgram (no CADescriptor): programNumber=%d", programNumber);
+ programCasManager = new ProgramCasManager(programNumber);
+ mProgramCasMap.add(programNumber, programCasManager);
+ } else {
+ programCasManager = mProgramCasMap.editValueAt(index);
+ }
+ if (programCasManager->addStream(elementaryPID, descriptor)) {
+ mCAPidSet.insert(descriptor.mPID);
+ }
+ return true;
+}
+
+bool ATSParser::CasManager::getCasInfo(
+ unsigned programNumber, unsigned elementaryPID,
+ int32_t *systemId, sp<IDescrambler> *descrambler,
+ std::vector<uint8_t> *sessionId) const {
+ ssize_t index = mProgramCasMap.indexOfKey(programNumber);
+ if (index < 0) {
+ return false;
+ }
+ *systemId = mSystemId;
+ return mProgramCasMap[index]->getCasSession(
+ elementaryPID, descrambler, sessionId);
+}
+
+bool ATSParser::CasManager::isCAPid(unsigned pid) {
+ return mCAPidSet.find(pid) != mCAPidSet.end();
+}
+
+bool ATSParser::CasManager::parsePID(ABitReader *br, unsigned pid) {
+ ssize_t index = mCAPidToSessionIdMap.indexOfKey(pid);
+ if (index < 0) {
+ return false;
+ }
+ MediaCas::ParcelableCasData ecm(br->data(), br->numBitsLeft() / 8);
+ Status status = mICas->processEcm(mCAPidToSessionIdMap[index], ecm);
+ if (!status.isOk()) {
+ ALOGE("Failed to process ECM: exception=%d, error=%d",
+ status.exceptionCode(), status.serviceSpecificErrorCode());
+ }
+ return true; // handled
+}
+
+} // namespace android
diff --git a/media/libstagefright/mpeg2ts/CasManager.h b/media/libstagefright/mpeg2ts/CasManager.h
new file mode 100644
index 0000000..8088dec
--- /dev/null
+++ b/media/libstagefright/mpeg2ts/CasManager.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+
+#include "ATSParser.h"
+#include <utils/KeyedVector.h>
+#include <set>
+
+namespace android {
+namespace media {
+class ICas;
+class IDescrambler;
+}
+
+struct ATSParser::CasManager : public RefBase {
+ CasManager();
+ virtual ~CasManager();
+
+ status_t setMediaCas(const sp<ICas> &cas);
+
+ bool addProgram(
+ unsigned programNumber, const CADescriptor &descriptor);
+
+ bool addStream(
+ unsigned programNumber, unsigned elementaryPID,
+ const CADescriptor &descriptor);
+
+ bool getCasInfo(
+ unsigned programNumber, unsigned elementaryPID,
+ int32_t *systemId, sp<IDescrambler> *descrambler,
+ std::vector<uint8_t> *sessionId) const;
+
+ bool isCAPid(unsigned pid);
+
+ bool parsePID(ABitReader *br, unsigned pid);
+
+private:
+ typedef KeyedVector<unsigned, std::vector<uint8_t> > PidToSessionMap;
+ struct ProgramCasManager;
+
+ bool setSystemId(int32_t CA_system_ID);
+
+ int32_t mSystemId;
+ sp<ICas> mICas;
+ KeyedVector<unsigned, sp<ProgramCasManager> > mProgramCasMap;
+ PidToSessionMap mCAPidToSessionIdMap;
+ std::set<uint32_t> mCAPidSet;
+};
+
+} // namespace android
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index 96ca405..f1b44ae 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -28,6 +28,8 @@
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
+#include <media/cas/DescramblerAPI.h>
+#include <media/hardware/CryptoAPI.h>
#include "include/avc_utils.h"
@@ -39,7 +41,16 @@
ElementaryStreamQueue::ElementaryStreamQueue(Mode mode, uint32_t flags)
: mMode(mode),
mFlags(flags),
- mEOSReached(false) {
+ mEOSReached(false),
+ mCASystemId(0),
+ mAUIndex(0) {
+
+ ALOGV("ElementaryStreamQueue(%p) mode %x flags %x isScrambled %d isSampleEncrypted %d",
+ this, mode, flags, isScrambled(), isSampleEncrypted());
+
+ // Create the decryptor anyway since we don't know the use-case unless key is provided
+ // Won't decrypt if key info not available (e.g., scanner/extractor just parsing ts files)
+ mSampleDecryptor = isSampleEncrypted() ? new HlsSampleDecryptor : NULL;
}
sp<MetaData> ElementaryStreamQueue::getFormat() {
@@ -53,6 +64,11 @@
mRangeInfos.clear();
+ if (mScrambledBuffer != NULL) {
+ mScrambledBuffer->setRange(0, 0);
+ }
+ mScrambledRangeInfos.clear();
+
if (clearFormat) {
mFormat.clear();
}
@@ -60,6 +76,16 @@
mEOSReached = false;
}
+bool ElementaryStreamQueue::isScrambled() const {
+ return (mFlags & kFlag_ScrambledData) != 0;
+}
+
+void ElementaryStreamQueue::setCasInfo(
+ int32_t systemId, const std::vector<uint8_t> &sessionId) {
+ mCASystemId = systemId;
+ mCasSessionId = sessionId;
+}
+
// Parse AC3 header assuming the current ptr is start position of syncframe,
// update metadata only applicable, and return the payload size
static unsigned parseAC3SyncFrame(
@@ -246,7 +272,8 @@
}
status_t ElementaryStreamQueue::appendData(
- const void *data, size_t size, int64_t timeUs) {
+ const void *data, size_t size, int64_t timeUs,
+ int32_t payloadOffset, uint32_t pesScramblingControl) {
if (mEOSReached) {
ALOGE("appending data after EOS");
@@ -276,7 +303,7 @@
return ERROR_MALFORMED;
}
- if (startOffset > 0) {
+ if (mFormat == NULL && startOffset > 0) {
ALOGI("found something resembling an H.264/MPEG syncword "
"at offset %zd",
startOffset);
@@ -451,6 +478,8 @@
RangeInfo info;
info.mLength = size;
info.mTimestampUs = timeUs;
+ info.mPesOffset = payloadOffset;
+ info.mPesScramblingControl = pesScramblingControl;
mRangeInfos.push_back(info);
#if 0
@@ -463,8 +492,129 @@
return OK;
}
+void ElementaryStreamQueue::appendScrambledData(
+ const void *data, size_t size,
+ int32_t keyId, bool isSync,
+ sp<ABuffer> clearSizes, sp<ABuffer> encSizes) {
+ if (!isScrambled()) {
+ return;
+ }
+
+ size_t neededSize = (mScrambledBuffer == NULL ? 0 : mScrambledBuffer->size()) + size;
+ if (mScrambledBuffer == NULL || neededSize > mScrambledBuffer->capacity()) {
+ neededSize = (neededSize + 65535) & ~65535;
+
+ ALOGI("resizing scrambled buffer to size %zu", neededSize);
+
+ sp<ABuffer> buffer = new ABuffer(neededSize);
+ if (mScrambledBuffer != NULL) {
+ memcpy(buffer->data(), mScrambledBuffer->data(), mScrambledBuffer->size());
+ buffer->setRange(0, mScrambledBuffer->size());
+ } else {
+ buffer->setRange(0, 0);
+ }
+
+ mScrambledBuffer = buffer;
+ }
+ memcpy(mScrambledBuffer->data() + mScrambledBuffer->size(), data, size);
+ mScrambledBuffer->setRange(0, mScrambledBuffer->size() + size);
+
+ ScrambledRangeInfo scrambledInfo;
+ scrambledInfo.mLength = size;
+ scrambledInfo.mKeyId = keyId;
+ scrambledInfo.mIsSync = isSync;
+ scrambledInfo.mClearSizes = clearSizes;
+ scrambledInfo.mEncSizes = encSizes;
+
+ ALOGV("[stream %d] appending scrambled range: size=%zu", mMode, size);
+
+ mScrambledRangeInfos.push_back(scrambledInfo);
+}
+
+sp<ABuffer> ElementaryStreamQueue::dequeueScrambledAccessUnit() {
+ size_t nextScan = mBuffer->size();
+ mBuffer->setRange(0, 0);
+ int32_t pesOffset = 0, pesScramblingControl = 0;
+ int64_t timeUs = fetchTimestamp(nextScan, &pesOffset, &pesScramblingControl);
+ if (timeUs < 0ll) {
+ ALOGE("Negative timeUs");
+ return NULL;
+ }
+
+ // return scrambled unit
+ int32_t keyId = pesScramblingControl, isSync = 0, scrambledLength = 0;
+ sp<ABuffer> clearSizes, encSizes;
+ while (mScrambledRangeInfos.size() > mRangeInfos.size()) {
+ auto it = mScrambledRangeInfos.begin();
+ ALOGV("[stream %d] fetching scrambled range: size=%zu", mMode, it->mLength);
+
+ if (scrambledLength > 0) {
+ // This shouldn't happen since we always dequeue the entire PES.
+ ALOGW("Discarding scrambled length %d", scrambledLength);
+ }
+ scrambledLength = it->mLength;
+
+ // TODO: handle key id change, use first non-zero keyId for now
+ if (keyId == 0) {
+ keyId = it->mKeyId;
+ }
+ clearSizes = it->mClearSizes;
+ encSizes = it->mEncSizes;
+ isSync = it->mIsSync;
+ mScrambledRangeInfos.erase(it);
+ }
+ if (scrambledLength == 0) {
+ ALOGE("[stream %d] empty scrambled unit!", mMode);
+ return NULL;
+ }
+
+ // skip the PES header, and copy the rest into scrambled access unit
+ sp<ABuffer> scrambledAccessUnit = ABuffer::CreateAsCopy(
+ mScrambledBuffer->data() + pesOffset,
+ scrambledLength - pesOffset);
+
+ // fix up first sample size after skipping the PES header
+ if (pesOffset > 0) {
+ int32_t &firstClearSize = *(int32_t*)clearSizes->data();
+ int32_t &firstEncSize = *(int32_t*)encSizes->data();
+ // Cut away the PES header
+ if (firstClearSize >= pesOffset) {
+ // This is for TS-level scrambling, we descrambled the first
+ // (or it was clear to begin with)
+ firstClearSize -= pesOffset;
+ } else if (firstEncSize >= pesOffset) {
+ // This can only be PES-level scrambling
+ firstEncSize -= pesOffset;
+ }
+ }
+
+ scrambledAccessUnit->meta()->setInt64("timeUs", timeUs);
+ if (isSync) {
+ scrambledAccessUnit->meta()->setInt32("isSync", 1);
+ }
+
+ // fill in CryptoInfo fields for AnotherPacketSource::read()
+ // MediaCas doesn't use cryptoMode, but set to non-zero value here.
+ scrambledAccessUnit->meta()->setInt32(
+ "cryptoMode", CryptoPlugin::kMode_AES_CTR);
+ scrambledAccessUnit->meta()->setInt32("cryptoKey", keyId);
+ scrambledAccessUnit->meta()->setBuffer("clearBytes", clearSizes);
+ scrambledAccessUnit->meta()->setBuffer("encBytes", encSizes);
+
+ memmove(mScrambledBuffer->data(),
+ mScrambledBuffer->data() + scrambledLength,
+ mScrambledBuffer->size() - scrambledLength);
+
+ mScrambledBuffer->setRange(0, mScrambledBuffer->size() - scrambledLength);
+
+ ALOGV("[stream %d] dequeued scrambled AU: timeUs=%lld, size=%zu",
+ mMode, (long long)timeUs, scrambledAccessUnit->size());
+
+ return scrambledAccessUnit;
+}
+
sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnit() {
- if ((mFlags & kFlag_AlignedData) && mMode == H264) {
+ if ((mFlags & kFlag_AlignedData) && mMode == H264 && !isScrambled()) {
if (mRangeInfos.empty()) {
return NULL;
}
@@ -517,6 +667,9 @@
unsigned syncStartPos = 0; // in bytes
unsigned payloadSize = 0;
sp<MetaData> format = new MetaData;
+
+ ALOGV("dequeueAccessUnit_AC3[%d]: mBuffer %p(%zu)", mAUIndex, mBuffer->data(), mBuffer->size());
+
while (true) {
if (syncStartPos + 2 >= mBuffer->size()) {
return NULL;
@@ -529,6 +682,10 @@
if (payloadSize > 0) {
break;
}
+
+ ALOGV("dequeueAccessUnit_AC3[%d]: syncStartPos %u payloadSize %u",
+ mAUIndex, syncStartPos, payloadSize);
+
++syncStartPos;
}
@@ -541,14 +698,22 @@
mFormat = format;
}
- sp<ABuffer> accessUnit = new ABuffer(syncStartPos + payloadSize);
- memcpy(accessUnit->data(), mBuffer->data(), syncStartPos + payloadSize);
int64_t timeUs = fetchTimestamp(syncStartPos + payloadSize);
if (timeUs < 0ll) {
ALOGE("negative timeUs");
return NULL;
}
+
+ // Not decrypting if key info not available (e.g., scanner/extractor parsing ts files)
+ if (mSampleDecryptor != NULL) {
+ mSampleDecryptor->processAC3(mBuffer->data() + syncStartPos, payloadSize);
+ }
+ mAUIndex++;
+
+ sp<ABuffer> accessUnit = new ABuffer(syncStartPos + payloadSize);
+ memcpy(accessUnit->data(), mBuffer->data(), syncStartPos + payloadSize);
+
accessUnit->meta()->setInt64("timeUs", timeUs);
accessUnit->meta()->setInt32("isSync", 1);
@@ -649,6 +814,17 @@
return NULL;
}
+ ALOGV("dequeueAccessUnit_AAC[%d]: mBuffer %zu info.mLength %zu",
+ mAUIndex, mBuffer->size(), info.mLength);
+
+ struct ADTSPosition {
+ size_t offset;
+ size_t headerSize;
+ size_t length;
+ };
+
+ Vector<ADTSPosition> frames;
+
// The idea here is consume all AAC frames starting at offsets before
// info.mLength so we can assign a meaningful timestamp without
// having to interpolate.
@@ -669,7 +845,7 @@
return NULL;
}
bits.skipBits(3); // ID, layer
- bool protection_absent __unused = bits.getBits(1) != 0;
+ bool protection_absent = bits.getBits(1) != 0;
if (mFormat == NULL) {
unsigned profile = bits.getBits(2);
@@ -731,11 +907,36 @@
return NULL;
}
- size_t headerSize __unused = protection_absent ? 7 : 9;
+ size_t headerSize = protection_absent ? 7 : 9;
+
+ // tracking the frame positions first then decrypt only if an accessUnit to be generated
+ if (mSampleDecryptor != NULL) {
+ ADTSPosition frame = {
+ .offset = offset,
+ .headerSize = headerSize,
+ .length = aac_frame_length
+ };
+
+ frames.push(frame);
+ }
offset += aac_frame_length;
}
+ // Decrypting only if the loop didn't exit early and an accessUnit is about to be generated
+ // Not decrypting if key info not available (e.g., scanner/extractor parsing ts files)
+ if (mSampleDecryptor != NULL) {
+ for (size_t frameId = 0; frameId < frames.size(); frameId++) {
+ const ADTSPosition &frame = frames.itemAt(frameId);
+
+ mSampleDecryptor->processAAC(frame.headerSize,
+ mBuffer->data() + frame.offset, frame.length);
+// ALOGV("dequeueAccessUnitAAC[%zu]: while offset %zu headerSize %zu frame_len %zu",
+// frameId, frame.offset, frame.headerSize, frame.length);
+ }
+ }
+ mAUIndex++;
+
int64_t timeUs = fetchTimestamp(offset);
sp<ABuffer> accessUnit = new ABuffer(offset);
@@ -751,7 +952,8 @@
return accessUnit;
}
-int64_t ElementaryStreamQueue::fetchTimestamp(size_t size) {
+int64_t ElementaryStreamQueue::fetchTimestamp(
+ size_t size, int32_t *pesOffset, int32_t *pesScramblingControl) {
int64_t timeUs = -1;
bool first = true;
@@ -764,6 +966,12 @@
if (first) {
timeUs = info->mTimestampUs;
+ if (pesOffset != NULL) {
+ *pesOffset = info->mPesOffset;
+ }
+ if (pesScramblingControl != NULL) {
+ *pesScramblingControl = info->mPesScramblingControl;
+ }
first = false;
}
@@ -787,6 +995,27 @@
}
sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitH264() {
+ if (isScrambled()) {
+ if (mBuffer == NULL || mBuffer->size() == 0) {
+ return NULL;
+ }
+ if (mFormat == NULL) {
+ mFormat = MakeAVCCodecSpecificData(mBuffer);
+ if (mFormat == NULL) {
+ ALOGI("Creating dummy AVC format for scrambled content");
+ mFormat = new MetaData;
+ mFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
+ mFormat->setInt32(kKeyWidth, 1280);
+ mFormat->setInt32(kKeyHeight, 720);
+ }
+ // for MediaExtractor.CasInfo
+ mFormat->setInt32(kKeyCASystemID, mCASystemId);
+ mFormat->setData(kKeyCASessionID, 0,
+ mCasSessionId.data(), mCasSessionId.size());
+ }
+ return dequeueScrambledAccessUnit();
+ }
+
const uint8_t *data = mBuffer->data();
size_t size = mBuffer->size();
@@ -800,6 +1029,9 @@
size_t nalSize;
bool foundSlice = false;
bool foundIDR = false;
+
+ ALOGV("dequeueAccessUnit_H264[%d] %p/%zu", mAUIndex, data, size);
+
while ((err = getNextNALUnit(&data, &size, &nalStart, &nalSize)) == OK) {
if (nalSize == 0) continue;
@@ -811,6 +1043,7 @@
foundIDR = true;
}
if (foundSlice) {
+ //TODO: Shouldn't this have been called with nalSize-1?
ABitReader br(nalStart + 1, nalSize);
unsigned first_mb_in_slice = parseUE(&br);
@@ -851,6 +1084,7 @@
size_t dstOffset = 0;
size_t seiIndex = 0;
+ size_t shrunkBytes = 0;
for (size_t i = 0; i < nals.size(); ++i) {
const NALPosition &pos = nals.itemAt(i);
@@ -877,11 +1111,30 @@
memcpy(accessUnit->data() + dstOffset, "\x00\x00\x00\x01", 4);
- memcpy(accessUnit->data() + dstOffset + 4,
- mBuffer->data() + pos.nalOffset,
- pos.nalSize);
+ if (mSampleDecryptor != NULL && (nalType == 1 || nalType == 5)) {
+ uint8_t *nalData = mBuffer->data() + pos.nalOffset;
+ size_t newSize = mSampleDecryptor->processNal(nalData, pos.nalSize);
+ // Note: the data can shrink due to unescaping
+ memcpy(accessUnit->data() + dstOffset + 4,
+ nalData,
+ newSize);
+ dstOffset += newSize + 4;
- dstOffset += pos.nalSize + 4;
+ size_t thisShrunkBytes = pos.nalSize - newSize;
+ //ALOGV("dequeueAccessUnitH264[%d]: nalType: %d -> %zu (%zu)",
+ // nalType, (int)pos.nalSize, newSize, thisShrunkBytes);
+
+ shrunkBytes += thisShrunkBytes;
+ }
+ else {
+ memcpy(accessUnit->data() + dstOffset + 4,
+ mBuffer->data() + pos.nalOffset,
+ pos.nalSize);
+
+ dstOffset += pos.nalSize + 4;
+ //ALOGV("dequeueAccessUnitH264 [%d] %d @%d",
+ // nalType, (int)pos.nalSize, (int)pos.nalOffset);
+ }
}
#if !LOG_NDEBUG
@@ -912,6 +1165,18 @@
mFormat = MakeAVCCodecSpecificData(accessUnit);
}
+ if (mSampleDecryptor != NULL && shrunkBytes > 0) {
+ size_t adjustedSize = accessUnit->size() - shrunkBytes;
+ ALOGV("dequeueAccessUnitH264[%d]: AU size adjusted %zu -> %zu",
+ mAUIndex, accessUnit->size(), adjustedSize);
+ accessUnit->setRange(0, adjustedSize);
+ }
+
+ ALOGV("dequeueAccessUnitH264[%d]: AU %p(%zu) dstOffset:%zu, nals:%zu, totalSize:%zu ",
+ mAUIndex, accessUnit->data(), accessUnit->size(),
+ dstOffset, nals.size(), totalSize);
+ mAUIndex++;
+
return accessUnit;
}
@@ -1045,6 +1310,25 @@
}
sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitMPEGVideo() {
+ if (isScrambled()) {
+ if (mBuffer == NULL || mBuffer->size() == 0) {
+ return NULL;
+ }
+ if (mFormat == NULL) {
+ ALOGI("Creating dummy MPEG format for scrambled content");
+ mFormat = new MetaData;
+ mFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG2);
+ mFormat->setInt32(kKeyWidth, 1280);
+ mFormat->setInt32(kKeyHeight, 720);
+
+ // for MediaExtractor.CasInfo
+ mFormat->setInt32(kKeyCASystemID, mCASystemId);
+ mFormat->setData(kKeyCASessionID, 0,
+ mCasSessionId.data(), mCasSessionId.size());
+ }
+ return dequeueScrambledAccessUnit();
+ }
+
const uint8_t *data = mBuffer->data();
size_t size = mBuffer->size();
@@ -1423,4 +1707,15 @@
return accessUnit;
}
+void ElementaryStreamQueue::signalNewSampleAesKey(const sp<AMessage> &keyItem) {
+ if (mSampleDecryptor == NULL) {
+ ALOGE("signalNewSampleAesKey: Stream %x is not encrypted; keyItem: %p",
+ mMode, keyItem.get());
+ return;
+ }
+
+ mSampleDecryptor->signalNewSampleAesKey(keyItem);
+}
+
+
} // namespace android
diff --git a/media/libstagefright/mpeg2ts/ESQueue.h b/media/libstagefright/mpeg2ts/ESQueue.h
index 56f0706..ffcb502 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.h
+++ b/media/libstagefright/mpeg2ts/ESQueue.h
@@ -19,9 +19,13 @@
#define ES_QUEUE_H_
#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/foundation/AMessage.h>
#include <utils/Errors.h>
#include <utils/List.h>
#include <utils/RefBase.h>
+#include <vector>
+
+#include "HlsSampleDecryptor.h"
namespace android {
@@ -30,6 +34,7 @@
struct ElementaryStreamQueue {
enum Mode {
+ INVALID = 0,
H264,
AAC,
AC3,
@@ -43,10 +48,20 @@
enum Flags {
// Data appended to the queue is always at access unit boundaries.
kFlag_AlignedData = 1,
+ kFlag_ScrambledData = 2,
+ kFlag_SampleEncryptedData = 4,
};
explicit ElementaryStreamQueue(Mode mode, uint32_t flags = 0);
- status_t appendData(const void *data, size_t size, int64_t timeUs);
+ status_t appendData(const void *data, size_t size,
+ int64_t timeUs, int32_t payloadOffset = 0,
+ uint32_t pesScramblingControl = 0);
+
+ void appendScrambledData(
+ const void *data, size_t size,
+ int32_t keyId, bool isSync,
+ sp<ABuffer> clearSizes, sp<ABuffer> encSizes);
+
void signalEOS();
void clear(bool clearFormat);
@@ -54,10 +69,27 @@
sp<MetaData> getFormat();
+ bool isScrambled() const;
+
+ void setCasInfo(int32_t systemId, const std::vector<uint8_t> &sessionId);
+
+ void signalNewSampleAesKey(const sp<AMessage> &keyItem);
+
private:
struct RangeInfo {
int64_t mTimestampUs;
size_t mLength;
+ int32_t mPesOffset;
+ uint32_t mPesScramblingControl;
+ };
+
+ struct ScrambledRangeInfo {
+ //int64_t mTimestampUs;
+ size_t mLength;
+ int32_t mKeyId;
+ int32_t mIsSync;
+ sp<ABuffer> mClearSizes;
+ sp<ABuffer> mEncSizes;
};
Mode mMode;
@@ -67,8 +99,20 @@
sp<ABuffer> mBuffer;
List<RangeInfo> mRangeInfos;
+ sp<ABuffer> mScrambledBuffer;
+ List<ScrambledRangeInfo> mScrambledRangeInfos;
+ int32_t mCASystemId;
+ std::vector<uint8_t> mCasSessionId;
+
sp<MetaData> mFormat;
+ sp<HlsSampleDecryptor> mSampleDecryptor;
+ int mAUIndex;
+
+ bool isSampleEncrypted() const {
+ return (mFlags & kFlag_SampleEncryptedData) != 0;
+ }
+
sp<ABuffer> dequeueAccessUnitH264();
sp<ABuffer> dequeueAccessUnitAAC();
sp<ABuffer> dequeueAccessUnitAC3();
@@ -80,7 +124,11 @@
// consume a logical (compressed) access unit of size "size",
// returns its timestamp in us (or -1 if no time information).
- int64_t fetchTimestamp(size_t size);
+ int64_t fetchTimestamp(size_t size,
+ int32_t *pesOffset = NULL,
+ int32_t *pesScramblingControl = NULL);
+
+ sp<ABuffer> dequeueScrambledAccessUnit();
DISALLOW_EVIL_CONSTRUCTORS(ElementaryStreamQueue);
};
diff --git a/media/libstagefright/mpeg2ts/HlsSampleDecryptor.cpp b/media/libstagefright/mpeg2ts/HlsSampleDecryptor.cpp
new file mode 100644
index 0000000..e32f676
--- /dev/null
+++ b/media/libstagefright/mpeg2ts/HlsSampleDecryptor.cpp
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "HlsSampleDecryptor"
+
+#include "HlsSampleDecryptor.h"
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/Utils.h>
+
+
+namespace android {
+
+// Default constructor: starts with no valid AES key, so all process*() calls
+// are pass-through until signalNewSampleAesKey() installs a key.
+HlsSampleDecryptor::HlsSampleDecryptor()
+    : mValidKeyInfo(false) {
+}
+
+// Convenience constructor: immediately installs the given SAMPLE-AES key item
+// (a NULL item leaves the decryptor in the invalid-key state).
+HlsSampleDecryptor::HlsSampleDecryptor(const sp<AMessage> &sampleAesKeyItem)
+    : mValidKeyInfo(false) {
+
+    signalNewSampleAesKey(sampleAesKeyItem);
+}
+
+// Installs a new AES-128 key and IV taken from |sampleAesKeyItem|'s "keyData"
+// and "initVec" buffers (each must be exactly AES_BLOCK_SIZE = 16 bytes).
+// On any validation or AES_set_decrypt_key() failure, mValidKeyInfo is
+// cleared so subsequent process*() calls leave the data untouched.
+void HlsSampleDecryptor::signalNewSampleAesKey(const sp<AMessage> &sampleAesKeyItem) {
+
+    if (sampleAesKeyItem == NULL) {
+        mValidKeyInfo = false;
+        ALOGW("signalNewSampleAesKey: sampleAesKeyItem is NULL");
+        return;
+    }
+
+    sp<ABuffer> keyDataBuffer, initVecBuffer;
+    sampleAesKeyItem->findBuffer("keyData", &keyDataBuffer);
+    sampleAesKeyItem->findBuffer("initVec", &initVecBuffer);
+
+    if (keyDataBuffer != NULL && keyDataBuffer->size() == AES_BLOCK_SIZE &&
+        initVecBuffer != NULL && initVecBuffer->size() == AES_BLOCK_SIZE) {
+
+        ALOGV("signalNewSampleAesKey: Key: %s IV: %s",
+              aesBlockToStr(keyDataBuffer->data()).c_str(),
+              aesBlockToStr(initVecBuffer->data()).c_str());
+
+        // Key is expanded into mAesKey; only the IV needs to be retained raw
+        // (it is re-copied for each sample, see process*()).
+        uint8_t KeyData[AES_BLOCK_SIZE];
+        memcpy(KeyData, keyDataBuffer->data(), AES_BLOCK_SIZE);
+        memcpy(mAESInitVec, initVecBuffer->data(), AES_BLOCK_SIZE);
+
+        mValidKeyInfo = (AES_set_decrypt_key(KeyData, 8*AES_BLOCK_SIZE/*128*/, &mAesKey) == 0);
+        if (!mValidKeyInfo) {
+            ALOGE("signalNewSampleAesKey: failed to set AES decryption key.");
+        }
+
+    } else {
+        // Media scanner might try extract/parse the TS files without knowing the key.
+        // Otherwise, shouldn't get here (unless an invalid playlist has swapped SAMPLE-AES with
+        // NONE method while still sample-encrypted stream is parsed).
+
+        mValidKeyInfo = false;
+        // NOTE(review): the ternaries mix int(-1) with size_t, so for a NULL
+        // buffer %zu logs SIZE_MAX instead of -1.
+        ALOGE("signalNewSampleAesKey Can't decrypt; keyDataBuffer: %p(%zu) initVecBuffer: %p(%zu)",
+                keyDataBuffer.get(), (keyDataBuffer.get() == NULL)? -1 : keyDataBuffer->size(),
+                initVecBuffer.get(), (initVecBuffer.get() == NULL)? -1 : initVecBuffer->size());
+    }
+}
+
+// Decrypts one H.264 NAL unit in place using the SAMPLE-AES 10% skip pattern:
+// after a 32-byte clear lead, one 16-byte AES-CBC block is decrypted, then up
+// to nine 16-byte blocks (144 bytes) are left clear, repeating to the end.
+// Start-code emulation-prevention bytes are stripped first, so the returned
+// size may be smaller than |nalSize|. Pass-through (returns nalSize) when no
+// valid key is set or the NAL is too short to have been encrypted.
+// NOTE(review): nalData[0] is read before any size validation — callers must
+// guarantee nalSize >= 1.
+size_t HlsSampleDecryptor::processNal(uint8_t *nalData, size_t nalSize) {
+
+    unsigned nalType = nalData[0] & 0x1f;
+    if (!mValidKeyInfo) {
+        ALOGV("processNal[%d]: (%p)/%zu Skipping due to invalid key", nalType, nalData, nalSize);
+        return nalSize;
+    }
+
+    bool isEncrypted = (nalSize > VIDEO_CLEAR_LEAD + AES_BLOCK_SIZE);
+    ALOGV("processNal[%d]: (%p)/%zu isEncrypted: %d", nalType, nalData, nalSize, isEncrypted);
+
+    if (isEncrypted) {
+        // Encrypted NALUs have extra start code emulation prevention that must be
+        // stripped out before we can decrypt it.
+        size_t newSize = unescapeStream(nalData, nalSize);
+
+        ALOGV("processNal:unescapeStream[%d]: %zu -> %zu", nalType, nalSize, newSize);
+        nalSize = newSize;
+
+        //Encrypted_nal_unit () {
+        //    nal_unit_type_byte                // 1 byte
+        //    unencrypted_leader                // 31 bytes
+        //    while (bytes_remaining() > 0) {
+        //        if (bytes_remaining() > 16) {
+        //            encrypted_block           // 16 bytes
+        //        }
+        //        unencrypted_block             // MIN(144, bytes_remaining()) bytes
+        //    }
+        //}
+
+        size_t offset = VIDEO_CLEAR_LEAD;
+        size_t remainingBytes = nalSize - VIDEO_CLEAR_LEAD;
+
+        // a copy of initVec as decryptBlock updates it
+        unsigned char AESInitVec[AES_BLOCK_SIZE];
+        memcpy(AESInitVec, mAESInitVec, AES_BLOCK_SIZE);
+
+        while (remainingBytes > 0) {
+            // encrypted_block: protected block uses 10% skip encryption
+            if (remainingBytes > AES_BLOCK_SIZE) {
+                uint8_t *encrypted = nalData + offset;
+                status_t ret = decryptBlock(encrypted, AES_BLOCK_SIZE, AESInitVec);
+                if (ret != OK) {
+                    // NOTE(review): an error here returns a partially
+                    // decrypted NAL to the caller.
+                    ALOGE("processNal failed with %d", ret);
+                    return nalSize; // revisit this
+                }
+
+                offset += AES_BLOCK_SIZE;
+                remainingBytes -= AES_BLOCK_SIZE;
+            }
+
+            // unencrypted_block
+            size_t clearBytes = std::min(remainingBytes, (size_t)(9 * AES_BLOCK_SIZE));
+
+            offset += clearBytes;
+            remainingBytes -= clearBytes;
+        } // while
+
+    } else { // isEncrypted == false
+        ALOGV("processNal[%d]: Unencrypted NALU (%p)/%zu", nalType, nalData, nalSize);
+    }
+
+    return nalSize;
+}
+
+// Decrypts one ADTS AAC frame in place: after the ADTS header (|adtsHdrSize|
+// bytes, included in |size|) and a 16-byte clear lead, all whole 16-byte
+// blocks are AES-CBC decrypted in a single call; a 0-15 byte clear trailer
+// remains. Pass-through when no valid key is set or the frame is too short.
+// NOTE(review): assumes size >= adtsHdrSize — TODO confirm callers guarantee
+// this, otherwise remainingBytes (size_t) underflows.
+void HlsSampleDecryptor::processAAC(size_t adtsHdrSize, uint8_t *data, size_t size) {
+
+    if (!mValidKeyInfo) {
+        ALOGV("processAAC: (%p)/%zu Skipping due to invalid key", data, size);
+        return;
+    }
+
+    // ADTS header is included in the size
+    size_t offset = adtsHdrSize;
+    size_t remainingBytes = size - adtsHdrSize;
+
+    bool isEncrypted = (remainingBytes >= AUDIO_CLEAR_LEAD + AES_BLOCK_SIZE);
+    ALOGV("processAAC: header: %zu data: %p(%zu) isEncrypted: %d",
+          adtsHdrSize, data, size, isEncrypted);
+
+    //Encrypted_AAC_Frame () {
+    //    ADTS_Header                        // 7 or 9 bytes
+    //    unencrypted_leader                 // 16 bytes
+    //    while (bytes_remaining() >= 16) {
+    //        encrypted_block                // 16 bytes
+    //    }
+    //    unencrypted_trailer                // 0-15 bytes
+    //}
+
+    // with lead bytes
+    if (remainingBytes >= AUDIO_CLEAR_LEAD) {
+        offset += AUDIO_CLEAR_LEAD;
+        remainingBytes -= AUDIO_CLEAR_LEAD;
+
+        // encrypted_block
+        if (remainingBytes >= AES_BLOCK_SIZE) {
+
+            size_t encryptedBytes = (remainingBytes / AES_BLOCK_SIZE) * AES_BLOCK_SIZE;
+            // a copy of initVec as decryptBlock updates it
+            unsigned char AESInitVec[AES_BLOCK_SIZE];
+            memcpy(AESInitVec, mAESInitVec, AES_BLOCK_SIZE);
+
+            // decrypting all blocks at once
+            uint8_t *encrypted = data + offset;
+            status_t ret = decryptBlock(encrypted, encryptedBytes, AESInitVec);
+            if (ret != OK) {
+                ALOGE("processAAC: decryptBlock failed with %d", ret);
+                return;
+            }
+
+            offset += encryptedBytes;
+            remainingBytes -= encryptedBytes;
+        } // encrypted
+
+        // unencrypted_trailer: intentionally left clear; the CHECK below is a
+        // sanity assertion only (trailer must be shorter than one AES block).
+        size_t clearBytes = remainingBytes;
+        if (clearBytes > 0) {
+            CHECK(clearBytes < AES_BLOCK_SIZE);
+        }
+
+    } else { // without lead bytes
+        ALOGV("processAAC: Unencrypted frame (without lead bytes) size %zu = %zu (hdr) + %zu (rem)",
+              size, adtsHdrSize, remainingBytes);
+    }
+
+}
+
+// Decrypts one AC3 frame in place. Same layout as processAAC() but with no
+// header bytes: a 16-byte clear lead, then all whole 16-byte blocks AES-CBC
+// decrypted at once, leaving a 0-15 byte clear trailer. Pass-through when no
+// valid key is set or the frame is too short.
+void HlsSampleDecryptor::processAC3(uint8_t *data, size_t size) {
+
+    if (!mValidKeyInfo) {
+        ALOGV("processAC3: (%p)/%zu Skipping due to invalid key", data, size);
+        return;
+    }
+
+    bool isEncrypted = (size >= AUDIO_CLEAR_LEAD + AES_BLOCK_SIZE);
+    ALOGV("processAC3 %p(%zu) isEncrypted: %d", data, size, isEncrypted);
+
+    //Encrypted_AC3_Frame () {
+    //    unencrypted_leader                 // 16 bytes
+    //    while (bytes_remaining() >= 16) {
+    //        encrypted_block                // 16 bytes
+    //    }
+    //    unencrypted_trailer                // 0-15 bytes
+    //}
+
+    if (size >= AUDIO_CLEAR_LEAD) {
+        // unencrypted_leader
+        size_t offset = AUDIO_CLEAR_LEAD;
+        size_t remainingBytes = size - AUDIO_CLEAR_LEAD;
+
+        if (remainingBytes >= AES_BLOCK_SIZE) {
+
+            size_t encryptedBytes = (remainingBytes / AES_BLOCK_SIZE) * AES_BLOCK_SIZE;
+
+            // encrypted_block: copy initVec as decryptBlock updates it
+            unsigned char AESInitVec[AES_BLOCK_SIZE];
+            memcpy(AESInitVec, mAESInitVec, AES_BLOCK_SIZE);
+
+            // decrypting all blocks at once
+            uint8_t *encrypted = data + offset;
+            status_t ret = decryptBlock(encrypted, encryptedBytes, AESInitVec);
+            if (ret != OK) {
+                ALOGE("processAC3: decryptBlock failed with %d", ret);
+                return;
+            }
+
+            offset += encryptedBytes;
+            remainingBytes -= encryptedBytes;
+        } // encrypted
+
+        // unencrypted_trailer: left clear; CHECK is a sanity assertion only.
+        size_t clearBytes = remainingBytes;
+        if (clearBytes > 0) {
+            CHECK(clearBytes < AES_BLOCK_SIZE);
+        }
+
+    } else {
+        ALOGV("processAC3: Unencrypted frame (without lead bytes) size %zu", size);
+    }
+}
+
+// Unescapes data replacing occurrences of [0, 0, 3] with [0, 0] and returns the new size
+// (i.e. strips H.264 start-code emulation-prevention bytes in place).
+// Two passes: first collect all escape positions, then compact the buffer by
+// memmove-ing the spans between escapes and dropping each 0x03 byte.
+size_t HlsSampleDecryptor::unescapeStream(uint8_t *data, size_t limit) const {
+    Vector<size_t> scratchEscapePositions;
+    size_t position = 0;
+
+    // Pass 1: record the index of every 00 00 03 sequence.
+    while (position < limit) {
+        position = findNextUnescapeIndex(data, position, limit);
+        if (position < limit) {
+            scratchEscapePositions.add(position);
+            position += 3;
+        }
+    }
+
+    size_t scratchEscapeCount = scratchEscapePositions.size();
+    size_t escapedPosition = 0; // The position being read from.
+    size_t unescapedPosition = 0; // The position being written to.
+    // Pass 2: shift each clear span left and rewrite 00 00 (dropping the 03).
+    for (size_t i = 0; i < scratchEscapeCount; i++) {
+        size_t nextEscapePosition = scratchEscapePositions[i];
+        //TODO: add 2 and get rid of the later = 0 assignments
+        size_t copyLength = nextEscapePosition - escapedPosition;
+        memmove(data+unescapedPosition, data+escapedPosition, copyLength);
+        unescapedPosition += copyLength;
+        data[unescapedPosition++] = 0;
+        data[unescapedPosition++] = 0;
+        escapedPosition += copyLength + 3;
+    }
+
+    // Tail after the last escape sequence.
+    size_t unescapedLength = limit - scratchEscapeCount;
+    size_t remainingLength = unescapedLength - unescapedPosition;
+    memmove(data+unescapedPosition, data+escapedPosition, remainingLength);
+
+    return unescapedLength;
+}
+
+// Returns the index of the next 00 00 03 sequence in [offset, limit), or
+// |limit| if none is found.
+// NOTE(review): `limit - 2` wraps around (size_t) when limit < 2; the only
+// caller (unescapeStream via processNal) currently passes limits larger than
+// VIDEO_CLEAR_LEAD + AES_BLOCK_SIZE, so this is unreachable in practice.
+size_t HlsSampleDecryptor::findNextUnescapeIndex(uint8_t *data, size_t offset, size_t limit) const {
+    for (size_t i = offset; i < limit - 2; i++) {
+        //TODO: speed
+        if (data[i] == 0x00 && data[i + 1] == 0x00 && data[i + 2] == 0x03) {
+            return i;
+        }
+    }
+    return limit;
+}
+
+// AES-128-CBC decrypts |size| bytes of |buffer| in place using mAesKey.
+// |size| must be a multiple of AES_BLOCK_SIZE (0 is a no-op returning OK).
+// AES_cbc_encrypt advances |AESInitVec| to the last processed ciphertext
+// block, which is why callers pass a scratch copy of the IV.
+status_t HlsSampleDecryptor::decryptBlock(uint8_t *buffer, size_t size,
+        uint8_t AESInitVec[AES_BLOCK_SIZE]) {
+    if (size == 0) {
+        return OK;
+    }
+
+    if ((size % AES_BLOCK_SIZE) != 0) {
+        ALOGE("decryptBlock: size (%zu) not a multiple of block size", size);
+        return ERROR_MALFORMED;
+    }
+
+    ALOGV("decryptBlock: %p (%zu)", buffer, size);
+
+    AES_cbc_encrypt(buffer, buffer, size, &mAesKey, AESInitVec, AES_DECRYPT);
+
+    return OK;
+}
+
+// Formats a 16-byte AES block as a "0x..." hex string for logging; returns
+// "null" for a NULL pointer. The array parameter decays to a pointer, so the
+// caller is responsible for providing at least AES_BLOCK_SIZE bytes.
+AString HlsSampleDecryptor::aesBlockToStr(uint8_t block[AES_BLOCK_SIZE]) {
+    AString result;
+
+    if (block == NULL) {
+        result = AString("null");
+    } else {
+        result = AStringPrintf("0x%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X",
+            block[0], block[1], block[2], block[3], block[4], block[5], block[6], block[7],
+            block[8], block[9], block[10], block[11], block[12], block[13], block[14], block[15]);
+    }
+
+    return result;
+}
+
+
+} // namespace android
diff --git a/media/libstagefright/mpeg2ts/HlsSampleDecryptor.h b/media/libstagefright/mpeg2ts/HlsSampleDecryptor.h
new file mode 100644
index 0000000..2c76620
--- /dev/null
+++ b/media/libstagefright/mpeg2ts/HlsSampleDecryptor.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SAMPLE_AES_PROCESSOR_H_
+
+#define SAMPLE_AES_PROCESSOR_H_
+
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/AString.h>
+
+#include <openssl/aes.h>
+
+#include <utils/Errors.h>
+#include <utils/List.h>
+#include <utils/RefBase.h>
+#include <utils/Vector.h>
+
+namespace android {
+
+// In-place decryptor for HLS SAMPLE-AES (AES-128-CBC) encrypted elementary
+// streams. All process*() methods are pass-through until a valid key/IV pair
+// is installed via signalNewSampleAesKey().
+struct HlsSampleDecryptor : RefBase {
+
+    HlsSampleDecryptor();
+    explicit HlsSampleDecryptor(const sp<AMessage> &sampleAesKeyItem);
+
+    // Installs/replaces the AES-128 key and IV from the item's "keyData" and
+    // "initVec" buffers (16 bytes each); invalid input disables decryption.
+    void signalNewSampleAesKey(const sp<AMessage> &sampleAesKeyItem);
+
+    // In-place SAMPLE-AES decryption of one H.264 NAL unit / ADTS AAC frame /
+    // AC3 frame respectively. processNal returns the possibly reduced size
+    // after emulation-prevention bytes are stripped.
+    size_t processNal(uint8_t *nalData, size_t nalSize);
+    void processAAC(size_t adtsHdrSize, uint8_t *data, size_t size);
+    void processAC3(uint8_t *data, size_t size);
+
+    // Hex-formats a 16-byte AES block for logging ("null" for NULL).
+    static AString aesBlockToStr(uint8_t block[AES_BLOCK_SIZE]);
+
+private:
+    // Strips H.264 start-code emulation-prevention bytes in place; returns
+    // the new (smaller or equal) length.
+    size_t unescapeStream(uint8_t *data, size_t limit) const;
+    // Index of the next 00 00 03 sequence in [offset, limit), or limit.
+    size_t findNextUnescapeIndex(uint8_t *data, size_t offset, size_t limit) const;
+    // CBC-decrypts size bytes (multiple of the block size) in place;
+    // AESInitVec is advanced by the cipher.
+    status_t decryptBlock(uint8_t *buffer, size_t size, uint8_t AESInitVec[AES_BLOCK_SIZE]);
+
+    // Clear (unencrypted) lead bytes mandated by the SAMPLE-AES layout.
+    static const int VIDEO_CLEAR_LEAD = 32;
+    static const int AUDIO_CLEAR_LEAD = 16;
+
+    AES_KEY mAesKey;                      // expanded decryption key
+    uint8_t mAESInitVec[AES_BLOCK_SIZE];  // IV; copied per sample before use
+    bool mValidKeyInfo;                   // true once a key was set successfully
+
+    DISALLOW_EVIL_CONSTRUCTORS(HlsSampleDecryptor);
+};
+
+} // namespace android
+
+#endif // SAMPLE_AES_PROCESSOR_H_
diff --git a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
index fb5e079..c3f1274 100644
--- a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
+++ b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
@@ -26,6 +26,7 @@
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AUtils.h>
#include <media/stagefright/DataSource.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
@@ -40,6 +41,8 @@
namespace android {
static const size_t kTSPacketSize = 188;
+static const int kMaxDurationReadSize = 250000LL;
+static const int kMaxDurationRetry = 6;
struct MPEG2TSSource : public MediaSource {
MPEG2TSSource(
@@ -145,12 +148,46 @@
return meta;
}
+//static
+// Returns true if |format| carries one of the scrambled (CAS-protected)
+// MIME types, i.e. the track needs a descrambler before it can be exposed.
+bool MPEG2TSExtractor::isScrambledFormat(const sp<MetaData> &format) {
+    const char *mime;
+    return format->findCString(kKeyMIMEType, &mime)
+            && (!strcasecmp(MEDIA_MIMETYPE_VIDEO_SCRAMBLED, mime)
+                || !strcasecmp(MEDIA_MIMETYPE_AUDIO_SCRAMBLED, mime));
+}
+
+// Hands the MediaCas session to the TS parser; on success the tracks gain
+// descramblers and parsing is resumed via init() (which previously stopped
+// with ERROR_DRM_DECRYPT_UNIT_NOT_INITIALIZED on scrambled content).
+status_t MPEG2TSExtractor::setMediaCas(const sp<ICas> &cas) {
+    ALOGD("setMediaCas: %p", cas.get());
+
+    status_t err = mParser->setMediaCas(cas);
+    if (err == OK) {
+        ALOGI("All tracks now have descramblers");
+        init();
+    }
+    return err;
+}
+
+// Appends |impl| to mSourceImpls unless it is already present. The linear
+// duplicate scan is fine here: there are at most a handful of sources, and
+// init() may be re-entered after setMediaCas() with the same tracks.
+void MPEG2TSExtractor::addSource(const sp<AnotherPacketSource> &impl) {
+    bool found = false;
+    for (size_t i = 0; i < mSourceImpls.size(); i++) {
+        if (mSourceImpls[i] == impl) {
+            found = true;
+            break;
+        }
+    }
+    if (!found) {
+        mSourceImpls.push(impl);
+    }
+}
+
void MPEG2TSExtractor::init() {
bool haveAudio = false;
bool haveVideo = false;
int64_t startTime = ALooper::GetNowUs();
- while (feedMore(true /* isInit */) == OK) {
+ status_t err;
+ while ((err = feedMore(true /* isInit */)) == OK
+ || err == ERROR_DRM_DECRYPT_UNIT_NOT_INITIALIZED) {
if (haveAudio && haveVideo) {
addSyncPoint_l(mLastSyncEvent);
mLastSyncEvent.reset();
@@ -162,10 +199,15 @@
ATSParser::VIDEO).get();
if (impl != NULL) {
- haveVideo = true;
- mSourceImpls.push(impl);
- mSyncPoints.push();
- mSeekSyncPoints = &mSyncPoints.editTop();
+ sp<MetaData> format = impl->getFormat();
+ if (format != NULL) {
+ haveVideo = true;
+ addSource(impl);
+ if (!isScrambledFormat(format)) {
+ mSyncPoints.push();
+ mSeekSyncPoints = &mSyncPoints.editTop();
+ }
+ }
}
}
@@ -175,11 +217,16 @@
ATSParser::AUDIO).get();
if (impl != NULL) {
- haveAudio = true;
- mSourceImpls.push(impl);
- mSyncPoints.push();
- if (!haveVideo) {
- mSeekSyncPoints = &mSyncPoints.editTop();
+ sp<MetaData> format = impl->getFormat();
+ if (format != NULL) {
+ haveAudio = true;
+ addSource(impl);
+ if (!isScrambledFormat(format)) {
+ mSyncPoints.push();
+ if (!haveVideo) {
+ mSeekSyncPoints = &mSyncPoints.editTop();
+ }
+ }
}
}
}
@@ -187,6 +234,16 @@
addSyncPoint_l(mLastSyncEvent);
mLastSyncEvent.reset();
+ // ERROR_DRM_DECRYPT_UNIT_NOT_INITIALIZED is returned when the mpeg2ts
+ // is scrambled but we don't have a MediaCas object set. The extraction
+ // will only continue when setMediaCas() is called successfully.
+ if (err == ERROR_DRM_DECRYPT_UNIT_NOT_INITIALIZED) {
+ ALOGI("stopped parsing scrambled content, "
+ "haveAudio=%d, haveVideo=%d, elaspedTime=%" PRId64,
+ haveAudio, haveVideo, ALooper::GetNowUs() - startTime);
+ return;
+ }
+
// Wait only for 2 seconds to detect audio/video streams.
if (ALooper::GetNowUs() - startTime > 2000000ll) {
break;
@@ -212,23 +269,22 @@
- mSeekSyncPoints->keyAt(0);
off64_t diffOffset = mSeekSyncPoints->valueAt(prevSyncSize - 1)
- mSeekSyncPoints->valueAt(0);
- durationUs = size * diffUs / diffOffset;
- durations.push_back(durationUs);
+ int64_t currentDurationUs = size * diffUs / diffOffset;
+ durations.push_back(currentDurationUs);
if (durations.size() > 5) {
durations.erase(durations.begin());
int64_t min = *durations.begin();
int64_t max = *durations.begin();
- for (List<int64_t>::iterator i = durations.begin();
- i != durations.end();
- ++i) {
- if (min > *i) {
- min = *i;
+ for (auto duration : durations) {
+ if (min > duration) {
+ min = duration;
}
- if (max < *i) {
- max = *i;
+ if (max < duration) {
+ max = duration;
}
}
if (max - min < 500 * 1000) {
+ durationUs = currentDurationUs;
break;
}
}
@@ -244,6 +300,8 @@
const sp<MetaData> meta = impl->getFormat();
meta->setInt64(kKeyDuration, durationUs);
impl->setFormat(meta);
+ } else {
+ estimateDurationsFromTimesUsAtEnd();
}
}
@@ -302,6 +360,106 @@
}
}
+// Estimates per-track durations by parsing TS packets near the end of a
+// local file with a throw-away ATSParser and combining the last PTS seen
+// there with the first PTS anchor of the main parser. The read window starts
+// at kMaxDurationReadSize bytes from EOF and doubles each retry (up to
+// kMaxDurationRetry) until every A/V track has a duration or the window
+// reaches the start of the file.
+// Returns OK when all present tracks got a duration, ERROR_UNSUPPORTED for
+// non-local sources or when estimation fails, or an I/O/parse error.
+status_t MPEG2TSExtractor::estimateDurationsFromTimesUsAtEnd()  {
+    // Seeking backwards is only reasonable on a local file.
+    if (!(mDataSource->flags() & DataSource::kIsLocalFileSource)) {
+        return ERROR_UNSUPPORTED;
+    }
+
+    off64_t size = 0;
+    status_t err = mDataSource->getSize(&size);
+    if (err != OK) {
+        return err;
+    }
+
+    uint8_t packet[kTSPacketSize];
+    const off64_t zero = 0;
+    off64_t offset = max(zero, size - kMaxDurationReadSize);
+    // Zero-byte read: appears to be a pure readability probe of the source at
+    // |offset| — TODO confirm this is the intended readAt(.., 0) semantics.
+    if (mDataSource->readAt(offset, &packet, 0) < 0) {
+        return ERROR_IO;
+    }
+
+    int retry = 0;
+    bool allDurationsFound = false;
+    int64_t timeAnchorUs = mParser->getFirstPTSTimeUs();
+    do {
+        int bytesRead = 0;
+        // Fresh parser per attempt so absolute timestamps from the tail do not
+        // disturb the main parser's state.
+        sp<ATSParser> parser = new ATSParser(ATSParser::TS_TIMESTAMPS_ARE_ABSOLUTE);
+        ATSParser::SyncEvent ev(0);
+        // Window doubles each retry; align down to a TS packet boundary.
+        offset = max(zero, size - (kMaxDurationReadSize << retry));
+        offset = (offset / kTSPacketSize) * kTSPacketSize;
+        for (;;) {
+            // Only the portion of the window not covered by the previous
+            // (half-sized) attempt needs to be parsed.
+            if (bytesRead >= kMaxDurationReadSize << max(0, retry - 1)) {
+                break;
+            }
+
+            ssize_t n = mDataSource->readAt(offset, packet, kTSPacketSize);
+            if (n < 0) {
+                return n;
+            } else if (n < (ssize_t)kTSPacketSize) {
+                break;
+            }
+
+            offset += kTSPacketSize;
+            bytesRead += kTSPacketSize;
+            err = parser->feedTSPacket(packet, kTSPacketSize, &ev);
+            if (err != OK) {
+                return err;
+            }
+
+            if (ev.hasReturnedData()) {
+                int64_t durationUs = ev.getTimeUs();
+                ATSParser::SourceType type = ev.getType();
+                ev.reset();
+
+                int64_t firstTimeUs;
+                sp<AnotherPacketSource> src =
+                    (AnotherPacketSource *)mParser->getSource(type).get();
+                if (src == NULL || src->nextBufferTime(&firstTimeUs) != OK) {
+                    continue;
+                }
+                // duration = (last PTS at EOF + buffered span) - anchor - first PTS
+                durationUs += src->getEstimatedBufferDurationUs();
+                durationUs -= timeAnchorUs;
+                durationUs -= firstTimeUs;
+                if (durationUs > 0) {
+                    int64_t origDurationUs, lastDurationUs;
+                    const sp<MetaData> meta = src->getFormat();
+                    const uint32_t kKeyLastDuration = 'ldur';
+                    // Require two consecutive duration calculations to be within 1 sec before
+                    // updating; use MetaData to store previous duration estimate in per-stream
+                    // context.
+                    // NOTE(review): `abs` on int64_t operands may resolve to
+                    // the int overload depending on included headers —
+                    // consider std::abs/llabs; verify.
+                    if (!meta->findInt64(kKeyDuration, &origDurationUs)
+                            || !meta->findInt64(kKeyLastDuration, &lastDurationUs)
+                            || (origDurationUs < durationUs
+                            && abs(durationUs - lastDurationUs) < 60000000)) {
+                        meta->setInt64(kKeyDuration, durationUs);
+                    }
+                    meta->setInt64(kKeyLastDuration, durationUs);
+                }
+            }
+        }
+
+        // Check whether every present A/V track now has a duration.
+        if (!allDurationsFound) {
+            allDurationsFound = true;
+            for (auto t: {ATSParser::VIDEO, ATSParser::AUDIO}) {
+                sp<AnotherPacketSource> src = (AnotherPacketSource *)mParser->getSource(t).get();
+                if (src == NULL) {
+                    continue;
+                }
+                int64_t durationUs;
+                const sp<MetaData> meta = src->getFormat();
+                if (!meta->findInt64(kKeyDuration, &durationUs)) {
+                    allDurationsFound = false;
+                    break;
+                }
+            }
+        }
+
+        ++retry;
+    } while(!allDurationsFound && offset > 0 && retry <= kMaxDurationRetry);
+
+    return allDurationsFound? OK : ERROR_UNSUPPORTED;
+}
+
uint32_t MPEG2TSExtractor::flags() const {
return CAN_PAUSE | CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD;
}
diff --git a/media/libstagefright/omx/1.0/Conversion.h b/media/libstagefright/omx/1.0/Conversion.h
new file mode 100644
index 0000000..fd91574
--- /dev/null
+++ b/media/libstagefright/omx/1.0/Conversion.h
@@ -0,0 +1,2179 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0__CONVERSION_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0__CONVERSION_H
+
+#include <vector>
+#include <list>
+
+#include <unistd.h>
+
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+#include <hidlmemory/mapping.h>
+
+#include <binder/Binder.h>
+#include <binder/Status.h>
+#include <ui/FenceTime.h>
+#include <media/OMXFenceParcelable.h>
+#include <cutils/native_handle.h>
+#include <gui/IGraphicBufferProducer.h>
+
+#include <media/OMXBuffer.h>
+#include <VideoAPI.h>
+
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <android/hardware/graphics/bufferqueue/1.0/IProducerListener.h>
+#include <android/hardware/media/omx/1.0/types.h>
+#include <android/hardware/media/omx/1.0/IOmx.h>
+#include <android/hardware/media/omx/1.0/IOmxNode.h>
+#include <android/hardware/media/omx/1.0/IOmxBufferSource.h>
+#include <android/hardware/media/omx/1.0/IGraphicBufferSource.h>
+#include <android/hardware/media/omx/1.0/IOmxObserver.h>
+
+#include <android/IGraphicBufferSource.h>
+#include <android/IOMXBufferSource.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::hidl_handle;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+using ::android::String8;
+using ::android::OMXFenceParcelable;
+
+using ::android::hardware::media::omx::V1_0::Message;
+using ::android::omx_message;
+
+using ::android::hardware::media::omx::V1_0::ColorAspects;
+using ::android::hardware::media::V1_0::Rect;
+using ::android::hardware::media::V1_0::Region;
+
+using ::android::hardware::graphics::common::V1_0::Dataspace;
+
+using ::android::hardware::graphics::common::V1_0::PixelFormat;
+
+using ::android::OMXBuffer;
+
+using ::android::hardware::media::V1_0::AnwBuffer;
+using ::android::GraphicBuffer;
+
+using ::android::hardware::media::omx::V1_0::IOmx;
+using ::android::IOMX;
+
+using ::android::hardware::media::omx::V1_0::IOmxNode;
+using ::android::IOMXNode;
+
+using ::android::hardware::media::omx::V1_0::IOmxObserver;
+using ::android::IOMXObserver;
+
+using ::android::hardware::media::omx::V1_0::IOmxBufferSource;
+using ::android::IOMXBufferSource;
+
+typedef ::android::hardware::graphics::bufferqueue::V1_0::IGraphicBufferProducer
+ HGraphicBufferProducer;
+typedef ::android::IGraphicBufferProducer
+ BGraphicBufferProducer;
+
+// native_handle_t helper functions.
+
+/**
+ * \brief Take an fd and create a native handle containing only the given fd.
+ * The created handle will need to be deleted manually with
+ * `native_handle_delete()`.
+ *
+ * \param[in] fd The source file descriptor (of type `int`).
+ * \return The create `native_handle_t*` that contains the given \p fd. If the
+ * supplied \p fd is negative, the created native handle will contain no file
+ * descriptors.
+ *
+ * If the native handle cannot be created, the return value will be
+ * `nullptr`.
+ *
+ * This function does not duplicate the file descriptor.
+ */
+inline native_handle_t* native_handle_create_from_fd(int fd) {
+    // A negative fd still yields a valid (empty) handle, per the contract.
+    if (fd < 0) {
+        return native_handle_create(0, 0);
+    }
+    native_handle_t* nh = native_handle_create(1, 0);
+    if (nh == nullptr) {
+        return nullptr;
+    }
+    // The fd is stored without duplication; ownership stays with the caller.
+    nh->data[0] = fd;
+    return nh;
+}
+
+/**
+ * \brief Extract a file descriptor from a native handle.
+ *
+ * \param[in] nh The source `native_handle_t*`.
+ * \param[in] index The index of the file descriptor in \p nh to read from. This
+ * input has the default value of `0`.
+ * \return The `index`-th file descriptor in \p nh. If \p nh does not have
+ * enough file descriptors, the returned value will be `-1`.
+ *
+ * This function does not duplicate the file descriptor.
+ */
+inline int native_handle_read_fd(native_handle_t const* nh, int index = 0) {
+    // Defensive: null handle, empty handle, or out-of-range index all map to -1.
+    return ((nh == nullptr) || (nh->numFds == 0) ||
+            (nh->numFds <= index) || (index < 0)) ?
+            -1 : nh->data[index];
+}
+
+/**
+ * Conversion functions
+ * ====================
+ *
+ * There are two main directions of conversion:
+ * - `inTargetType(...)`: Create a wrapper whose lifetime depends on the
+ * input. The wrapper has type `TargetType`.
+ * - `toTargetType(...)`: Create a standalone object of type `TargetType` that
+ * corresponds to the input. The lifetime of the output does not depend on the
+ * lifetime of the input.
+ * - `wrapIn(TargetType*, ...)`: Same as `inTargetType()`, but for `TargetType`
+ * that cannot be copied and/or moved efficiently, or when there are multiple
+ * output arguments.
+ * - `convertTo(TargetType*, ...)`: Same as `toTargetType()`, but for
+ * `TargetType` that cannot be copied and/or moved efficiently, or when there
+ * are multiple output arguments.
+ *
+ * `wrapIn()` and `convertTo()` functions will take output arguments before
+ * input arguments. Some of these functions might return a value to indicate
+ * success or error.
+ *
+ * In converting or wrapping something as a Treble type that contains a
+ * `hidl_handle`, `native_handle_t*` will need to be created and returned as
+ * an additional output argument, hence only `wrapIn()` or `convertTo()` would
+ * be available. The caller must call `native_handle_delete()` to deallocate the
+ * returned native handle when it is no longer needed.
+ *
+ * For types that contain file descriptors, `inTargetType()` and `wrapAs()` do
+ * not perform duplication of file descriptors, while `toTargetType()` and
+ * `convertTo()` do.
+ */
+
+/**
+ * \brief Convert `Return<void>` to `binder::Status`.
+ *
+ * \param[in] t The source `Return<void>`.
+ * \return The corresponding `binder::Status`.
+ */
+// convert: Return<void> -> ::android::binder::Status
+inline ::android::binder::Status toBinderStatus(
+        Return<void> const& t) {
+    // Any HIDL transport failure collapses to UNKNOWN_ERROR; the transport
+    // description string is preserved as the exception message.
+    return ::android::binder::Status::fromExceptionCode(
+            t.isOk() ? OK : UNKNOWN_ERROR,
+            t.description().c_str());
+}
+
+/**
+ * \brief Convert `Return<Status>` to `status_t`. This is for legacy binder
+ * calls.
+ *
+ * \param[in] t The source `Return<Status>`.
+ * \return The corresponding `status_t`.
+ *
+ * This function first check if \p t has a transport error. If it does, then the
+ * return value is the transport error code. Otherwise, the return value is
+ * converted from `Status` contained inside \p t.
+ *
+ * Note:
+ * - This `Status` is omx-specific. It is defined in `types.hal`.
+ * - The name of this function is not `convert`.
+ */
+// convert: Status -> status_t
+inline status_t toStatusT(Return<Status> const& t) {
+    // Transport errors win over the carried Status value.
+    return t.isOk() ? static_cast<status_t>(static_cast<Status>(t)) : UNKNOWN_ERROR;
+}
+
+/**
+ * \brief Convert `Return<void>` to `status_t`. This is for legacy binder calls.
+ *
+ * \param[in] t The source `Return<void>`.
+ * \return The corresponding `status_t`.
+ */
+// convert: Return<void> -> status_t
+inline status_t toStatusT(Return<void> const& t) {
+    return t.isOk() ? OK : UNKNOWN_ERROR;
+}
+
+/**
+ * \brief Convert `Status` to `status_t`. This is for legacy binder calls.
+ *
+ * \param[in] t The source `Status`.
+ * \return the corresponding `status_t`.
+ */
+// convert: Status -> status_t
+inline status_t toStatusT(Status const& t) {
+    return static_cast<status_t>(t);
+}
+
+/**
+ * \brief Convert `status_t` to `Status`.
+ *
+ * \param[in] l The source `status_t`.
+ * \return The corresponding `Status`.
+ */
+// convert: status_t -> Status
+inline Status toStatus(status_t l) {
+    // Pure cast: no validation that l maps to a defined Status enumerator.
+    return static_cast<Status>(l);
+}
+
+/**
+ * \brief Wrap `native_handle_t*` in `hidl_handle`.
+ *
+ * \param[in] nh The source `native_handle_t*`.
+ * \return The `hidl_handle` that points to \p nh.
+ */
+// wrap: native_handle_t* -> hidl_handle
+inline hidl_handle inHidlHandle(native_handle_t const* nh) {
+ return hidl_handle(nh);
+}
+
+/**
+ * \brief Wrap an `omx_message` and construct the corresponding `Message`.
+ *
+ * \param[out] t The wrapper of type `Message`.
+ * \param[out] nh The native_handle_t referred to by `t->fence`.
+ * \param[in] l The source `omx_message`.
+ * \return `true` if the wrapping is successful; `false` otherwise.
+ *
+ * Upon success, \p nh will be created to hold the file descriptor stored in
+ * `l.fenceFd`, and `t->fence` will point to \p nh. \p nh will need to be
+ * destroyed manually by `native_handle_delete()` when \p t is no longer needed.
+ *
+ * Upon failure, \p nh will not be created and will not need to be deleted. \p t
+ * will be invalid.
+ */
+// wrap, omx_message -> Message, native_handle_t*
+// wrap, omx_message -> Message, native_handle_t*
+inline bool wrapAs(Message* t, native_handle_t** nh, omx_message const& l) {
+    // The fence fd is wrapped (not duplicated) into a fresh native handle.
+    *nh = native_handle_create_from_fd(l.fenceFd);
+    if (!*nh) {
+        return false;
+    }
+    t->fence = *nh;
+    switch (l.type) {
+        case omx_message::EVENT:
+            t->type = Message::Type::EVENT;
+            t->data.eventData.event = uint32_t(l.u.event_data.event);
+            t->data.eventData.data1 = l.u.event_data.data1;
+            t->data.eventData.data2 = l.u.event_data.data2;
+            t->data.eventData.data3 = l.u.event_data.data3;
+            t->data.eventData.data4 = l.u.event_data.data4;
+            break;
+        case omx_message::EMPTY_BUFFER_DONE:
+            t->type = Message::Type::EMPTY_BUFFER_DONE;
+            t->data.bufferData.buffer = l.u.buffer_data.buffer;
+            break;
+        case omx_message::FILL_BUFFER_DONE:
+            t->type = Message::Type::FILL_BUFFER_DONE;
+            t->data.extendedBufferData.buffer = l.u.extended_buffer_data.buffer;
+            t->data.extendedBufferData.rangeOffset =
+                    l.u.extended_buffer_data.range_offset;
+            t->data.extendedBufferData.rangeLength =
+                    l.u.extended_buffer_data.range_length;
+            t->data.extendedBufferData.flags = l.u.extended_buffer_data.flags;
+            t->data.extendedBufferData.timestampUs =
+                    l.u.extended_buffer_data.timestamp;
+            break;
+        case omx_message::FRAME_RENDERED:
+            t->type = Message::Type::FRAME_RENDERED;
+            t->data.renderData.timestampUs = l.u.render_data.timestamp;
+            t->data.renderData.systemTimeNs = l.u.render_data.nanoTime;
+            break;
+        default:
+            // Unknown message type: destroy the handle we created; per the
+            // documented contract *t is invalid after a false return.
+            native_handle_delete(*nh);
+            return false;
+    }
+    return true;
+}
+
+/**
+ * \brief Wrap a `Message` inside an `omx_message`.
+ *
+ * \param[out] l The wrapper of type `omx_message`.
+ * \param[in] t The source `Message`.
+ * \return `true` if the wrapping is successful; `false` otherwise.
+ */
+// wrap: Message -> omx_message
+// wrap: Message -> omx_message
+inline bool wrapAs(omx_message* l, Message const& t) {
+    // Reads the fence fd out of the handle without duplicating it; use
+    // convertTo() below when the output must outlive the input.
+    l->fenceFd = native_handle_read_fd(t.fence);
+    switch (t.type) {
+        case Message::Type::EVENT:
+            l->type = omx_message::EVENT;
+            l->u.event_data.event = OMX_EVENTTYPE(t.data.eventData.event);
+            l->u.event_data.data1 = t.data.eventData.data1;
+            l->u.event_data.data2 = t.data.eventData.data2;
+            l->u.event_data.data3 = t.data.eventData.data3;
+            l->u.event_data.data4 = t.data.eventData.data4;
+            break;
+        case Message::Type::EMPTY_BUFFER_DONE:
+            l->type = omx_message::EMPTY_BUFFER_DONE;
+            l->u.buffer_data.buffer = t.data.bufferData.buffer;
+            break;
+        case Message::Type::FILL_BUFFER_DONE:
+            l->type = omx_message::FILL_BUFFER_DONE;
+            l->u.extended_buffer_data.buffer = t.data.extendedBufferData.buffer;
+            l->u.extended_buffer_data.range_offset =
+                    t.data.extendedBufferData.rangeOffset;
+            l->u.extended_buffer_data.range_length =
+                    t.data.extendedBufferData.rangeLength;
+            l->u.extended_buffer_data.flags = t.data.extendedBufferData.flags;
+            l->u.extended_buffer_data.timestamp =
+                    t.data.extendedBufferData.timestampUs;
+            break;
+        case Message::Type::FRAME_RENDERED:
+            l->type = omx_message::FRAME_RENDERED;
+            l->u.render_data.timestamp = t.data.renderData.timestampUs;
+            l->u.render_data.nanoTime = t.data.renderData.systemTimeNs;
+            break;
+        default:
+            // Unknown type: l->fenceFd was already assigned above, so *l is
+            // partially written; callers must treat it as invalid.
+            return false;
+    }
+    return true;
+}
+
+/**
+ * \brief Similar to `wrapAs(omx_message*, Message const&)`, but the output
+ * will have an extended lifetime.
+ *
+ * \param[out] l The output `omx_message`.
+ * \param[in] t The source `Message`.
+ * \return `true` if the conversion is successful; `false` otherwise.
+ *
+ * This function calls `wrapAs()`, then attempts to duplicate the file
+ * descriptor for the fence if it is not `-1`. If duplication fails, `false`
+ * will be returned.
+ */
+// convert: Message -> omx_message
+inline bool convertTo(omx_message* l, Message const& t) {
+    if (!wrapAs(l, t)) {
+        return false;
+    }
+    if (l->fenceFd == -1) {
+        // No fence to own; nothing left to do.
+        return true;
+    }
+    // Own a duplicate so the converted message can outlive the source.
+    l->fenceFd = dup(l->fenceFd);
+    return l->fenceFd != -1;
+}
+
+/**
+ * \brief Wrap an `OMXFenceParcelable` inside a `hidl_handle`.
+ *
+ * \param[out] t The wrapper of type `hidl_handle`.
+ * \param[out] nh The native handle created to hold the file descriptor inside
+ * \p l.
+ * \param[in] l The source `OMXFenceParcelable`, which essentially contains one
+ * file descriptor.
+ * \return `true` if \p t and \p nh are successfully created to wrap around \p
+ * l; `false` otherwise.
+ *
+ * On success, \p nh needs to be deleted by the caller with
+ * `native_handle_delete()` after \p t and \p nh are no longer needed.
+ *
+ * On failure, \p nh will not need to be deleted, and \p t will hold an invalid
+ * value.
+ */
+// wrap: OMXFenceParcelable -> hidl_handle, native_handle_t*
+inline bool wrapAs(hidl_handle* t, native_handle_t** nh,
+        OMXFenceParcelable const& l) {
+    *nh = native_handle_create_from_fd(l.get());
+    if (!*nh) {
+        return false;
+    }
+    // *t aliases *nh without taking ownership, so *nh must outlive *t.
+    *t = *nh;
+    return true;
+}
+
+/**
+ * \brief Wrap a `hidl_handle` inside an `OMXFenceParcelable`.
+ *
+ * \param[out] l The wrapper of type `OMXFenceParcelable`.
+ * \param[in] t The source `hidl_handle`.
+ *
+ * The file descriptor is copied by value, not duplicated; \p l borrows it
+ * from \p t.
+ */
+// wrap: hidl_handle -> OMXFenceParcelable
+inline void wrapAs(OMXFenceParcelable* l, hidl_handle const& t) {
+    l->mFenceFd = native_handle_read_fd(t);
+}
+
+/**
+ * \brief Convert a `hidl_handle` to `OMXFenceParcelable`. If the handle
+ * contains file descriptors, the first one is duplicated and stored in the
+ * output `OMXFenceParcelable`.
+ *
+ * \param[out] l The output `OMXFenceParcelable`.
+ * \param[in] t The input `hidl_handle`.
+ * \return `false` if \p t contains a valid file descriptor but duplication
+ * fails; `true` otherwise.
+ */
+// convert: hidl_handle -> OMXFenceParcelable
+inline bool convertTo(OMXFenceParcelable* l, hidl_handle const& t) {
+    int const sourceFd = native_handle_read_fd(t);
+    if (sourceFd == -1) {
+        // No fence fd to transfer; record the "no fence" sentinel.
+        l->mFenceFd = -1;
+        return true;
+    }
+    int const ownedFd = dup(sourceFd);
+    if (ownedFd == -1) {
+        return false;
+    }
+    l->mFenceFd = ownedFd;
+    return true;
+}
+
+/**
+ * \brief Convert `::android::ColorAspects` to `ColorAspects`.
+ *
+ * \param[in] l The source `::android::ColorAspects`.
+ * \return The corresponding `ColorAspects`.
+ *
+ * Each field is converted with a plain `static_cast`; no value validation is
+ * performed.
+ */
+// convert: ::android::ColorAspects -> ColorAspects
+inline ColorAspects toHardwareColorAspects(::android::ColorAspects const& l) {
+    return ColorAspects{
+            static_cast<ColorAspects::Range>(l.mRange),
+            static_cast<ColorAspects::Primaries>(l.mPrimaries),
+            static_cast<ColorAspects::Transfer>(l.mTransfer),
+            static_cast<ColorAspects::MatrixCoeffs>(l.mMatrixCoeffs)};
+}
+
+/**
+ * \brief Convert `int32_t` to `ColorAspects`.
+ *
+ * \param[in] l The source `int32_t`.
+ * \return The corresponding `ColorAspects`.
+ *
+ * Unpacks the compact layout used by `toCompactColorAspects()`: range in bits
+ * 31-24, primaries in 23-16, matrixCoeffs in 15-8, transfer in 7-0.
+ */
+// convert: int32_t -> ColorAspects
+inline ColorAspects toHardwareColorAspects(int32_t l) {
+    return ColorAspects{
+            static_cast<ColorAspects::Range>((l >> 24) & 0xFF),
+            static_cast<ColorAspects::Primaries>((l >> 16) & 0xFF),
+            static_cast<ColorAspects::Transfer>(l & 0xFF),
+            static_cast<ColorAspects::MatrixCoeffs>((l >> 8) & 0xFF)};
+}
+
+/**
+ * \brief Convert `ColorAspects` to a compact `int32_t` representation.
+ *
+ * \param[in] t The source `ColorAspects`.
+ * \return The corresponding packed `int32_t`.
+ *
+ * Packing: range in bits 31-24, primaries in 23-16, matrixCoeffs in 15-8,
+ * transfer in 7-0 — the inverse of `toHardwareColorAspects(int32_t)`.
+ */
+// convert: ColorAspects -> int32_t
+inline int32_t toCompactColorAspects(ColorAspects const& t) {
+    return static_cast<int32_t>(
+            (static_cast<uint32_t>(t.range) << 24) |
+            (static_cast<uint32_t>(t.primaries) << 16) |
+            (static_cast<uint32_t>(t.transfer)) |
+            (static_cast<uint32_t>(t.matrixCoeffs) << 8));
+}
+
+/**
+ * \brief Convert `int32_t` to `Dataspace`.
+ *
+ * \param[in] l The source `int32_t`.
+ * \result The corresponding `Dataspace`.
+ *
+ * Plain cast; NOTE(review): assumes `Dataspace` enumerators share the raw
+ * `int32_t` encoding — confirm against the HAL definition.
+ */
+// convert: int32_t -> Dataspace
+inline Dataspace toHardwareDataspace(int32_t l) {
+    return static_cast<Dataspace>(l);
+}
+
+/**
+ * \brief Convert `Dataspace` to `int32_t`.
+ *
+ * \param[in] t The source `Dataspace`.
+ * \result The corresponding `int32_t`.
+ *
+ * Value-preserving cast; inverse of `toHardwareDataspace()`.
+ */
+// convert: Dataspace -> int32_t
+inline int32_t toRawDataspace(Dataspace const& t) {
+    return static_cast<int32_t>(t);
+}
+
+/**
+ * \brief Wrap an opaque buffer inside a `hidl_vec<uint8_t>`.
+ *
+ * \param[in] l The pointer to the beginning of the opaque buffer.
+ * \param[in] size The size of the buffer.
+ * \return A `hidl_vec<uint8_t>` that points to the buffer.
+ *
+ * The returned vector aliases \p l without taking ownership (third argument
+ * of `setToExternal()` is `false`), so the underlying buffer must outlive the
+ * returned vector.
+ */
+// wrap: void*, size_t -> hidl_vec<uint8_t>
+inline hidl_vec<uint8_t> inHidlBytes(void const* l, size_t size) {
+    hidl_vec<uint8_t> t;
+    t.setToExternal(static_cast<uint8_t*>(const_cast<void*>(l)), size, false);
+    return t;
+}
+
+/**
+ * \brief Create a `hidl_vec<uint8_t>` that is a deep copy of an opaque buffer.
+ *
+ * \param[in] l The pointer to the beginning of the opaque buffer.
+ * \param[in] size The size of the buffer.
+ * \return A `hidl_vec<uint8_t>` that owns a copy of the input bytes.
+ */
+// convert: void*, size_t -> hidl_vec<uint8_t>
+inline hidl_vec<uint8_t> toHidlBytes(void const* l, size_t size) {
+    hidl_vec<uint8_t> buffer;
+    buffer.resize(size);
+    // Copy exactly `size` bytes into the freshly allocated storage.
+    std::copy_n(static_cast<uint8_t const*>(l), size, buffer.data());
+    return buffer;
+}
+
+/**
+ * \brief Wrap `GraphicBuffer` in `AnwBuffer`.
+ *
+ * \param[out] t The wrapper of type `AnwBuffer`.
+ * \param[in] l The source `GraphicBuffer`.
+ *
+ * Metadata is copied field by field; `t->nativeHandle` aliases `l.handle`
+ * without duplicating it, so \p l must outlive \p t.
+ */
+// wrap: GraphicBuffer -> AnwBuffer
+inline void wrapAs(AnwBuffer* t, GraphicBuffer const& l) {
+    t->attr.width = l.getWidth();
+    t->attr.height = l.getHeight();
+    t->attr.stride = l.getStride();
+    t->attr.format = static_cast<PixelFormat>(l.getPixelFormat());
+    t->attr.layerCount = l.getLayerCount();
+    t->attr.usage = l.getUsage();
+    t->attr.id = l.getId();
+    t->attr.generationNumber = l.getGenerationNumber();
+    t->nativeHandle = hidl_handle(l.handle);
+}
+
+/**
+ * \brief Convert `AnwBuffer` to `GraphicBuffer`.
+ *
+ * \param[out] l The destination `GraphicBuffer`.
+ * \param[in] t The source `AnwBuffer`.
+ * \return `true` on success; `false` if the native handle cannot be cloned or
+ * if unflattening fails.
+ *
+ * This function will duplicate all file descriptors in \p t.
+ */
+// convert: AnwBuffer -> GraphicBuffer
+// Ref: frameworks/native/libs/ui/GraphicBuffer.cpp: GraphicBuffer::flatten
+inline bool convertTo(GraphicBuffer* l, AnwBuffer const& t) {
+    native_handle_t* handle = t.nativeHandle == nullptr ?
+            nullptr : native_handle_clone(t.nativeHandle);
+    if (t.nativeHandle != nullptr && handle == nullptr) {
+        // Cloning failed (e.g. fd exhaustion or allocation failure). Bail out
+        // instead of unflattening a buffer that silently lost its handle.
+        return false;
+    }
+
+    // Build the flattened form that GraphicBuffer::unflatten expects:
+    // 12 metadata words followed by the handle's int data.
+    size_t const numInts = 12 + (handle ? handle->numInts : 0);
+    int32_t* ints = new int32_t[numInts];
+
+    size_t numFds = static_cast<size_t>(handle ? handle->numFds : 0);
+    int* fds = new int[numFds];
+
+    ints[0] = 'GBFR';  // magic expected by GraphicBuffer::unflatten
+    ints[1] = static_cast<int32_t>(t.attr.width);
+    ints[2] = static_cast<int32_t>(t.attr.height);
+    ints[3] = static_cast<int32_t>(t.attr.stride);
+    ints[4] = static_cast<int32_t>(t.attr.format);
+    ints[5] = static_cast<int32_t>(t.attr.layerCount);
+    ints[6] = static_cast<int32_t>(t.attr.usage);
+    ints[7] = static_cast<int32_t>(t.attr.id >> 32);
+    ints[8] = static_cast<int32_t>(t.attr.id & 0xFFFFFFFF);
+    ints[9] = static_cast<int32_t>(t.attr.generationNumber);
+    ints[10] = 0;
+    ints[11] = 0;
+    if (handle) {
+        ints[10] = static_cast<int32_t>(handle->numFds);
+        ints[11] = static_cast<int32_t>(handle->numInts);
+        int* intsStart = handle->data + handle->numFds;
+        std::copy(handle->data, intsStart, fds);
+        std::copy(intsStart, intsStart + handle->numInts, &ints[12]);
+    }
+
+    void const* constBuffer = static_cast<void const*>(ints);
+    size_t size = numInts * sizeof(int32_t);
+    int const* constFds = static_cast<int const*>(fds);
+    // unflatten() duplicates the fds it consumes, so the clone and the
+    // scratch arrays can be released unconditionally afterwards.
+    status_t status = l->unflatten(constBuffer, size, constFds, numFds);
+
+    delete [] fds;
+    delete [] ints;
+    native_handle_delete(handle);
+    return status == NO_ERROR;
+}
+
+/**
+ * \brief Wrap a `GraphicBuffer` in `CodecBuffer`.
+ *
+ * \param[out] t The wrapper of type `CodecBuffer`.
+ * \param[in] graphicBuffer The source `GraphicBuffer`; may be null.
+ * \return \p t, for call chaining.
+ *
+ * A null \p graphicBuffer yields an ANW_BUFFER `CodecBuffer` with zeroed
+ * attributes and an empty native handle.
+ */
+// wrap: sp<GraphicBuffer> -> CodecBuffer
+inline CodecBuffer *wrapAs(CodecBuffer *t, sp<GraphicBuffer> const& graphicBuffer) {
+    t->sharedMemory = hidl_memory();
+    t->nativeHandle = hidl_handle();
+    t->type = CodecBuffer::Type::ANW_BUFFER;
+    if (graphicBuffer == nullptr) {
+        t->attr.anwBuffer.width = 0;
+        t->attr.anwBuffer.height = 0;
+        t->attr.anwBuffer.stride = 0;
+        t->attr.anwBuffer.format = static_cast<PixelFormat>(1);
+        t->attr.anwBuffer.layerCount = 0;
+        t->attr.anwBuffer.usage = 0;
+        return t;
+    }
+    t->attr.anwBuffer.width = graphicBuffer->getWidth();
+    t->attr.anwBuffer.height = graphicBuffer->getHeight();
+    t->attr.anwBuffer.stride = graphicBuffer->getStride();
+    t->attr.anwBuffer.format = static_cast<PixelFormat>(
+            graphicBuffer->getPixelFormat());
+    t->attr.anwBuffer.layerCount = graphicBuffer->getLayerCount();
+    t->attr.anwBuffer.usage = graphicBuffer->getUsage();
+    // Aliases the buffer's handle without duplicating it.
+    t->nativeHandle = graphicBuffer->handle;
+    return t;
+}
+
+/**
+ * \brief Wrap `OMXBuffer` in `CodecBuffer`.
+ *
+ * \param[out] t The wrapper of type `CodecBuffer`.
+ * \param[in] l The source `OMXBuffer`.
+ * \return `true` if the wrapping is successful; `false` otherwise
+ * (`kBufferTypeSharedMem` and unknown buffer types are not supported).
+ */
+// wrap: OMXBuffer -> CodecBuffer
+inline bool wrapAs(CodecBuffer* t, OMXBuffer const& l) {
+    t->sharedMemory = hidl_memory();
+    t->nativeHandle = hidl_handle();
+    switch (l.mBufferType) {
+        case OMXBuffer::kBufferTypeInvalid: {
+            t->type = CodecBuffer::Type::INVALID;
+            return true;
+        }
+        case OMXBuffer::kBufferTypePreset: {
+            t->type = CodecBuffer::Type::PRESET;
+            t->attr.preset.rangeLength = static_cast<uint32_t>(l.mRangeLength);
+            t->attr.preset.rangeOffset = static_cast<uint32_t>(l.mRangeOffset);
+            return true;
+        }
+        case OMXBuffer::kBufferTypeHidlMemory: {
+            t->type = CodecBuffer::Type::SHARED_MEM;
+            t->sharedMemory = l.mHidlMemory;
+            return true;
+        }
+        case OMXBuffer::kBufferTypeSharedMem: {
+            // This is not supported.
+            return false;
+        }
+        case OMXBuffer::kBufferTypeANWBuffer: {
+            // Delegates to the GraphicBuffer overload above.
+            wrapAs(t, l.mGraphicBuffer);
+            return true;
+        }
+        case OMXBuffer::kBufferTypeNativeHandle: {
+            t->type = CodecBuffer::Type::NATIVE_HANDLE;
+            t->nativeHandle = l.mNativeHandle->handle();
+            return true;
+        }
+    }
+    return false;
+}
+
+/**
+ * \brief Convert `CodecBuffer` to `OMXBuffer`.
+ *
+ * \param[out] l The destination `OMXBuffer`.
+ * \param[in] t The source `CodecBuffer`.
+ * \return `true` if successful; `false` otherwise.
+ *
+ * File descriptors inside ANW_BUFFER and NATIVE_HANDLE buffers are
+ * duplicated (via `convertTo()` / `native_handle_clone()` respectively).
+ */
+// convert: CodecBuffer -> OMXBuffer
+inline bool convertTo(OMXBuffer* l, CodecBuffer const& t) {
+    switch (t.type) {
+        case CodecBuffer::Type::INVALID: {
+            *l = OMXBuffer();
+            return true;
+        }
+        case CodecBuffer::Type::PRESET: {
+            *l = OMXBuffer(
+                    t.attr.preset.rangeOffset,
+                    t.attr.preset.rangeLength);
+            return true;
+        }
+        case CodecBuffer::Type::SHARED_MEM: {
+            *l = OMXBuffer(t.sharedMemory);
+            return true;
+        }
+        case CodecBuffer::Type::ANW_BUFFER: {
+            if (t.nativeHandle.getNativeHandle() == nullptr) {
+                // A handle-less ANW buffer maps to a null GraphicBuffer.
+                *l = OMXBuffer(sp<GraphicBuffer>(nullptr));
+                return true;
+            }
+            AnwBuffer anwBuffer;
+            anwBuffer.nativeHandle = t.nativeHandle;
+            anwBuffer.attr = t.attr.anwBuffer;
+            sp<GraphicBuffer> graphicBuffer = new GraphicBuffer();
+            if (!convertTo(graphicBuffer.get(), anwBuffer)) {
+                return false;
+            }
+            *l = OMXBuffer(graphicBuffer);
+            return true;
+        }
+        case CodecBuffer::Type::NATIVE_HANDLE: {
+            // NOTE(review): native_handle_clone() may return nullptr on
+            // failure; confirm NativeHandle::create() tolerates that.
+            *l = OMXBuffer(NativeHandle::create(
+                    native_handle_clone(t.nativeHandle), true));
+            return true;
+        }
+    }
+    return false;
+}
+
+/**
+ * \brief Convert `IOMX::ComponentInfo` to `IOmx::ComponentInfo`.
+ *
+ * \param[out] t The destination `IOmx::ComponentInfo`.
+ * \param[in] l The source `IOMX::ComponentInfo`.
+ * \return Always `true`.
+ */
+// convert: IOMX::ComponentInfo -> IOmx::ComponentInfo
+inline bool convertTo(IOmx::ComponentInfo* t, IOMX::ComponentInfo const& l) {
+    t->mName = l.mName.string();
+    // Size the destination once, then fill it role by role.
+    t->mRoles.resize(l.mRoles.size());
+    size_t index = 0;
+    for (auto const& role : l.mRoles) {
+        t->mRoles[index] = role.string();
+        ++index;
+    }
+    return true;
+}
+
+/**
+ * \brief Convert `IOmx::ComponentInfo` to `IOMX::ComponentInfo`.
+ *
+ * \param[out] l The destination `IOMX::ComponentInfo`.
+ * \param[in] t The source `IOmx::ComponentInfo`.
+ * \return Always `true`.
+ */
+// convert: IOmx::ComponentInfo -> IOMX::ComponentInfo
+inline bool convertTo(IOMX::ComponentInfo* l, IOmx::ComponentInfo const& t) {
+    l->mName = t.mName.c_str();
+    // Rebuild the role list from scratch.
+    l->mRoles.clear();
+    for (auto const& role : t.mRoles) {
+        l->mRoles.push_back(String8(role.c_str()));
+    }
+    return true;
+}
+
+/**
+ * \brief Convert `OMX_BOOL` to `bool`.
+ *
+ * \param[in] l The source `OMX_BOOL`.
+ * \return The destination `bool`.
+ *
+ * Any value other than `OMX_FALSE` (not just `OMX_TRUE`) maps to `true`,
+ * matching the original conditional's behavior.
+ */
+// convert: OMX_BOOL -> bool
+inline bool toRawBool(OMX_BOOL l) {
+    return l != OMX_FALSE;
+}
+
+/**
+ * \brief Convert `bool` to `OMX_BOOL`.
+ *
+ * \param[in] t The source `bool`.
+ * \return `OMX_TRUE` for `true`, `OMX_FALSE` for `false`.
+ */
+// convert: bool -> OMX_BOOL
+inline OMX_BOOL toEnumBool(bool t) {
+    return t ? OMX_TRUE : OMX_FALSE;
+}
+
+/**
+ * \brief Convert `OMX_COMMANDTYPE` to `uint32_t`.
+ *
+ * \param[in] l The source `OMX_COMMANDTYPE`.
+ * \return The underlying value of type `uint32_t`.
+ *
+ * `OMX_COMMANDTYPE` is an enum type whose underlying type is `uint32_t`.
+ */
+// convert: OMX_COMMANDTYPE -> uint32_t
+inline uint32_t toRawCommandType(OMX_COMMANDTYPE l) {
+    // Value-preserving cast; no range check needed.
+    return static_cast<uint32_t>(l);
+}
+
+/**
+ * \brief Convert `uint32_t` to `OMX_COMMANDTYPE`.
+ *
+ * \param[in] t The source `uint32_t`.
+ * \return The corresponding enum value of type `OMX_COMMANDTYPE`.
+ *
+ * `OMX_COMMANDTYPE` is an enum type whose underlying type is `uint32_t`.
+ */
+// convert: uint32_t -> OMX_COMMANDTYPE
+inline OMX_COMMANDTYPE toEnumCommandType(uint32_t t) {
+    // No validation: unknown raw values map to out-of-range enum values.
+    return static_cast<OMX_COMMANDTYPE>(t);
+}
+
+/**
+ * \brief Convert `OMX_INDEXTYPE` to `uint32_t`.
+ *
+ * \param[in] l The source `OMX_INDEXTYPE`.
+ * \return The underlying value of type `uint32_t`.
+ *
+ * `OMX_INDEXTYPE` is an enum type whose underlying type is `uint32_t`.
+ */
+// convert: OMX_INDEXTYPE -> uint32_t
+inline uint32_t toRawIndexType(OMX_INDEXTYPE l) {
+    // Value-preserving cast; no range check needed.
+    return static_cast<uint32_t>(l);
+}
+
+/**
+ * \brief Convert `uint32_t` to `OMX_INDEXTYPE`.
+ *
+ * \param[in] t The source `uint32_t`.
+ * \return The corresponding enum value of type `OMX_INDEXTYPE`.
+ *
+ * `OMX_INDEXTYPE` is an enum type whose underlying type is `uint32_t`.
+ */
+// convert: uint32_t -> OMX_INDEXTYPE
+inline OMX_INDEXTYPE toEnumIndexType(uint32_t t) {
+    // No validation: unknown raw values map to out-of-range enum values.
+    return static_cast<OMX_INDEXTYPE>(t);
+}
+
+/**
+ * \brief Convert `IOMX::PortMode` to `PortMode`.
+ *
+ * \param[in] l The source `IOMX::PortMode`.
+ * \return The destination `PortMode`.
+ *
+ * NOTE(review): assumes the two enums share numeric values — confirm.
+ */
+// convert: IOMX::PortMode -> PortMode
+inline PortMode toHardwarePortMode(IOMX::PortMode l) {
+    return static_cast<PortMode>(l);
+}
+
+/**
+ * \brief Convert `PortMode` to `IOMX::PortMode`.
+ *
+ * \param[in] t The source `PortMode`.
+ * \return The destination `IOMX::PortMode`.
+ *
+ * NOTE(review): assumes the two enums share numeric values — confirm.
+ */
+// convert: PortMode -> IOMX::PortMode
+inline IOMX::PortMode toIOMXPortMode(PortMode t) {
+    return static_cast<IOMX::PortMode>(t);
+}
+
+/**
+ * \brief Convert `OMX_TICKS` to `uint64_t`.
+ *
+ * \param[in] l The source `OMX_TICKS`.
+ * \return The destination `uint64_t`.
+ */
+// convert: OMX_TICKS -> uint64_t
+inline uint64_t toRawTicks(OMX_TICKS l) {
+#ifndef OMX_SKIP64BIT
+    return static_cast<uint64_t>(l);
+#else
+    // Widen nHighPart to 64 bits *before* shifting; shifting a 32-bit value
+    // by 32 is undefined behavior and would discard the high word entirely.
+    return static_cast<uint64_t>(l.nLowPart) |
+            (static_cast<uint64_t>(l.nHighPart) << 32);
+#endif
+}
+
+/**
+ * \brief Convert `uint64_t` to `OMX_TICKS`.
+ *
+ * \param[in] t The source `uint64_t`.
+ * \return The destination `OMX_TICKS`.
+ */
+// convert: uint64_t -> OMX_TICKS
+inline OMX_TICKS toOMXTicks(uint64_t t) {
+#ifndef OMX_SKIP64BIT
+    return static_cast<OMX_TICKS>(t);
+#else
+    // Aggregate order is {nLowPart, nHighPart}.
+    return OMX_TICKS{
+            static_cast<uint32_t>(t & 0xFFFFFFFF),
+            static_cast<uint32_t>(t >> 32)};
+#endif
+}
+
+/**
+ * Conversion functions for types outside media
+ * ============================================
+ *
+ * Some objects in libui and libgui that were made to go through binder calls do
+ * not expose ways to read or write their fields to the public. To pass an
+ * object of this kind through the HIDL boundary, translation functions need to
+ * work around the access restriction by using the publicly available
+ * `flatten()` and `unflatten()` functions.
+ *
+ * All `flatten()` and `unflatten()` overloads follow the same convention as
+ * follows:
+ *
+ * status_t flatten(ObjectType const& object,
+ * [OtherType const& other, ...]
+ * void*& buffer, size_t& size,
+ * int*& fds, size_t& numFds)
+ *
+ * status_t unflatten(ObjectType* object,
+ * [OtherType* other, ...,]
+ * void*& buffer, size_t& size,
+ * int*& fds, size_t& numFds)
+ *
+ * The number of `other` parameters varies depending on the `ObjectType`. For
+ * example, in the process of unflattening an object that contains
+ * `hidl_handle`, `other` is needed to hold `native_handle_t` objects that will
+ * be created.
+ *
+ * The last four parameters always work the same way in all overloads of
+ * `flatten()` and `unflatten()`:
+ * - For `flatten()`, `buffer` is the pointer to the non-fd buffer to be filled,
+ * `size` is the size (in bytes) of the non-fd buffer pointed to by `buffer`,
+ * `fds` is the pointer to the fd buffer to be filled, and `numFds` is the
+ * size (in ints) of the fd buffer pointed to by `fds`.
+ * - For `unflatten()`, `buffer` is the pointer to the non-fd buffer to be read
+ * from, `size` is the size (in bytes) of the non-fd buffer pointed to by
+ * `buffer`, `fds` is the pointer to the fd buffer to be read from, and
+ * `numFds` is the size (in ints) of the fd buffer pointed to by `fds`.
+ * - After a successful call to `flatten()` or `unflatten()`, `buffer` and `fds`
+ * will be advanced, while `size` and `numFds` will be decreased to reflect
+ * how much storage/data of the two buffers (fd and non-fd) have been used.
+ * - After an unsuccessful call, the values of `buffer`, `size`, `fds` and
+ * `numFds` are invalid.
+ *
+ * The return value of a successful `flatten()` or `unflatten()` call will be
+ * `OK` (also aliased as `NO_ERROR`). Any other values indicate a failure.
+ *
+ * For each object type that supports flattening, there will be two accompanying
+ * functions: `getFlattenedSize()` and `getFdCount()`. `getFlattenedSize()` will
+ * return the size of the non-fd buffer that the object will need for
+ * flattening. `getFdCount()` will return the size of the fd buffer that the
+ * object will need for flattening.
+ *
+ * The set of these four functions, `getFlattenedSize()`, `getFdCount()`,
+ * `flatten()` and `unflatten()`, are similar to functions of the same name in
+ * the abstract class `Flattenable`. The only difference is that functions in
+ * this file are not member functions of the object type. For example, we write
+ *
+ * flatten(x, buffer, size, fds, numFds)
+ *
+ * instead of
+ *
+ * x.flatten(buffer, size, fds, numFds)
+ *
+ * because we cannot modify the type of `x`.
+ *
+ * There is one exception to the naming convention: `hidl_handle` that
+ * represents a fence. The four functions for this "Fence" type have the word
+ * "Fence" attached to their names because the object type, which is
+ * `hidl_handle`, does not carry the special meaning that the object itself can
+ * only contain zero or one file descriptor.
+ */
+
+// Ref: frameworks/native/libs/ui/Fence.cpp
+
+/**
+ * \brief Return the size of the non-fd buffer required to flatten a fence.
+ *
+ * \param[in] fence The input fence of type `hidl_handle`.
+ * \return The required size of the flat buffer.
+ *
+ * The current version of this function always returns 4 — the size of the
+ * `uint32_t` field that stores the number of file descriptors contained in
+ * the fd part of the flat buffer.
+ */
+inline size_t getFenceFlattenedSize(hidl_handle const& /* fence */) {
+    return 4;
+};
+
+/**
+ * \brief Return the number of file descriptors contained in a fence.
+ *
+ * \param[in] fence The input fence of type `hidl_handle`.
+ * \return `0` if \p fence does not contain a valid file descriptor, or `1`
+ * otherwise.
+ */
+inline size_t getFenceFdCount(hidl_handle const& fence) {
+    // A fence holds at most one fd; -1 marks "no fence".
+    return native_handle_read_fd(fence) == -1 ? 0 : 1;
+}
+
+/**
+ * \brief Unflatten `Fence` to `hidl_handle`.
+ *
+ * \param[out] fence The destination `hidl_handle`.
+ * \param[out] nh The underlying native handle, or `nullptr` if the fence
+ * carries no file descriptor.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * If the return value is `NO_ERROR` and \p nh is non-null, it points to a
+ * newly created native handle, which needs to be deleted with
+ * `native_handle_delete()` afterwards.
+ */
+inline status_t unflattenFence(hidl_handle* fence, native_handle_t** nh,
+        void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
+    if (size < 4) {
+        // Not enough room for the uint32_t fd-count field.
+        return NO_MEMORY;
+    }
+
+    uint32_t numFdsInHandle;
+    FlattenableUtils::read(buffer, size, numFdsInHandle);
+
+    if (numFdsInHandle > 1) {
+        // A fence carries at most one file descriptor.
+        return BAD_VALUE;
+    }
+
+    if (numFds < numFdsInHandle) {
+        return NO_MEMORY;
+    }
+
+    if (numFdsInHandle) {
+        *nh = native_handle_create_from_fd(*fds);
+        if (*nh == nullptr) {
+            return NO_MEMORY;
+        }
+        *fence = *nh;
+        // Consume the fd from the flat fd buffer.
+        ++fds;
+        --numFds;
+    } else {
+        *nh = nullptr;
+        *fence = hidl_handle();
+    }
+
+    return NO_ERROR;
+}
+
+/**
+ * \brief Flatten `hidl_handle` as `Fence`.
+ *
+ * \param[in] fence The source `hidl_handle`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * The file descriptor (if any) is copied by value into \p fds; it is not
+ * duplicated, so \p fence must stay valid while the flat buffers are in use.
+ */
+inline status_t flattenFence(hidl_handle const& fence,
+        void*& buffer, size_t& size, int*& fds, size_t& numFds) {
+    if (size < getFenceFlattenedSize(fence) ||
+            numFds < getFenceFdCount(fence)) {
+        return NO_MEMORY;
+    }
+    // Cast to uint32_t since the size of a size_t can vary between 32- and
+    // 64-bit processes
+    FlattenableUtils::write(buffer, size,
+            static_cast<uint32_t>(getFenceFdCount(fence)));
+    int fd = native_handle_read_fd(fence);
+    if (fd != -1) {
+        *fds = fd;
+        ++fds;
+        --numFds;
+    }
+    return NO_ERROR;
+}
+
+/**
+ * \brief Wrap `Fence` in `hidl_handle`.
+ *
+ * \param[out] t The wrapper of type `hidl_handle`.
+ * \param[out] nh The native handle pointed to by \p t.
+ * \param[in] l The source `Fence`.
+ *
+ * Works by flattening \p l with its own `flatten()` and re-reading the result
+ * with `unflattenFence()`, since `Fence` exposes no direct fd accessor.
+ *
+ * On success, \p nh will hold a newly created native handle, which must be
+ * deleted manually with `native_handle_delete()` afterwards.
+ */
+// wrap: Fence -> hidl_handle
+inline bool wrapAs(hidl_handle* t, native_handle_t** nh, Fence const& l) {
+    size_t const baseSize = l.getFlattenedSize();
+    std::unique_ptr<uint8_t[]> baseBuffer(
+            new (std::nothrow) uint8_t[baseSize]);
+    if (!baseBuffer) {
+        return false;
+    }
+
+    size_t const baseNumFds = l.getFdCount();
+    std::unique_ptr<int[]> baseFds(
+            new (std::nothrow) int[baseNumFds]);
+    if (!baseFds) {
+        return false;
+    }
+
+    // Round trip: Fence -> flat buffers -> hidl_handle.
+    void* buffer = static_cast<void*>(baseBuffer.get());
+    size_t size = baseSize;
+    int* fds = static_cast<int*>(baseFds.get());
+    size_t numFds = baseNumFds;
+    if (l.flatten(buffer, size, fds, numFds) != NO_ERROR) {
+        return false;
+    }
+
+    void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+    size = baseSize;
+    int const* constFds = static_cast<int const*>(baseFds.get());
+    numFds = baseNumFds;
+    if (unflattenFence(t, nh, constBuffer, size, constFds, numFds)
+            != NO_ERROR) {
+        return false;
+    }
+
+    return true;
+}
+
+/**
+ * \brief Convert `hidl_handle` to `Fence`.
+ *
+ * \param[out] l The destination `Fence`. `l` must not have been used
+ * (`l->isValid()` must return `false`) before this function is called.
+ * \param[in] t The source `hidl_handle`.
+ *
+ * If \p t contains a valid file descriptor, it will be duplicated; ownership
+ * of the duplicate passes to \p l via `Fence::unflatten()`.
+ */
+// convert: hidl_handle -> Fence
+inline bool convertTo(Fence* l, hidl_handle const& t) {
+    int fd = native_handle_read_fd(t);
+    if (fd != -1) {
+        // Duplicate so the Fence owns its own descriptor.
+        fd = dup(fd);
+        if (fd == -1) {
+            return false;
+        }
+    }
+    native_handle_t* nh = native_handle_create_from_fd(fd);
+    if (nh == nullptr) {
+        if (fd != -1) {
+            close(fd);
+        }
+        return false;
+    }
+
+    size_t const baseSize = getFenceFlattenedSize(t);
+    std::unique_ptr<uint8_t[]> baseBuffer(
+            new (std::nothrow) uint8_t[baseSize]);
+    if (!baseBuffer) {
+        native_handle_delete(nh);
+        return false;
+    }
+
+    size_t const baseNumFds = getFenceFdCount(t);
+    std::unique_ptr<int[]> baseFds(
+            new (std::nothrow) int[baseNumFds]);
+    if (!baseFds) {
+        native_handle_delete(nh);
+        return false;
+    }
+
+    // Round trip: hidl_handle -> flat buffers -> Fence.
+    void* buffer = static_cast<void*>(baseBuffer.get());
+    size_t size = baseSize;
+    int* fds = static_cast<int*>(baseFds.get());
+    size_t numFds = baseNumFds;
+    if (flattenFence(hidl_handle(nh), buffer, size, fds, numFds) != NO_ERROR) {
+        native_handle_delete(nh);
+        return false;
+    }
+    native_handle_delete(nh);
+
+    void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+    size = baseSize;
+    int const* constFds = static_cast<int const*>(baseFds.get());
+    numFds = baseNumFds;
+    // NOTE(review): if unflatten() fails here the dup'ed fd may leak —
+    // confirm Fence::unflatten's ownership semantics on failure.
+    if (l->unflatten(constBuffer, size, constFds, numFds) != NO_ERROR) {
+        return false;
+    }
+
+    return true;
+}
+
+// Ref: frameworks/native/libs/ui/FenceTime.cpp: FenceTime::Snapshot
+
+/**
+ * \brief Return the size of the non-fd buffer required to flatten
+ * `FenceTimeSnapshot`.
+ *
+ * \param[in] t The input `FenceTimeSnapshot`.
+ * \return The required size of the flat buffer, or `0` for an unrecognized
+ * state.
+ */
+inline size_t getFlattenedSize(
+        HGraphicBufferProducer::FenceTimeSnapshot const& t) {
+    // Every state needs at least the state field itself.
+    constexpr size_t min = sizeof(t.state);
+    switch (t.state) {
+        case HGraphicBufferProducer::FenceTimeSnapshot::State::EMPTY:
+            return min;
+        case HGraphicBufferProducer::FenceTimeSnapshot::State::FENCE:
+            return min + getFenceFlattenedSize(t.fence);
+        case HGraphicBufferProducer::FenceTimeSnapshot::State::SIGNAL_TIME:
+            return min + sizeof(
+                    ::android::FenceTime::Snapshot::signalTime);
+    }
+    return 0;
+}
+
+/**
+ * \brief Return the number of file descriptors contained in
+ * `FenceTimeSnapshot`.
+ *
+ * \param[in] t The input `FenceTimeSnapshot`.
+ * \return The number of file descriptors contained in \p t — non-zero only
+ * for the FENCE state.
+ */
+inline size_t getFdCount(
+        HGraphicBufferProducer::FenceTimeSnapshot const& t) {
+    return t.state ==
+            HGraphicBufferProducer::FenceTimeSnapshot::State::FENCE ?
+            getFenceFdCount(t.fence) : 0;
+}
+
+/**
+ * \brief Flatten `FenceTimeSnapshot`.
+ *
+ * \param[in] t The source `FenceTimeSnapshot`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * When `t.state == FENCE`, the file descriptor in `t.fence` is copied by
+ * value into \p fds (`flattenFence()` does not duplicate it), so \p t must
+ * stay valid while the flat buffers are in use.
+ */
+inline status_t flatten(HGraphicBufferProducer::FenceTimeSnapshot const& t,
+        void*& buffer, size_t& size, int*& fds, size_t& numFds) {
+    if (size < getFlattenedSize(t)) {
+        return NO_MEMORY;
+    }
+
+    // The state tag is written using the libui enum so that
+    // FenceTime::Snapshot::unflatten can read it back directly.
+    switch (t.state) {
+        case HGraphicBufferProducer::FenceTimeSnapshot::State::EMPTY:
+            FlattenableUtils::write(buffer, size,
+                    ::android::FenceTime::Snapshot::State::EMPTY);
+            return NO_ERROR;
+        case HGraphicBufferProducer::FenceTimeSnapshot::State::FENCE:
+            FlattenableUtils::write(buffer, size,
+                    ::android::FenceTime::Snapshot::State::FENCE);
+            return flattenFence(t.fence, buffer, size, fds, numFds);
+        case HGraphicBufferProducer::FenceTimeSnapshot::State::SIGNAL_TIME:
+            FlattenableUtils::write(buffer, size,
+                    ::android::FenceTime::Snapshot::State::SIGNAL_TIME);
+            FlattenableUtils::write(buffer, size, t.signalTimeNs);
+            return NO_ERROR;
+    }
+    return NO_ERROR;
+}
+
+/**
+ * \brief Unflatten `FenceTimeSnapshot`.
+ *
+ * \param[out] t The destination `FenceTimeSnapshot`.
+ * \param[out] nh The underlying native handle.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * If the return value is `NO_ERROR` and the constructed snapshot contains a
+ * file descriptor, \p nh will be created to hold that file descriptor. In this
+ * case, \p nh needs to be deleted with `native_handle_delete()` afterwards.
+ */
+inline status_t unflatten(
+        HGraphicBufferProducer::FenceTimeSnapshot* t, native_handle_t** nh,
+        void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
+    if (size < sizeof(t->state)) {
+        return NO_MEMORY;
+    }
+
+    *nh = nullptr;
+    // The state tag was written as the libui enum; translate it back.
+    ::android::FenceTime::Snapshot::State state;
+    FlattenableUtils::read(buffer, size, state);
+    switch (state) {
+        case ::android::FenceTime::Snapshot::State::EMPTY:
+            t->state = HGraphicBufferProducer::FenceTimeSnapshot::State::EMPTY;
+            return NO_ERROR;
+        case ::android::FenceTime::Snapshot::State::FENCE:
+            t->state = HGraphicBufferProducer::FenceTimeSnapshot::State::FENCE;
+            return unflattenFence(&t->fence, nh, buffer, size, fds, numFds);
+        case ::android::FenceTime::Snapshot::State::SIGNAL_TIME:
+            t->state = HGraphicBufferProducer::FenceTimeSnapshot::State::SIGNAL_TIME;
+            if (size < sizeof(t->signalTimeNs)) {
+                return NO_MEMORY;
+            }
+            FlattenableUtils::read(buffer, size, t->signalTimeNs);
+            return NO_ERROR;
+    }
+    return NO_ERROR;
+}
+
+// Ref: frameworks/native/libs/gui/FrameTimestamps.cpp: FrameEventsDelta
+
+/**
+ * \brief Return a lower bound on the size of the non-fd buffer required to
+ * flatten `FrameEventsDelta`.
+ *
+ * \param[in] t The input `FrameEventsDelta`.
+ * \return A lower bound on the size of the flat buffer (fixed-size fields
+ * only; fences are excluded).
+ */
+constexpr size_t minFlattenedSize(
+        HGraphicBufferProducer::FrameEventsDelta const& /* t */) {
+    // Field list mirrors FrameEventsDelta::flatten in libgui.
+    return sizeof(uint64_t) + // mFrameNumber
+            sizeof(uint8_t) + // mIndex
+            sizeof(uint8_t) + // mAddPostCompositeCalled
+            sizeof(uint8_t) + // mAddRetireCalled
+            sizeof(uint8_t) + // mAddReleaseCalled
+            sizeof(nsecs_t) + // mPostedTime
+            sizeof(nsecs_t) + // mRequestedPresentTime
+            sizeof(nsecs_t) + // mLatchTime
+            sizeof(nsecs_t) + // mFirstRefreshStartTime
+            sizeof(nsecs_t); // mLastRefreshStartTime
+}
+
+/**
+ * \brief Return the size of the non-fd buffer required to flatten
+ * `FrameEventsDelta`.
+ *
+ * \param[in] t The input `FrameEventsDelta`.
+ * \return The required size of the flat buffer: the fixed-size fields plus
+ * the four embedded fence snapshots.
+ */
+inline size_t getFlattenedSize(
+        HGraphicBufferProducer::FrameEventsDelta const& t) {
+    return minFlattenedSize(t) +
+            getFlattenedSize(t.gpuCompositionDoneFence) +
+            getFlattenedSize(t.displayPresentFence) +
+            getFlattenedSize(t.displayRetireFence) +
+            getFlattenedSize(t.releaseFence);
+};
+
+/**
+ * \brief Return the number of file descriptors contained in
+ * `FrameEventsDelta`.
+ *
+ * \param[in] t The input `FrameEventsDelta`.
+ * \return The number of file descriptors contained in \p t, summed over the
+ * four embedded fence snapshots.
+ */
+inline size_t getFdCount(
+        HGraphicBufferProducer::FrameEventsDelta const& t) {
+    return getFdCount(t.gpuCompositionDoneFence) +
+            getFdCount(t.displayPresentFence) +
+            getFdCount(t.displayRetireFence) +
+            getFdCount(t.releaseFence);
+};
+
+/**
+ * \brief Unflatten `FrameEventsDelta`.
+ *
+ * \param[out] t The destination `FrameEventsDelta`.
+ * \param[out] nh The underlying array of native handles.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * If the return value is `NO_ERROR`, \p nh will have length 4, and it will be
+ * populated with `nullptr` or newly created handles. Each non-null slot in \p
+ * nh will need to be deleted manually with `native_handle_delete()`.
+ */
+inline status_t unflatten(HGraphicBufferProducer::FrameEventsDelta* t,
+ std::vector<native_handle_t*>* nh,
+ void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
+ if (size < minFlattenedSize(*t)) {
+ return NO_MEMORY;
+ }
+ FlattenableUtils::read(buffer, size, t->frameNumber);
+
+ // These were written as uint8_t for alignment.
+ uint8_t temp = 0;
+ FlattenableUtils::read(buffer, size, temp);
+ size_t index = static_cast<size_t>(temp);
+ if (index >= ::android::FrameEventHistory::MAX_FRAME_HISTORY) {
+ return BAD_VALUE;
+ }
+ t->index = static_cast<uint32_t>(index);
+
+ FlattenableUtils::read(buffer, size, temp);
+ t->addPostCompositeCalled = static_cast<bool>(temp);
+ FlattenableUtils::read(buffer, size, temp);
+ t->addRetireCalled = static_cast<bool>(temp);
+ FlattenableUtils::read(buffer, size, temp);
+ t->addReleaseCalled = static_cast<bool>(temp);
+
+ FlattenableUtils::read(buffer, size, t->postedTimeNs);
+ FlattenableUtils::read(buffer, size, t->requestedPresentTimeNs);
+ FlattenableUtils::read(buffer, size, t->latchTimeNs);
+ FlattenableUtils::read(buffer, size, t->firstRefreshStartTimeNs);
+ FlattenableUtils::read(buffer, size, t->lastRefreshStartTimeNs);
+ FlattenableUtils::read(buffer, size, t->dequeueReadyTime);
+
+ // Fences
+ HGraphicBufferProducer::FenceTimeSnapshot* tSnapshot[4];
+ tSnapshot[0] = &t->gpuCompositionDoneFence;
+ tSnapshot[1] = &t->displayPresentFence;
+ tSnapshot[2] = &t->displayRetireFence;
+ tSnapshot[3] = &t->releaseFence;
+ nh->resize(4);
+ for (size_t snapshotIndex = 0; snapshotIndex < 4; ++snapshotIndex) {
+ status_t status = unflatten(
+ tSnapshot[snapshotIndex], &((*nh)[snapshotIndex]),
+ buffer, size, fds, numFds);
+ if (status != NO_ERROR) {
+ while (snapshotIndex > 0) {
+ --snapshotIndex;
+ if ((*nh)[snapshotIndex] != nullptr) {
+ native_handle_delete((*nh)[snapshotIndex]);
+ }
+ }
+ return status;
+ }
+ }
+ return NO_ERROR;
+}
+
+/**
+ * \brief Flatten `FrameEventsDelta`.
+ *
+ * \param[in] t The source `FrameEventsDelta`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * This function will duplicate file descriptors contained in \p t.
+ */
+// Ref: frameworks/native/libs/gui/FrameTimestamp.cpp:
+// FrameEventsDelta::flatten
+inline status_t flatten(HGraphicBufferProducer::FrameEventsDelta const& t,
+        void*& buffer, size_t& size, int*& fds, size_t& numFds) {
+    // Check that t.index is within a valid range.
+    if (t.index >= static_cast<uint32_t>(FrameEventHistory::MAX_FRAME_HISTORY)
+            || t.index > std::numeric_limits<uint8_t>::max()) {
+        return BAD_VALUE;
+    }
+
+    FlattenableUtils::write(buffer, size, t.frameNumber);
+
+    // These are static_cast to uint8_t for alignment.
+    FlattenableUtils::write(buffer, size, static_cast<uint8_t>(t.index));
+    FlattenableUtils::write(
+            buffer, size, static_cast<uint8_t>(t.addPostCompositeCalled));
+    FlattenableUtils::write(
+            buffer, size, static_cast<uint8_t>(t.addRetireCalled));
+    FlattenableUtils::write(
+            buffer, size, static_cast<uint8_t>(t.addReleaseCalled));
+
+    FlattenableUtils::write(buffer, size, t.postedTimeNs);
+    FlattenableUtils::write(buffer, size, t.requestedPresentTimeNs);
+    FlattenableUtils::write(buffer, size, t.latchTimeNs);
+    FlattenableUtils::write(buffer, size, t.firstRefreshStartTimeNs);
+    FlattenableUtils::write(buffer, size, t.lastRefreshStartTimeNs);
+    FlattenableUtils::write(buffer, size, t.dequeueReadyTime);
+
+    // Fences: numFds is taken by reference so the fd-slot consumption of the
+    // nested snapshot flattens is propagated back to the caller.
+    HGraphicBufferProducer::FenceTimeSnapshot const* tSnapshot[4];
+    tSnapshot[0] = &t.gpuCompositionDoneFence;
+    tSnapshot[1] = &t.displayPresentFence;
+    tSnapshot[2] = &t.displayRetireFence;
+    tSnapshot[3] = &t.releaseFence;
+    for (size_t snapshotIndex = 0; snapshotIndex < 4; ++snapshotIndex) {
+        status_t status = flatten(
+                *(tSnapshot[snapshotIndex]), buffer, size, fds, numFds);
+        if (status != NO_ERROR) {
+            return status;
+        }
+    }
+    return NO_ERROR;
+}
+
+// Ref: frameworks/native/libs/gui/FrameTimestamps.cpp: FrameEventHistoryDelta
+
+/**
+ * \brief Return the size of the non-fd buffer required to flatten
+ * `HGraphicBufferProducer::FrameEventHistoryDelta`.
+ *
+ * \param[in] t The input `HGraphicBufferProducer::FrameEventHistoryDelta`.
+ * \return The required size of the flat buffer.
+ */
+inline size_t getFlattenedSize(
+ HGraphicBufferProducer::FrameEventHistoryDelta const& t) {
+ size_t size = 4 + // mDeltas.size()
+ sizeof(t.compositorTiming);
+ for (size_t i = 0; i < t.deltas.size(); ++i) {
+ size += getFlattenedSize(t.deltas[i]);
+ }
+ return size;
+}
+
+/**
+ * \brief Return the number of file descriptors contained in
+ * `HGraphicBufferProducer::FrameEventHistoryDelta`.
+ *
+ * \param[in] t The input `HGraphicBufferProducer::FrameEventHistoryDelta`.
+ * \return The number of file descriptors contained in \p t.
+ */
+inline size_t getFdCount(
+ HGraphicBufferProducer::FrameEventHistoryDelta const& t) {
+ size_t numFds = 0;
+ for (size_t i = 0; i < t.deltas.size(); ++i) {
+ numFds += getFdCount(t.deltas[i]);
+ }
+ return numFds;
+}
+
+/**
+ * \brief Unflatten `FrameEventHistoryDelta`.
+ *
+ * \param[out] t The destination `FrameEventHistoryDelta`.
+ * \param[out] nh The underlying array of arrays of native handles.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * If the return value is `NO_ERROR`, \p nh will be populated with `nullptr` or
+ * newly created handles. The second dimension of \p nh will be 4. Each non-null
+ * slot in \p nh will need to be deleted manually with `native_handle_delete()`.
+ */
+inline status_t unflatten(
+        HGraphicBufferProducer::FrameEventHistoryDelta* t,
+        std::vector<std::vector<native_handle_t*> >* nh,
+        void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
+    // The fixed prefix is compositorTiming plus a 4-byte delta count.
+    if (size < sizeof(t->compositorTiming) + 4) {
+        return NO_MEMORY;
+    }
+
+    FlattenableUtils::read(buffer, size, t->compositorTiming);
+
+    uint32_t deltaCount = 0;
+    FlattenableUtils::read(buffer, size, deltaCount);
+    if (static_cast<size_t>(deltaCount) >
+            ::android::FrameEventHistory::MAX_FRAME_HISTORY) {
+        return BAD_VALUE;
+    }
+    t->deltas.resize(deltaCount);
+    nh->resize(deltaCount);
+    for (size_t deltaIndex = 0; deltaIndex < deltaCount; ++deltaIndex) {
+        status_t status = unflatten(
+                &(t->deltas[deltaIndex]), &((*nh)[deltaIndex]),
+                buffer, size, fds, numFds);
+        if (status != NO_ERROR) {
+            return status;
+        }
+    }
+    return NO_ERROR;
+}
+
+/**
+ * \brief Flatten `FrameEventHistoryDelta`.
+ *
+ * \param[in] t The source `FrameEventHistoryDelta`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * This function will duplicate file descriptors contained in \p t.
+ */
+inline status_t flatten(
+ HGraphicBufferProducer::FrameEventHistoryDelta const& t,
+ void*& buffer, size_t& size, int*& fds, size_t& numFds) {
+ if (t.deltas.size() > ::android::FrameEventHistory::MAX_FRAME_HISTORY) {
+ return BAD_VALUE;
+ }
+ if (size < getFlattenedSize(t)) {
+ return NO_MEMORY;
+ }
+
+ FlattenableUtils::write(buffer, size, t.compositorTiming);
+
+ FlattenableUtils::write(buffer, size, static_cast<uint32_t>(t.deltas.size()));
+ for (size_t deltaIndex = 0; deltaIndex < t.deltas.size(); ++deltaIndex) {
+ status_t status = flatten(t.deltas[deltaIndex], buffer, size, fds, numFds);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ }
+ return NO_ERROR;
+}
+
+/**
+ * \brief Wrap `::android::FrameEventHistoryData` in
+ * `HGraphicBufferProducer::FrameEventHistoryDelta`.
+ *
+ * \param[out] t The wrapper of type
+ * `HGraphicBufferProducer::FrameEventHistoryDelta`.
+ * \param[out] nh The array of array of native handles that are referred to by
+ * members of \p t.
+ * \param[in] l The source `::android::FrameEventHistoryDelta`.
+ *
+ * On success, each member of \p nh will be either `nullptr` or a newly created
+ * native handle. All the non-`nullptr` elements must be deleted individually
+ * with `native_handle_delete()`.
+ */
+inline bool wrapAs(HGraphicBufferProducer::FrameEventHistoryDelta* t,
+ std::vector<std::vector<native_handle_t*> >* nh,
+ ::android::FrameEventHistoryDelta const& l) {
+
+ size_t const baseSize = l.getFlattenedSize();
+ std::unique_ptr<uint8_t[]> baseBuffer(
+ new (std::nothrow) uint8_t[baseSize]);
+ if (!baseBuffer) {
+ return false;
+ }
+
+ size_t const baseNumFds = l.getFdCount();
+ std::unique_ptr<int[]> baseFds(
+ new (std::nothrow) int[baseNumFds]);
+ if (!baseFds) {
+ return false;
+ }
+
+ void* buffer = static_cast<void*>(baseBuffer.get());
+ size_t size = baseSize;
+ int* fds = baseFds.get();
+ size_t numFds = baseNumFds;
+ if (l.flatten(buffer, size, fds, numFds) != NO_ERROR) {
+ return false;
+ }
+
+ void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+ size = baseSize;
+ int const* constFds = static_cast<int const*>(baseFds.get());
+ numFds = baseNumFds;
+ if (unflatten(t, nh, constBuffer, size, constFds, numFds) != NO_ERROR) {
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * \brief Convert `HGraphicBufferProducer::FrameEventHistoryDelta` to
+ * `::android::FrameEventHistoryDelta`.
+ *
+ * \param[out] l The destination `::android::FrameEventHistoryDelta`.
+ * \param[in] t The source `HGraphicBufferProducer::FrameEventHistoryDelta`.
+ *
+ * This function will duplicate all file descriptors contained in \p t.
+ */
+inline bool convertTo(
+ ::android::FrameEventHistoryDelta* l,
+ HGraphicBufferProducer::FrameEventHistoryDelta const& t) {
+
+ size_t const baseSize = getFlattenedSize(t);
+ std::unique_ptr<uint8_t[]> baseBuffer(
+ new (std::nothrow) uint8_t[baseSize]);
+ if (!baseBuffer) {
+ return false;
+ }
+
+ size_t const baseNumFds = getFdCount(t);
+ std::unique_ptr<int[]> baseFds(
+ new (std::nothrow) int[baseNumFds]);
+ if (!baseFds) {
+ return false;
+ }
+
+ void* buffer = static_cast<void*>(baseBuffer.get());
+ size_t size = baseSize;
+ int* fds = static_cast<int*>(baseFds.get());
+ size_t numFds = baseNumFds;
+ if (flatten(t, buffer, size, fds, numFds) != NO_ERROR) {
+ return false;
+ }
+
+ void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+ size = baseSize;
+ int const* constFds = static_cast<int const*>(baseFds.get());
+ numFds = baseNumFds;
+ if (l->unflatten(constBuffer, size, constFds, numFds) != NO_ERROR) {
+ return false;
+ }
+
+ return true;
+}
+
+// Ref: frameworks/native/libs/ui/Region.cpp
+
+/**
+ * \brief Return the size of the buffer required to flatten `Region`.
+ *
+ * \param[in] t The input `Region`.
+ * \return The required size of the flat buffer.
+ */
+inline size_t getFlattenedSize(Region const& t) {
+ return sizeof(uint32_t) + t.size() * sizeof(::android::Rect);
+}
+
+/**
+ * \brief Unflatten `Region`.
+ *
+ * \param[out] t The destination `Region`.
+ * \param[in,out] buffer The pointer to the flat buffer.
+ * \param[in,out] size The size of the flat buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ */
+inline status_t unflatten(Region* t, void const*& buffer, size_t& size) {
+    if (size < sizeof(uint32_t)) {
+        return NO_MEMORY;
+    }
+
+    uint32_t numRects = 0;
+    FlattenableUtils::read(buffer, size, numRects);
+    // Reject counts that would overflow before performing the multiplication.
+    if (numRects > (UINT32_MAX / sizeof(Rect))) {
+        return NO_MEMORY;
+    }
+    if (size < numRects * sizeof(Rect)) {
+        return NO_MEMORY;
+    }
+
+    t->resize(numRects);
+    for (size_t r = 0; r < numRects; ++r) {
+        ::android::Rect rect(::android::Rect::EMPTY_RECT);
+        status_t status = rect.unflatten(buffer, size);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        FlattenableUtils::advance(buffer, size, sizeof(rect));
+        (*t)[r] = Rect{
+                static_cast<int32_t>(rect.left),
+                static_cast<int32_t>(rect.top),
+                static_cast<int32_t>(rect.right),
+                static_cast<int32_t>(rect.bottom)};
+    }
+    return NO_ERROR;
+}
+
+/**
+ * \brief Flatten `Region`.
+ *
+ * \param[in] t The source `Region`.
+ * \param[in,out] buffer The pointer to the flat buffer.
+ * \param[in,out] size The size of the flat buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ */
+inline status_t flatten(Region const& t, void*& buffer, size_t& size) {
+ if (size < getFlattenedSize(t)) {
+ return NO_MEMORY;
+ }
+
+ FlattenableUtils::write(buffer, size, static_cast<uint32_t>(t.size()));
+ for (size_t r = 0; r < t.size(); ++r) {
+ ::android::Rect rect(
+ static_cast<int32_t>(t[r].left),
+ static_cast<int32_t>(t[r].top),
+ static_cast<int32_t>(t[r].right),
+ static_cast<int32_t>(t[r].bottom));
+ status_t status = rect.flatten(buffer, size);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ FlattenableUtils::advance(buffer, size, sizeof(rect));
+ }
+ return NO_ERROR;
+}
+
+/**
+ * \brief Convert `::android::Region` to `Region`.
+ *
+ * \param[out] t The destination `Region`.
+ * \param[in] l The source `::android::Region`.
+ */
+// convert: ::android::Region -> Region
+inline bool convertTo(Region* t, ::android::Region const& l) {
+ size_t const baseSize = l.getFlattenedSize();
+ std::unique_ptr<uint8_t[]> baseBuffer(
+ new (std::nothrow) uint8_t[baseSize]);
+ if (!baseBuffer) {
+ return false;
+ }
+
+ void* buffer = static_cast<void*>(baseBuffer.get());
+ size_t size = baseSize;
+ if (l.flatten(buffer, size) != NO_ERROR) {
+ return false;
+ }
+
+ void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+ size = baseSize;
+ if (unflatten(t, constBuffer, size) != NO_ERROR) {
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * \brief Convert `Region` to `::android::Region`.
+ *
+ * \param[out] l The destination `::android::Region`.
+ * \param[in] t The source `Region`.
+ */
+// convert: Region -> ::android::Region
+inline bool convertTo(::android::Region* l, Region const& t) {
+ size_t const baseSize = getFlattenedSize(t);
+ std::unique_ptr<uint8_t[]> baseBuffer(
+ new (std::nothrow) uint8_t[baseSize]);
+ if (!baseBuffer) {
+ return false;
+ }
+
+ void* buffer = static_cast<void*>(baseBuffer.get());
+ size_t size = baseSize;
+ if (flatten(t, buffer, size) != NO_ERROR) {
+ return false;
+ }
+
+ void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+ size = baseSize;
+ if (l->unflatten(constBuffer, size) != NO_ERROR) {
+ return false;
+ }
+
+ return true;
+}
+
+// Ref: frameworks/native/libs/gui/BGraphicBufferProducer.cpp:
+// BGraphicBufferProducer::QueueBufferInput
+
+/**
+ * \brief Return a lower bound on the size of the buffer required to flatten
+ * `HGraphicBufferProducer::QueueBufferInput`.
+ *
+ * \param[in] t The input `HGraphicBufferProducer::QueueBufferInput`.
+ * \return A lower bound on the size of the flat buffer.
+ */
+constexpr size_t minFlattenedSize(
+ HGraphicBufferProducer::QueueBufferInput const& /* t */) {
+ return sizeof(int64_t) + // timestamp
+ sizeof(int) + // isAutoTimestamp
+ sizeof(android_dataspace) + // dataSpace
+ sizeof(::android::Rect) + // crop
+ sizeof(int) + // scalingMode
+ sizeof(uint32_t) + // transform
+ sizeof(uint32_t) + // stickyTransform
+ sizeof(bool); // getFrameTimestamps
+}
+
+/**
+ * \brief Return the size of the buffer required to flatten
+ * `HGraphicBufferProducer::QueueBufferInput`.
+ *
+ * \param[in] t The input `HGraphicBufferProducer::QueueBufferInput`.
+ * \return The required size of the flat buffer.
+ */
+inline size_t getFlattenedSize(HGraphicBufferProducer::QueueBufferInput const& t) {
+ return minFlattenedSize(t) +
+ getFenceFlattenedSize(t.fence) +
+ getFlattenedSize(t.surfaceDamage);
+}
+
+/**
+ * \brief Return the number of file descriptors contained in
+ * `HGraphicBufferProducer::QueueBufferInput`.
+ *
+ * \param[in] t The input `HGraphicBufferProducer::QueueBufferInput`.
+ * \return The number of file descriptors contained in \p t.
+ */
+inline size_t getFdCount(
+ HGraphicBufferProducer::QueueBufferInput const& t) {
+ return getFenceFdCount(t.fence);
+}
+
+/**
+ * \brief Flatten `HGraphicBufferProducer::QueueBufferInput`.
+ *
+ * \param[in] t The source `HGraphicBufferProducer::QueueBufferInput`.
+ * \param[out] nh The native handle cloned from `t.fence`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * This function will duplicate the file descriptor in `t.fence`. */
+inline status_t flatten(HGraphicBufferProducer::QueueBufferInput const& t,
+ native_handle_t** nh,
+ void*& buffer, size_t& size, int*& fds, size_t& numFds) {
+ if (size < getFlattenedSize(t)) {
+ return NO_MEMORY;
+ }
+
+ FlattenableUtils::write(buffer, size, t.timestamp);
+ FlattenableUtils::write(buffer, size, static_cast<int>(t.isAutoTimestamp));
+ FlattenableUtils::write(buffer, size,
+ static_cast<android_dataspace_t>(t.dataSpace));
+ FlattenableUtils::write(buffer, size, ::android::Rect(
+ static_cast<int32_t>(t.crop.left),
+ static_cast<int32_t>(t.crop.top),
+ static_cast<int32_t>(t.crop.right),
+ static_cast<int32_t>(t.crop.bottom)));
+ FlattenableUtils::write(buffer, size, static_cast<int>(t.scalingMode));
+ FlattenableUtils::write(buffer, size, t.transform);
+ FlattenableUtils::write(buffer, size, t.stickyTransform);
+ FlattenableUtils::write(buffer, size, t.getFrameTimestamps);
+
+ *nh = t.fence.getNativeHandle() == nullptr ?
+ nullptr : native_handle_clone(t.fence);
+ status_t status = flattenFence(hidl_handle(*nh), buffer, size, fds, numFds);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ return flatten(t.surfaceDamage, buffer, size);
+}
+
+/**
+ * \brief Unflatten `HGraphicBufferProducer::QueueBufferInput`.
+ *
+ * \param[out] t The destination `HGraphicBufferProducer::QueueBufferInput`.
+ * \param[out] nh The underlying native handle for `t->fence`.
+ * \param[in,out] buffer The pointer to the flat non-fd buffer.
+ * \param[in,out] size The size of the flat non-fd buffer.
+ * \param[in,out] fds The pointer to the flat fd buffer.
+ * \param[in,out] numFds The size of the flat fd buffer.
+ * \return `NO_ERROR` on success; other value on failure.
+ *
+ * If the return value is `NO_ERROR` and `t->fence` contains a valid file
+ * descriptor, \p nh will be a newly created native handle holding that file
+ * descriptor. \p nh needs to be deleted with `native_handle_delete()`
+ * afterwards.
+ */
+inline status_t unflatten(
+ HGraphicBufferProducer::QueueBufferInput* t, native_handle_t** nh,
+ void const*& buffer, size_t& size, int const*& fds, size_t& numFds) {
+ if (size < minFlattenedSize(*t)) {
+ return NO_MEMORY;
+ }
+
+ FlattenableUtils::read(buffer, size, t->timestamp);
+ int lIsAutoTimestamp;
+ FlattenableUtils::read(buffer, size, lIsAutoTimestamp);
+ t->isAutoTimestamp = static_cast<int32_t>(lIsAutoTimestamp);
+ android_dataspace_t lDataSpace;
+ FlattenableUtils::read(buffer, size, lDataSpace);
+ t->dataSpace = static_cast<Dataspace>(lDataSpace);
+ Rect lCrop;
+ FlattenableUtils::read(buffer, size, lCrop);
+ t->crop = Rect{
+ static_cast<int32_t>(lCrop.left),
+ static_cast<int32_t>(lCrop.top),
+ static_cast<int32_t>(lCrop.right),
+ static_cast<int32_t>(lCrop.bottom)};
+ int lScalingMode;
+ FlattenableUtils::read(buffer, size, lScalingMode);
+ t->scalingMode = static_cast<int32_t>(lScalingMode);
+ FlattenableUtils::read(buffer, size, t->transform);
+ FlattenableUtils::read(buffer, size, t->stickyTransform);
+ FlattenableUtils::read(buffer, size, t->getFrameTimestamps);
+
+ status_t status = unflattenFence(&(t->fence), nh,
+ buffer, size, fds, numFds);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ return unflatten(&(t->surfaceDamage), buffer, size);
+}
+
+/**
+ * \brief Wrap `BGraphicBufferProducer::QueueBufferInput` in
+ * `HGraphicBufferProducer::QueueBufferInput`.
+ *
+ * \param[out] t The wrapper of type
+ * `HGraphicBufferProducer::QueueBufferInput`.
+ * \param[out] nh The underlying native handle for `t->fence`.
+ * \param[in] l The source `BGraphicBufferProducer::QueueBufferInput`.
+ *
+ * If the return value is `true` and `t->fence` contains a valid file
+ * descriptor, \p nh will be a newly created native handle holding that file
+ * descriptor. \p nh needs to be deleted with `native_handle_delete()`
+ * afterwards.
+ */
+inline bool wrapAs(
+ HGraphicBufferProducer::QueueBufferInput* t,
+ native_handle_t** nh,
+ BGraphicBufferProducer::QueueBufferInput const& l) {
+
+ size_t const baseSize = l.getFlattenedSize();
+ std::unique_ptr<uint8_t[]> baseBuffer(
+ new (std::nothrow) uint8_t[baseSize]);
+ if (!baseBuffer) {
+ return false;
+ }
+
+ size_t const baseNumFds = l.getFdCount();
+ std::unique_ptr<int[]> baseFds(
+ new (std::nothrow) int[baseNumFds]);
+ if (!baseFds) {
+ return false;
+ }
+
+ void* buffer = static_cast<void*>(baseBuffer.get());
+ size_t size = baseSize;
+ int* fds = baseFds.get();
+ size_t numFds = baseNumFds;
+ if (l.flatten(buffer, size, fds, numFds) != NO_ERROR) {
+ return false;
+ }
+
+ void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+ size = baseSize;
+ int const* constFds = static_cast<int const*>(baseFds.get());
+ numFds = baseNumFds;
+ if (unflatten(t, nh, constBuffer, size, constFds, numFds) != NO_ERROR) {
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * \brief Convert `HGraphicBufferProducer::QueueBufferInput` to
+ * `BGraphicBufferProducer::QueueBufferInput`.
+ *
+ * \param[out] l The destination `BGraphicBufferProducer::QueueBufferInput`.
+ * \param[in] t The source `HGraphicBufferProducer::QueueBufferInput`.
+ *
+ * If `t.fence` has a valid file descriptor, it will be duplicated.
+ */
+inline bool convertTo(
+ BGraphicBufferProducer::QueueBufferInput* l,
+ HGraphicBufferProducer::QueueBufferInput const& t) {
+
+ size_t const baseSize = getFlattenedSize(t);
+ std::unique_ptr<uint8_t[]> baseBuffer(
+ new (std::nothrow) uint8_t[baseSize]);
+ if (!baseBuffer) {
+ return false;
+ }
+
+ size_t const baseNumFds = getFdCount(t);
+ std::unique_ptr<int[]> baseFds(
+ new (std::nothrow) int[baseNumFds]);
+ if (!baseFds) {
+ return false;
+ }
+
+ void* buffer = static_cast<void*>(baseBuffer.get());
+ size_t size = baseSize;
+ int* fds = baseFds.get();
+ size_t numFds = baseNumFds;
+ native_handle_t* nh;
+ if (flatten(t, &nh, buffer, size, fds, numFds) != NO_ERROR) {
+ return false;
+ }
+
+ void const* constBuffer = static_cast<void const*>(baseBuffer.get());
+ size = baseSize;
+ int const* constFds = static_cast<int const*>(baseFds.get());
+ numFds = baseNumFds;
+ if (l->unflatten(constBuffer, size, constFds, numFds) != NO_ERROR) {
+ native_handle_close(nh);
+ native_handle_delete(nh);
+ return false;
+ }
+
+ native_handle_delete(nh);
+ return true;
+}
+
+// Ref: frameworks/native/libs/gui/BGraphicBufferProducer.cpp:
+// BGraphicBufferProducer::QueueBufferOutput
+
+/**
+ * \brief Wrap `BGraphicBufferProducer::QueueBufferOutput` in
+ * `HGraphicBufferProducer::QueueBufferOutput`.
+ *
+ * \param[out] t The wrapper of type
+ * `HGraphicBufferProducer::QueueBufferOutput`.
+ * \param[out] nh The array of array of native handles that are referred to by
+ * members of \p t.
+ * \param[in] l The source `BGraphicBufferProducer::QueueBufferOutput`.
+ *
+ * On success, each member of \p nh will be either `nullptr` or a newly created
+ * native handle. All the non-`nullptr` elements must be deleted individually
+ * with `native_handle_delete()`.
+ */
+// wrap: BGraphicBufferProducer::QueueBufferOutput ->
+// HGraphicBufferProducer::QueueBufferOutput
+inline bool wrapAs(HGraphicBufferProducer::QueueBufferOutput* t,
+ std::vector<std::vector<native_handle_t*> >* nh,
+ BGraphicBufferProducer::QueueBufferOutput const& l) {
+ if (!wrapAs(&(t->frameTimestamps), nh, l.frameTimestamps)) {
+ return false;
+ }
+ t->width = l.width;
+ t->height = l.height;
+ t->transformHint = l.transformHint;
+ t->numPendingBuffers = l.numPendingBuffers;
+ t->nextFrameNumber = l.nextFrameNumber;
+ t->bufferReplaced = l.bufferReplaced;
+ return true;
+}
+
+/**
+ * \brief Convert `HGraphicBufferProducer::QueueBufferOutput` to
+ * `BGraphicBufferProducer::QueueBufferOutput`.
+ *
+ * \param[out] l The destination `BGraphicBufferProducer::QueueBufferOutput`.
+ * \param[in] t The source `HGraphicBufferProducer::QueueBufferOutput`.
+ *
+ * This function will duplicate all file descriptors contained in \p t.
+ */
+// convert: HGraphicBufferProducer::QueueBufferOutput ->
+// BGraphicBufferProducer::QueueBufferOutput
+inline bool convertTo(
+ BGraphicBufferProducer::QueueBufferOutput* l,
+ HGraphicBufferProducer::QueueBufferOutput const& t) {
+ if (!convertTo(&(l->frameTimestamps), t.frameTimestamps)) {
+ return false;
+ }
+ l->width = t.width;
+ l->height = t.height;
+ l->transformHint = t.transformHint;
+ l->numPendingBuffers = t.numPendingBuffers;
+ l->nextFrameNumber = t.nextFrameNumber;
+ l->bufferReplaced = t.bufferReplaced;
+ return true;
+}
+
+/**
+ * \brief Convert `BGraphicBufferProducer::DisconnectMode` to
+ * `HGraphicBufferProducer::DisconnectMode`.
+ *
+ * \param[in] l The source `BGraphicBufferProducer::DisconnectMode`.
+ * \return The corresponding `HGraphicBufferProducer::DisconnectMode`.
+ */
+inline HGraphicBufferProducer::DisconnectMode toOmxDisconnectMode(
+ BGraphicBufferProducer::DisconnectMode l) {
+ switch (l) {
+ case BGraphicBufferProducer::DisconnectMode::Api:
+ return HGraphicBufferProducer::DisconnectMode::API;
+ case BGraphicBufferProducer::DisconnectMode::AllLocal:
+ return HGraphicBufferProducer::DisconnectMode::ALL_LOCAL;
+ }
+ return HGraphicBufferProducer::DisconnectMode::API;
+}
+
+/**
+ * \brief Convert `HGraphicBufferProducer::DisconnectMode` to
+ * `BGraphicBufferProducer::DisconnectMode`.
+ *
+ * \param[in] l The source `HGraphicBufferProducer::DisconnectMode`.
+ * \return The corresponding `BGraphicBufferProducer::DisconnectMode`.
+ */
+inline BGraphicBufferProducer::DisconnectMode toGuiDisconnectMode(
+ HGraphicBufferProducer::DisconnectMode t) {
+ switch (t) {
+ case HGraphicBufferProducer::DisconnectMode::API:
+ return BGraphicBufferProducer::DisconnectMode::Api;
+ case HGraphicBufferProducer::DisconnectMode::ALL_LOCAL:
+ return BGraphicBufferProducer::DisconnectMode::AllLocal;
+ }
+ return BGraphicBufferProducer::DisconnectMode::Api;
+}
+
+} // namespace implementation
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_MEDIA_OMX_V1_0__CONVERSION_H
diff --git a/media/libstagefright/omx/1.0/Omx.cpp b/media/libstagefright/omx/1.0/Omx.cpp
new file mode 100644
index 0000000..64b2c08
--- /dev/null
+++ b/media/libstagefright/omx/1.0/Omx.cpp
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <ios>
+#include <list>
+
+#include <android-base/logging.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <OMX_Core.h>
+#include <OMX_AsString.h>
+
+#include "../OMXUtils.h"
+#include "../OMXMaster.h"
+#include "../GraphicBufferSource.h"
+
+#include "WOmxNode.h"
+#include "WOmxObserver.h"
+#include "WGraphicBufferProducer.h"
+#include "WGraphicBufferSource.h"
+#include "Conversion.h"
+
+#include "Omx.h"
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+constexpr size_t kMaxNodeInstances = (1 << 16);
+
+Omx::Omx() :
+ mMaster(new OMXMaster()),
+ mParser() {
+}
+
+Omx::~Omx() {
+ delete mMaster;
+}
+
+Return<void> Omx::listNodes(listNodes_cb _hidl_cb) {
+ std::list<::android::IOMX::ComponentInfo> list;
+ char componentName[256];
+ for (OMX_U32 index = 0;
+ mMaster->enumerateComponents(
+ componentName, sizeof(componentName), index) == OMX_ErrorNone;
+ ++index) {
+ list.push_back(::android::IOMX::ComponentInfo());
+ ::android::IOMX::ComponentInfo& info = list.back();
+ info.mName = componentName;
+ ::android::Vector<::android::String8> roles;
+ OMX_ERRORTYPE err =
+ mMaster->getRolesOfComponent(componentName, &roles);
+ if (err == OMX_ErrorNone) {
+ for (OMX_U32 i = 0; i < roles.size(); ++i) {
+ info.mRoles.push_back(roles[i]);
+ }
+ }
+ }
+
+ hidl_vec<ComponentInfo> tList;
+ tList.resize(list.size());
+ size_t i = 0;
+ for (auto const& info : list) {
+ convertTo(&(tList[i++]), info);
+ }
+ _hidl_cb(toStatus(OK), tList);
+ return Void();
+}
+
+Return<void> Omx::allocateNode(
+        const hidl_string& name,
+        const sp<IOmxObserver>& observer,
+        allocateNode_cb _hidl_cb) {
+
+    using ::android::IOMXNode;
+    using ::android::IOMXObserver;
+
+    Mutex::Autolock autoLock(mLock);
+    if (mLiveNodes.size() == kMaxNodeInstances) {
+        _hidl_cb(toStatus(NO_MEMORY), nullptr);
+        return Void();
+    }
+
+    sp<OMXNodeInstance> instance = new OMXNodeInstance(
+            this, new LWOmxObserver(observer), name.c_str());
+
+    OMX_COMPONENTTYPE *handle;
+    OMX_ERRORTYPE err = mMaster->makeComponentInstance(
+            name.c_str(), &OMXNodeInstance::kCallbacks,
+            instance.get(), &handle);
+
+    if (err != OMX_ErrorNone) {
+        LOG(ERROR) << "Failed to allocate omx component "
+                "'" << name.c_str() << "' "
+                " err=" << asString(err) <<
+                "(0x" << std::hex << unsigned(err) << ")";
+        _hidl_cb(toStatus(StatusFromOMXError(err)), nullptr);
+        return Void();
+    }
+    instance->setHandle(handle);
+    std::vector<AString> quirkVector;
+    if (mParser.getQuirks(name.c_str(), &quirkVector) == OK) {
+        uint32_t quirks = 0;
+        for (const AString& quirk : quirkVector) { // by-ref: avoid copies
+            if (quirk == "requires-allocate-on-input-ports") {
+                quirks |= kRequiresAllocateBufferOnInputPorts;
+            }
+            if (quirk == "requires-allocate-on-output-ports") {
+                quirks |= kRequiresAllocateBufferOnOutputPorts;
+            }
+        }
+        instance->setQuirks(quirks);
+    }
+
+    mLiveNodes.add(observer.get(), instance);
+    observer->linkToDeath(this, 0);
+    mNode2Observer.add(instance.get(), observer.get());
+
+    _hidl_cb(toStatus(OK), new TWOmxNode(instance));
+    return Void();
+}
+
+Return<void> Omx::createInputSurface(createInputSurface_cb _hidl_cb) {
+ sp<::android::IGraphicBufferProducer> bufferProducer;
+
+ sp<GraphicBufferSource> graphicBufferSource = new GraphicBufferSource();
+ status_t err = graphicBufferSource->initCheck();
+ if (err != OK) {
+ LOG(ERROR) << "Failed to create persistent input surface: "
+ << strerror(-err) << " "
+ "(" << int(err) << ")";
+ _hidl_cb(toStatus(err), nullptr, nullptr);
+ return Void();
+ }
+ bufferProducer = graphicBufferSource->getIGraphicBufferProducer();
+
+ _hidl_cb(toStatus(OK),
+ new TWGraphicBufferProducer(bufferProducer),
+ new TWGraphicBufferSource(graphicBufferSource));
+ return Void();
+}
+
+void Omx::serviceDied(uint64_t /* cookie */, wp<IBase> const& who) {
+ sp<OMXNodeInstance> instance;
+ {
+ Mutex::Autolock autoLock(mLock);
+
+ ssize_t index = mLiveNodes.indexOfKey(who);
+
+ if (index < 0) {
+ LOG(ERROR) << "b/27597103, nonexistent observer on serviceDied";
+ android_errorWriteLog(0x534e4554, "27597103");
+ return;
+ }
+
+ instance = mLiveNodes.editValueAt(index);
+ mLiveNodes.removeItemsAt(index);
+ mNode2Observer.removeItem(instance.get());
+ }
+ instance->onObserverDied();
+}
+
+status_t Omx::freeNode(sp<OMXNodeInstance> const& instance) {
+ if (instance == NULL) {
+ return OK;
+ }
+
+ {
+ Mutex::Autolock autoLock(mLock);
+ ssize_t observerIndex = mNode2Observer.indexOfKey(instance.get());
+ if (observerIndex >= 0) {
+ wp<IBase> observer = mNode2Observer.valueAt(observerIndex);
+ ssize_t nodeIndex = mLiveNodes.indexOfKey(observer);
+ if (nodeIndex >= 0) {
+ mNode2Observer.removeItemsAt(observerIndex);
+ mLiveNodes.removeItemsAt(nodeIndex);
+ sp<IBase> sObserver = observer.promote();
+ if (sObserver != nullptr) {
+ sObserver->unlinkToDeath(this);
+ }
+ } else {
+ LOG(WARNING) << "Inconsistent observer record";
+ }
+ }
+ }
+
+ OMX_ERRORTYPE err = OMX_ErrorNone;
+ if (instance->handle() != NULL) {
+ err = mMaster->destroyComponentInstance(
+ static_cast<OMX_COMPONENTTYPE*>(instance->handle()));
+ }
+ return StatusFromOMXError(err);
+}
+
+// Methods from ::android::hidl::base::V1_0::IBase follow.
+
+IOmx* HIDL_FETCH_IOmx(const char* /* name */) {
+ return new Omx();
+}
+
+} // namespace implementation
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
diff --git a/media/libstagefright/omx/1.0/Omx.h b/media/libstagefright/omx/1.0/Omx.h
new file mode 100644
index 0000000..23784aa
--- /dev/null
+++ b/media/libstagefright/omx/1.0/Omx.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_OMX_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_OMX_H
+
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+#include "../../include/OMXNodeInstance.h"
+
+#include <android/hardware/media/omx/1.0/IOmx.h>
+#include <media/vndk/xmlparser/1.0/MediaCodecsXmlParser.h>
+
+namespace android {
+
+struct OMXMaster;
+
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::hardware::media::omx::V1_0::IOmx;
+using ::android::hardware::media::omx::V1_0::IOmxNode;
+using ::android::hardware::media::omx::V1_0::IOmxObserver;
+using ::android::hardware::media::omx::V1_0::Status;
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_death_recipient;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+using ::android::wp;
+
+using ::android::OMXMaster;
+using ::android::OmxNodeOwner;
+using ::android::OMXNodeInstance;
+
+struct Omx : public IOmx, public hidl_death_recipient, public OmxNodeOwner {
+ Omx();
+ virtual ~Omx();
+
+ // Methods from IOmx
+ Return<void> listNodes(listNodes_cb _hidl_cb) override;
+ Return<void> allocateNode(
+ const hidl_string& name,
+ const sp<IOmxObserver>& observer,
+ allocateNode_cb _hidl_cb) override;
+ Return<void> createInputSurface(createInputSurface_cb _hidl_cb) override;
+
+ // Method from hidl_death_recipient
+ void serviceDied(uint64_t cookie, const wp<IBase>& who) override;
+
+ // Method from OmxNodeOwner
+ virtual status_t freeNode(sp<OMXNodeInstance> const& instance) override;
+
+protected:
+ OMXMaster* mMaster;
+ Mutex mLock;
+ KeyedVector<wp<IBase>, sp<OMXNodeInstance> > mLiveNodes;
+ KeyedVector<OMXNodeInstance*, wp<IBase> > mNode2Observer;
+ MediaCodecsXmlParser mParser;
+};
+
+extern "C" IOmx* HIDL_FETCH_IOmx(const char* name);
+
+} // namespace implementation
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_MEDIA_OMX_V1_0_OMX_H
diff --git a/media/libstagefright/omx/1.0/OmxStore.cpp b/media/libstagefright/omx/1.0/OmxStore.cpp
new file mode 100644
index 0000000..0e37af9
--- /dev/null
+++ b/media/libstagefright/omx/1.0/OmxStore.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <ios>
+#include <list>
+
+#include <android-base/logging.h>
+
+#include "Conversion.h"
+#include "OmxStore.h"
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+OmxStore::OmxStore() {
+}
+
+OmxStore::~OmxStore() {
+}
+
+Return<void> OmxStore::listServiceAttributes(listServiceAttributes_cb _hidl_cb) {
+ _hidl_cb(toStatus(NO_ERROR), hidl_vec<ServiceAttribute>());
+ return Void();
+}
+
+Return<void> OmxStore::getNodePrefix(getNodePrefix_cb _hidl_cb) {
+ _hidl_cb(hidl_string());
+ return Void();
+}
+
+Return<void> OmxStore::listRoles(listRoles_cb _hidl_cb) {
+ _hidl_cb(hidl_vec<RoleInfo>());
+ return Void();
+}
+
+Return<sp<IOmx>> OmxStore::getOmx(hidl_string const& omxName) {
+ return IOmx::tryGetService(omxName);
+}
+
+// Methods from ::android::hidl::base::V1_0::IBase follow.
+
+IOmxStore* HIDL_FETCH_IOmxStore(const char* /* name */) {
+ return new OmxStore();
+}
+
+} // namespace implementation
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
diff --git a/media/libstagefright/omx/1.0/OmxStore.h b/media/libstagefright/omx/1.0/OmxStore.h
new file mode 100644
index 0000000..f377f5a
--- /dev/null
+++ b/media/libstagefright/omx/1.0/OmxStore.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_OMXSTORE_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_OMXSTORE_H
+
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+#include <android/hardware/media/omx/1.0/IOmxStore.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::hardware::media::omx::V1_0::IOmxStore;
+using ::android::hardware::media::omx::V1_0::IOmx;
+using ::android::hardware::media::omx::V1_0::Status;
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+using ::android::wp;
+
+struct OmxStore : public IOmxStore {
+ OmxStore();
+ virtual ~OmxStore();
+
+    // Methods from IOmxStore
+ Return<void> listServiceAttributes(listServiceAttributes_cb) override;
+ Return<void> getNodePrefix(getNodePrefix_cb) override;
+ Return<void> listRoles(listRoles_cb) override;
+ Return<sp<IOmx>> getOmx(hidl_string const&) override;
+};
+
+extern "C" IOmxStore* HIDL_FETCH_IOmxStore(const char* name);
+
+} // namespace implementation
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_MEDIA_OMX_V1_0_OMXSTORE_H
diff --git a/media/libstagefright/omx/1.0/WGraphicBufferProducer.cpp b/media/libstagefright/omx/1.0/WGraphicBufferProducer.cpp
new file mode 100644
index 0000000..650db8e
--- /dev/null
+++ b/media/libstagefright/omx/1.0/WGraphicBufferProducer.cpp
@@ -0,0 +1,353 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "WGraphicBufferProducer-impl"
+
+#include <android-base/logging.h>
+
+#include "WGraphicBufferProducer.h"
+#include "WProducerListener.h"
+#include "Conversion.h"
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+// TWGraphicBufferProducer
+TWGraphicBufferProducer::TWGraphicBufferProducer(
+ sp<BGraphicBufferProducer> const& base):
+ mBase(base) {
+}
+
+Return<void> TWGraphicBufferProducer::requestBuffer(
+ int32_t slot, requestBuffer_cb _hidl_cb) {
+ sp<GraphicBuffer> buf;
+ status_t status = mBase->requestBuffer(slot, &buf);
+ AnwBuffer anwBuffer;
+ wrapAs(&anwBuffer, *buf);
+ _hidl_cb(static_cast<int32_t>(status), anwBuffer);
+ return Void();
+}
+
+Return<int32_t> TWGraphicBufferProducer::setMaxDequeuedBufferCount(
+ int32_t maxDequeuedBuffers) {
+ return static_cast<int32_t>(mBase->setMaxDequeuedBufferCount(
+ static_cast<int>(maxDequeuedBuffers)));
+}
+
+Return<int32_t> TWGraphicBufferProducer::setAsyncMode(bool async) {
+ return static_cast<int32_t>(mBase->setAsyncMode(async));
+}
+
+Return<void> TWGraphicBufferProducer::dequeueBuffer(
+ uint32_t width, uint32_t height,
+ PixelFormat format, uint32_t usage,
+ bool getFrameTimestamps, dequeueBuffer_cb _hidl_cb) {
+ int slot;
+ sp<Fence> fence;
+ ::android::FrameEventHistoryDelta outTimestamps;
+ status_t status = mBase->dequeueBuffer(
+ &slot, &fence,
+ width, height,
+ static_cast<::android::PixelFormat>(format), usage,
+ getFrameTimestamps ? &outTimestamps : nullptr);
+ hidl_handle tFence;
+ FrameEventHistoryDelta tOutTimestamps;
+
+ native_handle_t* nh = nullptr;
+ if ((fence == nullptr) || !wrapAs(&tFence, &nh, *fence)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::dequeueBuffer - "
+ "Invalid output fence";
+ _hidl_cb(static_cast<int32_t>(status),
+ static_cast<int32_t>(slot),
+ tFence,
+ tOutTimestamps);
+ return Void();
+ }
+ std::vector<std::vector<native_handle_t*> > nhAA;
+ if (getFrameTimestamps && !wrapAs(&tOutTimestamps, &nhAA, outTimestamps)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::dequeueBuffer - "
+ "Invalid output timestamps";
+ _hidl_cb(static_cast<int32_t>(status),
+ static_cast<int32_t>(slot),
+ tFence,
+ tOutTimestamps);
+ native_handle_delete(nh);
+ return Void();
+ }
+
+ _hidl_cb(static_cast<int32_t>(status),
+ static_cast<int32_t>(slot),
+ tFence,
+ tOutTimestamps);
+ native_handle_delete(nh);
+ if (getFrameTimestamps) {
+ for (auto& nhA : nhAA) {
+ for (auto& handle : nhA) {
+ native_handle_delete(handle);
+ }
+ }
+ }
+ return Void();
+}
+
+Return<int32_t> TWGraphicBufferProducer::detachBuffer(int32_t slot) {
+ return static_cast<int32_t>(mBase->detachBuffer(slot));
+}
+
+Return<void> TWGraphicBufferProducer::detachNextBuffer(
+ detachNextBuffer_cb _hidl_cb) {
+ sp<GraphicBuffer> outBuffer;
+ sp<Fence> outFence;
+ status_t status = mBase->detachNextBuffer(&outBuffer, &outFence);
+ AnwBuffer tBuffer;
+ hidl_handle tFence;
+
+ if (outBuffer == nullptr) {
+ LOG(ERROR) << "TWGraphicBufferProducer::detachNextBuffer - "
+ "Invalid output buffer";
+ _hidl_cb(static_cast<int32_t>(status), tBuffer, tFence);
+ return Void();
+ }
+ wrapAs(&tBuffer, *outBuffer);
+ native_handle_t* nh = nullptr;
+ if ((outFence != nullptr) && !wrapAs(&tFence, &nh, *outFence)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::detachNextBuffer - "
+ "Invalid output fence";
+ _hidl_cb(static_cast<int32_t>(status), tBuffer, tFence);
+ return Void();
+ }
+
+ _hidl_cb(static_cast<int32_t>(status), tBuffer, tFence);
+ native_handle_delete(nh);
+ return Void();
+}
+
+Return<void> TWGraphicBufferProducer::attachBuffer(
+ const AnwBuffer& buffer,
+ attachBuffer_cb _hidl_cb) {
+ int outSlot;
+ sp<GraphicBuffer> lBuffer = new GraphicBuffer();
+ if (!convertTo(lBuffer.get(), buffer)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::attachBuffer - "
+ "Invalid input native window buffer";
+ _hidl_cb(static_cast<int32_t>(BAD_VALUE), -1);
+ return Void();
+ }
+ status_t status = mBase->attachBuffer(&outSlot, lBuffer);
+
+ _hidl_cb(static_cast<int32_t>(status), static_cast<int32_t>(outSlot));
+ return Void();
+}
+
+Return<void> TWGraphicBufferProducer::queueBuffer(
+ int32_t slot, const QueueBufferInput& input,
+ queueBuffer_cb _hidl_cb) {
+ QueueBufferOutput tOutput;
+ BGraphicBufferProducer::QueueBufferInput lInput(
+ 0, false, HAL_DATASPACE_UNKNOWN,
+ ::android::Rect(0, 0, 1, 1),
+ NATIVE_WINDOW_SCALING_MODE_FREEZE,
+ 0, ::android::Fence::NO_FENCE);
+ if (!convertTo(&lInput, input)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::queueBuffer - "
+ "Invalid input";
+ _hidl_cb(static_cast<int32_t>(BAD_VALUE), tOutput);
+ return Void();
+ }
+ BGraphicBufferProducer::QueueBufferOutput lOutput;
+ status_t status = mBase->queueBuffer(
+ static_cast<int>(slot), lInput, &lOutput);
+
+ std::vector<std::vector<native_handle_t*> > nhAA;
+ if (!wrapAs(&tOutput, &nhAA, lOutput)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::queueBuffer - "
+ "Invalid output";
+ _hidl_cb(static_cast<int32_t>(BAD_VALUE), tOutput);
+ return Void();
+ }
+
+ _hidl_cb(static_cast<int32_t>(status), tOutput);
+ for (auto& nhA : nhAA) {
+ for (auto& nh : nhA) {
+ native_handle_delete(nh);
+ }
+ }
+ return Void();
+}
+
+Return<int32_t> TWGraphicBufferProducer::cancelBuffer(
+ int32_t slot, const hidl_handle& fence) {
+ sp<Fence> lFence = new Fence();
+ if (!convertTo(lFence.get(), fence)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::cancelBuffer - "
+ "Invalid input fence";
+ return static_cast<int32_t>(BAD_VALUE);
+ }
+ return static_cast<int32_t>(mBase->cancelBuffer(static_cast<int>(slot), lFence));
+}
+
+Return<void> TWGraphicBufferProducer::query(int32_t what, query_cb _hidl_cb) {
+ int lValue;
+ int lReturn = mBase->query(static_cast<int>(what), &lValue);
+ _hidl_cb(static_cast<int32_t>(lReturn), static_cast<int32_t>(lValue));
+ return Void();
+}
+
+Return<void> TWGraphicBufferProducer::connect(
+ const sp<HProducerListener>& listener,
+ int32_t api, bool producerControlledByApp, connect_cb _hidl_cb) {
+ sp<BProducerListener> lListener = listener == nullptr ?
+ nullptr : new LWProducerListener(listener);
+ BGraphicBufferProducer::QueueBufferOutput lOutput;
+ status_t status = mBase->connect(lListener,
+ static_cast<int>(api),
+ producerControlledByApp,
+ &lOutput);
+
+ QueueBufferOutput tOutput;
+ std::vector<std::vector<native_handle_t*> > nhAA;
+ if (!wrapAs(&tOutput, &nhAA, lOutput)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::connect - "
+ "Invalid output";
+ _hidl_cb(static_cast<int32_t>(status), tOutput);
+ return Void();
+ }
+
+ _hidl_cb(static_cast<int32_t>(status), tOutput);
+ for (auto& nhA : nhAA) {
+ for (auto& nh : nhA) {
+ native_handle_delete(nh);
+ }
+ }
+ return Void();
+}
+
+Return<int32_t> TWGraphicBufferProducer::disconnect(
+ int32_t api, DisconnectMode mode) {
+ return static_cast<int32_t>(mBase->disconnect(
+ static_cast<int>(api),
+ toGuiDisconnectMode(mode)));
+}
+
+Return<int32_t> TWGraphicBufferProducer::setSidebandStream(const hidl_handle& stream) {
+ return static_cast<int32_t>(mBase->setSidebandStream(NativeHandle::create(
+ stream ? native_handle_clone(stream) : NULL, true)));
+}
+
+Return<void> TWGraphicBufferProducer::allocateBuffers(
+ uint32_t width, uint32_t height, PixelFormat format, uint32_t usage) {
+ mBase->allocateBuffers(
+ width, height,
+ static_cast<::android::PixelFormat>(format),
+ usage);
+ return Void();
+}
+
+Return<int32_t> TWGraphicBufferProducer::allowAllocation(bool allow) {
+ return static_cast<int32_t>(mBase->allowAllocation(allow));
+}
+
+Return<int32_t> TWGraphicBufferProducer::setGenerationNumber(uint32_t generationNumber) {
+ return static_cast<int32_t>(mBase->setGenerationNumber(generationNumber));
+}
+
+Return<void> TWGraphicBufferProducer::getConsumerName(getConsumerName_cb _hidl_cb) {
+ _hidl_cb(mBase->getConsumerName().string());
+ return Void();
+}
+
+Return<int32_t> TWGraphicBufferProducer::setSharedBufferMode(bool sharedBufferMode) {
+ return static_cast<int32_t>(mBase->setSharedBufferMode(sharedBufferMode));
+}
+
+Return<int32_t> TWGraphicBufferProducer::setAutoRefresh(bool autoRefresh) {
+ return static_cast<int32_t>(mBase->setAutoRefresh(autoRefresh));
+}
+
+Return<int32_t> TWGraphicBufferProducer::setDequeueTimeout(int64_t timeoutNs) {
+ return static_cast<int32_t>(mBase->setDequeueTimeout(timeoutNs));
+}
+
+Return<void> TWGraphicBufferProducer::getLastQueuedBuffer(
+ getLastQueuedBuffer_cb _hidl_cb) {
+ sp<GraphicBuffer> lOutBuffer = new GraphicBuffer();
+ sp<Fence> lOutFence = new Fence();
+ float lOutTransformMatrix[16];
+ status_t status = mBase->getLastQueuedBuffer(
+ &lOutBuffer, &lOutFence, lOutTransformMatrix);
+
+ AnwBuffer tOutBuffer;
+ if (lOutBuffer != nullptr) {
+ wrapAs(&tOutBuffer, *lOutBuffer);
+ }
+ hidl_handle tOutFence;
+ native_handle_t* nh = nullptr;
+ if ((lOutFence == nullptr) || !wrapAs(&tOutFence, &nh, *lOutFence)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::getLastQueuedBuffer - "
+ "Invalid output fence";
+ _hidl_cb(static_cast<int32_t>(status),
+ tOutBuffer,
+ tOutFence,
+ hidl_array<float, 16>());
+ return Void();
+ }
+ hidl_array<float, 16> tOutTransformMatrix(lOutTransformMatrix);
+
+ _hidl_cb(static_cast<int32_t>(status), tOutBuffer, tOutFence, tOutTransformMatrix);
+ native_handle_delete(nh);
+ return Void();
+}
+
+Return<void> TWGraphicBufferProducer::getFrameTimestamps(
+ getFrameTimestamps_cb _hidl_cb) {
+ ::android::FrameEventHistoryDelta lDelta;
+ mBase->getFrameTimestamps(&lDelta);
+
+ FrameEventHistoryDelta tDelta;
+ std::vector<std::vector<native_handle_t*> > nhAA;
+ if (!wrapAs(&tDelta, &nhAA, lDelta)) {
+ LOG(ERROR) << "TWGraphicBufferProducer::getFrameTimestamps - "
+ "Invalid output frame timestamps";
+ _hidl_cb(tDelta);
+ return Void();
+ }
+
+ _hidl_cb(tDelta);
+ for (auto& nhA : nhAA) {
+ for (auto& nh : nhA) {
+ native_handle_delete(nh);
+ }
+ }
+ return Void();
+}
+
+Return<void> TWGraphicBufferProducer::getUniqueId(getUniqueId_cb _hidl_cb) {
+ uint64_t outId;
+ status_t status = mBase->getUniqueId(&outId);
+ _hidl_cb(static_cast<int32_t>(status), outId);
+ return Void();
+}
+
+} // namespace implementation
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
diff --git a/media/libstagefright/omx/1.0/WGraphicBufferProducer.h b/media/libstagefright/omx/1.0/WGraphicBufferProducer.h
new file mode 100644
index 0000000..4a3fe0c
--- /dev/null
+++ b/media/libstagefright/omx/1.0/WGraphicBufferProducer.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WGRAPHICBUFFERPRODUCER_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WGRAPHICBUFFERPRODUCER_H
+
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+#include <binder/Binder.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/IProducerListener.h>
+
+#include <android/hardware/graphics/bufferqueue/1.0/IGraphicBufferProducer.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::hardware::graphics::common::V1_0::PixelFormat;
+using ::android::hardware::media::V1_0::AnwBuffer;
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+typedef ::android::hardware::graphics::bufferqueue::V1_0::
+ IGraphicBufferProducer HGraphicBufferProducer;
+typedef ::android::hardware::graphics::bufferqueue::V1_0::
+ IProducerListener HProducerListener;
+
+typedef ::android::IGraphicBufferProducer BGraphicBufferProducer;
+typedef ::android::IProducerListener BProducerListener;
+using ::android::BnGraphicBufferProducer;
+
+struct TWGraphicBufferProducer : public HGraphicBufferProducer {
+ sp<BGraphicBufferProducer> mBase;
+ TWGraphicBufferProducer(sp<BGraphicBufferProducer> const& base);
+ Return<void> requestBuffer(int32_t slot, requestBuffer_cb _hidl_cb)
+ override;
+ Return<int32_t> setMaxDequeuedBufferCount(int32_t maxDequeuedBuffers)
+ override;
+ Return<int32_t> setAsyncMode(bool async) override;
+ Return<void> dequeueBuffer(
+ uint32_t width, uint32_t height, PixelFormat format, uint32_t usage,
+ bool getFrameTimestamps, dequeueBuffer_cb _hidl_cb) override;
+ Return<int32_t> detachBuffer(int32_t slot) override;
+ Return<void> detachNextBuffer(detachNextBuffer_cb _hidl_cb) override;
+ Return<void> attachBuffer(const AnwBuffer& buffer, attachBuffer_cb _hidl_cb)
+ override;
+ Return<void> queueBuffer(
+ int32_t slot, const HGraphicBufferProducer::QueueBufferInput& input,
+ queueBuffer_cb _hidl_cb) override;
+ Return<int32_t> cancelBuffer(int32_t slot, const hidl_handle& fence)
+ override;
+ Return<void> query(int32_t what, query_cb _hidl_cb) override;
+ Return<void> connect(const sp<HProducerListener>& listener,
+ int32_t api, bool producerControlledByApp,
+ connect_cb _hidl_cb) override;
+ Return<int32_t> disconnect(
+ int32_t api,
+ HGraphicBufferProducer::DisconnectMode mode) override;
+ Return<int32_t> setSidebandStream(const hidl_handle& stream) override;
+ Return<void> allocateBuffers(
+ uint32_t width, uint32_t height,
+ PixelFormat format, uint32_t usage) override;
+ Return<int32_t> allowAllocation(bool allow) override;
+ Return<int32_t> setGenerationNumber(uint32_t generationNumber) override;
+ Return<void> getConsumerName(getConsumerName_cb _hidl_cb) override;
+ Return<int32_t> setSharedBufferMode(bool sharedBufferMode) override;
+ Return<int32_t> setAutoRefresh(bool autoRefresh) override;
+ Return<int32_t> setDequeueTimeout(int64_t timeoutNs) override;
+ Return<void> getLastQueuedBuffer(getLastQueuedBuffer_cb _hidl_cb) override;
+ Return<void> getFrameTimestamps(getFrameTimestamps_cb _hidl_cb) override;
+ Return<void> getUniqueId(getUniqueId_cb _hidl_cb) override;
+};
+
+} // namespace implementation
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
+
+#endif  // ANDROID_HARDWARE_MEDIA_OMX_V1_0_WGRAPHICBUFFERPRODUCER_H
diff --git a/media/libstagefright/omx/1.0/WGraphicBufferSource.cpp b/media/libstagefright/omx/1.0/WGraphicBufferSource.cpp
new file mode 100644
index 0000000..e876306
--- /dev/null
+++ b/media/libstagefright/omx/1.0/WGraphicBufferSource.cpp
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "TWGraphicBufferSource"
+
+#include <android/hardware/media/omx/1.0/IOmxBufferSource.h>
+#include <android/hardware/media/omx/1.0/IOmxNode.h>
+#include <OMX_Component.h>
+#include <OMX_IndexExt.h>
+
+#include "omx/OMXUtils.h"
+#include "WGraphicBufferSource.h"
+#include "WOmxNode.h"
+#include "Conversion.h"
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+static const OMX_U32 kPortIndexInput = 0;
+
+struct TWGraphicBufferSource::TWOmxNodeWrapper : public IOmxNodeWrapper {
+ sp<IOmxNode> mOmxNode;
+
+ TWOmxNodeWrapper(const sp<IOmxNode> &omxNode): mOmxNode(omxNode) {
+ }
+
+ virtual status_t emptyBuffer(
+ int32_t bufferId, uint32_t flags,
+ const sp<GraphicBuffer> &buffer,
+ int64_t timestamp, int fenceFd) override {
+ CodecBuffer tBuffer;
+ native_handle_t* fenceNh = native_handle_create_from_fd(fenceFd);
+ status_t err = toStatusT(mOmxNode->emptyBuffer(
+ bufferId,
+ *wrapAs(&tBuffer, buffer),
+ flags,
+ toRawTicks(timestamp),
+ fenceNh));
+ native_handle_close(fenceNh);
+ native_handle_delete(fenceNh);
+ return err;
+ }
+
+ virtual void dispatchDataSpaceChanged(
+ int32_t dataSpace, int32_t aspects, int32_t pixelFormat) override {
+ Message tMsg;
+ tMsg.type = Message::Type::EVENT;
+ tMsg.fence = native_handle_create(0, 0);
+ tMsg.data.eventData.event = uint32_t(OMX_EventDataSpaceChanged);
+ tMsg.data.eventData.data1 = dataSpace;
+ tMsg.data.eventData.data2 = aspects;
+ tMsg.data.eventData.data3 = pixelFormat;
+ mOmxNode->dispatchMessage(tMsg);
+ }
+};
+
+struct TWGraphicBufferSource::TWOmxBufferSource : public IOmxBufferSource {
+ sp<GraphicBufferSource> mSource;
+
+ TWOmxBufferSource(const sp<GraphicBufferSource> &source): mSource(source) {
+ }
+
+ Return<void> onOmxExecuting() override {
+ mSource->onOmxExecuting();
+ return Void();
+ }
+
+ Return<void> onOmxIdle() override {
+ mSource->onOmxIdle();
+ return Void();
+ }
+
+ Return<void> onOmxLoaded() override {
+ mSource->onOmxLoaded();
+ return Void();
+ }
+
+ Return<void> onInputBufferAdded(uint32_t bufferId) override {
+ mSource->onInputBufferAdded(static_cast<int32_t>(bufferId));
+ return Void();
+ }
+
+ Return<void> onInputBufferEmptied(
+ uint32_t bufferId, hidl_handle const& tFence) override {
+ mSource->onInputBufferEmptied(
+ static_cast<int32_t>(bufferId),
+ native_handle_read_fd(tFence));
+ return Void();
+ }
+};
+
+// TWGraphicBufferSource
+TWGraphicBufferSource::TWGraphicBufferSource(
+ sp<GraphicBufferSource> const& base) :
+ mBase(base),
+ mOmxBufferSource(new TWOmxBufferSource(base)) {
+}
+
+Return<Status> TWGraphicBufferSource::configure(
+ const sp<IOmxNode>& omxNode, Dataspace dataspace) {
+ if (omxNode == NULL) {
+ return toStatus(BAD_VALUE);
+ }
+
+ // Do setInputSurface() first, the node will try to enable metadata
+ // mode on input, and does necessary error checking. If this fails,
+ // we can't use this input surface on the node.
+ Return<Status> err(omxNode->setInputSurface(mOmxBufferSource));
+ status_t fnStatus = toStatusT(err);
+ if (fnStatus != NO_ERROR) {
+ ALOGE("Unable to set input surface: %d", fnStatus);
+ return err;
+ }
+
+ // use consumer usage bits queried from encoder, but always add
+ // HW_VIDEO_ENCODER for backward compatibility.
+ uint32_t consumerUsage;
+ void *_params = &consumerUsage;
+ uint8_t *params = static_cast<uint8_t*>(_params);
+ fnStatus = UNKNOWN_ERROR;
+ IOmxNode::getParameter_cb _hidl_cb(
+ [&fnStatus, ¶ms](Status status, hidl_vec<uint8_t> const& outParams) {
+ fnStatus = toStatusT(status);
+ std::copy(
+ outParams.data(),
+ outParams.data() + outParams.size(),
+ params);
+ });
+ omxNode->getParameter(
+ static_cast<uint32_t>(OMX_IndexParamConsumerUsageBits),
+ inHidlBytes(&consumerUsage, sizeof(consumerUsage)),
+ _hidl_cb);
+ if (fnStatus != OK) {
+ consumerUsage = 0;
+ }
+
+ OMX_PARAM_PORTDEFINITIONTYPE def;
+ InitOMXParams(&def);
+ def.nPortIndex = kPortIndexInput;
+
+ _params = &def;
+ params = static_cast<uint8_t*>(_params);
+ omxNode->getParameter(
+ static_cast<uint32_t>(OMX_IndexParamPortDefinition),
+ inHidlBytes(&def, sizeof(def)),
+ _hidl_cb);
+ if (fnStatus != NO_ERROR) {
+ ALOGE("Failed to get port definition: %d", fnStatus);
+ return toStatus(fnStatus);
+ }
+
+
+ return toStatus(mBase->configure(
+ new TWOmxNodeWrapper(omxNode),
+ toRawDataspace(dataspace),
+ def.nBufferCountActual,
+ def.format.video.nFrameWidth,
+ def.format.video.nFrameHeight,
+ consumerUsage));
+}
+
+Return<Status> TWGraphicBufferSource::setSuspend(
+ bool suspend, int64_t timeUs) {
+ return toStatus(mBase->setSuspend(suspend, timeUs));
+}
+
+Return<Status> TWGraphicBufferSource::setRepeatPreviousFrameDelayUs(
+ int64_t repeatAfterUs) {
+ return toStatus(mBase->setRepeatPreviousFrameDelayUs(repeatAfterUs));
+}
+
+Return<Status> TWGraphicBufferSource::setMaxFps(float maxFps) {
+ return toStatus(mBase->setMaxFps(maxFps));
+}
+
+Return<Status> TWGraphicBufferSource::setTimeLapseConfig(
+ double fps, double captureFps) {
+ return toStatus(mBase->setTimeLapseConfig(fps, captureFps));
+}
+
+Return<Status> TWGraphicBufferSource::setStartTimeUs(int64_t startTimeUs) {
+ return toStatus(mBase->setStartTimeUs(startTimeUs));
+}
+
+Return<Status> TWGraphicBufferSource::setStopTimeUs(int64_t stopTimeUs) {
+ return toStatus(mBase->setStopTimeUs(stopTimeUs));
+}
+
+Return<void> TWGraphicBufferSource::getStopTimeOffsetUs(
+ getStopTimeOffsetUs_cb _hidl_cb) {
+ // TODO: Implement this when needed.
+ _hidl_cb(Status::OK, 0);
+ return Void();
+}
+
+Return<Status> TWGraphicBufferSource::setColorAspects(
+ const ColorAspects& aspects) {
+ return toStatus(mBase->setColorAspects(toCompactColorAspects(aspects)));
+}
+
+Return<Status> TWGraphicBufferSource::setTimeOffsetUs(int64_t timeOffsetUs) {
+ return toStatus(mBase->setTimeOffsetUs(timeOffsetUs));
+}
+
+Return<Status> TWGraphicBufferSource::signalEndOfInputStream() {
+ return toStatus(mBase->signalEndOfInputStream());
+}
+
+} // namespace implementation
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
diff --git a/media/libstagefright/omx/1.0/WGraphicBufferSource.h b/media/libstagefright/omx/1.0/WGraphicBufferSource.h
new file mode 100644
index 0000000..4549c97
--- /dev/null
+++ b/media/libstagefright/omx/1.0/WGraphicBufferSource.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WGRAPHICBUFFERSOURCE_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WGRAPHICBUFFERSOURCE_H
+
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+#include <android/hardware/media/omx/1.0/IGraphicBufferSource.h>
+#include <android/hardware/media/omx/1.0/IOmxNode.h>
+#include <android/hardware/graphics/common/1.0/types.h>
+#include <android/hardware/media/omx/1.0/IOmxNode.h>
+#include <android/hardware/media/omx/1.0/IGraphicBufferSource.h>
+
+#include <android/BnGraphicBufferSource.h>
+
+#include "../GraphicBufferSource.h"
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::GraphicBufferSource;
+using ::android::hardware::graphics::common::V1_0::Dataspace;
+using ::android::hardware::media::omx::V1_0::ColorAspects;
+using ::android::hardware::media::omx::V1_0::IGraphicBufferSource;
+using ::android::hardware::media::omx::V1_0::IOmxNode;
+using ::android::hardware::media::omx::V1_0::Status;
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+using ::android::IOMXNode;
+
+/**
+ * Wrapper classes for conversion
+ * ==============================
+ *
+ * Naming convention:
+ * - LW = Legacy Wrapper --- It wraps a Treble object inside a legacy object.
+ * - TW = Treble Wrapper --- It wraps a legacy object inside a Treble object.
+ */
+
+typedef ::android::hardware::media::omx::V1_0::IGraphicBufferSource
+ TGraphicBufferSource;
+
+struct TWGraphicBufferSource : public TGraphicBufferSource {
+ struct TWOmxNodeWrapper;
+ struct TWOmxBufferSource;
+ sp<GraphicBufferSource> mBase;
+ sp<IOmxBufferSource> mOmxBufferSource;
+
+ TWGraphicBufferSource(sp<GraphicBufferSource> const& base);
+ Return<Status> configure(
+ const sp<IOmxNode>& omxNode, Dataspace dataspace) override;
+ Return<Status> setSuspend(bool suspend, int64_t timeUs) override;
+ Return<Status> setRepeatPreviousFrameDelayUs(int64_t repeatAfterUs) override;
+ Return<Status> setMaxFps(float maxFps) override;
+ Return<Status> setTimeLapseConfig(double fps, double captureFps) override;
+ Return<Status> setStartTimeUs(int64_t startTimeUs) override;
+ Return<Status> setStopTimeUs(int64_t stopTimeUs) override;
+ Return<void> getStopTimeOffsetUs(getStopTimeOffsetUs_cb _hidl_cb) override;
+ Return<Status> setColorAspects(const ColorAspects& aspects) override;
+ Return<Status> setTimeOffsetUs(int64_t timeOffsetUs) override;
+ Return<Status> signalEndOfInputStream() override;
+};
+
+} // namespace implementation
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_MEDIA_OMX_V1_0_WGRAPHICBUFFERSOURCE_H
diff --git a/media/libstagefright/omx/1.0/WOmxBufferSource.cpp b/media/libstagefright/omx/1.0/WOmxBufferSource.cpp
new file mode 100644
index 0000000..803283a
--- /dev/null
+++ b/media/libstagefright/omx/1.0/WOmxBufferSource.cpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <utils/String8.h>
+
+#include "WOmxBufferSource.h"
+#include "Conversion.h"
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+// LWOmxBufferSource
+LWOmxBufferSource::LWOmxBufferSource(sp<IOmxBufferSource> const& base) :
+ mBase(base) {
+}
+
+::android::binder::Status LWOmxBufferSource::onOmxExecuting() {
+ return toBinderStatus(mBase->onOmxExecuting());
+}
+
+::android::binder::Status LWOmxBufferSource::onOmxIdle() {
+ return toBinderStatus(mBase->onOmxIdle());
+}
+
+::android::binder::Status LWOmxBufferSource::onOmxLoaded() {
+ return toBinderStatus(mBase->onOmxLoaded());
+}
+
+::android::binder::Status LWOmxBufferSource::onInputBufferAdded(
+ int32_t bufferId) {
+ return toBinderStatus(mBase->onInputBufferAdded(
+ static_cast<uint32_t>(bufferId)));
+}
+
+::android::binder::Status LWOmxBufferSource::onInputBufferEmptied(
+ int32_t bufferId, OMXFenceParcelable const& fenceParcel) {
+ hidl_handle fence;
+ native_handle_t* fenceNh;
+ if (!wrapAs(&fence, &fenceNh, fenceParcel)) {
+ return ::android::binder::Status::fromExceptionCode(
+ ::android::binder::Status::EX_BAD_PARCELABLE,
+ "Invalid fence");
+ }
+ ::android::binder::Status status = toBinderStatus(
+ mBase->onInputBufferEmptied(
+ static_cast<uint32_t>(bufferId), fence));
+ native_handle_close(fenceNh);
+ native_handle_delete(fenceNh);
+ return status;
+}
+
+// TWOmxBufferSource
+TWOmxBufferSource::TWOmxBufferSource(sp<IOMXBufferSource> const& base) :
+ mBase(base) {
+}
+
+Return<void> TWOmxBufferSource::onOmxExecuting() {
+ mBase->onOmxExecuting();
+ return Void();
+}
+
+Return<void> TWOmxBufferSource::onOmxIdle() {
+ mBase->onOmxIdle();
+ return Void();
+}
+
+Return<void> TWOmxBufferSource::onOmxLoaded() {
+ mBase->onOmxLoaded();
+ return Void();
+}
+
+Return<void> TWOmxBufferSource::onInputBufferAdded(uint32_t buffer) {
+ mBase->onInputBufferAdded(int32_t(buffer));
+ return Void();
+}
+
+Return<void> TWOmxBufferSource::onInputBufferEmptied(
+ uint32_t buffer, hidl_handle const& fence) {
+ OMXFenceParcelable fenceParcelable;
+ if (!convertTo(&fenceParcelable, fence)) {
+ return Void();
+ }
+ mBase->onInputBufferEmptied(int32_t(buffer), fenceParcelable);
+ return Void();
+}
+
+} // namespace implementation
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
diff --git a/media/libstagefright/omx/1.0/WOmxBufferSource.h b/media/libstagefright/omx/1.0/WOmxBufferSource.h
new file mode 100644
index 0000000..9b27796
--- /dev/null
+++ b/media/libstagefright/omx/1.0/WOmxBufferSource.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXBUFFERSOURCE_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXBUFFERSOURCE_H
+
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+#include <binder/Binder.h>
+#include <media/OMXFenceParcelable.h>
+
+#include <android/hardware/media/omx/1.0/IOmxBufferSource.h>
+#include <android/BnOMXBufferSource.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::hardware::media::omx::V1_0::IOmxBufferSource;
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::hidl_handle;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+using ::android::OMXFenceParcelable;
+using ::android::IOMXBufferSource;
+using ::android::BnOMXBufferSource;
+
+/**
+ * Wrapper classes for conversion
+ * ==============================
+ *
+ * Naming convention:
+ * - LW = Legacy Wrapper --- It wraps a Treble object inside a legacy object.
+ * - TW = Treble Wrapper --- It wraps a legacy object inside a Treble object.
+ */
+
+struct LWOmxBufferSource : public BnOMXBufferSource {
+ sp<IOmxBufferSource> mBase;
+ LWOmxBufferSource(sp<IOmxBufferSource> const& base);
+ ::android::binder::Status onOmxExecuting() override;
+ ::android::binder::Status onOmxIdle() override;
+ ::android::binder::Status onOmxLoaded() override;
+ ::android::binder::Status onInputBufferAdded(int32_t bufferID) override;
+ ::android::binder::Status onInputBufferEmptied(
+ int32_t bufferID, OMXFenceParcelable const& fenceParcel) override;
+};
+
+struct TWOmxBufferSource : public IOmxBufferSource {
+ sp<IOMXBufferSource> mBase;
+ TWOmxBufferSource(sp<IOMXBufferSource> const& base);
+ Return<void> onOmxExecuting() override;
+ Return<void> onOmxIdle() override;
+ Return<void> onOmxLoaded() override;
+ Return<void> onInputBufferAdded(uint32_t buffer) override;
+ Return<void> onInputBufferEmptied(
+ uint32_t buffer, hidl_handle const& fence) override;
+};
+
+
+} // namespace implementation
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXBUFFERSOURCE_H
diff --git a/media/libstagefright/omx/1.0/WOmxNode.cpp b/media/libstagefright/omx/1.0/WOmxNode.cpp
new file mode 100644
index 0000000..91d1010
--- /dev/null
+++ b/media/libstagefright/omx/1.0/WOmxNode.cpp
@@ -0,0 +1,431 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+
+#include "WOmxNode.h"
+#include "WOmxBufferSource.h"
+#include "Conversion.h"
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::hardware::Void;
+
+// LWOmxNode
+LWOmxNode::LWOmxNode(sp<IOmxNode> const& base) : mBase(base) {
+}
+
+status_t LWOmxNode::freeNode() {
+ return toStatusT(mBase->freeNode());
+}
+
+status_t LWOmxNode::sendCommand(
+ OMX_COMMANDTYPE cmd, OMX_S32 param) {
+ return toStatusT(mBase->sendCommand(
+ toRawCommandType(cmd), param));
+}
+
+status_t LWOmxNode::getParameter(
+ OMX_INDEXTYPE index, void *params, size_t size) {
+ hidl_vec<uint8_t> tParams = inHidlBytes(params, size);
+ status_t fnStatus;
+ status_t transStatus = toStatusT(mBase->getParameter(
+ toRawIndexType(index),
+ tParams,
+ [&fnStatus, params](
+ Status status, hidl_vec<uint8_t> const& outParams) {
+ fnStatus = toStatusT(status);
+ std::copy(
+ outParams.data(),
+ outParams.data() + outParams.size(),
+ static_cast<uint8_t*>(params));
+ }));
+ return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxNode::setParameter(
+ OMX_INDEXTYPE index, const void *params, size_t size) {
+ hidl_vec<uint8_t> tParams = inHidlBytes(params, size);
+ return toStatusT(mBase->setParameter(
+ toRawIndexType(index), tParams));
+}
+
+status_t LWOmxNode::getConfig(
+ OMX_INDEXTYPE index, void *params, size_t size) {
+ hidl_vec<uint8_t> tParams = inHidlBytes(params, size);
+ status_t fnStatus;
+ status_t transStatus = toStatusT(mBase->getConfig(
+ toRawIndexType(index),
+ tParams,
+ [&fnStatus, params, size](
+ Status status, hidl_vec<uint8_t> const& outParams) {
+ fnStatus = toStatusT(status);
+ std::copy(
+ outParams.data(),
+ outParams.data() + size,
+ static_cast<uint8_t*>(params));
+ }));
+ return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxNode::setConfig(
+ OMX_INDEXTYPE index, const void *params, size_t size) {
+ hidl_vec<uint8_t> tParams = inHidlBytes(params, size);
+ return toStatusT(mBase->setConfig(toRawIndexType(index), tParams));
+}
+
+status_t LWOmxNode::setPortMode(
+ OMX_U32 port_index, IOMX::PortMode mode) {
+ return toStatusT(mBase->setPortMode(port_index, toHardwarePortMode(mode)));
+}
+
+status_t LWOmxNode::prepareForAdaptivePlayback(
+ OMX_U32 portIndex, OMX_BOOL enable,
+ OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight) {
+ return toStatusT(mBase->prepareForAdaptivePlayback(
+ portIndex, toRawBool(enable), maxFrameWidth, maxFrameHeight));
+}
+
+status_t LWOmxNode::configureVideoTunnelMode(
+ OMX_U32 portIndex, OMX_BOOL tunneled,
+ OMX_U32 audioHwSync, native_handle_t **sidebandHandle) {
+ status_t fnStatus;
+ status_t transStatus = toStatusT(mBase->configureVideoTunnelMode(
+ portIndex,
+ toRawBool(tunneled),
+ audioHwSync,
+ [&fnStatus, sidebandHandle](
+ Status status, hidl_handle const& outSidebandHandle) {
+ fnStatus = toStatusT(status);
+ *sidebandHandle = outSidebandHandle == nullptr ?
+ nullptr : native_handle_clone(outSidebandHandle);
+ }));
+ return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxNode::getGraphicBufferUsage(
+ OMX_U32 portIndex, OMX_U32* usage) {
+ status_t fnStatus;
+ status_t transStatus = toStatusT(mBase->getGraphicBufferUsage(
+ portIndex,
+ [&fnStatus, usage](
+ Status status, uint32_t outUsage) {
+ fnStatus = toStatusT(status);
+ *usage = outUsage;
+ }));
+ return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxNode::setInputSurface(
+ const sp<IOMXBufferSource> &bufferSource) {
+ return toStatusT(mBase->setInputSurface(
+ new TWOmxBufferSource(bufferSource)));
+}
+
+status_t LWOmxNode::allocateSecureBuffer(
+ OMX_U32 portIndex, size_t size, buffer_id *buffer,
+ void **buffer_data, sp<NativeHandle> *native_handle) {
+ *buffer_data = nullptr;
+ status_t fnStatus;
+ status_t transStatus = toStatusT(mBase->allocateSecureBuffer(
+ portIndex,
+ static_cast<uint64_t>(size),
+ [&fnStatus, buffer, native_handle](
+ Status status,
+ uint32_t outBuffer,
+ hidl_handle const& outNativeHandle) {
+ fnStatus = toStatusT(status);
+ *buffer = outBuffer;
+ *native_handle = NativeHandle::create(
+ native_handle_clone(outNativeHandle), true);
+ }));
+ return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxNode::useBuffer(
+ OMX_U32 portIndex, const OMXBuffer &omxBuffer, buffer_id *buffer) {
+ CodecBuffer codecBuffer;
+ if (!wrapAs(&codecBuffer, omxBuffer)) {
+ return BAD_VALUE;
+ }
+ status_t fnStatus;
+ status_t transStatus = toStatusT(mBase->useBuffer(
+ portIndex,
+ codecBuffer,
+ [&fnStatus, buffer](Status status, uint32_t outBuffer) {
+ fnStatus = toStatusT(status);
+ *buffer = outBuffer;
+ }));
+ return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxNode::freeBuffer(
+ OMX_U32 portIndex, buffer_id buffer) {
+ return toStatusT(mBase->freeBuffer(portIndex, buffer));
+}
+
+status_t LWOmxNode::fillBuffer(
+ buffer_id buffer, const OMXBuffer &omxBuffer, int fenceFd) {
+ CodecBuffer codecBuffer;
+ if (!wrapAs(&codecBuffer, omxBuffer)) {
+ return BAD_VALUE;
+ }
+ native_handle_t* fenceNh = native_handle_create_from_fd(fenceFd);
+ if (!fenceNh) {
+ return NO_MEMORY;
+ }
+ status_t status = toStatusT(mBase->fillBuffer(
+ buffer, codecBuffer, fenceNh));
+ native_handle_close(fenceNh);
+ native_handle_delete(fenceNh);
+ return status;
+}
+
+status_t LWOmxNode::emptyBuffer(
+ buffer_id buffer, const OMXBuffer &omxBuffer,
+ OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
+ CodecBuffer codecBuffer;
+ if (!wrapAs(&codecBuffer, omxBuffer)) {
+ return BAD_VALUE;
+ }
+ native_handle_t* fenceNh = native_handle_create_from_fd(fenceFd);
+ if (!fenceNh) {
+ return NO_MEMORY;
+ }
+ status_t status = toStatusT(mBase->emptyBuffer(
+ buffer,
+ codecBuffer,
+ flags,
+ toRawTicks(timestamp),
+ fenceNh));
+ native_handle_close(fenceNh);
+ native_handle_delete(fenceNh);
+ return status;
+}
+status_t LWOmxNode::getExtensionIndex(
+ const char *parameter_name,
+ OMX_INDEXTYPE *index) {
+ status_t fnStatus;
+ status_t transStatus = toStatusT(mBase->getExtensionIndex(
+ hidl_string(parameter_name),
+ [&fnStatus, index](Status status, uint32_t outIndex) {
+ fnStatus = toStatusT(status);
+ *index = toEnumIndexType(outIndex);
+ }));
+ return transStatus == NO_ERROR ? fnStatus : transStatus;
+}
+
+status_t LWOmxNode::dispatchMessage(const omx_message &lMsg) {
+ Message tMsg;
+ native_handle_t* nh;
+ if (!wrapAs(&tMsg, &nh, lMsg)) {
+ return NO_MEMORY;
+ }
+ status_t status = toStatusT(mBase->dispatchMessage(tMsg));
+ native_handle_close(nh);
+ native_handle_delete(nh);
+ return status;
+}
+
+// TWOmxNode
+TWOmxNode::TWOmxNode(sp<IOMXNode> const& base) : mBase(base) {
+}
+
+Return<Status> TWOmxNode::freeNode() {
+ return toStatus(mBase->freeNode());
+}
+
+Return<Status> TWOmxNode::sendCommand(uint32_t cmd, int32_t param) {
+ return toStatus(mBase->sendCommand(toEnumCommandType(cmd), param));
+}
+
+Return<void> TWOmxNode::getParameter(
+ uint32_t index, hidl_vec<uint8_t> const& inParams,
+ getParameter_cb _hidl_cb) {
+ hidl_vec<uint8_t> params(inParams);
+ Status status = toStatus(mBase->getParameter(
+ toEnumIndexType(index),
+ static_cast<void*>(params.data()),
+ params.size()));
+ _hidl_cb(status, params);
+ return Void();
+}
+
+Return<Status> TWOmxNode::setParameter(
+ uint32_t index, hidl_vec<uint8_t> const& inParams) {
+ hidl_vec<uint8_t> params(inParams);
+ return toStatus(mBase->setParameter(
+ toEnumIndexType(index),
+ static_cast<void const*>(params.data()),
+ params.size()));
+}
+
+Return<void> TWOmxNode::getConfig(
+ uint32_t index, const hidl_vec<uint8_t>& inConfig,
+ getConfig_cb _hidl_cb) {
+ hidl_vec<uint8_t> config(inConfig);
+ Status status = toStatus(mBase->getConfig(
+ toEnumIndexType(index),
+ static_cast<void*>(config.data()),
+ config.size()));
+ _hidl_cb(status, config);
+ return Void();
+}
+
+Return<Status> TWOmxNode::setConfig(
+ uint32_t index, const hidl_vec<uint8_t>& inConfig) {
+ hidl_vec<uint8_t> config(inConfig);
+ return toStatus(mBase->setConfig(
+ toEnumIndexType(index),
+ static_cast<void const*>(config.data()),
+ config.size()));
+}
+
+Return<Status> TWOmxNode::setPortMode(uint32_t portIndex, PortMode mode) {
+ return toStatus(mBase->setPortMode(portIndex, toIOMXPortMode(mode)));
+}
+
+Return<Status> TWOmxNode::prepareForAdaptivePlayback(
+ uint32_t portIndex, bool enable,
+ uint32_t maxFrameWidth, uint32_t maxFrameHeight) {
+ return toStatus(mBase->prepareForAdaptivePlayback(
+ portIndex,
+ toEnumBool(enable),
+ maxFrameWidth,
+ maxFrameHeight));
+}
+
+Return<void> TWOmxNode::configureVideoTunnelMode(
+ uint32_t portIndex, bool tunneled, uint32_t audioHwSync,
+ configureVideoTunnelMode_cb _hidl_cb) {
+ native_handle_t* sidebandHandle = nullptr;
+ Status status = toStatus(mBase->configureVideoTunnelMode(
+ portIndex,
+ toEnumBool(tunneled),
+ audioHwSync,
+ &sidebandHandle));
+ _hidl_cb(status, hidl_handle(sidebandHandle));
+ return Void();
+}
+
+Return<void> TWOmxNode::getGraphicBufferUsage(
+ uint32_t portIndex, getGraphicBufferUsage_cb _hidl_cb) {
+ OMX_U32 usage;
+ Status status = toStatus(mBase->getGraphicBufferUsage(
+ portIndex, &usage));
+ _hidl_cb(status, usage);
+ return Void();
+}
+
+Return<Status> TWOmxNode::setInputSurface(
+ const sp<IOmxBufferSource>& bufferSource) {
+ return toStatus(mBase->setInputSurface(new LWOmxBufferSource(
+ bufferSource)));
+}
+
+Return<void> TWOmxNode::allocateSecureBuffer(
+ uint32_t portIndex, uint64_t size,
+ allocateSecureBuffer_cb _hidl_cb) {
+ IOMX::buffer_id buffer;
+ void* bufferData;
+ sp<NativeHandle> nativeHandle;
+ Status status = toStatus(mBase->allocateSecureBuffer(
+ portIndex,
+ static_cast<size_t>(size),
+ &buffer,
+ &bufferData,
+ &nativeHandle));
+ _hidl_cb(status, buffer, nativeHandle == nullptr ?
+ nullptr : nativeHandle->handle());
+ return Void();
+}
+
+Return<void> TWOmxNode::useBuffer(
+ uint32_t portIndex, const CodecBuffer& codecBuffer,
+ useBuffer_cb _hidl_cb) {
+ IOMX::buffer_id buffer;
+ OMXBuffer omxBuffer;
+ if (!convertTo(&omxBuffer, codecBuffer)) {
+ _hidl_cb(Status::BAD_VALUE, 0);
+ return Void();
+ }
+ Status status = toStatus(mBase->useBuffer(
+ portIndex, omxBuffer, &buffer));
+ _hidl_cb(status, buffer);
+ return Void();
+}
+
+Return<Status> TWOmxNode::freeBuffer(uint32_t portIndex, uint32_t buffer) {
+ return toStatus(mBase->freeBuffer(portIndex, buffer));
+}
+
+Return<Status> TWOmxNode::fillBuffer(
+ uint32_t buffer, const CodecBuffer& codecBuffer,
+ const hidl_handle& fence) {
+ OMXBuffer omxBuffer;
+ if (!convertTo(&omxBuffer, codecBuffer)) {
+ return Status::BAD_VALUE;
+ }
+ return toStatus(mBase->fillBuffer(
+ buffer,
+ omxBuffer,
+ dup(native_handle_read_fd(fence))));
+}
+
+Return<Status> TWOmxNode::emptyBuffer(
+ uint32_t buffer, const CodecBuffer& codecBuffer, uint32_t flags,
+ uint64_t timestampUs, const hidl_handle& fence) {
+ OMXBuffer omxBuffer;
+ if (!convertTo(&omxBuffer, codecBuffer)) {
+ return Status::BAD_VALUE;
+ }
+ return toStatus(mBase->emptyBuffer(
+ buffer,
+ omxBuffer,
+ flags,
+ toOMXTicks(timestampUs),
+ dup(native_handle_read_fd(fence))));
+}
+
+Return<void> TWOmxNode::getExtensionIndex(
+ const hidl_string& parameterName,
+ getExtensionIndex_cb _hidl_cb) {
+ OMX_INDEXTYPE index;
+ Status status = toStatus(mBase->getExtensionIndex(
+ parameterName.c_str(), &index));
+ _hidl_cb(status, toRawIndexType(index));
+ return Void();
+}
+
+Return<Status> TWOmxNode::dispatchMessage(const Message& tMsg) {
+ omx_message lMsg;
+ if (!convertTo(&lMsg, tMsg)) {
+ return Status::BAD_VALUE;
+ }
+ return toStatus(mBase->dispatchMessage(lMsg));
+}
+
+} // namespace implementation
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
diff --git a/media/libstagefright/omx/1.0/WOmxNode.h b/media/libstagefright/omx/1.0/WOmxNode.h
new file mode 100644
index 0000000..d715374
--- /dev/null
+++ b/media/libstagefright/omx/1.0/WOmxNode.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXNODE_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXNODE_H
+
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+#include <utils/Errors.h>
+
+#include "../../include/OMXNodeInstance.h"
+
+#include <android/hardware/media/omx/1.0/IOmxNode.h>
+#include <android/hardware/media/omx/1.0/IOmxObserver.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::hardware::media::omx::V1_0::CodecBuffer;
+using ::android::hardware::media::omx::V1_0::IOmxBufferSource;
+using ::android::hardware::media::omx::V1_0::IOmxNode;
+using ::android::hardware::media::omx::V1_0::IOmxObserver;
+using ::android::hardware::media::omx::V1_0::Message;
+using ::android::hardware::media::omx::V1_0::PortMode;
+using ::android::hardware::media::omx::V1_0::Status;
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+/**
+ * Wrapper classes for conversion
+ * ==============================
+ *
+ * Naming convention:
+ * - LW = Legacy Wrapper --- It wraps a Treble object inside a legacy object.
+ * - TW = Treble Wrapper --- It wraps a legacy object inside a Treble object.
+ */
+
+struct LWOmxNode : public BnOMXNode {
+ sp<IOmxNode> mBase;
+ LWOmxNode(sp<IOmxNode> const& base);
+ status_t freeNode() override;
+ status_t sendCommand(
+ OMX_COMMANDTYPE cmd, OMX_S32 param) override;
+ status_t getParameter(
+ OMX_INDEXTYPE index, void *params, size_t size) override;
+ status_t setParameter(
+ OMX_INDEXTYPE index, const void *params, size_t size) override;
+ status_t getConfig(
+ OMX_INDEXTYPE index, void *params, size_t size) override;
+ status_t setConfig(
+ OMX_INDEXTYPE index, const void *params, size_t size) override;
+ status_t setPortMode(
+ OMX_U32 port_index, IOMX::PortMode mode) override;
+ status_t prepareForAdaptivePlayback(
+ OMX_U32 portIndex, OMX_BOOL enable,
+ OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight) override;
+ status_t configureVideoTunnelMode(
+ OMX_U32 portIndex, OMX_BOOL tunneled,
+ OMX_U32 audioHwSync, native_handle_t **sidebandHandle) override;
+ status_t getGraphicBufferUsage(
+ OMX_U32 port_index, OMX_U32* usage) override;
+ status_t setInputSurface(
+ const sp<IOMXBufferSource> &bufferSource) override;
+ status_t allocateSecureBuffer(
+ OMX_U32 port_index, size_t size, buffer_id *buffer,
+ void **buffer_data, sp<NativeHandle> *native_handle) override;
+ status_t useBuffer(
+ OMX_U32 port_index, const OMXBuffer &omxBuf,
+ buffer_id *buffer) override;
+ status_t freeBuffer(
+ OMX_U32 port_index, buffer_id buffer) override;
+ status_t fillBuffer(
+ buffer_id buffer, const OMXBuffer &omxBuf,
+ int fenceFd = -1) override;
+ status_t emptyBuffer(
+ buffer_id buffer, const OMXBuffer &omxBuf,
+ OMX_U32 flags, OMX_TICKS timestamp, int fenceFd = -1) override;
+ status_t getExtensionIndex(
+ const char *parameter_name,
+ OMX_INDEXTYPE *index) override;
+ status_t dispatchMessage(const omx_message &msg) override;
+};
+
+struct TWOmxNode : public IOmxNode {
+ sp<IOMXNode> mBase;
+ TWOmxNode(sp<IOMXNode> const& base);
+
+ Return<Status> freeNode() override;
+ Return<Status> sendCommand(uint32_t cmd, int32_t param) override;
+ Return<void> getParameter(
+ uint32_t index, hidl_vec<uint8_t> const& inParams,
+ getParameter_cb _hidl_cb) override;
+ Return<Status> setParameter(
+ uint32_t index, hidl_vec<uint8_t> const& params) override;
+ Return<void> getConfig(
+ uint32_t index, hidl_vec<uint8_t> const& inConfig,
+ getConfig_cb _hidl_cb) override;
+ Return<Status> setConfig(
+ uint32_t index, hidl_vec<uint8_t> const& config) override;
+ Return<Status> setPortMode(uint32_t portIndex, PortMode mode) override;
+ Return<Status> prepareForAdaptivePlayback(
+ uint32_t portIndex, bool enable,
+ uint32_t maxFrameWidth, uint32_t maxFrameHeight) override;
+ Return<void> configureVideoTunnelMode(
+ uint32_t portIndex, bool tunneled, uint32_t audioHwSync,
+ configureVideoTunnelMode_cb _hidl_cb) override;
+ Return<void> getGraphicBufferUsage(
+ uint32_t portIndex,
+ getGraphicBufferUsage_cb _hidl_cb) override;
+ Return<Status> setInputSurface(
+ sp<IOmxBufferSource> const& bufferSource) override;
+ Return<void> allocateSecureBuffer(
+ uint32_t portIndex, uint64_t size,
+ allocateSecureBuffer_cb _hidl_cb) override;
+ Return<void> useBuffer(
+ uint32_t portIndex, CodecBuffer const& codecBuffer,
+ useBuffer_cb _hidl_cb) override;
+ Return<Status> freeBuffer(uint32_t portIndex, uint32_t buffer) override;
+ Return<Status> fillBuffer(
+ uint32_t buffer, CodecBuffer const& codecBuffer,
+ const hidl_handle& fence) override;
+ Return<Status> emptyBuffer(
+ uint32_t buffer, CodecBuffer const& codecBuffer,
+ uint32_t flags, uint64_t timestampUs,
+ hidl_handle const& fence) override;
+ Return<void> getExtensionIndex(
+ hidl_string const& parameterName,
+ getExtensionIndex_cb _hidl_cb) override;
+ Return<Status> dispatchMessage(Message const& msg) override;
+};
+
+} // namespace implementation
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXNODE_H
diff --git a/media/libstagefright/omx/1.0/WOmxObserver.cpp b/media/libstagefright/omx/1.0/WOmxObserver.cpp
new file mode 100644
index 0000000..354db29
--- /dev/null
+++ b/media/libstagefright/omx/1.0/WOmxObserver.cpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "WOmxObserver-impl"
+
+#include <vector>
+
+#include <android-base/logging.h>
+#include <cutils/native_handle.h>
+#include <binder/Binder.h>
+
+#include "WOmxObserver.h"
+#include "Conversion.h"
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+// LWOmxObserver
+LWOmxObserver::LWOmxObserver(sp<IOmxObserver> const& base) : mBase(base) {
+}
+
+void LWOmxObserver::onMessages(std::list<omx_message> const& lMessages) {
+ hidl_vec<Message> tMessages;
+ std::vector<native_handle_t*> handles(lMessages.size());
+ tMessages.resize(lMessages.size());
+ size_t i = 0;
+ for (auto const& message : lMessages) {
+ wrapAs(&tMessages[i], &handles[i], message);
+ ++i;
+ }
+ auto transResult = mBase->onMessages(tMessages);
+ if (!transResult.isOk()) {
+ LOG(ERROR) << "LWOmxObserver::onMessages - Transaction failed";
+ }
+ for (auto& handle : handles) {
+ native_handle_close(handle);
+ native_handle_delete(handle);
+ }
+}
+
+// TWOmxObserver
+TWOmxObserver::TWOmxObserver(sp<IOMXObserver> const& base) : mBase(base) {
+}
+
+Return<void> TWOmxObserver::onMessages(const hidl_vec<Message>& tMessages) {
+ std::list<omx_message> lMessages;
+ for (size_t i = 0; i < tMessages.size(); ++i) {
+ lMessages.push_back(omx_message{});
+ convertTo(&lMessages.back(), tMessages[i]);
+ }
+ mBase->onMessages(lMessages);
+ return Return<void>();
+}
+
+} // namespace implementation
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
diff --git a/media/libstagefright/omx/1.0/WOmxObserver.h b/media/libstagefright/omx/1.0/WOmxObserver.h
new file mode 100644
index 0000000..7075513
--- /dev/null
+++ b/media/libstagefright/omx/1.0/WOmxObserver.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXOBSERVER_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXOBSERVER_H
+
+#include <list>
+
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+#include <media/IOMX.h>
+
+#include <android/hardware/media/omx/1.0/IOmxObserver.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::hardware::media::omx::V1_0::IOmxObserver;
+using ::android::hardware::media::omx::V1_0::Message;
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+using ::android::IOMXObserver;
+using ::android::BnOMXObserver;
+using ::android::omx_message;
+
+/**
+ * Wrapper classes for conversion
+ * ==============================
+ *
+ * Naming convention:
+ * - LW = Legacy Wrapper --- It wraps a Treble object inside a legacy object.
+ * - TW = Treble Wrapper --- It wraps a legacy object inside a Treble object.
+ */
+
+struct LWOmxObserver : public BnOMXObserver {
+ sp<IOmxObserver> mBase;
+ LWOmxObserver(sp<IOmxObserver> const& base);
+ void onMessages(std::list<omx_message> const& lMessages) override;
+};
+
+struct TWOmxObserver : public IOmxObserver {
+ sp<IOMXObserver> mBase;
+ TWOmxObserver(sp<IOMXObserver> const& base);
+ Return<void> onMessages(const hidl_vec<Message>& tMessages) override;
+};
+
+} // namespace implementation
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXOBSERVER_H
diff --git a/media/libstagefright/omx/1.0/WProducerListener.cpp b/media/libstagefright/omx/1.0/WProducerListener.cpp
new file mode 100644
index 0000000..be0d4d5
--- /dev/null
+++ b/media/libstagefright/omx/1.0/WProducerListener.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "WProducerListener.h"
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+// TWProducerListener
+TWProducerListener::TWProducerListener(
+ sp<BProducerListener> const& base):
+ mBase(base) {
+}
+
+Return<void> TWProducerListener::onBufferReleased() {
+ mBase->onBufferReleased();
+ return Void();
+}
+
+Return<bool> TWProducerListener::needsReleaseNotify() {
+ return mBase->needsReleaseNotify();
+}
+
+// LWProducerListener
+LWProducerListener::LWProducerListener(
+ sp<HProducerListener> const& base):
+ mBase(base) {
+}
+
+void LWProducerListener::onBufferReleased() {
+ mBase->onBufferReleased();
+}
+
+bool LWProducerListener::needsReleaseNotify() {
+ return static_cast<bool>(mBase->needsReleaseNotify());
+}
+
+} // namespace implementation
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
diff --git a/media/libstagefright/omx/1.0/WProducerListener.h b/media/libstagefright/omx/1.0/WProducerListener.h
new file mode 100644
index 0000000..a75e48a
--- /dev/null
+++ b/media/libstagefright/omx/1.0/WProducerListener.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXPRODUCERLISTENER_H
+#define ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXPRODUCERLISTENER_H
+
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+
+#include <binder/IBinder.h>
+#include <gui/IProducerListener.h>
+
+#include <android/hardware/graphics/bufferqueue/1.0/IProducerListener.h>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::hidl::base::V1_0::IBase;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+typedef ::android::hardware::graphics::bufferqueue::V1_0::IProducerListener
+ HProducerListener;
+typedef ::android::IProducerListener
+ BProducerListener;
+using ::android::BnProducerListener;
+
+struct TWProducerListener : public HProducerListener {
+ sp<BProducerListener> mBase;
+ TWProducerListener(sp<BProducerListener> const& base);
+ Return<void> onBufferReleased() override;
+ Return<bool> needsReleaseNotify() override;
+};
+
+class LWProducerListener : public BnProducerListener {
+public:
+ sp<HProducerListener> mBase;
+ LWProducerListener(sp<HProducerListener> const& base);
+ void onBufferReleased() override;
+ bool needsReleaseNotify() override;
+};
+
+} // namespace implementation
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_MEDIA_OMX_V1_0_WOMXPRODUCERLISTENER_H
diff --git a/media/libstagefright/omx/Android.bp b/media/libstagefright/omx/Android.bp
index a76b99e..ff5b841 100644
--- a/media/libstagefright/omx/Android.bp
+++ b/media/libstagefright/omx/Android.bp
@@ -4,6 +4,7 @@
srcs: [
"FrameDropper.cpp",
"GraphicBufferSource.cpp",
+ "BWGraphicBufferSource.cpp",
"OMX.cpp",
"OMXMaster.cpp",
"OMXNodeInstance.cpp",
@@ -13,6 +14,14 @@
"SoftOMXPlugin.cpp",
"SoftVideoDecoderOMXComponent.cpp",
"SoftVideoEncoderOMXComponent.cpp",
+ "1.0/Omx.cpp",
+ "1.0/OmxStore.cpp",
+ "1.0/WGraphicBufferProducer.cpp",
+ "1.0/WProducerListener.cpp",
+ "1.0/WGraphicBufferSource.cpp",
+ "1.0/WOmxNode.cpp",
+ "1.0/WOmxObserver.cpp",
+ "1.0/WOmxBufferSource.cpp",
],
include_dirs: [
@@ -23,6 +32,7 @@
],
shared_libs: [
+ "libbase",
"libbinder",
"libmedia",
"libutils",
@@ -32,11 +42,24 @@
"libcutils",
"libstagefright_foundation",
"libdl",
+ "libhidlbase",
+ "libhidlmemory",
+ "libhidltransport",
+ "libstagefright_xmlparser@1.0",
+ "android.hidl.memory@1.0",
+ "android.hardware.media@1.0",
+ "android.hardware.media.omx@1.0",
+ "android.hardware.graphics.common@1.0",
+ "android.hardware.graphics.bufferqueue@1.0",
],
+ export_shared_lib_headers: ["android.hidl.memory@1.0"],
+
cflags: [
"-Werror",
"-Wall",
+ "-Wno-unused-parameter",
+ "-Wno-documentation",
],
sanitize: {
@@ -44,6 +67,10 @@
"signed-integer-overflow",
"unsigned-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
}
diff --git a/media/libstagefright/omx/BWGraphicBufferSource.cpp b/media/libstagefright/omx/BWGraphicBufferSource.cpp
new file mode 100644
index 0000000..2e2c461
--- /dev/null
+++ b/media/libstagefright/omx/BWGraphicBufferSource.cpp
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "BWGraphicBufferSource"
+
+#include <OMX_Component.h>
+#include <OMX_IndexExt.h>
+
+#include <media/OMXBuffer.h>
+#include <media/IOMX.h>
+
+#include "OMXUtils.h"
+#include "BWGraphicBufferSource.h"
+
+namespace android {
+
+static const OMX_U32 kPortIndexInput = 0;
+
+struct BWGraphicBufferSource::BWOmxNodeWrapper : public IOmxNodeWrapper {
+ sp<IOMXNode> mOMXNode;
+
+ BWOmxNodeWrapper(const sp<IOMXNode> &omxNode): mOMXNode(omxNode) {
+ }
+
+ virtual status_t emptyBuffer(
+ int32_t bufferId, uint32_t flags,
+ const sp<GraphicBuffer> &buffer,
+ int64_t timestamp, int fenceFd) override {
+ return mOMXNode->emptyBuffer(bufferId, buffer, flags, timestamp, fenceFd);
+ }
+
+ virtual void dispatchDataSpaceChanged(
+ int32_t dataSpace, int32_t aspects, int32_t pixelFormat) override {
+ omx_message msg;
+ msg.type = omx_message::EVENT;
+ msg.fenceFd = -1;
+ msg.u.event_data.event = OMX_EventDataSpaceChanged;
+ msg.u.event_data.data1 = dataSpace;
+ msg.u.event_data.data2 = aspects;
+ msg.u.event_data.data3 = pixelFormat;
+ mOMXNode->dispatchMessage(msg);
+ }
+};
+
+struct BWGraphicBufferSource::BWOMXBufferSource : public BnOMXBufferSource {
+ sp<GraphicBufferSource> mSource;
+
+ BWOMXBufferSource(const sp<GraphicBufferSource> &source): mSource(source) {
+ }
+
+ Status onOmxExecuting() override {
+ return mSource->onOmxExecuting();
+ }
+
+ Status onOmxIdle() override {
+ return mSource->onOmxIdle();
+ }
+
+ Status onOmxLoaded() override {
+ return mSource->onOmxLoaded();
+ }
+
+ Status onInputBufferAdded(int bufferId) override {
+ return mSource->onInputBufferAdded(bufferId);
+ }
+
+ Status onInputBufferEmptied(
+ int bufferId, const OMXFenceParcelable& fenceParcel) override {
+ return mSource->onInputBufferEmptied(bufferId, fenceParcel.get());
+ }
+};
+
+BWGraphicBufferSource::BWGraphicBufferSource(
+ sp<GraphicBufferSource> const& base) :
+ mBase(base),
+ mOMXBufferSource(new BWOMXBufferSource(base)) {
+}
+
+::android::binder::Status BWGraphicBufferSource::configure(
+ const sp<IOMXNode>& omxNode, int32_t dataSpace) {
+ // Do setInputSurface() first, the node will try to enable metadata
+ // mode on input, and does necessary error checking. If this fails,
+ // we can't use this input surface on the node.
+ status_t err = omxNode->setInputSurface(mOMXBufferSource);
+ if (err != NO_ERROR) {
+ ALOGE("Unable to set input surface: %d", err);
+ return Status::fromStatusT(err);
+ }
+
+ // use consumer usage bits queried from encoder, but always add
+ // HW_VIDEO_ENCODER for backward compatibility.
+ uint32_t consumerUsage;
+ if (omxNode->getParameter(
+ (OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits,
+ &consumerUsage, sizeof(consumerUsage)) != OK) {
+ consumerUsage = 0;
+ }
+
+ OMX_PARAM_PORTDEFINITIONTYPE def;
+ InitOMXParams(&def);
+ def.nPortIndex = kPortIndexInput;
+
+ err = omxNode->getParameter(
+ OMX_IndexParamPortDefinition, &def, sizeof(def));
+ if (err != NO_ERROR) {
+ ALOGE("Failed to get port definition: %d", err);
+ return Status::fromStatusT(UNKNOWN_ERROR);
+ }
+
+ return Status::fromStatusT(mBase->configure(
+ new BWOmxNodeWrapper(omxNode),
+ dataSpace,
+ def.nBufferCountActual,
+ def.format.video.nFrameWidth,
+ def.format.video.nFrameHeight,
+ consumerUsage));
+}
+
+::android::binder::Status BWGraphicBufferSource::setSuspend(
+ bool suspend, int64_t timeUs) {
+ return Status::fromStatusT(mBase->setSuspend(suspend, timeUs));
+}
+
+::android::binder::Status BWGraphicBufferSource::setRepeatPreviousFrameDelayUs(
+ int64_t repeatAfterUs) {
+ return Status::fromStatusT(mBase->setRepeatPreviousFrameDelayUs(repeatAfterUs));
+}
+
+::android::binder::Status BWGraphicBufferSource::setMaxFps(float maxFps) {
+ return Status::fromStatusT(mBase->setMaxFps(maxFps));
+}
+
+::android::binder::Status BWGraphicBufferSource::setTimeLapseConfig(
+ double fps, double captureFps) {
+ return Status::fromStatusT(mBase->setTimeLapseConfig(
+ fps, captureFps));
+}
+
+::android::binder::Status BWGraphicBufferSource::setStartTimeUs(
+ int64_t startTimeUs) {
+ return Status::fromStatusT(mBase->setStartTimeUs(startTimeUs));
+}
+
+::android::binder::Status BWGraphicBufferSource::setStopTimeUs(
+ int64_t stopTimeUs) {
+ return Status::fromStatusT(mBase->setStopTimeUs(stopTimeUs));
+}
+
+::android::binder::Status BWGraphicBufferSource::setColorAspects(
+ int32_t aspects) {
+ return Status::fromStatusT(mBase->setColorAspects(aspects));
+}
+
+::android::binder::Status BWGraphicBufferSource::setTimeOffsetUs(
+ int64_t timeOffsetsUs) {
+ return Status::fromStatusT(mBase->setTimeOffsetUs(timeOffsetsUs));
+}
+
+::android::binder::Status BWGraphicBufferSource::signalEndOfInputStream() {
+ return Status::fromStatusT(mBase->signalEndOfInputStream());
+}
+
+} // namespace android
diff --git a/media/libstagefright/omx/BWGraphicBufferSource.h b/media/libstagefright/omx/BWGraphicBufferSource.h
new file mode 100644
index 0000000..6f69d39
--- /dev/null
+++ b/media/libstagefright/omx/BWGraphicBufferSource.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BWGRAPHIC_BUFFER_SOURCE_H_
+#define BWGRAPHIC_BUFFER_SOURCE_H_
+
+#include <binder/Binder.h>
+#include <binder/Status.h>
+#include <android/BnGraphicBufferSource.h>
+#include <android/BnOMXBufferSource.h>
+#include <media/IOMX.h>
+
+#include "GraphicBufferSource.h"
+#include "IOmxNodeWrapper.h"
+
+namespace android {
+
+using ::android::binder::Status;
+using ::android::BnGraphicBufferSource;
+using ::android::GraphicBufferSource;
+using ::android::IOMXNode;
+using ::android::sp;
+
+struct BWGraphicBufferSource : public BnGraphicBufferSource {
+ struct BWOMXBufferSource;
+ struct BWOmxNodeWrapper;
+
+ sp<GraphicBufferSource> mBase;
+ sp<IOMXBufferSource> mOMXBufferSource;
+
+ BWGraphicBufferSource(sp<GraphicBufferSource> const &base);
+
+ Status configure(
+ const sp<IOMXNode>& omxNode, int32_t dataSpace) override;
+ Status setSuspend(bool suspend, int64_t timeUs) override;
+ Status setRepeatPreviousFrameDelayUs(
+ int64_t repeatAfterUs) override;
+ Status setMaxFps(float maxFps) override;
+ Status setTimeLapseConfig(
+ double fps, double captureFps) override;
+ Status setStartTimeUs(int64_t startTimeUs) override;
+ Status setStopTimeUs(int64_t stopTimeUs) override;
+ Status setColorAspects(int32_t aspects) override;
+ Status setTimeOffsetUs(int64_t timeOffsetsUs) override;
+ Status signalEndOfInputStream() override;
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_MEDIA_OMX_V1_0_WGRAPHICBUFFERSOURCE_H
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index 267f24d..bc02738 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -23,171 +23,277 @@
#define STRINGIFY_ENUMS // for asString in HardwareAPI.h/VideoAPI.h
#include "GraphicBufferSource.h"
-#include "OMXUtils.h"
-
-#include <OMX_Core.h>
-#include <OMX_IndexExt.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/ColorUtils.h>
+#include <media/stagefright/foundation/FileDescriptor.h>
#include <media/hardware/MetadataBufferType.h>
#include <ui/GraphicBuffer.h>
#include <gui/BufferItem.h>
#include <HardwareAPI.h>
+#include "omx/OMXUtils.h"
+#include <OMX_Component.h>
+#include <OMX_IndexExt.h>
+#include "media/OMXBuffer.h"
#include <inttypes.h>
#include "FrameDropper.h"
+#include <functional>
+#include <memory>
+#include <cmath>
+
namespace android {
-static const bool EXTRA_CHECK = true;
+/**
+ * A copiable object managing a buffer in the buffer cache managed by the producer. This object
+ * holds a reference to the buffer, and maintains which buffer slot it belongs to (if any), and
+ * whether it is still in a buffer slot. It also maintains whether there are any outstanging acquire
+ * references to it (by buffers acquired from the slot) mainly so that we can keep a debug
+ * count of how many buffers we need to still release back to the producer.
+ */
+struct GraphicBufferSource::CachedBuffer {
+ /**
+ * Token that is used to track acquire counts (as opposed to all references to this object).
+ */
+ struct Acquirable { };
-static const OMX_U32 kPortIndexInput = 0;
+ /**
+ * Create using a buffer cached in a slot.
+ */
+ CachedBuffer(slot_id slot, const sp<GraphicBuffer> &graphicBuffer)
+ : mIsCached(true),
+ mSlot(slot),
+ mGraphicBuffer(graphicBuffer),
+ mAcquirable(std::make_shared<Acquirable>()) {
+ }
-GraphicBufferSource::PersistentProxyListener::PersistentProxyListener(
- const wp<IGraphicBufferConsumer> &consumer,
- const wp<ConsumerListener>& consumerListener) :
- mConsumerListener(consumerListener),
- mConsumer(consumer) {}
+ /**
+ * Returns the cache slot that this buffer is cached in, or -1 if it is no longer cached.
+ *
+ * This assumes that -1 slot id is invalid; though, it is just a benign collision used for
+ * debugging. This object explicitly manages whether it is still cached.
+ */
+ slot_id getSlot() const {
+ return mIsCached ? mSlot : -1;
+ }
-GraphicBufferSource::PersistentProxyListener::~PersistentProxyListener() {}
+ /**
+ * Returns the cached buffer.
+ */
+ sp<GraphicBuffer> getGraphicBuffer() const {
+ return mGraphicBuffer;
+ }
-void GraphicBufferSource::PersistentProxyListener::onFrameAvailable(
- const BufferItem& item) {
- sp<ConsumerListener> listener(mConsumerListener.promote());
- if (listener != NULL) {
- listener->onFrameAvailable(item);
- } else {
- sp<IGraphicBufferConsumer> consumer(mConsumer.promote());
- if (consumer == NULL) {
- return;
+ /**
+ * Checks whether this buffer is still in the buffer cache.
+ */
+ bool isCached() const {
+ return mIsCached;
+ }
+
+ /**
+ * Checks whether this buffer has an acquired reference.
+ */
+ bool isAcquired() const {
+ return mAcquirable.use_count() > 1;
+ }
+
+ /**
+ * Gets and returns a shared acquired reference.
+ */
+ std::shared_ptr<Acquirable> getAcquirable() {
+ return mAcquirable;
+ }
+
+private:
+ friend void GraphicBufferSource::discardBufferAtSlotIndex_l(ssize_t);
+
+ /**
+ * This method to be called when the buffer is no longer in the buffer cache.
+ * Called from discardBufferAtSlotIndex_l.
+ */
+ void onDroppedFromCache() {
+ CHECK_DBG(mIsCached);
+ mIsCached = false;
+ }
+
+ bool mIsCached;
+ slot_id mSlot;
+ sp<GraphicBuffer> mGraphicBuffer;
+ std::shared_ptr<Acquirable> mAcquirable;
+};
+
+/**
+ * A copiable object managing a buffer acquired from the producer. This must always be a cached
+ * buffer. This objects also manages its acquire fence and any release fences that may be returned
+ * by the encoder for this buffer (this buffer may be queued to the encoder multiple times).
+ * If no release fences are added by the encoder, the acquire fence is returned as the release
+ * fence for this - as it is assumed that noone waited for the acquire fence. Otherwise, it is
+ * assumed that the encoder has waited for the acquire fence (or returned it as the release
+ * fence).
+ */
+struct GraphicBufferSource::AcquiredBuffer {
+ AcquiredBuffer(
+ const std::shared_ptr<CachedBuffer> &buffer,
+ std::function<void(AcquiredBuffer *)> onReleased,
+ const sp<Fence> &acquireFence)
+ : mBuffer(buffer),
+ mAcquirable(buffer->getAcquirable()),
+ mAcquireFence(acquireFence),
+ mGotReleaseFences(false),
+ mOnReleased(onReleased) {
+ }
+
+ /**
+ * Adds a release fence returned by the encoder to this object. If this is called with an
+ * valid file descriptor, it is added to the list of release fences. These are returned to the
+ * producer on release() as a merged fence. Regardless of the validity of the file descriptor,
+ * we take note that a release fence was attempted to be added and the acquire fence can now be
+ * assumed as acquired.
+ */
+ void addReleaseFenceFd(int fenceFd) {
+ // save all release fences - these will be propagated to the producer if this buffer is
+ // ever released to it
+ if (fenceFd >= 0) {
+ mReleaseFenceFds.push_back(fenceFd);
}
- BufferItem bi;
- status_t err = consumer->acquireBuffer(&bi, 0);
- if (err != OK) {
- ALOGE("PersistentProxyListener: acquireBuffer failed (%d)", err);
- return;
- }
+ mGotReleaseFences = true;
+ }
- err = consumer->detachBuffer(bi.mSlot);
- if (err != OK) {
- ALOGE("PersistentProxyListener: detachBuffer failed (%d)", err);
- return;
+ /**
+ * Returns the acquire fence file descriptor associated with this object.
+ */
+ int getAcquireFenceFd() {
+ if (mAcquireFence == nullptr || !mAcquireFence->isValid()) {
+ return -1;
}
+ return mAcquireFence->dup();
+ }
- err = consumer->attachBuffer(&bi.mSlot, bi.mGraphicBuffer);
- if (err != OK) {
- ALOGE("PersistentProxyListener: attachBuffer failed (%d)", err);
- return;
+ /**
+ * Returns whether the buffer is still in the buffer cache.
+ */
+ bool isCached() const {
+ return mBuffer->isCached();
+ }
+
+ /**
+ * Returns the acquired buffer.
+ */
+ sp<GraphicBuffer> getGraphicBuffer() const {
+ return mBuffer->getGraphicBuffer();
+ }
+
+ /**
+ * Returns the slot that this buffer is cached at, or -1 otherwise.
+ *
+ * This assumes that -1 slot id is invalid; though, it is just a benign collision used for
+ * debugging. This object explicitly manages whether it is still cached.
+ */
+ slot_id getSlot() const {
+ return mBuffer->getSlot();
+ }
+
+ /**
+ * Creates and returns a release fence object from the acquire fence and/or any release fences
+ * added. If no release fences were added (even if invalid), returns the acquire fence.
+ * Otherwise, it returns a merged fence from all the valid release fences added.
+ */
+ sp<Fence> getReleaseFence() {
+ // If did not receive release fences, we assume this buffer was not consumed (it was
+ // discarded or dropped). In this case release the acquire fence as the release fence.
+ // We do this here to avoid a dup, close and recreation of the Fence object.
+ if (!mGotReleaseFences) {
+ return mAcquireFence;
}
+ sp<Fence> ret = getReleaseFence(0, mReleaseFenceFds.size());
+ // clear fds as fence took ownership of them
+ mReleaseFenceFds.clear();
+ return ret;
+ }
- err = consumer->releaseBuffer(bi.mSlot, 0,
- EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, bi.mFence);
- if (err != OK) {
- ALOGE("PersistentProxyListener: releaseBuffer failed (%d)", err);
+ // this video buffer is no longer referenced by the codec (or kept for later encoding)
+ // it is now safe to release to the producer
+ ~AcquiredBuffer() {
+ //mAcquirable.clear();
+ mOnReleased(this);
+ // mOnRelease method should call getReleaseFence() that releases all fds but just in case
+ ALOGW_IF(!mReleaseFenceFds.empty(), "release fences were not obtained, closing fds");
+ for (int fildes : mReleaseFenceFds) {
+ ::close(fildes);
+ TRESPASS_DBG();
}
}
-}
-void GraphicBufferSource::PersistentProxyListener::onFrameReplaced(
- const BufferItem& item) {
- sp<ConsumerListener> listener(mConsumerListener.promote());
- if (listener != NULL) {
- listener->onFrameReplaced(item);
+private:
+ std::shared_ptr<GraphicBufferSource::CachedBuffer> mBuffer;
+ std::shared_ptr<GraphicBufferSource::CachedBuffer::Acquirable> mAcquirable;
+ sp<Fence> mAcquireFence;
+ Vector<int> mReleaseFenceFds;
+ bool mGotReleaseFences;
+ std::function<void(AcquiredBuffer *)> mOnReleased;
+
+ /**
+ * Creates and returns a release fence from 0 or more release fence file descriptors in from
+ * the specified range in the array.
+ *
+ * @param start start index
+ * @param num number of release fds to merge
+ */
+ sp<Fence> getReleaseFence(size_t start, size_t num) const {
+ if (num == 0) {
+ return Fence::NO_FENCE;
+ } else if (num == 1) {
+ return new Fence(mReleaseFenceFds[start]);
+ } else {
+ return Fence::merge("GBS::AB",
+ getReleaseFence(start, num >> 1),
+ getReleaseFence(start + (num >> 1), num - (num >> 1)));
+ }
}
-}
+};
-void GraphicBufferSource::PersistentProxyListener::onBuffersReleased() {
- sp<ConsumerListener> listener(mConsumerListener.promote());
- if (listener != NULL) {
- listener->onBuffersReleased();
- }
-}
-
-void GraphicBufferSource::PersistentProxyListener::onSidebandStreamChanged() {
- sp<ConsumerListener> listener(mConsumerListener.promote());
- if (listener != NULL) {
- listener->onSidebandStreamChanged();
- }
-}
-
-GraphicBufferSource::GraphicBufferSource(
- OMXNodeInstance* nodeInstance,
- uint32_t bufferWidth,
- uint32_t bufferHeight,
- uint32_t bufferCount,
- uint32_t consumerUsage,
- const sp<IGraphicBufferConsumer> &consumer) :
+GraphicBufferSource::GraphicBufferSource() :
mInitCheck(UNKNOWN_ERROR),
- mNodeInstance(nodeInstance),
- mExecuting(false),
- mSuspended(false),
- mLastDataSpace(HAL_DATASPACE_UNKNOWN),
- mIsPersistent(false),
- mConsumer(consumer),
- mNumFramesAvailable(0),
- mNumBufferAcquired(0),
+ mNumAvailableUnacquiredBuffers(0),
+ mNumOutstandingAcquires(0),
mEndOfStream(false),
mEndOfStreamSent(false),
- mMaxTimestampGapUs(-1ll),
- mPrevOriginalTimeUs(-1ll),
- mPrevModifiedTimeUs(-1ll),
+ mLastDataspace(HAL_DATASPACE_UNKNOWN),
+ mExecuting(false),
+ mSuspended(false),
+ mStopTimeUs(-1),
+ mLastActionTimeUs(-1ll),
mSkipFramesBeforeNs(-1ll),
- mRepeatAfterUs(-1ll),
+ mFrameRepeatIntervalUs(-1ll),
mRepeatLastFrameGeneration(0),
- mRepeatLastFrameTimestamp(-1ll),
- mLatestBufferId(-1),
- mLatestBufferFrameNum(0),
- mLatestBufferFence(Fence::NO_FENCE),
- mRepeatBufferDeferred(false),
- mTimePerCaptureUs(-1ll),
- mTimePerFrameUs(-1ll),
+ mOutstandingFrameRepeatCount(0),
+ mFrameRepeatBlockedOnCodecBuffer(false),
+ mFps(-1.0),
+ mCaptureFps(-1.0),
+ mBaseCaptureUs(-1ll),
+ mBaseFrameUs(-1ll),
+ mFrameCount(0),
mPrevCaptureUs(-1ll),
mPrevFrameUs(-1ll),
mInputBufferTimeOffsetUs(0ll) {
+ ALOGV("GraphicBufferSource");
- ALOGV("GraphicBufferSource w=%u h=%u c=%u",
- bufferWidth, bufferHeight, bufferCount);
+ String8 name("GraphicBufferSource");
- if (bufferWidth == 0 || bufferHeight == 0) {
- ALOGE("Invalid dimensions %ux%u", bufferWidth, bufferHeight);
- mInitCheck = BAD_VALUE;
- return;
- }
+ BufferQueue::createBufferQueue(&mProducer, &mConsumer);
+ mConsumer->setConsumerName(name);
- if (mConsumer == NULL) {
- String8 name("GraphicBufferSource");
-
- BufferQueue::createBufferQueue(&mProducer, &mConsumer);
- mConsumer->setConsumerName(name);
-
- // use consumer usage bits queried from encoder, but always add HW_VIDEO_ENCODER
- // for backward compatibility.
- consumerUsage |= GRALLOC_USAGE_HW_VIDEO_ENCODER;
- mConsumer->setConsumerUsageBits(consumerUsage);
-
- mInitCheck = mConsumer->setMaxAcquiredBufferCount(bufferCount);
- if (mInitCheck != NO_ERROR) {
- ALOGE("Unable to set BQ max acquired buffer count to %u: %d",
- bufferCount, mInitCheck);
- return;
- }
- } else {
- mIsPersistent = true;
- }
- mConsumer->setDefaultBufferSize(bufferWidth, bufferHeight);
// Note that we can't create an sp<...>(this) in a ctor that will not keep a
// reference once the ctor ends, as that would cause the refcount of 'this'
// dropping to 0 at the end of the ctor. Since all we need is a wp<...>
// that's what we create.
- wp<BufferQueue::ConsumerListener> listener = static_cast<BufferQueue::ConsumerListener*>(this);
- sp<IConsumerListener> proxy;
- if (!mIsPersistent) {
- proxy = new BufferQueue::ProxyConsumerListener(listener);
- } else {
- proxy = new PersistentProxyListener(mConsumer, listener);
- }
+ wp<BufferQueue::ConsumerListener> listener =
+ static_cast<BufferQueue::ConsumerListener*>(this);
+ sp<IConsumerListener> proxy =
+ new BufferQueue::ProxyConsumerListener(listener);
mInitCheck = mConsumer->consumerConnect(proxy, false);
if (mInitCheck != NO_ERROR) {
@@ -196,21 +302,27 @@
return;
}
- memset(&mColorAspects, 0, sizeof(mColorAspects));
+ memset(&mDefaultColorAspectsPacked, 0, sizeof(mDefaultColorAspectsPacked));
CHECK(mInitCheck == NO_ERROR);
}
GraphicBufferSource::~GraphicBufferSource() {
- if (mLatestBufferId >= 0) {
- releaseBuffer(
- mLatestBufferId, mLatestBufferFrameNum,
- mBufferSlot[mLatestBufferId], mLatestBufferFence);
+ ALOGV("~GraphicBufferSource");
+ {
+ // all acquired buffers must be freed with the mutex locked otherwise our debug assertion
+ // may trigger
+ Mutex::Autolock autoLock(mMutex);
+ mAvailableBuffers.clear();
+ mSubmittedCodecBuffers.clear();
+ mLatestBuffer.mBuffer.reset();
}
- if (mNumBufferAcquired != 0) {
- ALOGW("potential buffer leak (acquired %d)", mNumBufferAcquired);
+
+ if (mNumOutstandingAcquires != 0) {
+ ALOGW("potential buffer leak: acquired=%d", mNumOutstandingAcquires);
+ TRESPASS_DBG();
}
- if (mConsumer != NULL && !mIsPersistent) {
+ if (mConsumer != NULL) {
status_t err = mConsumer->consumerDisconnect();
if (err != NO_ERROR) {
ALOGW("consumerDisconnect failed: %d", err);
@@ -218,13 +330,13 @@
}
}
-void GraphicBufferSource::omxExecuting() {
+Status GraphicBufferSource::onOmxExecuting() {
Mutex::Autolock autoLock(mMutex);
- ALOGV("--> executing; avail=%zu, codec vec size=%zd",
- mNumFramesAvailable, mCodecBuffers.size());
+ ALOGV("--> executing; available=%zu, submittable=%zd",
+ mAvailableBuffers.size(), mFreeCodecBuffers.size());
CHECK(!mExecuting);
mExecuting = true;
- mLastDataSpace = HAL_DATASPACE_UNKNOWN;
+ mLastDataspace = HAL_DATASPACE_UNKNOWN;
ALOGV("clearing last dataSpace");
// Start by loading up as many buffers as possible. We want to do this,
@@ -236,40 +348,39 @@
// one codec buffer simultaneously. (We could instead try to submit
// all BQ buffers whenever any codec buffer is freed, but if we get the
// initial conditions right that will never be useful.)
- while (mNumFramesAvailable) {
+ while (haveAvailableBuffers_l()) {
if (!fillCodecBuffer_l()) {
- ALOGV("stop load with frames available (codecAvail=%d)",
- isCodecBufferAvailable_l());
+ ALOGV("stop load with available=%zu+%d",
+ mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers);
break;
}
}
- ALOGV("done loading initial frames, avail=%zu", mNumFramesAvailable);
+ ALOGV("done loading initial frames, available=%zu+%d",
+ mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers);
// If EOS has already been signaled, and there are no more frames to
// submit, try to send EOS now as well.
- if (mEndOfStream && mNumFramesAvailable == 0) {
+ if (mStopTimeUs == -1 && mEndOfStream && !haveAvailableBuffers_l()) {
submitEndOfInputStream_l();
}
- if (mRepeatAfterUs > 0ll && mLooper == NULL) {
+ if (mFrameRepeatIntervalUs > 0ll && mLooper == NULL) {
mReflector = new AHandlerReflector<GraphicBufferSource>(this);
mLooper = new ALooper;
mLooper->registerHandler(mReflector);
mLooper->start();
- if (mLatestBufferId >= 0) {
- sp<AMessage> msg =
- new AMessage(kWhatRepeatLastFrame, mReflector);
-
- msg->setInt32("generation", ++mRepeatLastFrameGeneration);
- msg->post(mRepeatAfterUs);
+ if (mLatestBuffer.mBuffer != nullptr) {
+ queueFrameRepeat_l();
}
}
+
+ return Status::ok();
}
-void GraphicBufferSource::omxIdle() {
+Status GraphicBufferSource::onOmxIdle() {
ALOGV("omxIdle");
Mutex::Autolock autoLock(mMutex);
@@ -279,15 +390,11 @@
// not loaded->idle.
mExecuting = false;
}
+ return Status::ok();
}
-void GraphicBufferSource::omxLoaded(){
+Status GraphicBufferSource::onOmxLoaded(){
Mutex::Autolock autoLock(mMutex);
- if (!mExecuting) {
- // This can happen if something failed very early.
- ALOGW("Dropped back down to Loaded without Executing");
- }
-
if (mLooper != NULL) {
mLooper->unregisterHandler(mReflector->id());
mReflector.clear();
@@ -296,586 +403,404 @@
mLooper.clear();
}
- ALOGV("--> loaded; avail=%zu eos=%d eosSent=%d",
- mNumFramesAvailable, mEndOfStream, mEndOfStreamSent);
+ ALOGV("--> loaded; available=%zu+%d eos=%d eosSent=%d acquired=%d",
+ mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers,
+ mEndOfStream, mEndOfStreamSent, mNumOutstandingAcquires);
- // Codec is no longer executing. Discard all codec-related state.
- mCodecBuffers.clear();
- // TODO: scan mCodecBuffers to verify that all mGraphicBuffer entries
- // are null; complain if not
-
+ // Codec is no longer executing. Releasing all buffers to bq.
+ mFreeCodecBuffers.clear();
+ mSubmittedCodecBuffers.clear();
+ mLatestBuffer.mBuffer.reset();
+ mOMXNode.clear();
mExecuting = false;
+
+ return Status::ok();
}
-void GraphicBufferSource::addCodecBuffer(OMX_BUFFERHEADERTYPE* header) {
+Status GraphicBufferSource::onInputBufferAdded(codec_buffer_id bufferId) {
Mutex::Autolock autoLock(mMutex);
if (mExecuting) {
// This should never happen -- buffers can only be allocated when
// transitioning from "loaded" to "idle".
ALOGE("addCodecBuffer: buffer added while executing");
- return;
+ return Status::fromServiceSpecificError(INVALID_OPERATION);
}
- ALOGV("addCodecBuffer h=%p size=%" PRIu32 " p=%p",
- header, header->nAllocLen, header->pBuffer);
- CodecBuffer codecBuffer;
- codecBuffer.mHeader = header;
- mCodecBuffers.add(codecBuffer);
+ ALOGV("addCodecBuffer: bufferId=%u", bufferId);
+
+ mFreeCodecBuffers.push_back(bufferId);
+ return Status::ok();
}
-void GraphicBufferSource::codecBufferEmptied(OMX_BUFFERHEADERTYPE* header, int fenceFd) {
+Status GraphicBufferSource::onInputBufferEmptied(codec_buffer_id bufferId, int fenceFd) {
Mutex::Autolock autoLock(mMutex);
- if (!mExecuting) {
- return;
- }
+ FileDescriptor::Autoclose fence(fenceFd);
- int cbi = findMatchingCodecBuffer_l(header);
+ ssize_t cbi = mSubmittedCodecBuffers.indexOfKey(bufferId);
if (cbi < 0) {
// This should never happen.
- ALOGE("codecBufferEmptied: buffer not recognized (h=%p)", header);
- if (fenceFd >= 0) {
- ::close(fenceFd);
- }
- return;
+ ALOGE("onInputBufferEmptied: buffer not recognized (bufferId=%u)", bufferId);
+ return Status::fromServiceSpecificError(BAD_VALUE);
}
- ALOGV("codecBufferEmptied h=%p size=%" PRIu32 " filled=%" PRIu32 " p=%p",
- header, header->nAllocLen, header->nFilledLen,
- header->pBuffer);
- CodecBuffer& codecBuffer(mCodecBuffers.editItemAt(cbi));
+ std::shared_ptr<AcquiredBuffer> buffer = mSubmittedCodecBuffers.valueAt(cbi);
+
+ // Move buffer to available buffers
+ mSubmittedCodecBuffers.removeItemsAt(cbi);
+ mFreeCodecBuffers.push_back(bufferId);
// header->nFilledLen may not be the original value, so we can't compare
// that to zero to see of this was the EOS buffer. Instead we just
- // see if the GraphicBuffer reference was null, which should only ever
- // happen for EOS.
- if (codecBuffer.mGraphicBuffer == NULL) {
+ // see if there is a null AcquiredBuffer, which should only ever happen for EOS.
+ if (buffer == nullptr) {
if (!(mEndOfStream && mEndOfStreamSent)) {
- // This can happen when broken code sends us the same buffer
- // twice in a row.
- ALOGE("ERROR: codecBufferEmptied on non-EOS null buffer "
- "(buffer emptied twice?)");
+ // This can happen when broken code sends us the same buffer twice in a row.
+ ALOGE("onInputBufferEmptied: non-EOS null buffer (bufferId=%u)", bufferId);
+ } else {
+ ALOGV("onInputBufferEmptied: EOS null buffer (bufferId=%u@%zd)", bufferId, cbi);
}
- // No GraphicBuffer to deal with, no additional input or output is
- // expected, so just return.
- if (fenceFd >= 0) {
- ::close(fenceFd);
- }
- return;
+ // No GraphicBuffer to deal with, no additional input or output is expected, so just return.
+ return Status::fromServiceSpecificError(BAD_VALUE);
}
- if (EXTRA_CHECK && header->nAllocLen >= sizeof(MetadataBufferType)) {
- // Pull the graphic buffer handle back out of the buffer, and confirm
- // that it matches expectations.
- OMX_U8* data = header->pBuffer;
- MetadataBufferType type = *(MetadataBufferType *)data;
- if (type == kMetadataBufferTypeGrallocSource
- && header->nAllocLen >= sizeof(VideoGrallocMetadata)) {
- VideoGrallocMetadata &grallocMeta = *(VideoGrallocMetadata *)data;
- if (grallocMeta.pHandle != codecBuffer.mGraphicBuffer->handle) {
- // should never happen
- ALOGE("codecBufferEmptied: buffer's handle is %p, expected %p",
- grallocMeta.pHandle, codecBuffer.mGraphicBuffer->handle);
- CHECK(!"codecBufferEmptied: mismatched buffer");
- }
- } else if (type == kMetadataBufferTypeANWBuffer
- && header->nAllocLen >= sizeof(VideoNativeMetadata)) {
- VideoNativeMetadata &nativeMeta = *(VideoNativeMetadata *)data;
- if (nativeMeta.pBuffer != codecBuffer.mGraphicBuffer->getNativeBuffer()) {
- // should never happen
- ALOGE("codecBufferEmptied: buffer is %p, expected %p",
- nativeMeta.pBuffer, codecBuffer.mGraphicBuffer->getNativeBuffer());
- CHECK(!"codecBufferEmptied: mismatched buffer");
- }
- }
+ if (!mExecuting) {
+ // this is fine since this could happen when going from Idle to Loaded
+ ALOGV("onInputBufferEmptied: no longer executing (bufferId=%u@%zd)", bufferId, cbi);
+ return Status::fromServiceSpecificError(OK);
}
- // Find matching entry in our cached copy of the BufferQueue slots.
- // If we find a match, release that slot. If we don't, the BufferQueue
- // has dropped that GraphicBuffer, and there's nothing for us to release.
- int id = codecBuffer.mSlot;
- sp<Fence> fence = new Fence(fenceFd);
- if (mBufferSlot[id] != NULL &&
- mBufferSlot[id]->handle == codecBuffer.mGraphicBuffer->handle) {
- mBufferUseCount[id]--;
+ ALOGV("onInputBufferEmptied: bufferId=%d@%zd [slot=%d, useCount=%ld, handle=%p] acquired=%d",
+ bufferId, cbi, buffer->getSlot(), buffer.use_count(), buffer->getGraphicBuffer()->handle,
+ mNumOutstandingAcquires);
- ALOGV("codecBufferEmptied: slot=%d, cbi=%d, useCount=%d, handle=%p",
- id, cbi, mBufferUseCount[id], mBufferSlot[id]->handle);
+ buffer->addReleaseFenceFd(fence.release());
+ // release codec reference for video buffer just in case remove does not it
+ buffer.reset();
- if (mBufferUseCount[id] < 0) {
- ALOGW("mBufferUseCount for bq slot %d < 0 (=%d)", id, mBufferUseCount[id]);
- mBufferUseCount[id] = 0;
- }
- if (id != mLatestBufferId && mBufferUseCount[id] == 0) {
- releaseBuffer(id, codecBuffer.mFrameNumber, mBufferSlot[id], fence);
- }
- } else {
- ALOGV("codecBufferEmptied: no match for emptied buffer in cbi %d",
- cbi);
- // we will not reuse codec buffer, so there is no need to wait for fence
- }
-
- // Mark the codec buffer as available by clearing the GraphicBuffer ref.
- codecBuffer.mGraphicBuffer = NULL;
-
- if (mNumFramesAvailable) {
+ if (haveAvailableBuffers_l()) {
// Fill this codec buffer.
CHECK(!mEndOfStreamSent);
- ALOGV("buffer freed, %zu frames avail (eos=%d)",
- mNumFramesAvailable, mEndOfStream);
+ ALOGV("onInputBufferEmptied: buffer freed, feeding codec (available=%zu+%d, eos=%d)",
+ mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers, mEndOfStream);
fillCodecBuffer_l();
- } else if (mEndOfStream) {
- // No frames available, but EOS is pending, so use this buffer to
+ } else if (mEndOfStream && mStopTimeUs == -1) {
+ // No frames available, but EOS is pending and no stop time, so use this buffer to
// send that.
- ALOGV("buffer freed, EOS pending");
+ ALOGV("onInputBufferEmptied: buffer freed, submitting EOS");
submitEndOfInputStream_l();
- } else if (mRepeatBufferDeferred) {
+ } else if (mFrameRepeatBlockedOnCodecBuffer) {
bool success = repeatLatestBuffer_l();
- if (success) {
- ALOGV("deferred repeatLatestBuffer_l SUCCESS");
- } else {
- ALOGV("deferred repeatLatestBuffer_l FAILURE");
- }
- mRepeatBufferDeferred = false;
+ ALOGV("onInputBufferEmptied: completing deferred repeatLatestBuffer_l %s",
+ success ? "SUCCESS" : "FAILURE");
+ mFrameRepeatBlockedOnCodecBuffer = false;
}
- return;
+ // releaseReleasableBuffers_l();
+ return Status::ok();
}
-void GraphicBufferSource::codecBufferFilled(OMX_BUFFERHEADERTYPE* header) {
- Mutex::Autolock autoLock(mMutex);
+void GraphicBufferSource::onDataspaceChanged_l(
+ android_dataspace dataspace, android_pixel_format pixelFormat) {
+ ALOGD("got buffer with new dataSpace #%x", dataspace);
+ mLastDataspace = dataspace;
- if (mMaxTimestampGapUs > 0ll
- && !(header->nFlags & OMX_BUFFERFLAG_CODECCONFIG)) {
- ssize_t index = mOriginalTimeUs.indexOfKey(header->nTimeStamp);
- if (index >= 0) {
- ALOGV("OUT timestamp: %lld -> %lld",
- static_cast<long long>(header->nTimeStamp),
- static_cast<long long>(mOriginalTimeUs[index]));
- header->nTimeStamp = mOriginalTimeUs[index];
- mOriginalTimeUs.removeItemsAt(index);
- } else {
- // giving up the effort as encoder doesn't appear to preserve pts
- ALOGW("giving up limiting timestamp gap (pts = %lld)",
- header->nTimeStamp);
- mMaxTimestampGapUs = -1ll;
- }
- if (mOriginalTimeUs.size() > BufferQueue::NUM_BUFFER_SLOTS) {
- // something terribly wrong must have happened, giving up...
- ALOGE("mOriginalTimeUs has too many entries (%zu)",
- mOriginalTimeUs.size());
- mMaxTimestampGapUs = -1ll;
- }
- }
-}
-
-void GraphicBufferSource::suspend(bool suspend) {
- Mutex::Autolock autoLock(mMutex);
-
- if (suspend) {
- mSuspended = true;
-
- while (mNumFramesAvailable > 0) {
- BufferItem item;
- status_t err = mConsumer->acquireBuffer(&item, 0);
-
- if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
- // shouldn't happen.
- ALOGW("suspend: frame was not available");
- break;
- } else if (err != OK) {
- ALOGW("suspend: acquireBuffer returned err=%d", err);
- break;
- }
-
- ++mNumBufferAcquired;
- --mNumFramesAvailable;
-
- releaseBuffer(item.mSlot, item.mFrameNumber,
- item.mGraphicBuffer, item.mFence);
- }
- return;
- }
-
- mSuspended = false;
-
- if (mExecuting && mNumFramesAvailable == 0 && mRepeatBufferDeferred) {
- if (repeatLatestBuffer_l()) {
- ALOGV("suspend/deferred repeatLatestBuffer_l SUCCESS");
-
- mRepeatBufferDeferred = false;
- } else {
- ALOGV("suspend/deferred repeatLatestBuffer_l FAILURE");
- }
- }
-}
-
-void GraphicBufferSource::onDataSpaceChanged_l(
- android_dataspace dataSpace, android_pixel_format pixelFormat) {
- ALOGD("got buffer with new dataSpace #%x", dataSpace);
- mLastDataSpace = dataSpace;
-
- if (ColorUtils::convertDataSpaceToV0(dataSpace)) {
- ColorAspects aspects = mColorAspects; // initially requested aspects
-
- // request color aspects to encode
- OMX_INDEXTYPE index;
- status_t err = mNodeInstance->getExtensionIndex(
- "OMX.google.android.index.describeColorAspects", &index);
- if (err == OK) {
- // V0 dataspace
- DescribeColorAspectsParams params;
- InitOMXParams(¶ms);
- params.nPortIndex = kPortIndexInput;
- params.nDataSpace = mLastDataSpace;
- params.nPixelFormat = pixelFormat;
- params.bDataSpaceChanged = OMX_TRUE;
- params.sAspects = mColorAspects;
-
- err = mNodeInstance->getConfig(index, ¶ms, sizeof(params));
- if (err == OK) {
- aspects = params.sAspects;
- ALOGD("Codec resolved it to (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
- params.sAspects.mRange, asString(params.sAspects.mRange),
- params.sAspects.mPrimaries, asString(params.sAspects.mPrimaries),
- params.sAspects.mMatrixCoeffs, asString(params.sAspects.mMatrixCoeffs),
- params.sAspects.mTransfer, asString(params.sAspects.mTransfer),
- err, asString(err));
- } else {
- params.sAspects = aspects;
- err = OK;
- }
- params.bDataSpaceChanged = OMX_FALSE;
- for (int triesLeft = 2; --triesLeft >= 0; ) {
- status_t err = mNodeInstance->setConfig(index, ¶ms, sizeof(params));
- if (err == OK) {
- err = mNodeInstance->getConfig(index, ¶ms, sizeof(params));
- }
- if (err != OK || !ColorUtils::checkIfAspectsChangedAndUnspecifyThem(
- params.sAspects, aspects)) {
- // if we can't set or get color aspects, still communicate dataspace to client
- break;
- }
-
- ALOGW_IF(triesLeft == 0, "Codec repeatedly changed requested ColorAspects.");
- }
- }
-
- ALOGV("Set color aspects to (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
- aspects.mRange, asString(aspects.mRange),
- aspects.mPrimaries, asString(aspects.mPrimaries),
- aspects.mMatrixCoeffs, asString(aspects.mMatrixCoeffs),
- aspects.mTransfer, asString(aspects.mTransfer),
- err, asString(err));
-
- // signal client that the dataspace has changed; this will update the output format
- // TODO: we should tie this to an output buffer somehow, and signal the change
- // just before the output buffer is returned to the client, but there are many
- // ways this could fail (e.g. flushing), and we are not yet supporting this scenario.
-
- mNodeInstance->signalEvent(
- OMX_EventDataSpaceChanged, dataSpace,
- (aspects.mRange << 24) | (aspects.mPrimaries << 16)
- | (aspects.mMatrixCoeffs << 8) | aspects.mTransfer);
+ if (ColorUtils::convertDataSpaceToV0(dataspace)) {
+ mOMXNode->dispatchDataSpaceChanged(mLastDataspace, mDefaultColorAspectsPacked, pixelFormat);
}
}
bool GraphicBufferSource::fillCodecBuffer_l() {
- CHECK(mExecuting && mNumFramesAvailable > 0);
+ CHECK(mExecuting && haveAvailableBuffers_l());
+
+ if (mFreeCodecBuffers.empty()) {
+ // No buffers available, bail.
+ ALOGV("fillCodecBuffer_l: no codec buffers, available=%zu+%d",
+ mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers);
+ return false;
+ }
+
+ VideoBuffer item;
+ if (mAvailableBuffers.empty()) {
+ ALOGV("fillCodecBuffer_l: acquiring available buffer, available=%zu+%d",
+ mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers);
+ if (acquireBuffer_l(&item) != OK) {
+ ALOGE("fillCodecBuffer_l: failed to acquire available buffer");
+ return false;
+ }
+ } else {
+ ALOGV("fillCodecBuffer_l: getting available buffer, available=%zu+%d",
+ mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers);
+ item = *mAvailableBuffers.begin();
+ mAvailableBuffers.erase(mAvailableBuffers.begin());
+ }
+
+ int64_t itemTimeUs = item.mTimestampNs / 1000;
+
+ // Process ActionItem in the Queue if there is any. If a buffer's timestamp
+ // is smaller than the first action's timestamp, no action need to be performed.
+ // If buffer's timestamp is larger or equal than the last action's timestamp,
+ // only the last action needs to be performed as all the acitions before the
+ // the action are overridden by the last action. For the other cases, traverse
+ // the Queue to find the newest action that with timestamp smaller or equal to
+ // the buffer's timestamp. For example, an action queue like
+ // [pause 1us], [resume 2us], [pause 3us], [resume 4us], [pause 5us].... Upon
+ // receiving a buffer with timestamp 3.5us, only the action [pause, 3us] needs
+ // to be handled and [pause, 1us], [resume 2us] will be discarded.
+ bool done = false;
+ bool seeStopAction = false;
+ if (!mActionQueue.empty()) {
+ // First scan to check if bufferTimestamp is smaller than first action's timestamp.
+ ActionItem nextAction = *(mActionQueue.begin());
+ if (itemTimeUs < nextAction.mActionTimeUs) {
+ ALOGV("No action. buffer timestamp %lld us < action timestamp: %lld us",
+ (long long)itemTimeUs, (long long)nextAction.mActionTimeUs);
+ // All the actions are ahead. No action need to perform now.
+ // Release the buffer if is in suspended state, or process the buffer
+ // if not in suspended state.
+ done = true;
+ }
+
+ if (!done) {
+ // Find the newest action that with timestamp smaller than itemTimeUs. Then
+ // remove all the actions before and include the newest action.
+ List<ActionItem>::iterator it = mActionQueue.begin();
+ while (it != mActionQueue.end() && it->mActionTimeUs <= itemTimeUs
+ && nextAction.mAction != ActionItem::STOP) {
+ nextAction = *it;
+ ++it;
+ }
+ mActionQueue.erase(mActionQueue.begin(), it);
+
+ CHECK(itemTimeUs >= nextAction.mActionTimeUs);
+ switch (nextAction.mAction) {
+ case ActionItem::PAUSE:
+ {
+ mSuspended = true;
+ ALOGV("RUNNING/PAUSE -> PAUSE at buffer %lld us PAUSE Time: %lld us",
+ (long long)itemTimeUs, (long long)nextAction.mActionTimeUs);
+ break;
+ }
+ case ActionItem::RESUME:
+ {
+ mSuspended = false;
+ ALOGV("PAUSE/RUNNING -> RUNNING at buffer %lld us RESUME Time: %lld us",
+ (long long)itemTimeUs, (long long)nextAction.mActionTimeUs);
+ break;
+ }
+ case ActionItem::STOP:
+ {
+ ALOGV("RUNNING/PAUSE -> STOP at buffer %lld us STOP Time: %lld us",
+ (long long)itemTimeUs, (long long)nextAction.mActionTimeUs);
+ // Clear the whole ActionQueue as recording is done
+ mActionQueue.clear();
+ seeStopAction = true;
+ break;
+ }
+ default:
+ TRESPASS_DBG("Unknown action type");
+ // return true here because we did consume an available buffer, so the
+ // loop in onOmxExecuting will eventually terminate even if we hit this.
+ return false;
+ }
+ }
+ }
+
+ if (seeStopAction) {
+ // Clear all the buffers before setting mEndOfStream and signal EndOfInputStream.
+ releaseAllAvailableBuffers_l();
+ mEndOfStream = true;
+ submitEndOfInputStream_l();
+ return true;
+ }
if (mSuspended) {
- return false;
+ return true;
}
- int cbi = findAvailableCodecBuffer_l();
- if (cbi < 0) {
- // No buffers available, bail.
- ALOGV("fillCodecBuffer_l: no codec buffers, avail now %zu",
- mNumFramesAvailable);
- return false;
- }
-
- ALOGV("fillCodecBuffer_l: acquiring buffer, avail=%zu",
- mNumFramesAvailable);
- BufferItem item;
- status_t err = mConsumer->acquireBuffer(&item, 0);
- if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
- // shouldn't happen
- ALOGW("fillCodecBuffer_l: frame was not available");
- return false;
- } else if (err != OK) {
- // now what? fake end-of-stream?
- ALOGW("fillCodecBuffer_l: acquireBuffer returned err=%d", err);
- return false;
- }
-
- mNumBufferAcquired++;
- mNumFramesAvailable--;
-
- // If this is the first time we're seeing this buffer, add it to our
- // slot table.
- if (item.mGraphicBuffer != NULL) {
- ALOGV("fillCodecBuffer_l: setting mBufferSlot %d", item.mSlot);
- mBufferSlot[item.mSlot] = item.mGraphicBuffer;
- mBufferUseCount[item.mSlot] = 0;
- }
-
- if (item.mDataSpace != mLastDataSpace) {
- onDataSpaceChanged_l(
- item.mDataSpace, (android_pixel_format)mBufferSlot[item.mSlot]->getPixelFormat());
- }
-
-
- err = UNKNOWN_ERROR;
+ int err = UNKNOWN_ERROR;
// only submit sample if start time is unspecified, or sample
// is queued after the specified start time
- bool dropped = false;
- if (mSkipFramesBeforeNs < 0ll || item.mTimestamp >= mSkipFramesBeforeNs) {
+ if (mSkipFramesBeforeNs < 0ll || item.mTimestampNs >= mSkipFramesBeforeNs) {
// if start time is set, offset time stamp by start time
if (mSkipFramesBeforeNs > 0) {
- item.mTimestamp -= mSkipFramesBeforeNs;
+ item.mTimestampNs -= mSkipFramesBeforeNs;
}
- int64_t timeUs = item.mTimestamp / 1000;
+ int64_t timeUs = item.mTimestampNs / 1000;
if (mFrameDropper != NULL && mFrameDropper->shouldDrop(timeUs)) {
ALOGV("skipping frame (%lld) to meet max framerate", static_cast<long long>(timeUs));
// set err to OK so that the skipped frame can still be saved as the lastest frame
err = OK;
- dropped = true;
} else {
- err = submitBuffer_l(item, cbi);
+ err = submitBuffer_l(item); // this takes shared ownership of the acquired buffer on succeess
}
}
if (err != OK) {
- ALOGV("submitBuffer_l failed, releasing bq slot %d", item.mSlot);
- releaseBuffer(item.mSlot, item.mFrameNumber, item.mGraphicBuffer, item.mFence);
+ ALOGV("submitBuffer_l failed, will release bq slot %d", item.mBuffer->getSlot());
+ return true;
} else {
- ALOGV("buffer submitted (bq %d, cbi %d)", item.mSlot, cbi);
- setLatestBuffer_l(item, dropped);
+ // Don't set the last buffer id if we're not repeating,
+ // we'll be holding on to the last buffer for nothing.
+ if (mFrameRepeatIntervalUs > 0ll) {
+ setLatestBuffer_l(item);
+ }
+ ALOGV("buffer submitted [slot=%d, useCount=%ld] acquired=%d",
+ item.mBuffer->getSlot(), item.mBuffer.use_count(), mNumOutstandingAcquires);
}
return true;
}
bool GraphicBufferSource::repeatLatestBuffer_l() {
- CHECK(mExecuting && mNumFramesAvailable == 0);
+ CHECK(mExecuting && !haveAvailableBuffers_l());
- if (mLatestBufferId < 0 || mSuspended) {
- return false;
- }
- if (mBufferSlot[mLatestBufferId] == NULL) {
- // This can happen if the remote side disconnects, causing
- // onBuffersReleased() to NULL out our copy of the slots. The
- // buffer is gone, so we have nothing to show.
- //
- // To be on the safe side we try to release the buffer.
- ALOGD("repeatLatestBuffer_l: slot was NULL");
- mConsumer->releaseBuffer(
- mLatestBufferId,
- mLatestBufferFrameNum,
- EGL_NO_DISPLAY,
- EGL_NO_SYNC_KHR,
- mLatestBufferFence);
- mLatestBufferId = -1;
- mLatestBufferFrameNum = 0;
- mLatestBufferFence = Fence::NO_FENCE;
+ if (mLatestBuffer.mBuffer == nullptr || mSuspended) {
return false;
}
- int cbi = findAvailableCodecBuffer_l();
- if (cbi < 0) {
+ if (mFreeCodecBuffers.empty()) {
// No buffers available, bail.
ALOGV("repeatLatestBuffer_l: no codec buffers.");
return false;
}
- BufferItem item;
- item.mSlot = mLatestBufferId;
- item.mFrameNumber = mLatestBufferFrameNum;
- item.mTimestamp = mRepeatLastFrameTimestamp;
- item.mFence = mLatestBufferFence;
+ if (!mLatestBuffer.mBuffer->isCached()) {
+ ALOGV("repeatLatestBuffer_l: slot was discarded, but repeating our own reference");
+ }
- status_t err = submitBuffer_l(item, cbi);
-
+ // it is ok to update the timestamp of latest buffer as it is only used for submission
+ status_t err = submitBuffer_l(mLatestBuffer);
if (err != OK) {
return false;
}
- ++mBufferUseCount[item.mSlot];
-
/* repeat last frame up to kRepeatLastFrameCount times.
* in case of static scene, a single repeat might not get rid of encoder
* ghosting completely, refresh a couple more times to get better quality
*/
- if (--mRepeatLastFrameCount > 0) {
- mRepeatLastFrameTimestamp = item.mTimestamp + mRepeatAfterUs * 1000;
-
- if (mReflector != NULL) {
- sp<AMessage> msg = new AMessage(kWhatRepeatLastFrame, mReflector);
- msg->setInt32("generation", ++mRepeatLastFrameGeneration);
- msg->post(mRepeatAfterUs);
- }
+ if (--mOutstandingFrameRepeatCount > 0) {
+ // set up timestamp for repeat frame
+ mLatestBuffer.mTimestampNs += mFrameRepeatIntervalUs * 1000;
+ queueFrameRepeat_l();
}
return true;
}
-void GraphicBufferSource::setLatestBuffer_l(
- const BufferItem &item, bool dropped) {
- if (mLatestBufferId >= 0) {
- if (mBufferUseCount[mLatestBufferId] == 0) {
- releaseBuffer(mLatestBufferId, mLatestBufferFrameNum,
- mBufferSlot[mLatestBufferId], mLatestBufferFence);
- // mLatestBufferFence will be set to new fence just below
- }
- }
+void GraphicBufferSource::setLatestBuffer_l(const VideoBuffer &item) {
+ mLatestBuffer = item;
- mLatestBufferId = item.mSlot;
- mLatestBufferFrameNum = item.mFrameNumber;
- mRepeatLastFrameTimestamp = item.mTimestamp + mRepeatAfterUs * 1000;
+ ALOGV("setLatestBuffer_l: [slot=%d, useCount=%ld]",
+ mLatestBuffer.mBuffer->getSlot(), mLatestBuffer.mBuffer.use_count());
- if (!dropped) {
- ++mBufferUseCount[item.mSlot];
- }
+ mOutstandingFrameRepeatCount = kRepeatLastFrameCount;
+ // set up timestamp for repeat frame
+ mLatestBuffer.mTimestampNs += mFrameRepeatIntervalUs * 1000;
+ queueFrameRepeat_l();
+}
- ALOGV("setLatestBuffer_l: slot=%d, useCount=%d",
- item.mSlot, mBufferUseCount[item.mSlot]);
-
- mRepeatBufferDeferred = false;
- mRepeatLastFrameCount = kRepeatLastFrameCount;
- mLatestBufferFence = item.mFence;
+void GraphicBufferSource::queueFrameRepeat_l() {
+ mFrameRepeatBlockedOnCodecBuffer = false;
if (mReflector != NULL) {
sp<AMessage> msg = new AMessage(kWhatRepeatLastFrame, mReflector);
msg->setInt32("generation", ++mRepeatLastFrameGeneration);
- msg->post(mRepeatAfterUs);
+ msg->post(mFrameRepeatIntervalUs);
}
}
-status_t GraphicBufferSource::signalEndOfInputStream() {
- Mutex::Autolock autoLock(mMutex);
- ALOGV("signalEndOfInputStream: exec=%d avail=%zu eos=%d",
- mExecuting, mNumFramesAvailable, mEndOfStream);
-
- if (mEndOfStream) {
- ALOGE("EOS was already signaled");
- return INVALID_OPERATION;
- }
-
- // Set the end-of-stream flag. If no frames are pending from the
- // BufferQueue, and a codec buffer is available, and we're executing,
- // we initiate the EOS from here. Otherwise, we'll let
- // codecBufferEmptied() (or omxExecuting) do it.
- //
- // Note: if there are no pending frames and all codec buffers are
- // available, we *must* submit the EOS from here or we'll just
- // stall since no future events are expected.
- mEndOfStream = true;
-
- if (mExecuting && mNumFramesAvailable == 0) {
- submitEndOfInputStream_l();
- }
-
- return OK;
-}
-
-int64_t GraphicBufferSource::getTimestamp(const BufferItem &item) {
- int64_t timeUs = item.mTimestamp / 1000;
+bool GraphicBufferSource::calculateCodecTimestamp_l(
+ nsecs_t bufferTimeNs, int64_t *codecTimeUs) {
+ int64_t timeUs = bufferTimeNs / 1000;
timeUs += mInputBufferTimeOffsetUs;
- if (mTimePerCaptureUs > 0ll
- && (mTimePerCaptureUs > 2 * mTimePerFrameUs
- || mTimePerFrameUs > 2 * mTimePerCaptureUs)) {
+ if (mCaptureFps > 0.
+ && (mFps > 2 * mCaptureFps
+ || mCaptureFps > 2 * mFps)) {
// Time lapse or slow motion mode
if (mPrevCaptureUs < 0ll) {
// first capture
- mPrevCaptureUs = timeUs;
+ mPrevCaptureUs = mBaseCaptureUs = timeUs;
// adjust the first sample timestamp.
- mPrevFrameUs = (timeUs * mTimePerFrameUs) / mTimePerCaptureUs;
+ mPrevFrameUs = mBaseFrameUs =
+ std::llround((timeUs * mCaptureFps) / mFps);
+ mFrameCount = 0;
} else {
// snap to nearest capture point
- int64_t nFrames = (timeUs + mTimePerCaptureUs / 2 - mPrevCaptureUs)
- / mTimePerCaptureUs;
+ int64_t nFrames = std::llround(
+ (timeUs - mPrevCaptureUs) * mCaptureFps);
if (nFrames <= 0) {
// skip this frame as it's too close to previous capture
ALOGV("skipping frame, timeUs %lld", static_cast<long long>(timeUs));
- return -1;
+ return false;
}
- mPrevCaptureUs = mPrevCaptureUs + nFrames * mTimePerCaptureUs;
- mPrevFrameUs += mTimePerFrameUs * nFrames;
+ mFrameCount += nFrames;
+ mPrevCaptureUs = mBaseCaptureUs + std::llround(
+ mFrameCount / mCaptureFps);
+ mPrevFrameUs = mBaseFrameUs + std::llround(
+ mFrameCount / mFps);
}
ALOGV("timeUs %lld, captureUs %lld, frameUs %lld",
static_cast<long long>(timeUs),
static_cast<long long>(mPrevCaptureUs),
static_cast<long long>(mPrevFrameUs));
-
- return mPrevFrameUs;
} else {
- int64_t originalTimeUs = timeUs;
- if (originalTimeUs <= mPrevOriginalTimeUs) {
- // Drop the frame if it's going backward in time. Bad timestamp
- // could disrupt encoder's rate control completely.
+ if (timeUs <= mPrevFrameUs) {
+ // Drop the frame if it's going backward in time. Bad timestamp
+ // could disrupt encoder's rate control completely.
ALOGW("Dropping frame that's going backward in time");
- return -1;
+ return false;
}
- if (mMaxTimestampGapUs > 0ll) {
- //TODO: Fix the case when mMaxTimestampGapUs and mTimePerCaptureUs are both set.
-
- /* Cap timestamp gap between adjacent frames to specified max
- *
- * In the scenario of cast mirroring, encoding could be suspended for
- * prolonged periods. Limiting the pts gap to workaround the problem
- * where encoder's rate control logic produces huge frames after a
- * long period of suspension.
- */
- if (mPrevOriginalTimeUs >= 0ll) {
- int64_t timestampGapUs = originalTimeUs - mPrevOriginalTimeUs;
- timeUs = (timestampGapUs < mMaxTimestampGapUs ?
- timestampGapUs : mMaxTimestampGapUs) + mPrevModifiedTimeUs;
- }
- mOriginalTimeUs.add(timeUs, originalTimeUs);
- ALOGV("IN timestamp: %lld -> %lld",
- static_cast<long long>(originalTimeUs),
- static_cast<long long>(timeUs));
- }
-
- mPrevOriginalTimeUs = originalTimeUs;
- mPrevModifiedTimeUs = timeUs;
+ mPrevFrameUs = timeUs;
}
- return timeUs;
+ *codecTimeUs = mPrevFrameUs;
+ return true;
}
-status_t GraphicBufferSource::submitBuffer_l(const BufferItem &item, int cbi) {
- ALOGV("submitBuffer_l: slot=%d, cbi=%d", item.mSlot, cbi);
+status_t GraphicBufferSource::submitBuffer_l(const VideoBuffer &item) {
+ CHECK(!mFreeCodecBuffers.empty());
+ IOMX::buffer_id codecBufferId = *mFreeCodecBuffers.begin();
- int64_t timeUs = getTimestamp(item);
- if (timeUs < 0ll) {
+ ALOGV("submitBuffer_l [slot=%d, bufferId=%d]", item.mBuffer->getSlot(), codecBufferId);
+
+ int64_t codecTimeUs;
+ if (!calculateCodecTimestamp_l(item.mTimestampNs, &codecTimeUs)) {
return UNKNOWN_ERROR;
}
- CodecBuffer& codecBuffer(mCodecBuffers.editItemAt(cbi));
- codecBuffer.mGraphicBuffer = mBufferSlot[item.mSlot];
- codecBuffer.mSlot = item.mSlot;
- codecBuffer.mFrameNumber = item.mFrameNumber;
+ if ((android_dataspace)item.mDataspace != mLastDataspace) {
+ onDataspaceChanged_l(
+ item.mDataspace,
+ (android_pixel_format)item.mBuffer->getGraphicBuffer()->format);
+ }
- OMX_BUFFERHEADERTYPE* header = codecBuffer.mHeader;
- sp<GraphicBuffer> buffer = codecBuffer.mGraphicBuffer;
- status_t err = mNodeInstance->emptyGraphicBuffer(
- header, buffer, OMX_BUFFERFLAG_ENDOFFRAME, timeUs,
- item.mFence->isValid() ? item.mFence->dup() : -1);
+ std::shared_ptr<AcquiredBuffer> buffer = item.mBuffer;
+ // use a GraphicBuffer for now as OMXNodeInstance is using GraphicBuffers to hold references
+ // and it requires this graphic buffer to be able to hold its reference
+ // and thus we would need to create a new GraphicBuffer from an ANWBuffer separate from the
+ // acquired GraphicBuffer.
+ // TODO: this can be reworked globally to use ANWBuffer references
+ sp<GraphicBuffer> graphicBuffer = buffer->getGraphicBuffer();
+ status_t err = mOMXNode->emptyBuffer(
+ codecBufferId, OMX_BUFFERFLAG_ENDOFFRAME, graphicBuffer, codecTimeUs,
+ buffer->getAcquireFenceFd());
+
if (err != OK) {
- ALOGW("WARNING: emptyNativeWindowBuffer failed: 0x%x", err);
- codecBuffer.mGraphicBuffer = NULL;
+ ALOGW("WARNING: emptyGraphicBuffer failed: 0x%x", err);
return err;
}
- ALOGV("emptyNativeWindowBuffer succeeded, h=%p p=%p buf=%p bufhandle=%p",
- header, header->pBuffer, buffer->getNativeBuffer(), buffer->handle);
+ mFreeCodecBuffers.erase(mFreeCodecBuffers.begin());
+
+ ssize_t cbix = mSubmittedCodecBuffers.add(codecBufferId, buffer);
+ ALOGV("emptyGraphicBuffer succeeded, bufferId=%u@%zd bufhandle=%p",
+ codecBufferId, cbix, graphicBuffer->handle);
return OK;
}
@@ -886,125 +811,141 @@
return;
}
- int cbi = findAvailableCodecBuffer_l();
- if (cbi < 0) {
+ if (mFreeCodecBuffers.empty()) {
ALOGV("submitEndOfInputStream_l: no codec buffers available");
return;
}
+ IOMX::buffer_id codecBufferId = *mFreeCodecBuffers.begin();
- // We reject any additional incoming graphic buffers, so there's no need
- // to stick a placeholder into codecBuffer.mGraphicBuffer to mark it as
- // in-use.
- CodecBuffer& codecBuffer(mCodecBuffers.editItemAt(cbi));
-
- OMX_BUFFERHEADERTYPE* header = codecBuffer.mHeader;
- status_t err = mNodeInstance->emptyGraphicBuffer(
- header, NULL /* buffer */, OMX_BUFFERFLAG_ENDOFFRAME | OMX_BUFFERFLAG_EOS,
- 0 /* timestamp */, -1 /* fenceFd */);
+ // We reject any additional incoming graphic buffers. There is no acquired buffer used for EOS
+ status_t err = mOMXNode->emptyBuffer(
+ codecBufferId, OMX_BUFFERFLAG_ENDOFFRAME | OMX_BUFFERFLAG_EOS);
if (err != OK) {
ALOGW("emptyDirectBuffer EOS failed: 0x%x", err);
} else {
- ALOGV("submitEndOfInputStream_l: buffer submitted, header=%p cbi=%d",
- header, cbi);
+ mFreeCodecBuffers.erase(mFreeCodecBuffers.begin());
+ ssize_t cbix = mSubmittedCodecBuffers.add(codecBufferId, nullptr);
+ ALOGV("submitEndOfInputStream_l: buffer submitted, bufferId=%u@%zd", codecBufferId, cbix);
mEndOfStreamSent = true;
+
+ // no need to hold onto any buffers for frame repeating
+ ++mRepeatLastFrameGeneration;
+ mLatestBuffer.mBuffer.reset();
}
}
-int GraphicBufferSource::findAvailableCodecBuffer_l() {
- CHECK(mCodecBuffers.size() > 0);
-
- for (int i = (int)mCodecBuffers.size() - 1; i>= 0; --i) {
- if (mCodecBuffers[i].mGraphicBuffer == NULL) {
- return i;
- }
+status_t GraphicBufferSource::acquireBuffer_l(VideoBuffer *ab) {
+ BufferItem bi;
+ status_t err = mConsumer->acquireBuffer(&bi, 0);
+ if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
+ // shouldn't happen
+ ALOGW("acquireBuffer_l: frame was not available");
+ return err;
+ } else if (err != OK) {
+ ALOGW("acquireBuffer_l: failed with err=%d", err);
+ return err;
}
- return -1;
-}
+ --mNumAvailableUnacquiredBuffers;
-int GraphicBufferSource::findMatchingCodecBuffer_l(
- const OMX_BUFFERHEADERTYPE* header) {
- for (int i = (int)mCodecBuffers.size() - 1; i>= 0; --i) {
- if (mCodecBuffers[i].mHeader == header) {
- return i;
+ // Manage our buffer cache.
+ std::shared_ptr<CachedBuffer> buffer;
+ ssize_t bsi = mBufferSlots.indexOfKey(bi.mSlot);
+ if (bi.mGraphicBuffer != NULL) {
+ // replace/initialize slot with new buffer
+ ALOGV("acquireBuffer_l: %s buffer slot %d", bsi < 0 ? "setting" : "UPDATING", bi.mSlot);
+ if (bsi >= 0) {
+ discardBufferAtSlotIndex_l(bsi);
+ } else {
+ bsi = mBufferSlots.add(bi.mSlot, nullptr);
}
- }
- return -1;
-}
-
-/*
- * Releases an acquired buffer back to the consumer for either persistent
- * or non-persistent surfaces.
- *
- * id: buffer slot to release (in persistent case the id might be changed)
- * frameNum: frame number of the frame being released
- * buffer: GraphicBuffer pointer to release (note this must not be & as we
- * will clear the original mBufferSlot in persistent case)
- * Use NOLINT to supress warning on the copy of 'buffer'.
- * fence: fence of the frame being released
- */
-void GraphicBufferSource::releaseBuffer(
- int &id, uint64_t frameNum,
- const sp<GraphicBuffer> buffer, const sp<Fence> &fence) { // NOLINT
- ALOGV("releaseBuffer: slot=%d", id);
- if (mIsPersistent) {
- mConsumer->detachBuffer(id);
- mBufferSlot[id] = NULL;
-
- if (mConsumer->attachBuffer(&id, buffer) == OK) {
- mConsumer->releaseBuffer(
- id, 0, EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, fence);
- }
+ buffer = std::make_shared<CachedBuffer>(bi.mSlot, bi.mGraphicBuffer);
+ mBufferSlots.replaceValueAt(bsi, buffer);
} else {
- mConsumer->releaseBuffer(
- id, frameNum, EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, fence);
+ buffer = mBufferSlots.valueAt(bsi);
}
- id = -1; // invalidate id
- mNumBufferAcquired--;
+ int64_t frameNum = bi.mFrameNumber;
+
+ std::shared_ptr<AcquiredBuffer> acquiredBuffer =
+ std::make_shared<AcquiredBuffer>(
+ buffer,
+ [frameNum, this](AcquiredBuffer *buffer){
+ // AcquiredBuffer's destructor should always be called when mMutex is locked.
+ // If we had a reentrant mutex, we could just lock it again to ensure this.
+ if (mMutex.tryLock() == 0) {
+ TRESPASS_DBG();
+ mMutex.unlock();
+ }
+
+ // we can release buffers immediately if not using adapters
+ // alternately, we could add them to mSlotsToRelease, but we would
+ // somehow need to propagate frame number to that queue
+ if (buffer->isCached()) {
+ --mNumOutstandingAcquires;
+ mConsumer->releaseBuffer(
+ buffer->getSlot(), frameNum, EGL_NO_DISPLAY, EGL_NO_SYNC_KHR,
+ buffer->getReleaseFence());
+ }
+ },
+ bi.mFence);
+ VideoBuffer videoBuffer{acquiredBuffer, bi.mTimestamp, bi.mDataSpace};
+ *ab = videoBuffer;
+ ++mNumOutstandingAcquires;
+ return OK;
}
// BufferQueue::ConsumerListener callback
-void GraphicBufferSource::onFrameAvailable(const BufferItem& /*item*/) {
+void GraphicBufferSource::onFrameAvailable(const BufferItem& item __unused) {
Mutex::Autolock autoLock(mMutex);
- ALOGV("onFrameAvailable exec=%d avail=%zu",
- mExecuting, mNumFramesAvailable);
+ ALOGV("onFrameAvailable: executing=%d available=%zu+%d",
+ mExecuting, mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers);
+ ++mNumAvailableUnacquiredBuffers;
- if (mEndOfStream || mSuspended) {
- if (mEndOfStream) {
- // This should only be possible if a new buffer was queued after
- // EOS was signaled, i.e. the app is misbehaving.
+ // For BufferQueue we cannot acquire a buffer if we cannot immediately feed it to the codec
+ // UNLESS we are discarding this buffer (acquiring and immediately releasing it), which makes
+ // this an ugly logic.
+ // NOTE: We could also rely on our debug counter but that is meant only as a debug counter.
+ if (!areWeDiscardingAvailableBuffers_l() && mFreeCodecBuffers.empty()) {
+ // we may not be allowed to acquire a possibly encodable buffer, so just note that
+ // it is available
+ ALOGV("onFrameAvailable: cannot acquire buffer right now, do it later");
- ALOGW("onFrameAvailable: EOS is set, ignoring frame");
- } else {
- ALOGV("onFrameAvailable: suspended, ignoring frame");
- }
-
- BufferItem item;
- status_t err = mConsumer->acquireBuffer(&item, 0);
- if (err == OK) {
- mNumBufferAcquired++;
-
- // If this is the first time we're seeing this buffer, add it to our
- // slot table.
- if (item.mGraphicBuffer != NULL) {
- ALOGV("onFrameAvailable: setting mBufferSlot %d", item.mSlot);
- mBufferSlot[item.mSlot] = item.mGraphicBuffer;
- mBufferUseCount[item.mSlot] = 0;
- }
-
- releaseBuffer(item.mSlot, item.mFrameNumber,
- item.mGraphicBuffer, item.mFence);
- }
+ ++mRepeatLastFrameGeneration; // cancel any pending frame repeat
return;
}
- mNumFramesAvailable++;
+ VideoBuffer buffer;
+ status_t err = acquireBuffer_l(&buffer);
+ if (err != OK) {
+ ALOGE("onFrameAvailable: acquireBuffer returned err=%d", err);
+ } else {
+ onBufferAcquired_l(buffer);
+ }
+}
- mRepeatBufferDeferred = false;
- ++mRepeatLastFrameGeneration;
+bool GraphicBufferSource::areWeDiscardingAvailableBuffers_l() {
+ return mEndOfStreamSent // already sent EOS to codec
+ || mOMXNode == nullptr // there is no codec connected
+ || (mSuspended && mActionQueue.empty()) // we are suspended and not waiting for
+ // any further action
+ || !mExecuting;
+}
- if (mExecuting) {
- fillCodecBuffer_l();
+void GraphicBufferSource::onBufferAcquired_l(const VideoBuffer &buffer) {
+ if (mEndOfStreamSent) {
+ // This should only be possible if a new buffer was queued after
+ // EOS was signaled, i.e. the app is misbehaving.
+ ALOGW("onFrameAvailable: EOS is sent, ignoring frame");
+ } else if (mOMXNode == NULL || (mSuspended && mActionQueue.empty())) {
+ // FIXME: if we are suspended but have a resume queued we will stop repeating the last
+ // frame. Is that the desired behavior?
+ ALOGV("onFrameAvailable: suspended, ignoring frame");
+ } else {
+ ++mRepeatLastFrameGeneration; // cancel any pending frame repeat
+ mAvailableBuffers.push_back(buffer);
+ if (mExecuting) {
+ fillCodecBuffer_l();
+ }
}
}
@@ -1022,51 +963,192 @@
for (int i = 0; i < BufferQueue::NUM_BUFFER_SLOTS; i++) {
if ((slotMask & 0x01) != 0) {
- mBufferSlot[i] = NULL;
- mBufferUseCount[i] = 0;
+ discardBufferInSlot_l(i);
}
slotMask >>= 1;
}
}
+void GraphicBufferSource::discardBufferInSlot_l(GraphicBufferSource::slot_id i) {
+ ssize_t bsi = mBufferSlots.indexOfKey(i);
+ if (bsi < 0) {
+ ALOGW("releasing an unpopulated slot: %d", i);
+ } else {
+ discardBufferAtSlotIndex_l(bsi);
+ mBufferSlots.removeItemsAt(bsi);
+ }
+}
+
+void GraphicBufferSource::discardBufferAtSlotIndex_l(ssize_t bsi) {
+ const std::shared_ptr<CachedBuffer>& buffer = mBufferSlots.valueAt(bsi);
+ // use -2 if there is no latest buffer, and -1 if it is no longer cached
+ slot_id latestBufferSlot =
+ mLatestBuffer.mBuffer == nullptr ? -2 : mLatestBuffer.mBuffer->getSlot();
+ ALOGV("releasing acquired buffer: [slot=%d, useCount=%ld], latest: [slot=%d]",
+ mBufferSlots.keyAt(bsi), buffer.use_count(), latestBufferSlot);
+ mBufferSlots.valueAt(bsi)->onDroppedFromCache();
+
+ // If the slot of an acquired buffer is discarded, that buffer will not have to be
+ // released to the producer, so account it here. However, it is possible that the
+ // acquired buffer has already been discarded so check if it still is.
+ if (buffer->isAcquired()) {
+ --mNumOutstandingAcquires;
+ }
+
+ // clear the buffer reference (not technically needed as caller either replaces or deletes
+ // it; done here for safety).
+ mBufferSlots.editValueAt(bsi).reset();
+ CHECK_DBG(buffer == nullptr);
+}
+
+void GraphicBufferSource::releaseAllAvailableBuffers_l() {
+ mAvailableBuffers.clear();
+ while (mNumAvailableUnacquiredBuffers > 0) {
+ VideoBuffer item;
+ if (acquireBuffer_l(&item) != OK) {
+ ALOGW("releaseAllAvailableBuffers: failed to acquire available unacquired buffer");
+ break;
+ }
+ }
+}
+
// BufferQueue::ConsumerListener callback
void GraphicBufferSource::onSidebandStreamChanged() {
ALOG_ASSERT(false, "GraphicBufferSource can't consume sideband streams");
}
-void GraphicBufferSource::setDefaultDataSpace(android_dataspace dataSpace) {
- // no need for mutex as we are not yet running
- ALOGD("setting dataspace: %#x", dataSpace);
- mConsumer->setDefaultBufferDataSpace(dataSpace);
- mLastDataSpace = dataSpace;
+status_t GraphicBufferSource::configure(
+ const sp<IOmxNodeWrapper>& omxNode,
+ int32_t dataSpace,
+ int32_t bufferCount,
+ uint32_t frameWidth,
+ uint32_t frameHeight,
+ uint32_t consumerUsage) {
+ if (omxNode == NULL) {
+ return BAD_VALUE;
+ }
+
+
+ // Call setMaxAcquiredBufferCount without lock.
+ // setMaxAcquiredBufferCount could call back to onBuffersReleased
+ // if the buffer count change results in releasing of existing buffers,
+ // which would lead to deadlock.
+ status_t err = mConsumer->setMaxAcquiredBufferCount(bufferCount);
+ if (err != NO_ERROR) {
+ ALOGE("Unable to set BQ max acquired buffer count to %u: %d",
+ bufferCount, err);
+ return err;
+ }
+
+ {
+ Mutex::Autolock autoLock(mMutex);
+ mOMXNode = omxNode;
+
+ err = mConsumer->setDefaultBufferSize(frameWidth, frameHeight);
+ if (err != NO_ERROR) {
+ ALOGE("Unable to set BQ default buffer size to %ux%u: %d",
+ frameWidth, frameHeight, err);
+ return err;
+ }
+
+ consumerUsage |= GRALLOC_USAGE_HW_VIDEO_ENCODER;
+ mConsumer->setConsumerUsageBits(consumerUsage);
+
+ // Sets the default buffer data space
+ ALOGD("setting dataspace: %#x, acquired=%d", dataSpace, mNumOutstandingAcquires);
+ mConsumer->setDefaultBufferDataSpace((android_dataspace)dataSpace);
+ mLastDataspace = (android_dataspace)dataSpace;
+
+ mExecuting = false;
+ mSuspended = false;
+ mEndOfStream = false;
+ mEndOfStreamSent = false;
+ mSkipFramesBeforeNs = -1ll;
+ mFrameRepeatIntervalUs = -1ll;
+ mRepeatLastFrameGeneration = 0;
+ mOutstandingFrameRepeatCount = 0;
+ mLatestBuffer.mBuffer.reset();
+ mFrameRepeatBlockedOnCodecBuffer = false;
+ mFps = -1.0;
+ mCaptureFps = -1.0;
+ mBaseCaptureUs = -1ll;
+ mBaseFrameUs = -1ll;
+ mPrevCaptureUs = -1ll;
+ mPrevFrameUs = -1ll;
+ mFrameCount = 0;
+ mInputBufferTimeOffsetUs = 0;
+ mStopTimeUs = -1;
+ mActionQueue.clear();
+ }
+
+ return OK;
}
-status_t GraphicBufferSource::setRepeatPreviousFrameDelayUs(
- int64_t repeatAfterUs) {
+status_t GraphicBufferSource::setSuspend(bool suspend, int64_t suspendStartTimeUs) {
+ ALOGV("setSuspend=%d at time %lld us", suspend, (long long)suspendStartTimeUs);
+
+ Mutex::Autolock autoLock(mMutex);
+
+ if (mStopTimeUs != -1) {
+ ALOGE("setSuspend failed as STOP action is pending");
+ return INVALID_OPERATION;
+ }
+
+ // Push the action to the queue.
+ if (suspendStartTimeUs != -1) {
+ // suspendStartTimeUs must be smaller or equal to current systemTime.
+ int64_t currentSystemTimeUs = systemTime() / 1000;
+ if (suspendStartTimeUs > currentSystemTimeUs) {
+ ALOGE("setSuspend failed. %lld is larger than current system time %lld us",
+ (long long)suspendStartTimeUs, (long long)currentSystemTimeUs);
+ return INVALID_OPERATION;
+ }
+ if (mLastActionTimeUs != -1 && suspendStartTimeUs < mLastActionTimeUs) {
+ ALOGE("setSuspend failed. %lld is smaller than last action time %lld us",
+ (long long)suspendStartTimeUs, (long long)mLastActionTimeUs);
+ return INVALID_OPERATION;
+ }
+ mLastActionTimeUs = suspendStartTimeUs;
+ ActionItem action;
+ action.mAction = suspend ? ActionItem::PAUSE : ActionItem::RESUME;
+ action.mActionTimeUs = suspendStartTimeUs;
+ ALOGV("Push %s action into actionQueue", suspend ? "PAUSE" : "RESUME");
+ mActionQueue.push_back(action);
+ } else {
+ if (suspend) {
+ mSuspended = true;
+ releaseAllAvailableBuffers_l();
+ return OK;
+ } else {
+ mSuspended = false;
+ if (mExecuting && !haveAvailableBuffers_l()
+ && mFrameRepeatBlockedOnCodecBuffer) {
+ if (repeatLatestBuffer_l()) {
+ ALOGV("suspend/deferred repeatLatestBuffer_l SUCCESS");
+ mFrameRepeatBlockedOnCodecBuffer = false;
+ } else {
+ ALOGV("suspend/deferred repeatLatestBuffer_l FAILURE");
+ }
+ }
+ }
+ }
+ return OK;
+}
+
+status_t GraphicBufferSource::setRepeatPreviousFrameDelayUs(int64_t repeatAfterUs) {
+ ALOGV("setRepeatPreviousFrameDelayUs: delayUs=%lld", (long long)repeatAfterUs);
+
Mutex::Autolock autoLock(mMutex);
if (mExecuting || repeatAfterUs <= 0ll) {
return INVALID_OPERATION;
}
- mRepeatAfterUs = repeatAfterUs;
-
+ mFrameRepeatIntervalUs = repeatAfterUs;
return OK;
}
-status_t GraphicBufferSource::setMaxTimestampGapUs(int64_t maxGapUs) {
- Mutex::Autolock autoLock(mMutex);
-
- if (mExecuting || maxGapUs <= 0ll) {
- return INVALID_OPERATION;
- }
-
- mMaxTimestampGapUs = maxGapUs;
-
- return OK;
-}
-
-status_t GraphicBufferSource::setInputBufferTimeOffset(int64_t timeOffsetUs) {
+status_t GraphicBufferSource::setTimeOffsetUs(int64_t timeOffsetUs) {
Mutex::Autolock autoLock(mMutex);
// timeOffsetUs must be negative for adjustment.
@@ -1079,6 +1161,8 @@
}
status_t GraphicBufferSource::setMaxFps(float maxFps) {
+ ALOGV("setMaxFps: maxFps=%lld", (long long)maxFps);
+
Mutex::Autolock autoLock(mMutex);
if (mExecuting) {
@@ -1095,34 +1179,101 @@
return OK;
}
-void GraphicBufferSource::setSkipFramesBeforeUs(int64_t skipFramesBeforeUs) {
+status_t GraphicBufferSource::setStartTimeUs(int64_t skipFramesBeforeUs) {
+ ALOGV("setStartTimeUs: skipFramesBeforeUs=%lld", (long long)skipFramesBeforeUs);
+
Mutex::Autolock autoLock(mMutex);
mSkipFramesBeforeNs =
(skipFramesBeforeUs > 0) ? (skipFramesBeforeUs * 1000) : -1ll;
-}
-
-status_t GraphicBufferSource::setTimeLapseConfig(const TimeLapseConfig &config) {
- Mutex::Autolock autoLock(mMutex);
-
- if (mExecuting || config.mTimePerFrameUs <= 0ll || config.mTimePerCaptureUs <= 0ll) {
- return INVALID_OPERATION;
- }
-
- mTimePerFrameUs = config.mTimePerFrameUs;
- mTimePerCaptureUs = config.mTimePerCaptureUs;
return OK;
}
-void GraphicBufferSource::setColorAspects(const ColorAspects &aspects) {
+status_t GraphicBufferSource::setStopTimeUs(int64_t stopTimeUs) {
+ ALOGV("setStopTimeUs: %lld us", (long long)stopTimeUs);
Mutex::Autolock autoLock(mMutex);
- mColorAspects = aspects;
+
+ if (mStopTimeUs != -1) {
+ // Ignore if stop time has already been set
+ return OK;
+ }
+
+ // stopTimeUs must be smaller or equal to current systemTime.
+ int64_t currentSystemTimeUs = systemTime() / 1000;
+ if (stopTimeUs > currentSystemTimeUs) {
+ ALOGE("setStopTimeUs failed. %lld is larger than current system time %lld us",
+ (long long)stopTimeUs, (long long)currentSystemTimeUs);
+ return INVALID_OPERATION;
+ }
+ if (mLastActionTimeUs != -1 && stopTimeUs < mLastActionTimeUs) {
+ ALOGE("setSuspend failed. %lld is smaller than last action time %lld us",
+ (long long)stopTimeUs, (long long)mLastActionTimeUs);
+ return INVALID_OPERATION;
+ }
+ mLastActionTimeUs = stopTimeUs;
+ ActionItem action;
+ action.mAction = ActionItem::STOP;
+ action.mActionTimeUs = stopTimeUs;
+ mActionQueue.push_back(action);
+ mStopTimeUs = stopTimeUs;
+ return OK;
+}
+
+status_t GraphicBufferSource::setTimeLapseConfig(double fps, double captureFps) {
+ ALOGV("setTimeLapseConfig: fps=%lg, captureFps=%lg",
+ fps, captureFps);
+
+ Mutex::Autolock autoLock(mMutex);
+
+ if (mExecuting || !(fps > 0) || !(captureFps > 0)) {
+ return INVALID_OPERATION;
+ }
+
+ mFps = fps;
+ mCaptureFps = captureFps;
+
+ return OK;
+}
+
+status_t GraphicBufferSource::setColorAspects(int32_t aspectsPacked) {
+ Mutex::Autolock autoLock(mMutex);
+ mDefaultColorAspectsPacked = aspectsPacked;
+ ColorAspects colorAspects = ColorUtils::unpackToColorAspects(aspectsPacked);
ALOGD("requesting color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s))",
- aspects.mRange, asString(aspects.mRange),
- aspects.mPrimaries, asString(aspects.mPrimaries),
- aspects.mMatrixCoeffs, asString(aspects.mMatrixCoeffs),
- aspects.mTransfer, asString(aspects.mTransfer));
+ colorAspects.mRange, asString(colorAspects.mRange),
+ colorAspects.mPrimaries, asString(colorAspects.mPrimaries),
+ colorAspects.mMatrixCoeffs, asString(colorAspects.mMatrixCoeffs),
+ colorAspects.mTransfer, asString(colorAspects.mTransfer));
+
+ return OK;
+}
+
+status_t GraphicBufferSource::signalEndOfInputStream() {
+ Mutex::Autolock autoLock(mMutex);
+ ALOGV("signalEndOfInputStream: executing=%d available=%zu+%d eos=%d",
+ mExecuting, mAvailableBuffers.size(), mNumAvailableUnacquiredBuffers, mEndOfStream);
+
+ if (mEndOfStream) {
+ ALOGE("EOS was already signaled");
+ return INVALID_OPERATION;
+ }
+
+ // Set the end-of-stream flag. If no frames are pending from the
+ // BufferQueue, and a codec buffer is available, and we're executing,
+ // and there is no stop timestamp, we initiate the EOS from here.
+ // Otherwise, we'll let codecBufferEmptied() (or omxExecuting) do it.
+ //
+ // Note: if there are no pending frames and all codec buffers are
+ // available, we *must* submit the EOS from here or we'll just
+ // stall since no future events are expected.
+ mEndOfStream = true;
+
+ if (mStopTimeUs == -1 && mExecuting && !haveAvailableBuffers_l()) {
+ submitEndOfInputStream_l();
+ }
+
+ return OK;
}
void GraphicBufferSource::onMessageReceived(const sp<AMessage> &msg) {
@@ -1139,17 +1290,16 @@
break;
}
- if (!mExecuting || mNumFramesAvailable > 0) {
+ if (!mExecuting || haveAvailableBuffers_l()) {
break;
}
bool success = repeatLatestBuffer_l();
-
if (success) {
ALOGV("repeatLatestBuffer_l SUCCESS");
} else {
ALOGV("repeatLatestBuffer_l FAILURE");
- mRepeatBufferDeferred = true;
+ mFrameRepeatBlockedOnCodecBuffer = true;
}
break;
}
diff --git a/media/libstagefright/omx/GraphicBufferSource.h b/media/libstagefright/omx/GraphicBufferSource.h
index aa4ceb3..3df1aa1 100644
--- a/media/libstagefright/omx/GraphicBufferSource.h
+++ b/media/libstagefright/omx/GraphicBufferSource.h
@@ -22,19 +22,27 @@
#include <gui/BufferQueue.h>
#include <utils/RefBase.h>
-#include <OMX_Core.h>
#include <VideoAPI.h>
-#include "../include/OMXNodeInstance.h"
+#include <media/IOMX.h>
+#include <media/OMXFenceParcelable.h>
#include <media/stagefright/foundation/ABase.h>
#include <media/stagefright/foundation/AHandlerReflector.h>
#include <media/stagefright/foundation/ALooper.h>
+#include <android/BnGraphicBufferSource.h>
+#include <android/BnOMXBufferSource.h>
+
+#include "IOmxNodeWrapper.h"
+
namespace android {
+using ::android::binder::Status;
+
struct FrameDropper;
/*
- * This class is used to feed OMX codecs from a Surface via BufferQueue.
+ * This class is used to feed OMX codecs from a Surface via BufferQueue or
+ * HW producer.
*
* Instances of the class don't run on a dedicated thread. Instead,
* various events trigger data movement:
@@ -48,17 +56,26 @@
* Frames of data (and, perhaps, the end-of-stream indication) can arrive
* before the codec is in the "executing" state, so we need to queue
* things up until we're ready to go.
+ *
+ * The GraphicBufferSource can be configure dynamically to discard frames
+ * from the source:
+ *
+ * - if their timestamp is less than a start time
+ * - if the source is suspended or stopped and the suspend/stop-time is reached
+ * - if EOS was signaled
+ * - if there is no encoder connected to it
+ *
+ * The source, furthermore, may choose to not encode (drop) frames if:
+ *
+ * - to throttle the frame rate (keep it under a certain limit)
+ *
+ * Finally the source may optionally hold onto the last non-discarded frame
+ * (even if it was dropped) to reencode it after an interval if no further
+ * frames are sent by the producer.
*/
class GraphicBufferSource : public BufferQueue::ConsumerListener {
public:
- GraphicBufferSource(
- OMXNodeInstance* nodeInstance,
- uint32_t bufferWidth,
- uint32_t bufferHeight,
- uint32_t bufferCount,
- uint32_t consumerUsage,
- const sp<IGraphicBufferConsumer> &consumer = NULL
- );
+ GraphicBufferSource();
virtual ~GraphicBufferSource();
@@ -74,44 +91,57 @@
return mProducer;
}
- // Sets the default buffer data space
- void setDefaultDataSpace(android_dataspace dataSpace);
+ // OmxBufferSource interface
+ // ------------------------------
// This is called when OMX transitions to OMX_StateExecuting, which means
// we can start handing it buffers. If we already have buffers of data
// sitting in the BufferQueue, this will send them to the codec.
- void omxExecuting();
+ Status onOmxExecuting();
// This is called when OMX transitions to OMX_StateIdle, indicating that
// the codec is meant to return all buffers back to the client for them
// to be freed. Do NOT submit any more buffers to the component.
- void omxIdle();
+ Status onOmxIdle();
// This is called when OMX transitions to OMX_StateLoaded, indicating that
// we are shutting down.
- void omxLoaded();
+ Status onOmxLoaded();
// A "codec buffer", i.e. a buffer that can be used to pass data into
// the encoder, has been allocated. (This call does not call back into
// OMXNodeInstance.)
- void addCodecBuffer(OMX_BUFFERHEADERTYPE* header);
+ Status onInputBufferAdded(int32_t bufferId);
// Called from OnEmptyBufferDone. If we have a BQ buffer available,
// fill it with a new frame of data; otherwise, just mark it as available.
- void codecBufferEmptied(OMX_BUFFERHEADERTYPE* header, int fenceFd);
+ Status onInputBufferEmptied(int32_t bufferId, int fenceFd);
- // Called when omx_message::FILL_BUFFER_DONE is received. (Currently the
- // buffer source will fix timestamp in the header if needed.)
- void codecBufferFilled(OMX_BUFFERHEADERTYPE* header);
+ // IGraphicBufferSource interface
+ // ------------------------------
- // This is called after the last input frame has been submitted. We
- // need to submit an empty buffer with the EOS flag set. If we don't
- // have a codec buffer ready, we just set the mEndOfStream flag.
+ // Configure the buffer source to be used with an OMX node with the default
+ // data space.
+ status_t configure(
+ const sp<IOmxNodeWrapper> &omxNode,
+ int32_t dataSpace,
+ int32_t bufferCount,
+ uint32_t frameWidth,
+ uint32_t frameHeight,
+ uint32_t consumerUsage);
+
+ // This is called after the last input frame has been submitted or buffer
+ // timestamp is greater or equal than stopTimeUs. We need to submit an empty
+ // buffer with the EOS flag set. If we don't have a codec buffer ready,
+ // we just set the mEndOfStream flag.
status_t signalEndOfInputStream();
// If suspend is true, all incoming buffers (including those currently
- // in the BufferQueue) will be discarded until the suspension is lifted.
- void suspend(bool suspend);
+ // in the BufferQueue) with timestamp larger than timeUs will be discarded
+ // until the suspension is lifted. If suspend is false, all incoming buffers
+ // including those currently in the BufferQueue) with timestamp larger than
+ // timeUs will be processed. timeUs uses SYSTEM_TIME_MONOTONIC time base.
+ status_t setSuspend(bool suspend, int64_t timeUs);
// Specifies the interval after which we requeue the buffer previously
// queued to the encoder. This is useful in the case of surface flinger
@@ -122,198 +152,232 @@
// state and once this behaviour is specified it cannot be reset.
status_t setRepeatPreviousFrameDelayUs(int64_t repeatAfterUs);
- // When set, the timestamp fed to the encoder will be modified such that
- // the gap between two adjacent frames is capped at maxGapUs. Timestamp
- // will be restored to the original when the encoded frame is returned to
- // the client.
- // This is to solve a problem in certain real-time streaming case, where
- // encoder's rate control logic produces huge frames after a long period
- // of suspension on input.
- status_t setMaxTimestampGapUs(int64_t maxGapUs);
-
// Sets the input buffer timestamp offset.
// When set, the sample's timestamp will be adjusted with the timeOffsetUs.
- status_t setInputBufferTimeOffset(int64_t timeOffsetUs);
+ status_t setTimeOffsetUs(int64_t timeOffsetUs);
// When set, the max frame rate fed to the encoder will be capped at maxFps.
status_t setMaxFps(float maxFps);
- struct TimeLapseConfig {
- int64_t mTimePerFrameUs; // the time (us) between two frames for playback
- int64_t mTimePerCaptureUs; // the time (us) between two frames for capture
- };
-
// Sets the time lapse (or slow motion) parameters.
// When set, the sample's timestamp will be modified to playback framerate,
// and capture timestamp will be modified to capture rate.
- status_t setTimeLapseConfig(const TimeLapseConfig &config);
+ status_t setTimeLapseConfig(double fps, double captureFps);
// Sets the start time us (in system time), samples before which should
// be dropped and not submitted to encoder
- void setSkipFramesBeforeUs(int64_t startTimeUs);
+ status_t setStartTimeUs(int64_t startTimeUs);
+
+ // Sets the stop time us (in system time), samples after which should be dropped
+ // and not submitted to encoder. timeUs uses SYSTEM_TIME_MONOTONIC time base.
+ status_t setStopTimeUs(int64_t stopTimeUs);
// Sets the desired color aspects, e.g. to be used when producer does not specify a dataspace.
- void setColorAspects(const ColorAspects &aspects);
+ status_t setColorAspects(int32_t aspectsPacked);
protected:
+ // BQ::ConsumerListener interface
+ // ------------------------------
+
// BufferQueue::ConsumerListener interface, called when a new frame of
// data is available. If we're executing and a codec buffer is
// available, we acquire the buffer, copy the GraphicBuffer reference
// into the codec buffer, and call Empty[This]Buffer. If we're not yet
// executing or there's no codec buffer available, we just increment
// mNumFramesAvailable and return.
- virtual void onFrameAvailable(const BufferItem& item);
+ void onFrameAvailable(const BufferItem& item) override;
// BufferQueue::ConsumerListener interface, called when the client has
// released one or more GraphicBuffers. We clear out the appropriate
// set of mBufferSlot entries.
- virtual void onBuffersReleased();
+ void onBuffersReleased() override;
// BufferQueue::ConsumerListener interface, called when the client has
// changed the sideband stream. GraphicBufferSource doesn't handle sideband
// streams so this is a no-op (and should never be called).
- virtual void onSidebandStreamChanged();
+ void onSidebandStreamChanged() override;
private:
- // PersistentProxyListener is similar to BufferQueue::ProxyConsumerListener
- // except that it returns (acquire/detach/re-attache/release) buffers
- // in onFrameAvailable() if the actual consumer object is no longer valid.
- //
- // This class is used in persistent input surface case to prevent buffer
- // loss when onFrameAvailable() is received while we don't have a valid
- // consumer around.
- class PersistentProxyListener : public BnConsumerListener {
- public:
- PersistentProxyListener(
- const wp<IGraphicBufferConsumer> &consumer,
- const wp<ConsumerListener>& consumerListener);
- virtual ~PersistentProxyListener();
- virtual void onFrameAvailable(const BufferItem& item) override;
- virtual void onFrameReplaced(const BufferItem& item) override;
- virtual void onBuffersReleased() override;
- virtual void onSidebandStreamChanged() override;
- private:
- // mConsumerListener is a weak reference to the IConsumerListener.
- wp<ConsumerListener> mConsumerListener;
- // mConsumer is a weak reference to the IGraphicBufferConsumer, use
- // a weak ref to avoid circular ref between mConsumer and this class
- wp<IGraphicBufferConsumer> mConsumer;
- };
-
- // Keep track of codec input buffers. They may either be available
- // (mGraphicBuffer == NULL) or in use by the codec.
- struct CodecBuffer {
- OMX_BUFFERHEADERTYPE* mHeader;
-
- // buffer producer's frame-number for buffer
- uint64_t mFrameNumber;
-
- // buffer producer's buffer slot for buffer
- int mSlot;
-
- sp<GraphicBuffer> mGraphicBuffer;
- };
-
- // Returns the index of an available codec buffer. If none are
- // available, returns -1. Mutex must be held by caller.
- int findAvailableCodecBuffer_l();
-
- // Returns true if a codec buffer is available.
- bool isCodecBufferAvailable_l() {
- return findAvailableCodecBuffer_l() >= 0;
- }
-
- // Finds the mCodecBuffers entry that matches. Returns -1 if not found.
- int findMatchingCodecBuffer_l(const OMX_BUFFERHEADERTYPE* header);
-
- // Fills a codec buffer with a frame from the BufferQueue. This must
- // only be called when we know that a frame of data is ready (i.e. we're
- // in the onFrameAvailable callback, or if we're in codecBufferEmptied
- // and mNumFramesAvailable is nonzero). Returns without doing anything if
- // we don't have a codec buffer available.
- //
- // Returns true if we successfully filled a codec buffer with a BQ buffer.
- bool fillCodecBuffer_l();
-
- // Marks the mCodecBuffers entry as in-use, copies the GraphicBuffer
- // reference into the codec buffer, and submits the data to the codec.
- status_t submitBuffer_l(const BufferItem &item, int cbi);
-
- // Submits an empty buffer, with the EOS flag set. Returns without
- // doing anything if we don't have a codec buffer available.
- void submitEndOfInputStream_l();
-
- // Release buffer to the consumer
- void releaseBuffer(
- int &id, uint64_t frameNum,
- const sp<GraphicBuffer> buffer, const sp<Fence> &fence);
-
- void setLatestBuffer_l(const BufferItem &item, bool dropped);
- bool repeatLatestBuffer_l();
- int64_t getTimestamp(const BufferItem &item);
-
- // called when the data space of the input buffer changes
- void onDataSpaceChanged_l(android_dataspace dataSpace, android_pixel_format pixelFormat);
-
// Lock, covers all member variables.
mutable Mutex mMutex;
// Used to report constructor failure.
status_t mInitCheck;
- // Pointer back to the object that contains us. We send buffers here.
- OMXNodeInstance* mNodeInstance;
+ // Graphic buffer reference objects
+ // --------------------------------
+
+ // These are used to keep a shared reference to GraphicBuffers and gralloc handles owned by the
+ // GraphicBufferSource as well as to manage the cache slots. Separate references are owned by
+ // the buffer cache (controlled by the buffer queue/buffer producer) and the codec.
+
+ // When we get a buffer from the producer (BQ) it designates them to be cached into specific
+ // slots. Each slot owns a shared reference to the graphic buffer (we track these using
+ // CachedBuffer) that is in that slot, but the producer controls the slots.
+ struct CachedBuffer;
+
+ // When we acquire a buffer, we must release it back to the producer once we (or the codec)
+ // no longer uses it (as long as the buffer is still in the cache slot). We use shared
+ // AcquiredBuffer instances for this purpose - and we call release buffer when the last
+ // reference is relinquished.
+ struct AcquiredBuffer;
+
+ // We also need to keep some extra metadata (other than the buffer reference) for acquired
+ // buffers. These are tracked in VideoBuffer struct.
+ struct VideoBuffer {
+ std::shared_ptr<AcquiredBuffer> mBuffer;
+ nsecs_t mTimestampNs;
+ android_dataspace_t mDataspace;
+ };
+
+ // Cached and aquired buffers
+ // --------------------------------
+
+ typedef int slot_id;
+
+ // Maps a slot to the cached buffer in that slot
+ KeyedVector<slot_id, std::shared_ptr<CachedBuffer>> mBufferSlots;
+
+ // Queue of buffers acquired in chronological order that are not yet submitted to the codec
+ List<VideoBuffer> mAvailableBuffers;
+
+ // Number of buffers that have been signaled by the producer that they are available, but
+ // we've been unable to acquire them due to our max acquire count
+ int32_t mNumAvailableUnacquiredBuffers;
+
+ // Number of frames acquired from consumer (debug only)
+ // (as in aquireBuffer called, and release needs to be called)
+ int32_t mNumOutstandingAcquires;
+
+ // Acquire a buffer from the BQ and store it in |item| if successful
+ // \return OK on success, or error on failure.
+ status_t acquireBuffer_l(VideoBuffer *item);
+
+ // Called when a buffer was acquired from the producer
+ void onBufferAcquired_l(const VideoBuffer &buffer);
+
+ // marks the buffer at the slot no longer cached, and accounts for the outstanding
+ // acquire count
+ void discardBufferInSlot_l(slot_id i);
+
+ // marks the buffer at the slot index no longer cached, and accounts for the outstanding
+ // acquire count
+ void discardBufferAtSlotIndex_l(ssize_t bsi);
+
+ // release all acquired and unacquired available buffers
+ // This method will return if it fails to acquire an unacquired available buffer, which will
+ // leave mNumAvailableUnacquiredBuffers positive on return.
+ void releaseAllAvailableBuffers_l();
+
+ // returns whether we have any available buffers (acquired or not-yet-acquired)
+ bool haveAvailableBuffers_l() const {
+ return !mAvailableBuffers.empty() || mNumAvailableUnacquiredBuffers > 0;
+ }
+
+ // Codec buffers
+ // -------------
+
+ // When we queue buffers to the encoder, we must hold the references to the graphic buffers
+ // in those buffers - as the producer may free the slots.
+
+ typedef int32_t codec_buffer_id;
+
+ // set of codec buffer ID-s of buffers available to fill
+ List<codec_buffer_id> mFreeCodecBuffers;
+
+ // maps codec buffer ID-s to buffer info submitted to the codec. Used to keep a reference for
+ // the graphics buffer.
+ KeyedVector<codec_buffer_id, std::shared_ptr<AcquiredBuffer>> mSubmittedCodecBuffers;
+
+ // Processes the next acquired frame. If there is no available codec buffer, it returns false
+ // without any further action.
+ //
+ // Otherwise, it consumes the next acquired frame and determines if it needs to be discarded or
+ // dropped. If neither are needed, it submits it to the codec. It also saves the latest
+ // non-dropped frame and submits it for repeat encoding (if this is enabled).
+ //
+ // \require there must be an acquired frame (i.e. we're in the onFrameAvailable callback,
+ // or if we're in codecBufferEmptied and mNumFramesAvailable is nonzero).
+ // \require codec must be executing
+ // \returns true if acquired (and handled) the next frame. Otherwise, false.
+ bool fillCodecBuffer_l();
+
+ // Calculates the media timestamp for |item| and on success it submits the buffer to the codec,
+ // while also keeping a reference for it in mSubmittedCodecBuffers.
+ // Returns UNKNOWN_ERROR if the buffer was not submitted due to buffer timestamp. Otherwise,
+ // it returns any submit success or error value returned by the codec.
+ status_t submitBuffer_l(const VideoBuffer &item);
+
+ // Submits an empty buffer, with the EOS flag set if there is an available codec buffer and
+ // sets mEndOfStreamSent flag. Does nothing if there is no codec buffer available.
+ void submitEndOfInputStream_l();
+
+ // Set to true if we want to send end-of-stream after we run out of available frames from the
+ // producer
+ bool mEndOfStream;
+
+ // Flag that the EOS was submitted to the encoder
+ bool mEndOfStreamSent;
+
+ // Dataspace for the last frame submitted to the codec
+ android_dataspace mLastDataspace;
+
+ // Default color aspects for this source
+ int32_t mDefaultColorAspectsPacked;
+
+ // called when the data space of the input buffer changes
+ void onDataspaceChanged_l(android_dataspace dataspace, android_pixel_format pixelFormat);
+
+ // Pointer back to the Omx node that created us. We send buffers here.
+ sp<IOmxNodeWrapper> mOMXNode;
// Set by omxExecuting() / omxIdling().
bool mExecuting;
bool mSuspended;
- // Last dataspace seen
- android_dataspace mLastDataSpace;
+ // returns true if this source is unconditionally discarding acquired buffers at the moment
+ // regardless of the metadata of those buffers
+ bool areWeDiscardingAvailableBuffers_l();
// Our BufferQueue interfaces. mProducer is passed to the producer through
// getIGraphicBufferProducer, and mConsumer is used internally to retrieve
// the buffers queued by the producer.
- bool mIsPersistent;
sp<IGraphicBufferProducer> mProducer;
sp<IGraphicBufferConsumer> mConsumer;
- // Number of frames pending in BufferQueue that haven't yet been
- // forwarded to the codec.
- size_t mNumFramesAvailable;
+ // The time to stop sending buffers.
+ int64_t mStopTimeUs;
- // Number of frames acquired from consumer (debug only)
- int32_t mNumBufferAcquired;
+ struct ActionItem {
+ typedef enum {
+ PAUSE,
+ RESUME,
+ STOP
+ } ActionType;
+ ActionType mAction;
+ int64_t mActionTimeUs;
+ };
- // Set to true if we want to send end-of-stream after we run out of
- // frames in BufferQueue.
- bool mEndOfStream;
- bool mEndOfStreamSent;
+ // Maintain last action timestamp to ensure all the action timestamps are
+ // monotonically increasing.
+ int64_t mLastActionTimeUs;
- // Cache of GraphicBuffers from the buffer queue. When the codec
- // is done processing a GraphicBuffer, we can use this to map back
- // to a slot number.
- sp<GraphicBuffer> mBufferSlot[BufferQueue::NUM_BUFFER_SLOTS];
- int32_t mBufferUseCount[BufferQueue::NUM_BUFFER_SLOTS];
-
- // Tracks codec buffers.
- Vector<CodecBuffer> mCodecBuffers;
+ // An action queue that queue up all the actions sent to GraphicBufferSource.
+ // STOP action should only show up at the end of the list as all the actions
+ // after a STOP action will be discarded. mActionQueue is protected by mMutex.
+ List<ActionItem> mActionQueue;
////
friend struct AHandlerReflector<GraphicBufferSource>;
enum {
- kWhatRepeatLastFrame,
+ kWhatRepeatLastFrame, ///< queue last frame for reencoding
};
enum {
kRepeatLastFrameCount = 10,
};
- KeyedVector<int64_t, int64_t> mOriginalTimeUs;
- int64_t mMaxTimestampGapUs;
- int64_t mPrevOriginalTimeUs;
- int64_t mPrevModifiedTimeUs;
int64_t mSkipFramesBeforeNs;
sp<FrameDropper> mFrameDropper;
@@ -321,29 +385,86 @@
sp<ALooper> mLooper;
sp<AHandlerReflector<GraphicBufferSource> > mReflector;
- int64_t mRepeatAfterUs;
- int32_t mRepeatLastFrameGeneration;
- int64_t mRepeatLastFrameTimestamp;
- int32_t mRepeatLastFrameCount;
+ // Repeat last frame feature
+ // -------------------------
+ // configuration parameter: repeat interval for frame repeating (<0 if repeating is disabled)
+ int64_t mFrameRepeatIntervalUs;
- int mLatestBufferId;
- uint64_t mLatestBufferFrameNum;
- sp<Fence> mLatestBufferFence;
+ // current frame repeat generation - used to cancel a pending frame repeat
+ int32_t mRepeatLastFrameGeneration;
+
+ // number of times to repeat latest frame (0 = none)
+ int32_t mOutstandingFrameRepeatCount;
// The previous buffer should've been repeated but
// no codec buffer was available at the time.
- bool mRepeatBufferDeferred;
+ bool mFrameRepeatBlockedOnCodecBuffer;
+
+ // hold a reference to the last acquired (and not discarded) frame for frame repeating
+ VideoBuffer mLatestBuffer;
+
+ // queue last frame for reencode after the repeat interval.
+ void queueFrameRepeat_l();
+
+ // save |item| as the latest buffer and queue it for reencode (repeat)
+ void setLatestBuffer_l(const VideoBuffer &item);
+
+ // submit last frame to encoder and queue it for reencode
+ // \return true if buffer was submitted, false if it wasn't (e.g. source is suspended, there
+ // is no available codec buffer)
+ bool repeatLatestBuffer_l();
// Time lapse / slow motion configuration
- int64_t mTimePerCaptureUs;
- int64_t mTimePerFrameUs;
+ // --------------------------------------
+
+ // desired frame rate for encoding - value <= 0 if undefined
+ double mFps;
+
+ // desired frame rate for capture - value <= 0 if undefined
+ double mCaptureFps;
+
+ // Time lapse mode is enabled if the capture frame rate is defined and it is
+ // smaller than half the encoding frame rate (if defined). In this mode,
+ // frames that come in between the capture interval (the reciprocal of the
+ // capture frame rate) are dropped and the encoding timestamp is adjusted to
+ // match the desired encoding frame rate.
+ //
+ // Slow motion mode is enabled if both encoding and capture frame rates are
+ // defined and the encoding frame rate is less than half the capture frame
+ // rate. In this mode, the source is expected to produce frames with an even
+ // timestamp interval (after rounding) with the configured capture fps. The
+ // first source timestamp is used as the source base time. Afterwards, the
+ // timestamp of each source frame is snapped to the nearest expected capture
+ // timestamp and scaled to match the configured encoding frame rate.
+
+ // These modes must be enabled before using this source.
+
+ // adjusted capture timestamp of the base frame
+ int64_t mBaseCaptureUs;
+
+ // adjusted encoding timestamp of the base frame
+ int64_t mBaseFrameUs;
+
+ // number of frames from the base time
+ int64_t mFrameCount;
+
+ // adjusted capture timestamp for previous frame (negative if there were
+ // none)
int64_t mPrevCaptureUs;
+
+ // adjusted media timestamp for previous frame (negative if there were none)
int64_t mPrevFrameUs;
+ // desired offset between media time and capture time
int64_t mInputBufferTimeOffsetUs;
- MetadataBufferType mMetadataBufferType;
- ColorAspects mColorAspects;
+ // Calculates and outputs the timestamp to use for a buffer with a specific buffer timestamp
+ // |bufferTimeNs|. Returns false on failure (buffer too close or timestamp is moving
+ // backwards). Otherwise, stores the media timestamp in |*codecTimeUs| and returns true.
+ //
+ // This method takes into account the start time offset and any time lapse or slow motion time
+ // adjustment requests.
+ bool calculateCodecTimestamp_l(nsecs_t bufferTimeNs, int64_t *codecTimeUs);
void onMessageReceived(const sp<AMessage> &msg);
diff --git a/media/libstagefright/omx/IOmxNodeWrapper.h b/media/libstagefright/omx/IOmxNodeWrapper.h
new file mode 100644
index 0000000..cd44e67
--- /dev/null
+++ b/media/libstagefright/omx/IOmxNodeWrapper.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef IOMX_NODE_WRAPPER_SOURCE_H_
+#define IOMX_NODE_WRAPPER_SOURCE_H_
+
+#include <utils/RefBase.h>
+#include <utils/StrongPointer.h>
+#include <ui/GraphicBuffer.h>
+
+#include <stdint.h>
+
+namespace android {
+
+struct IOmxNodeWrapper : public RefBase {
+ virtual status_t emptyBuffer(
+ int32_t bufferId, uint32_t flags,
+ const sp<GraphicBuffer> &buffer = nullptr,
+ int64_t timestamp = 0, int fenceFd = -1) = 0;
+ virtual void dispatchDataSpaceChanged(
+ int32_t dataSpace, int32_t aspects, int32_t pixelFormat) = 0;
+};
+
+} // namespace android
+
+#endif // IOMX_NODE_WRAPPER_SOURCE_H_
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index f7058d7..8c1141d 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -26,149 +26,18 @@
#include "../include/OMXNodeInstance.h"
-#include <binder/IMemory.h>
#include <media/stagefright/foundation/ADebug.h>
-#include <utils/threads.h>
+#include "BWGraphicBufferSource.h"
#include "OMXMaster.h"
#include "OMXUtils.h"
-#include <OMX_AsString.h>
-#include <OMX_Component.h>
-#include <OMX_VideoExt.h>
-
namespace android {
// node ids are created by concatenating the pid with a 16-bit counter
static size_t kMaxNodeInstances = (1 << 16);
-////////////////////////////////////////////////////////////////////////////////
-
-// This provides the underlying Thread used by CallbackDispatcher.
-// Note that deriving CallbackDispatcher from Thread does not work.
-
-struct OMX::CallbackDispatcherThread : public Thread {
- explicit CallbackDispatcherThread(CallbackDispatcher *dispatcher)
- : mDispatcher(dispatcher) {
- }
-
-private:
- CallbackDispatcher *mDispatcher;
-
- bool threadLoop();
-
- CallbackDispatcherThread(const CallbackDispatcherThread &);
- CallbackDispatcherThread &operator=(const CallbackDispatcherThread &);
-};
-
-////////////////////////////////////////////////////////////////////////////////
-
-struct OMX::CallbackDispatcher : public RefBase {
- explicit CallbackDispatcher(OMXNodeInstance *owner);
-
- // Posts |msg| to the listener's queue. If |realTime| is true, the listener thread is notified
- // that a new message is available on the queue. Otherwise, the message stays on the queue, but
- // the listener is not notified of it. It will process this message when a subsequent message
- // is posted with |realTime| set to true.
- void post(const omx_message &msg, bool realTime = true);
-
- bool loop();
-
-protected:
- virtual ~CallbackDispatcher();
-
-private:
- Mutex mLock;
-
- OMXNodeInstance *mOwner;
- bool mDone;
- Condition mQueueChanged;
- std::list<omx_message> mQueue;
-
- sp<CallbackDispatcherThread> mThread;
-
- void dispatch(std::list<omx_message> &messages);
-
- CallbackDispatcher(const CallbackDispatcher &);
- CallbackDispatcher &operator=(const CallbackDispatcher &);
-};
-
-OMX::CallbackDispatcher::CallbackDispatcher(OMXNodeInstance *owner)
- : mOwner(owner),
- mDone(false) {
- mThread = new CallbackDispatcherThread(this);
- mThread->run("OMXCallbackDisp", ANDROID_PRIORITY_FOREGROUND);
-}
-
-OMX::CallbackDispatcher::~CallbackDispatcher() {
- {
- Mutex::Autolock autoLock(mLock);
-
- mDone = true;
- mQueueChanged.signal();
- }
-
- // A join on self can happen if the last ref to CallbackDispatcher
- // is released within the CallbackDispatcherThread loop
- status_t status = mThread->join();
- if (status != WOULD_BLOCK) {
- // Other than join to self, the only other error return codes are
- // whatever readyToRun() returns, and we don't override that
- CHECK_EQ(status, (status_t)NO_ERROR);
- }
-}
-
-void OMX::CallbackDispatcher::post(const omx_message &msg, bool realTime) {
- Mutex::Autolock autoLock(mLock);
-
- mQueue.push_back(msg);
- if (realTime) {
- mQueueChanged.signal();
- }
-}
-
-void OMX::CallbackDispatcher::dispatch(std::list<omx_message> &messages) {
- if (mOwner == NULL) {
- ALOGV("Would have dispatched a message to a node that's already gone.");
- return;
- }
- mOwner->onMessages(messages);
-}
-
-bool OMX::CallbackDispatcher::loop() {
- for (;;) {
- std::list<omx_message> messages;
-
- {
- Mutex::Autolock autoLock(mLock);
- while (!mDone && mQueue.empty()) {
- mQueueChanged.wait(mLock);
- }
-
- if (mDone) {
- break;
- }
-
- messages.swap(mQueue);
- }
-
- dispatch(messages);
- }
-
- return false;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-bool OMX::CallbackDispatcherThread::threadLoop() {
- return mDispatcher->loop();
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-OMX::OMX()
- : mMaster(new OMXMaster),
- mNodeCounter(0) {
+OMX::OMX() : mMaster(new OMXMaster), mParser() {
}
OMX::~OMX() {
@@ -177,7 +46,7 @@
}
void OMX::binderDied(const wp<IBinder> &the_late_who) {
- OMXNodeInstance *instance;
+ sp<OMXNodeInstance> instance;
{
Mutex::Autolock autoLock(mLock);
@@ -192,24 +61,9 @@
instance = mLiveNodes.editValueAt(index);
mLiveNodes.removeItemsAt(index);
-
- index = mDispatchers.indexOfKey(instance->nodeID());
- CHECK(index >= 0);
- mDispatchers.removeItemsAt(index);
-
- invalidateNodeID_l(instance->nodeID());
}
- instance->onObserverDied(mMaster);
-}
-
-bool OMX::isSecure(node_id node) {
- OMXNodeInstance *instance = findInstance(node);
- return (instance == NULL ? false : instance->isSecure());
-}
-
-bool OMX::livesLocally(node_id /* node */, pid_t pid) {
- return pid == getpid();
+ instance->onObserverDied();
}
status_t OMX::listNodes(List<ComponentInfo> *list) {
@@ -242,48 +96,51 @@
status_t OMX::allocateNode(
const char *name, const sp<IOMXObserver> &observer,
- sp<IBinder> *nodeBinder, node_id *node) {
+ sp<IOMXNode> *omxNode) {
Mutex::Autolock autoLock(mLock);
- *node = 0;
- if (nodeBinder != NULL) {
- *nodeBinder = NULL;
- }
+ omxNode->clear();
- if (mNodeIDToInstance.size() == kMaxNodeInstances) {
- // all possible node IDs are in use
+ if (mLiveNodes.size() == kMaxNodeInstances) {
return NO_MEMORY;
}
- OMXNodeInstance *instance = new OMXNodeInstance(this, observer, name);
+ sp<OMXNodeInstance> instance = new OMXNodeInstance(this, observer, name);
OMX_COMPONENTTYPE *handle;
OMX_ERRORTYPE err = mMaster->makeComponentInstance(
name, &OMXNodeInstance::kCallbacks,
- instance, &handle);
+ instance.get(), &handle);
if (err != OMX_ErrorNone) {
ALOGE("FAILED to allocate omx component '%s' err=%s(%#x)", name, asString(err), err);
- instance->onGetHandleFailed();
-
return StatusFromOMXError(err);
}
-
- *node = makeNodeID_l(instance);
- mDispatchers.add(*node, new CallbackDispatcher(instance));
-
- instance->setHandle(*node, handle);
+ instance->setHandle(handle);
+ std::vector<AString> quirkVector;
+ if (mParser.getQuirks(name, &quirkVector) == OK) {
+ uint32_t quirks = 0;
+ for (const AString quirk : quirkVector) {
+ if (quirk == "requires-allocate-on-input-ports") {
+ quirks |= kRequiresAllocateBufferOnInputPorts;
+ }
+ if (quirk == "requires-allocate-on-output-ports") {
+ quirks |= kRequiresAllocateBufferOnOutputPorts;
+ }
+ }
+ instance->setQuirks(quirks);
+ }
mLiveNodes.add(IInterface::asBinder(observer), instance);
IInterface::asBinder(observer)->linkToDeath(this);
+ *omxNode = instance;
+
return OK;
}
-status_t OMX::freeNode(node_id node) {
- OMXNodeInstance *instance = findInstance(node);
-
+status_t OMX::freeNode(const sp<OMXNodeInstance> &instance) {
if (instance == NULL) {
return OK;
}
@@ -294,473 +151,44 @@
if (index < 0) {
// This could conceivably happen if the observer dies at roughly the
// same time that a client attempts to free the node explicitly.
- return OK;
+
+ // NOTE: it's guaranteed that this method is called at most once per
+ // instance.
+ ALOGV("freeNode: instance already removed from book-keeping.");
+ } else {
+ mLiveNodes.removeItemsAt(index);
+ IInterface::asBinder(instance->observer())->unlinkToDeath(this);
}
- mLiveNodes.removeItemsAt(index);
}
- IInterface::asBinder(instance->observer())->unlinkToDeath(this);
+ CHECK(instance->handle() != NULL);
+ OMX_ERRORTYPE err = mMaster->destroyComponentInstance(
+ static_cast<OMX_COMPONENTTYPE *>(instance->handle()));
+ ALOGV("freeNode: handle destroyed: %p", instance->handle());
- status_t err = instance->freeNode(mMaster);
-
- {
- Mutex::Autolock autoLock(mLock);
- ssize_t index = mDispatchers.indexOfKey(node);
- CHECK(index >= 0);
- mDispatchers.removeItemsAt(index);
- }
-
- return err;
-}
-
-status_t OMX::sendCommand(
- node_id node, OMX_COMMANDTYPE cmd, OMX_S32 param) {
- OMXNodeInstance *instance = findInstance(node);
-
- if (instance == NULL) {
- return NAME_NOT_FOUND;
- }
-
- return instance->sendCommand(cmd, param);
-}
-
-status_t OMX::getParameter(
- node_id node, OMX_INDEXTYPE index,
- void *params, size_t size) {
- ALOGV("getParameter(%u %#x %p %zd)", node, index, params, size);
- OMXNodeInstance *instance = findInstance(node);
-
- if (instance == NULL) {
- return NAME_NOT_FOUND;
- }
-
- return instance->getParameter(
- index, params, size);
-}
-
-status_t OMX::setParameter(
- node_id node, OMX_INDEXTYPE index,
- const void *params, size_t size) {
- ALOGV("setParameter(%u %#x %p %zd)", node, index, params, size);
- OMXNodeInstance *instance = findInstance(node);
-
- if (instance == NULL) {
- return NAME_NOT_FOUND;
- }
-
- return instance->setParameter(
- index, params, size);
-}
-
-status_t OMX::getConfig(
- node_id node, OMX_INDEXTYPE index,
- void *params, size_t size) {
- OMXNodeInstance *instance = findInstance(node);
-
- if (instance == NULL) {
- return NAME_NOT_FOUND;
- }
-
- return instance->getConfig(
- index, params, size);
-}
-
-status_t OMX::setConfig(
- node_id node, OMX_INDEXTYPE index,
- const void *params, size_t size) {
- OMXNodeInstance *instance = findInstance(node);
-
- if (instance == NULL) {
- return NAME_NOT_FOUND;
- }
-
- return instance->setConfig(
- index, params, size);
-}
-
-status_t OMX::getState(
- node_id node, OMX_STATETYPE* state) {
- OMXNodeInstance *instance = findInstance(node);
-
- if (instance == NULL) {
- return NAME_NOT_FOUND;
- }
-
- return instance->getState(
- state);
-}
-
-status_t OMX::enableNativeBuffers(
- node_id node, OMX_U32 port_index, OMX_BOOL graphic, OMX_BOOL enable) {
- OMXNodeInstance *instance = findInstance(node);
-
- if (instance == NULL) {
- return NAME_NOT_FOUND;
- }
-
- return instance->enableNativeBuffers(port_index, graphic, enable);
-}
-
-status_t OMX::getGraphicBufferUsage(
- node_id node, OMX_U32 port_index, OMX_U32* usage) {
- OMXNodeInstance *instance = findInstance(node);
-
- if (instance == NULL) {
- return NAME_NOT_FOUND;
- }
-
- return instance->getGraphicBufferUsage(port_index, usage);
-}
-
-status_t OMX::storeMetaDataInBuffers(
- node_id node, OMX_U32 port_index, OMX_BOOL enable, MetadataBufferType *type) {
- OMXNodeInstance *instance = findInstance(node);
-
- if (instance == NULL) {
- return NAME_NOT_FOUND;
- }
-
- return instance->storeMetaDataInBuffers(port_index, enable, type);
-}
-
-status_t OMX::prepareForAdaptivePlayback(
- node_id node, OMX_U32 portIndex, OMX_BOOL enable,
- OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight) {
- OMXNodeInstance *instance = findInstance(node);
-
- if (instance == NULL) {
- return NAME_NOT_FOUND;
- }
-
- return instance->prepareForAdaptivePlayback(
- portIndex, enable, maxFrameWidth, maxFrameHeight);
-}
-
-status_t OMX::configureVideoTunnelMode(
- node_id node, OMX_U32 portIndex, OMX_BOOL tunneled,
- OMX_U32 audioHwSync, native_handle_t **sidebandHandle) {
- OMXNodeInstance *instance = findInstance(node);
-
- if (instance == NULL) {
- return NAME_NOT_FOUND;
- }
-
- return instance->configureVideoTunnelMode(
- portIndex, tunneled, audioHwSync, sidebandHandle);
-}
-
-status_t OMX::useBuffer(
- node_id node, OMX_U32 port_index, const sp<IMemory> ¶ms,
- buffer_id *buffer, OMX_U32 allottedSize) {
- OMXNodeInstance *instance = findInstance(node);
-
- if (instance == NULL) {
- return NAME_NOT_FOUND;
- }
-
- return instance->useBuffer(
- port_index, params, buffer, allottedSize);
-}
-
-status_t OMX::useGraphicBuffer(
- node_id node, OMX_U32 port_index,
- const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer) {
- OMXNodeInstance *instance = findInstance(node);
-
- if (instance == NULL) {
- return NAME_NOT_FOUND;
- }
-
- return instance->useGraphicBuffer(
- port_index, graphicBuffer, buffer);
-}
-
-status_t OMX::updateGraphicBufferInMeta(
- node_id node, OMX_U32 port_index,
- const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer) {
- OMXNodeInstance *instance = findInstance(node);
-
- if (instance == NULL) {
- return NAME_NOT_FOUND;
- }
-
- return instance->updateGraphicBufferInMeta(
- port_index, graphicBuffer, buffer);
-}
-
-status_t OMX::updateNativeHandleInMeta(
- node_id node, OMX_U32 port_index,
- const sp<NativeHandle> &nativeHandle, buffer_id buffer) {
- OMXNodeInstance *instance = findInstance(node);
-
- if (instance == NULL) {
- return NAME_NOT_FOUND;
- }
-
- return instance->updateNativeHandleInMeta(
- port_index, nativeHandle, buffer);
+ return StatusFromOMXError(err);
}
status_t OMX::createInputSurface(
- node_id node, OMX_U32 port_index, android_dataspace dataSpace,
- sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
- OMXNodeInstance *instance = findInstance(node);
-
- if (instance == NULL) {
- return NAME_NOT_FOUND;
- }
-
- return instance->createInputSurface(
- port_index, dataSpace, bufferProducer, type);
-}
-
-status_t OMX::createPersistentInputSurface(
sp<IGraphicBufferProducer> *bufferProducer,
- sp<IGraphicBufferConsumer> *bufferConsumer) {
- return OMXNodeInstance::createPersistentInputSurface(
- bufferProducer, bufferConsumer);
-}
-
-status_t OMX::setInputSurface(
- node_id node, OMX_U32 port_index,
- const sp<IGraphicBufferConsumer> &bufferConsumer, MetadataBufferType *type) {
- OMXNodeInstance *instance = findInstance(node);
-
- if (instance == NULL) {
- return NAME_NOT_FOUND;
+ sp<IGraphicBufferSource> *bufferSource) {
+ if (bufferProducer == NULL || bufferSource == NULL) {
+ ALOGE("b/25884056");
+ return BAD_VALUE;
}
- return instance->setInputSurface(port_index, bufferConsumer, type);
-}
-
-
-status_t OMX::signalEndOfInputStream(node_id node) {
- OMXNodeInstance *instance = findInstance(node);
-
- if (instance == NULL) {
- return NAME_NOT_FOUND;
+ sp<GraphicBufferSource> graphicBufferSource = new GraphicBufferSource();
+ status_t err = graphicBufferSource->initCheck();
+ if (err != OK) {
+ ALOGE("Failed to create persistent input surface: %s (%d)",
+ strerror(-err), err);
+ return err;
}
- return instance->signalEndOfInputStream();
-}
+ *bufferProducer = graphicBufferSource->getIGraphicBufferProducer();
+ *bufferSource = new BWGraphicBufferSource(graphicBufferSource);
-status_t OMX::allocateSecureBuffer(
- node_id node, OMX_U32 port_index, size_t size,
- buffer_id *buffer, void **buffer_data, sp<NativeHandle> *native_handle) {
- OMXNodeInstance *instance = findInstance(node);
-
- if (instance == NULL) {
- return NAME_NOT_FOUND;
- }
-
- return instance->allocateSecureBuffer(
- port_index, size, buffer, buffer_data, native_handle);
-}
-
-status_t OMX::allocateBufferWithBackup(
- node_id node, OMX_U32 port_index, const sp<IMemory> ¶ms,
- buffer_id *buffer, OMX_U32 allottedSize) {
- OMXNodeInstance *instance = findInstance(node);
-
- if (instance == NULL) {
- return NAME_NOT_FOUND;
- }
-
- return instance->allocateBufferWithBackup(
- port_index, params, buffer, allottedSize);
-}
-
-status_t OMX::freeBuffer(node_id node, OMX_U32 port_index, buffer_id buffer) {
- OMXNodeInstance *instance = findInstance(node);
-
- if (instance == NULL) {
- return NAME_NOT_FOUND;
- }
-
- return instance->freeBuffer(
- port_index, buffer);
-}
-
-status_t OMX::fillBuffer(node_id node, buffer_id buffer, int fenceFd) {
- OMXNodeInstance *instance = findInstance(node);
-
- if (instance == NULL) {
- return NAME_NOT_FOUND;
- }
-
- return instance->fillBuffer(buffer, fenceFd);
-}
-
-status_t OMX::emptyBuffer(
- node_id node,
- buffer_id buffer,
- OMX_U32 range_offset, OMX_U32 range_length,
- OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
- OMXNodeInstance *instance = findInstance(node);
-
- if (instance == NULL) {
- return NAME_NOT_FOUND;
- }
-
- return instance->emptyBuffer(
- buffer, range_offset, range_length, flags, timestamp, fenceFd);
-}
-
-status_t OMX::getExtensionIndex(
- node_id node,
- const char *parameter_name,
- OMX_INDEXTYPE *index) {
- OMXNodeInstance *instance = findInstance(node);
-
- if (instance == NULL) {
- return NAME_NOT_FOUND;
- }
-
- return instance->getExtensionIndex(
- parameter_name, index);
-}
-
-status_t OMX::setInternalOption(
- node_id node,
- OMX_U32 port_index,
- InternalOptionType type,
- const void *data,
- size_t size) {
- OMXNodeInstance *instance = findInstance(node);
-
- if (instance == NULL) {
- return NAME_NOT_FOUND;
- }
-
- return instance->setInternalOption(port_index, type, data, size);
-}
-
-OMX_ERRORTYPE OMX::OnEvent(
- node_id node,
- OMX_IN OMX_EVENTTYPE eEvent,
- OMX_IN OMX_U32 nData1,
- OMX_IN OMX_U32 nData2,
- OMX_IN OMX_PTR pEventData) {
- ALOGV("OnEvent(%d, %" PRIu32", %" PRIu32 ")", eEvent, nData1, nData2);
- OMXNodeInstance *instance = findInstance(node);
-
- if (instance == NULL) {
- return OMX_ErrorComponentNotFound;
- }
-
- // Forward to OMXNodeInstance.
- instance->onEvent(eEvent, nData1, nData2);
-
- sp<OMX::CallbackDispatcher> dispatcher = findDispatcher(node);
-
- // output rendered events are not processed as regular events until they hit the observer
- if (eEvent == OMX_EventOutputRendered) {
- if (pEventData == NULL) {
- return OMX_ErrorBadParameter;
- }
-
- // process data from array
- OMX_VIDEO_RENDEREVENTTYPE *renderData = (OMX_VIDEO_RENDEREVENTTYPE *)pEventData;
- for (size_t i = 0; i < nData1; ++i) {
- omx_message msg;
- msg.type = omx_message::FRAME_RENDERED;
- msg.node = node;
- msg.fenceFd = -1;
- msg.u.render_data.timestamp = renderData[i].nMediaTimeUs;
- msg.u.render_data.nanoTime = renderData[i].nSystemTimeNs;
-
- dispatcher->post(msg, false /* realTime */);
- }
- return OMX_ErrorNone;
- }
-
- omx_message msg;
- msg.type = omx_message::EVENT;
- msg.node = node;
- msg.fenceFd = -1;
- msg.u.event_data.event = eEvent;
- msg.u.event_data.data1 = nData1;
- msg.u.event_data.data2 = nData2;
-
- dispatcher->post(msg, true /* realTime */);
-
- return OMX_ErrorNone;
-}
-
-OMX_ERRORTYPE OMX::OnEmptyBufferDone(
- node_id node, buffer_id buffer, OMX_IN OMX_BUFFERHEADERTYPE *pBuffer, int fenceFd) {
- ALOGV("OnEmptyBufferDone buffer=%p", pBuffer);
-
- omx_message msg;
- msg.type = omx_message::EMPTY_BUFFER_DONE;
- msg.node = node;
- msg.fenceFd = fenceFd;
- msg.u.buffer_data.buffer = buffer;
-
- findDispatcher(node)->post(msg);
-
- return OMX_ErrorNone;
-}
-
-OMX_ERRORTYPE OMX::OnFillBufferDone(
- node_id node, buffer_id buffer, OMX_IN OMX_BUFFERHEADERTYPE *pBuffer, int fenceFd) {
- ALOGV("OnFillBufferDone buffer=%p", pBuffer);
-
- omx_message msg;
- msg.type = omx_message::FILL_BUFFER_DONE;
- msg.node = node;
- msg.fenceFd = fenceFd;
- msg.u.extended_buffer_data.buffer = buffer;
- msg.u.extended_buffer_data.range_offset = pBuffer->nOffset;
- msg.u.extended_buffer_data.range_length = pBuffer->nFilledLen;
- msg.u.extended_buffer_data.flags = pBuffer->nFlags;
- msg.u.extended_buffer_data.timestamp = pBuffer->nTimeStamp;
-
- findDispatcher(node)->post(msg);
-
- return OMX_ErrorNone;
-}
-
-OMX::node_id OMX::makeNodeID_l(OMXNodeInstance *instance) {
- // mLock is already held.
-
- node_id prefix = node_id(getpid() << 16);
- node_id node = 0;
- do {
- if (++mNodeCounter >= kMaxNodeInstances) {
- mNodeCounter = 0; // OK to use because we're combining with the pid
- }
- node = node_id(prefix | mNodeCounter);
- } while (mNodeIDToInstance.indexOfKey(node) >= 0);
- mNodeIDToInstance.add(node, instance);
-
- return node;
-}
-
-OMXNodeInstance *OMX::findInstance(node_id node) {
- Mutex::Autolock autoLock(mLock);
-
- ssize_t index = mNodeIDToInstance.indexOfKey(node);
-
- return index < 0 ? NULL : mNodeIDToInstance.valueAt(index);
-}
-
-sp<OMX::CallbackDispatcher> OMX::findDispatcher(node_id node) {
- Mutex::Autolock autoLock(mLock);
-
- ssize_t index = mDispatchers.indexOfKey(node);
-
- return index < 0 ? NULL : mDispatchers.valueAt(index);
-}
-
-void OMX::invalidateNodeID(node_id node) {
- Mutex::Autolock autoLock(mLock);
- invalidateNodeID_l(node);
-}
-
-void OMX::invalidateNodeID_l(node_id node) {
- // mLock is held.
- mNodeIDToInstance.removeItem(node);
+ return OK;
}
} // namespace android
diff --git a/media/libstagefright/omx/OMXMaster.cpp b/media/libstagefright/omx/OMXMaster.cpp
index 6132a2c..ac9b0c3 100644
--- a/media/libstagefright/omx/OMXMaster.cpp
+++ b/media/libstagefright/omx/OMXMaster.cpp
@@ -32,26 +32,23 @@
OMXMaster::OMXMaster()
: mVendorLibHandle(NULL) {
- mProcessName[0] = 0;
- if (mProcessName[0] == 0) {
- pid_t pid = getpid();
- char filename[20];
- snprintf(filename, sizeof(filename), "/proc/%d/comm", pid);
- int fd = open(filename, O_RDONLY);
- if (fd < 0) {
- ALOGW("couldn't determine process name");
- sprintf(mProcessName, "<unknown>");
- } else {
- ssize_t len = read(fd, mProcessName, sizeof(mProcessName));
- if (len < 2) {
- ALOGW("couldn't determine process name");
- sprintf(mProcessName, "<unknown>");
- } else {
- // the name is newline terminated, so erase the newline
- mProcessName[len - 1] = 0;
- }
- close(fd);
- }
+ pid_t pid = getpid();
+ char filename[20];
+ snprintf(filename, sizeof(filename), "/proc/%d/comm", pid);
+ int fd = open(filename, O_RDONLY);
+ if (fd < 0) {
+ ALOGW("couldn't determine process name");
+ strlcpy(mProcessName, "<unknown>", sizeof(mProcessName));
+ } else {
+ ssize_t len = read(fd, mProcessName, sizeof(mProcessName));
+ if (len < 2) {
+ ALOGW("couldn't determine process name");
+ strlcpy(mProcessName, "<unknown>", sizeof(mProcessName));
+ } else {
+ // the name is newline terminated, so erase the newline
+ mProcessName[len - 1] = 0;
+ }
+ close(fd);
}
addVendorPlugin();
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 355a2dd..db99ef2 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -23,10 +23,11 @@
#include "../include/OMXNodeInstance.h"
#include "OMXMaster.h"
#include "OMXUtils.h"
-#include "GraphicBufferSource.h"
+#include <android/IOMXBufferSource.h>
#include <OMX_Component.h>
#include <OMX_IndexExt.h>
+#include <OMX_VideoExt.h>
#include <OMX_AsString.h>
#include <binder/IMemory.h>
@@ -35,26 +36,31 @@
#include <HardwareAPI.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ColorUtils.h>
#include <media/stagefright/MediaErrors.h>
#include <utils/misc.h>
#include <utils/NativeHandle.h>
+#include <media/OMXBuffer.h>
+#include <media/vndk/xmlparser/1.0/MediaCodecsXmlParser.h>
+
+#include <hidlmemory/mapping.h>
static const OMX_U32 kPortIndexInput = 0;
static const OMX_U32 kPortIndexOutput = 1;
-#define CLOGW(fmt, ...) ALOGW("[%x:%s] " fmt, mNodeID, mName, ##__VA_ARGS__)
+#define CLOGW(fmt, ...) ALOGW("[%p:%s] " fmt, mHandle, mName, ##__VA_ARGS__)
#define CLOG_ERROR_IF(cond, fn, err, fmt, ...) \
- ALOGE_IF(cond, #fn "(%x:%s, " fmt ") ERROR: %s(%#x)", \
- mNodeID, mName, ##__VA_ARGS__, asString(err), err)
+ ALOGE_IF(cond, #fn "(%p:%s, " fmt ") ERROR: %s(%#x)", \
+ mHandle, mName, ##__VA_ARGS__, asString(err), err)
#define CLOG_ERROR(fn, err, fmt, ...) CLOG_ERROR_IF(true, fn, err, fmt, ##__VA_ARGS__)
#define CLOG_IF_ERROR(fn, err, fmt, ...) \
CLOG_ERROR_IF((err) != OMX_ErrorNone, fn, err, fmt, ##__VA_ARGS__)
#define CLOGI_(level, fn, fmt, ...) \
- ALOGI_IF(DEBUG >= (level), #fn "(%x:%s, " fmt ")", mNodeID, mName, ##__VA_ARGS__)
+ ALOGI_IF(DEBUG >= (level), #fn "(%p:%s, " fmt ")", mHandle, mName, ##__VA_ARGS__)
#define CLOGD_(level, fn, fmt, ...) \
- ALOGD_IF(DEBUG >= (level), #fn "(%x:%s, " fmt ")", mNodeID, mName, ##__VA_ARGS__)
+ ALOGD_IF(DEBUG >= (level), #fn "(%p:%s, " fmt ")", mHandle, mName, ##__VA_ARGS__)
#define CLOG_LIFE(fn, fmt, ...) CLOGI_(ADebug::kDebugLifeCycle, fn, fmt, ##__VA_ARGS__)
#define CLOG_STATE(fn, fmt, ...) CLOGI_(ADebug::kDebugState, fn, fmt, ##__VA_ARGS__)
@@ -62,7 +68,7 @@
#define CLOG_INTERNAL(fn, fmt, ...) CLOGD_(ADebug::kDebugInternalState, fn, fmt, ##__VA_ARGS__)
#define CLOG_DEBUG_IF(cond, fn, fmt, ...) \
- ALOGD_IF(cond, #fn "(%x, " fmt ")", mNodeID, ##__VA_ARGS__)
+ ALOGD_IF(cond, #fn "(%p, " fmt ")", mHandle, ##__VA_ARGS__)
#define CLOG_BUFFER(fn, fmt, ...) \
CLOG_DEBUG_IF(DEBUG >= ADebug::kDebugAll, fn, fmt, ##__VA_ARGS__)
@@ -94,18 +100,18 @@
struct BufferMeta {
explicit BufferMeta(
- const sp<IMemory> &mem, OMX_U32 portIndex, bool copyToOmx,
- bool copyFromOmx, OMX_U8 *backup)
+ const sp<IMemory> &mem, const sp<IHidlMemory> &hidlMemory,
+ OMX_U32 portIndex, bool copy, OMX_U8 *backup)
: mMem(mem),
- mCopyFromOmx(copyFromOmx),
- mCopyToOmx(copyToOmx),
+ mHidlMemory(hidlMemory),
+ mCopyFromOmx(portIndex == kPortIndexOutput && copy),
+ mCopyToOmx(portIndex == kPortIndexInput && copy),
mPortIndex(portIndex),
mBackup(backup) {
}
- explicit BufferMeta(size_t size, OMX_U32 portIndex)
- : mSize(size),
- mCopyFromOmx(false),
+ explicit BufferMeta(OMX_U32 portIndex)
+ : mCopyFromOmx(false),
mCopyToOmx(false),
mPortIndex(portIndex),
mBackup(NULL) {
@@ -119,15 +125,21 @@
mBackup(NULL) {
}
+ OMX_U8 *getPointer() {
+ return mMem.get() ? static_cast<OMX_U8*>(mMem->pointer()) :
+ mHidlMemory.get() ? static_cast<OMX_U8*>(
+ static_cast<void*>(mHidlMemory->getPointer())) : nullptr;
+ }
+
void CopyFromOMX(const OMX_BUFFERHEADERTYPE *header) {
if (!mCopyFromOmx) {
return;
}
// check component returns proper range
- sp<ABuffer> codec = getBuffer(header, false /* backup */, true /* limit */);
+ sp<ABuffer> codec = getBuffer(header, true /* limit */);
- memcpy((OMX_U8 *)mMem->pointer() + header->nOffset, codec->data(), codec->size());
+ memcpy(getPointer() + header->nOffset, codec->data(), codec->size());
}
void CopyToOMX(const OMX_BUFFERHEADERTYPE *header) {
@@ -136,18 +148,13 @@
}
memcpy(header->pBuffer + header->nOffset,
- (const OMX_U8 *)mMem->pointer() + header->nOffset,
+ getPointer() + header->nOffset,
header->nFilledLen);
}
- // return either the codec or the backup buffer
- sp<ABuffer> getBuffer(const OMX_BUFFERHEADERTYPE *header, bool backup, bool limit) {
- sp<ABuffer> buf;
- if (backup && mMem != NULL) {
- buf = new ABuffer(mMem->pointer(), mMem->size());
- } else {
- buf = new ABuffer(header->pBuffer, header->nAllocLen);
- }
+ // return the codec buffer
+ sp<ABuffer> getBuffer(const OMX_BUFFERHEADERTYPE *header, bool limit) {
+ sp<ABuffer> buf = new ABuffer(header->pBuffer, header->nAllocLen);
if (limit) {
if (header->nOffset + header->nFilledLen > header->nOffset
&& header->nOffset + header->nFilledLen <= header->nAllocLen) {
@@ -179,7 +186,7 @@
sp<GraphicBuffer> mGraphicBuffer;
sp<NativeHandle> mNativeHandle;
sp<IMemory> mMem;
- size_t mSize;
+ sp<IHidlMemory> mHidlMemory;
bool mCopyFromOmx;
bool mCopyToOmx;
OMX_U32 mPortIndex;
@@ -203,16 +210,144 @@
}
}
-OMXNodeInstance::OMXNodeInstance(
- OMX *owner, const sp<IOMXObserver> &observer, const char *name)
+////////////////////////////////////////////////////////////////////////////////
+
+// This provides the underlying Thread used by CallbackDispatcher.
+// Note that deriving CallbackDispatcher from Thread does not work.
+
+struct OMXNodeInstance::CallbackDispatcherThread : public Thread {
+ explicit CallbackDispatcherThread(CallbackDispatcher *dispatcher)
+ : mDispatcher(dispatcher) {
+ }
+
+private:
+ CallbackDispatcher *mDispatcher;
+
+ bool threadLoop();
+
+ CallbackDispatcherThread(const CallbackDispatcherThread &);
+ CallbackDispatcherThread &operator=(const CallbackDispatcherThread &);
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+struct OMXNodeInstance::CallbackDispatcher : public RefBase {
+ explicit CallbackDispatcher(const sp<OMXNodeInstance> &owner);
+
+ // Posts |msg| to the listener's queue. If |realTime| is true, the listener thread is notified
+ // that a new message is available on the queue. Otherwise, the message stays on the queue, but
+ // the listener is not notified of it. It will process this message when a subsequent message
+ // is posted with |realTime| set to true.
+ void post(const omx_message &msg, bool realTime = true);
+
+ bool loop();
+
+protected:
+ virtual ~CallbackDispatcher();
+
+private:
+ Mutex mLock;
+
+ sp<OMXNodeInstance> const mOwner;
+ bool mDone;
+ Condition mQueueChanged;
+ std::list<omx_message> mQueue;
+
+ sp<CallbackDispatcherThread> mThread;
+
+ void dispatch(std::list<omx_message> &messages);
+
+ CallbackDispatcher(const CallbackDispatcher &);
+ CallbackDispatcher &operator=(const CallbackDispatcher &);
+};
+
+OMXNodeInstance::CallbackDispatcher::CallbackDispatcher(const sp<OMXNodeInstance> &owner)
: mOwner(owner),
- mNodeID(0),
+ mDone(false) {
+ mThread = new CallbackDispatcherThread(this);
+ mThread->run("OMXCallbackDisp", ANDROID_PRIORITY_FOREGROUND);
+}
+
+OMXNodeInstance::CallbackDispatcher::~CallbackDispatcher() {
+ {
+ Mutex::Autolock autoLock(mLock);
+
+ mDone = true;
+ mQueueChanged.signal();
+ }
+
+ // A join on self can happen if the last ref to CallbackDispatcher
+ // is released within the CallbackDispatcherThread loop
+ status_t status = mThread->join();
+ if (status != WOULD_BLOCK) {
+ // Other than join to self, the only other error return codes are
+ // whatever readyToRun() returns, and we don't override that
+ CHECK_EQ(status, (status_t)NO_ERROR);
+ }
+}
+
+void OMXNodeInstance::CallbackDispatcher::post(const omx_message &msg, bool realTime) {
+ Mutex::Autolock autoLock(mLock);
+
+ mQueue.push_back(msg);
+ if (realTime) {
+ mQueueChanged.signal();
+ }
+}
+
+void OMXNodeInstance::CallbackDispatcher::dispatch(std::list<omx_message> &messages) {
+ if (mOwner == NULL) {
+ ALOGV("Would have dispatched a message to a node that's already gone.");
+ return;
+ }
+ mOwner->onMessages(messages);
+}
+
+bool OMXNodeInstance::CallbackDispatcher::loop() {
+ for (;;) {
+ std::list<omx_message> messages;
+
+ {
+ Mutex::Autolock autoLock(mLock);
+ while (!mDone && mQueue.empty()) {
+ mQueueChanged.wait(mLock);
+ }
+
+ if (mDone) {
+ break;
+ }
+
+ messages.swap(mQueue);
+ }
+
+ dispatch(messages);
+ }
+
+ return false;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+bool OMXNodeInstance::CallbackDispatcherThread::threadLoop() {
+ return mDispatcher->loop();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+OMXNodeInstance::OMXNodeInstance(
+ OmxNodeOwner *owner, const sp<IOMXObserver> &observer, const char *name)
+ : mOwner(owner),
mHandle(NULL),
mObserver(observer),
mDying(false),
mSailed(false),
mQueriedProhibitedExtensions(false),
- mBufferIDCount(0)
+ mQuirks(0),
+ mBufferIDCount(0),
+ mRestorePtsFailed(false),
+ mMaxTimestampGapUs(-1ll),
+ mPrevOriginalTimeUs(-1ll),
+ mPrevModifiedTimeUs(-1ll)
{
mName = ADebug::GetDebugName(name);
DEBUG = ADebug::GetDebugLevelFromProperty(name, "debug.stagefright.omx-debug");
@@ -224,9 +359,12 @@
mDebugLevelBumpPendingBuffers[1] = 0;
mMetadataType[0] = kMetadataBufferTypeInvalid;
mMetadataType[1] = kMetadataBufferTypeInvalid;
+ mPortMode[0] = IOMX::kPortModePresetByteBuffer;
+ mPortMode[1] = IOMX::kPortModePresetByteBuffer;
mSecureBufferType[0] = kSecureBufferTypeUnknown;
mSecureBufferType[1] = kSecureBufferTypeUnknown;
mIsSecure = AString(name).endsWith(".secure");
+ mLegacyAdaptiveExperiment = ADebug::isExperimentEnabled("legacy-adaptive");
}
OMXNodeInstance::~OMXNodeInstance() {
@@ -234,46 +372,38 @@
CHECK(mHandle == NULL);
}
-void OMXNodeInstance::setHandle(OMX::node_id node_id, OMX_HANDLETYPE handle) {
- mNodeID = node_id;
+void OMXNodeInstance::setHandle(OMX_HANDLETYPE handle) {
CLOG_LIFE(allocateNode, "handle=%p", handle);
CHECK(mHandle == NULL);
mHandle = handle;
+ if (handle != NULL) {
+ mDispatcher = new CallbackDispatcher(this);
+ }
}
-sp<GraphicBufferSource> OMXNodeInstance::getGraphicBufferSource() {
- Mutex::Autolock autoLock(mGraphicBufferSourceLock);
- return mGraphicBufferSource;
+sp<IOMXBufferSource> OMXNodeInstance::getBufferSource() {
+ Mutex::Autolock autoLock(mOMXBufferSourceLock);
+ return mOMXBufferSource;
}
-void OMXNodeInstance::setGraphicBufferSource(
- const sp<GraphicBufferSource>& bufferSource) {
- Mutex::Autolock autoLock(mGraphicBufferSourceLock);
- CLOG_INTERNAL(setGraphicBufferSource, "%p", bufferSource.get());
- mGraphicBufferSource = bufferSource;
+void OMXNodeInstance::setBufferSource(const sp<IOMXBufferSource>& bufferSource) {
+ Mutex::Autolock autoLock(mOMXBufferSourceLock);
+ CLOG_INTERNAL(setBufferSource, "%p", bufferSource.get());
+ mOMXBufferSource = bufferSource;
}
-OMX *OMXNodeInstance::owner() {
- return mOwner;
+OMX_HANDLETYPE OMXNodeInstance::handle() {
+ return mHandle;
}
sp<IOMXObserver> OMXNodeInstance::observer() {
return mObserver;
}
-OMX::node_id OMXNodeInstance::nodeID() {
- return mNodeID;
-}
-
-status_t OMXNodeInstance::freeNode(OMXMaster *master) {
+status_t OMXNodeInstance::freeNode() {
CLOG_LIFE(freeNode, "handle=%p", mHandle);
static int32_t kMaxNumIterations = 10;
- // exit if we have already freed the node
- if (mHandle == NULL) {
- return OK;
- }
-
// Transition the node from its current state all the way down
// to "Loaded".
// This ensures that all active buffers are properly freed even
@@ -283,7 +413,13 @@
// The code below may trigger some more events to be dispatched
// by the OMX component - we want to ignore them as our client
// does not expect them.
- mDying = true;
+ bool expected = false;
+ if (!mDying.compare_exchange_strong(expected, true)) {
+ // exit if we have already freed the node or doing so right now.
+ // NOTE: this ensures that the block below executes at most once.
+ ALOGV("Already dying");
+ return OK;
+ }
OMX_STATETYPE state;
CHECK_EQ(OMX_GetState(mHandle, &state), OMX_ErrorNone);
@@ -350,43 +486,35 @@
LOG_ALWAYS_FATAL("unknown state %s(%#x).", asString(state), state);
break;
}
+ status_t err = mOwner->freeNode(this);
- ALOGV("[%x:%s] calling destroyComponentInstance", mNodeID, mName);
- OMX_ERRORTYPE err = master->destroyComponentInstance(
- static_cast<OMX_COMPONENTTYPE *>(mHandle));
+ mDispatcher.clear();
+ mOMXBufferSource.clear();
mHandle = NULL;
CLOG_IF_ERROR(freeNode, err, "");
free(mName);
mName = NULL;
- mOwner->invalidateNodeID(mNodeID);
- mNodeID = 0;
-
ALOGV("OMXNodeInstance going away.");
- delete this;
- return StatusFromOMXError(err);
+ return err;
}
status_t OMXNodeInstance::sendCommand(
OMX_COMMANDTYPE cmd, OMX_S32 param) {
- if (cmd == OMX_CommandStateSet) {
- // There are no configurations past first StateSet command.
- mSailed = true;
- }
- const sp<GraphicBufferSource> bufferSource(getGraphicBufferSource());
+ const sp<IOMXBufferSource> bufferSource(getBufferSource());
if (bufferSource != NULL && cmd == OMX_CommandStateSet) {
if (param == OMX_StateIdle) {
// Initiating transition from Executing -> Idle
// ACodec is waiting for all buffers to be returned, do NOT
// submit any more buffers to the codec.
- bufferSource->omxIdle();
+ bufferSource->onOmxIdle();
} else if (param == OMX_StateLoaded) {
// Initiating transition from Idle/Executing -> Loaded
// Buffers are about to be freed.
- bufferSource->omxLoaded();
- setGraphicBufferSource(NULL);
+ bufferSource->onOmxLoaded();
+ setBufferSource(NULL);
}
// fall through
@@ -394,6 +522,11 @@
Mutex::Autolock autoLock(mLock);
+ if (cmd == OMX_CommandStateSet) {
+ // There are no configurations past first StateSet command.
+ mSailed = true;
+ }
+
// bump internal-state debug level for 2 input and output frames past a command
{
Mutex::Autolock _l(mDebugLock);
@@ -422,18 +555,17 @@
"OMX.google.android.index.getAndroidNativeBufferUsage",
};
- if ((index > OMX_IndexComponentStartUnused && index <= OMX_IndexParamStandardComponentRole)
- || (index > OMX_IndexPortStartUnused && index <= OMX_IndexParamCompBufferSupplier)
- || (index > OMX_IndexAudioStartUnused && index <= OMX_IndexConfigAudioChannelVolume)
- || (index > OMX_IndexVideoStartUnused && index <= OMX_IndexConfigVideoNalSize)
- || (index > OMX_IndexCommonStartUnused
- && index <= OMX_IndexConfigCommonTransitionEffect)
+ if ((index > OMX_IndexComponentStartUnused && index < OMX_IndexComponentEndUnused)
+ || (index > OMX_IndexPortStartUnused && index < OMX_IndexPortEndUnused)
+ || (index > OMX_IndexAudioStartUnused && index < OMX_IndexAudioEndUnused)
+ || (index > OMX_IndexVideoStartUnused && index < OMX_IndexVideoEndUnused)
+ || (index > OMX_IndexCommonStartUnused && index < OMX_IndexCommonEndUnused)
|| (index > (OMX_INDEXTYPE)OMX_IndexExtAudioStartUnused
- && index <= (OMX_INDEXTYPE)OMX_IndexParamAudioProfileQuerySupported)
+ && index < (OMX_INDEXTYPE)OMX_IndexExtAudioEndUnused)
|| (index > (OMX_INDEXTYPE)OMX_IndexExtVideoStartUnused
- && index <= (OMX_INDEXTYPE)OMX_IndexConfigAndroidVideoTemporalLayering)
+ && index < (OMX_INDEXTYPE)OMX_IndexExtVideoEndUnused)
|| (index > (OMX_INDEXTYPE)OMX_IndexExtOtherStartUnused
- && index <= (OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits)) {
+ && index < (OMX_INDEXTYPE)OMX_IndexExtOtherEndUnused)) {
return false;
}
@@ -474,6 +606,10 @@
OMX_INDEXEXTTYPE extIndex = (OMX_INDEXEXTTYPE)index;
CLOG_CONFIG(setParameter, "%s(%#x), %zu@%p)", asString(extIndex), index, size, params);
+ if (extIndex == OMX_IndexParamMaxFrameDurationForBitrateControl) {
+ return setMaxPtsGapUs(params, size);
+ }
+
if (isProhibitedIndex_l(index)) {
android_errorWriteLog(0x534e4554, "29422020");
return BAD_INDEX;
@@ -520,15 +656,114 @@
return StatusFromOMXError(err);
}
-status_t OMXNodeInstance::getState(OMX_STATETYPE* state) {
+status_t OMXNodeInstance::setPortMode(OMX_U32 portIndex, IOMX::PortMode mode) {
Mutex::Autolock autoLock(mLock);
- OMX_ERRORTYPE err = OMX_GetState(mHandle, state);
- CLOG_IF_ERROR(getState, err, "");
- return StatusFromOMXError(err);
+ if (portIndex >= NELEM(mPortMode)) {
+ ALOGE("b/31385713, portIndex(%u)", portIndex);
+ android_errorWriteLog(0x534e4554, "31385713");
+ return BAD_VALUE;
+ }
+
+ CLOG_CONFIG(setPortMode, "%s(%d), port %d", asString(mode), mode, portIndex);
+
+ switch (mode) {
+ case IOMX::kPortModeDynamicANWBuffer:
+ {
+ if (portIndex == kPortIndexOutput) {
+ if (mLegacyAdaptiveExperiment) {
+ CLOG_INTERNAL(setPortMode, "Legacy adaptive experiment: "
+ "not setting port mode to %s(%d) on output",
+ asString(mode), mode);
+ return StatusFromOMXError(OMX_ErrorUnsupportedIndex);
+ }
+
+ status_t err = enableNativeBuffers_l(
+ portIndex, OMX_TRUE /*graphic*/, OMX_TRUE);
+ if (err != OK) {
+ return err;
+ }
+ }
+ (void)enableNativeBuffers_l(portIndex, OMX_FALSE /*graphic*/, OMX_FALSE);
+ return storeMetaDataInBuffers_l(portIndex, OMX_TRUE, NULL);
+ }
+
+ case IOMX::kPortModeDynamicNativeHandle:
+ {
+ if (portIndex != kPortIndexInput) {
+ CLOG_ERROR(setPortMode, BAD_VALUE,
+ "%s(%d) mode is only supported on input port", asString(mode), mode);
+ return BAD_VALUE;
+ }
+ (void)enableNativeBuffers_l(portIndex, OMX_TRUE /*graphic*/, OMX_FALSE);
+ (void)enableNativeBuffers_l(portIndex, OMX_FALSE /*graphic*/, OMX_FALSE);
+
+ MetadataBufferType metaType = kMetadataBufferTypeNativeHandleSource;
+ return storeMetaDataInBuffers_l(portIndex, OMX_TRUE, &metaType);
+ }
+
+ case IOMX::kPortModePresetSecureBuffer:
+ {
+ // Allow on both input and output.
+ (void)storeMetaDataInBuffers_l(portIndex, OMX_FALSE, NULL);
+ (void)enableNativeBuffers_l(portIndex, OMX_TRUE /*graphic*/, OMX_FALSE);
+ return enableNativeBuffers_l(portIndex, OMX_FALSE /*graphic*/, OMX_TRUE);
+ }
+
+ case IOMX::kPortModePresetANWBuffer:
+ {
+ if (portIndex != kPortIndexOutput) {
+ CLOG_ERROR(setPortMode, BAD_VALUE,
+ "%s(%d) mode is only supported on output port", asString(mode), mode);
+ return BAD_VALUE;
+ }
+
+ // Check if we're simulating legacy mode with metadata mode,
+ // if so, enable metadata mode.
+ if (mLegacyAdaptiveExperiment) {
+ if (storeMetaDataInBuffers_l(portIndex, OMX_TRUE, NULL) == OK) {
+ CLOG_INTERNAL(setPortMode, "Legacy adaptive experiment: "
+ "metdata mode enabled successfully");
+ return OK;
+ }
+
+ CLOG_INTERNAL(setPortMode, "Legacy adaptive experiment: "
+ "unable to enable metadata mode on output");
+
+ mLegacyAdaptiveExperiment = false;
+ }
+
+ // Disable secure buffer and enable graphic buffer
+ (void)enableNativeBuffers_l(portIndex, OMX_FALSE /*graphic*/, OMX_FALSE);
+ status_t err = enableNativeBuffers_l(portIndex, OMX_TRUE /*graphic*/, OMX_TRUE);
+ if (err != OK) {
+ return err;
+ }
+
+ // Not running experiment, or metadata is not supported.
+ // Disable metadata mode and use legacy mode.
+ (void)storeMetaDataInBuffers_l(portIndex, OMX_FALSE, NULL);
+ return OK;
+ }
+
+ case IOMX::kPortModePresetByteBuffer:
+ {
+ // Disable secure buffer, native buffer and metadata.
+ (void)enableNativeBuffers_l(portIndex, OMX_TRUE /*graphic*/, OMX_FALSE);
+ (void)enableNativeBuffers_l(portIndex, OMX_FALSE /*graphic*/, OMX_FALSE);
+ (void)storeMetaDataInBuffers_l(portIndex, OMX_FALSE, NULL);
+ return OK;
+ }
+
+ default:
+ break;
+ }
+
+ CLOG_ERROR(setPortMode, BAD_VALUE, "invalid port mode %d", mode);
+ return BAD_VALUE;
}
-status_t OMXNodeInstance::enableNativeBuffers(
+status_t OMXNodeInstance::enableNativeBuffers_l(
OMX_U32 portIndex, OMX_BOOL graphic, OMX_BOOL enable) {
if (portIndex >= NELEM(mSecureBufferType)) {
ALOGE("b/31385713, portIndex(%u)", portIndex);
@@ -536,7 +771,6 @@
return BAD_VALUE;
}
- Mutex::Autolock autoLock(mLock);
CLOG_CONFIG(enableNativeBuffers, "%s:%u%s, %d", portString(portIndex), portIndex,
graphic ? ", graphic" : "", enable);
OMX_STRING name = const_cast<OMX_STRING>(
@@ -568,9 +802,7 @@
if (!graphic) {
// Extension not supported, check for manual override with system property
// This is a temporary workaround until partners support the OMX extension
- char value[PROPERTY_VALUE_MAX];
- if (property_get("media.mediadrmservice.enable", value, NULL)
- && (!strcmp("1", value) || !strcasecmp("true", value))) {
+ if (property_get_bool("media.mediadrmservice.enable", false)) {
CLOG_CONFIG(enableNativeBuffers, "system property override: using native-handles");
mSecureBufferType[portIndex] = kSecureBufferTypeNativeHandle;
} else if (mSecureBufferType[portIndex] == kSecureBufferTypeUnknown) {
@@ -613,13 +845,6 @@
return OK;
}
-status_t OMXNodeInstance::storeMetaDataInBuffers(
- OMX_U32 portIndex, OMX_BOOL enable, MetadataBufferType *type) {
- Mutex::Autolock autolock(mLock);
- CLOG_CONFIG(storeMetaDataInBuffers, "%s:%u en:%d", portString(portIndex), portIndex, enable);
- return storeMetaDataInBuffers_l(portIndex, enable, type);
-}
-
status_t OMXNodeInstance::storeMetaDataInBuffers_l(
OMX_U32 portIndex, OMX_BOOL enable, MetadataBufferType *type) {
if (mSailed) {
@@ -669,6 +894,9 @@
? kMetadataBufferTypeGrallocSource : requestedType;
err = OMX_SetParameter(mHandle, index, ¶ms);
}
+ if (err == OMX_ErrorBadParameter) {
+ err = OMX_ErrorUnsupportedIndex;
+ }
}
// don't log loud error if component does not support metadata mode on the output
@@ -710,6 +938,12 @@
CLOG_CONFIG(prepareForAdaptivePlayback, "%s:%u en=%d max=%ux%u",
portString(portIndex), portIndex, enable, maxFrameWidth, maxFrameHeight);
+ if (mLegacyAdaptiveExperiment) {
+ CLOG_INTERNAL(prepareForAdaptivePlayback,
+ "Legacy adaptive experiment: reporting success");
+ return OK;
+ }
+
OMX_INDEXTYPE index;
OMX_STRING name = const_cast<OMX_STRING>(
"OMX.google.android.index.prepareForAdaptivePlayback");
@@ -780,48 +1014,142 @@
}
status_t OMXNodeInstance::useBuffer(
- OMX_U32 portIndex, const sp<IMemory> ¶ms,
- OMX::buffer_id *buffer, OMX_U32 allottedSize) {
- if (params == NULL || buffer == NULL) {
+ OMX_U32 portIndex, const OMXBuffer &omxBuffer, IOMX::buffer_id *buffer) {
+ if (buffer == NULL) {
ALOGE("b/25884056");
return BAD_VALUE;
}
- Mutex::Autolock autoLock(mLock);
- if (allottedSize > params->size() || portIndex >= NELEM(mNumPortBuffers)) {
+ if (portIndex >= NELEM(mNumPortBuffers)) {
return BAD_VALUE;
}
- // metadata buffers are not connected cross process
- // use a backup buffer instead of the actual buffer
- BufferMeta *buffer_meta;
- bool useBackup = mMetadataType[portIndex] != kMetadataBufferTypeInvalid;
- OMX_U8 *data = static_cast<OMX_U8 *>(params->pointer());
- // allocate backup buffer
- if (useBackup) {
- data = new (std::nothrow) OMX_U8[allottedSize];
- if (data == NULL) {
- return NO_MEMORY;
- }
- memset(data, 0, allottedSize);
-
- buffer_meta = new BufferMeta(
- params, portIndex, false /* copyToOmx */, false /* copyFromOmx */, data);
- } else {
- buffer_meta = new BufferMeta(
- params, portIndex, false /* copyToOmx */, false /* copyFromOmx */, NULL);
+ Mutex::Autolock autoLock(mLock);
+ if (!mSailed) {
+ ALOGE("b/35467458");
+ android_errorWriteLog(0x534e4554, "35467458");
+ return BAD_VALUE;
}
- OMX_BUFFERHEADERTYPE *header;
+ switch (omxBuffer.mBufferType) {
+ case OMXBuffer::kBufferTypePreset:
+ return useBuffer_l(portIndex, NULL, NULL, buffer);
- OMX_ERRORTYPE err = OMX_UseBuffer(
- mHandle, &header, portIndex, buffer_meta,
- allottedSize, data);
+ case OMXBuffer::kBufferTypeSharedMem:
+ return useBuffer_l(portIndex, omxBuffer.mMem, NULL, buffer);
+
+ case OMXBuffer::kBufferTypeANWBuffer:
+ return useGraphicBuffer_l(portIndex, omxBuffer.mGraphicBuffer, buffer);
+
+ case OMXBuffer::kBufferTypeHidlMemory: {
+ sp<IHidlMemory> hidlMemory = mapMemory(omxBuffer.mHidlMemory);
+ return useBuffer_l(portIndex, NULL, hidlMemory, buffer);
+ }
+ default:
+ break;
+ }
+
+ return BAD_VALUE;
+}
+
+status_t OMXNodeInstance::useBuffer_l(
+ OMX_U32 portIndex, const sp<IMemory> ¶ms,
+ const sp<IHidlMemory> &hParams, IOMX::buffer_id *buffer) {
+ BufferMeta *buffer_meta;
+ OMX_BUFFERHEADERTYPE *header;
+ OMX_ERRORTYPE err = OMX_ErrorNone;
+ bool isMetadata = mMetadataType[portIndex] != kMetadataBufferTypeInvalid;
+
+ size_t paramsSize;
+ void* paramsPointer;
+ if (params != NULL && hParams != NULL) {
+ return BAD_VALUE;
+ }
+ if (params != NULL) {
+ paramsPointer = params->pointer();
+ paramsSize = params->size();
+ } else if (hParams != NULL) {
+ paramsPointer = hParams->getPointer();
+ paramsSize = hParams->getSize();
+ } else {
+ paramsPointer = nullptr;
+ }
+
+ OMX_U32 allottedSize;
+ if (isMetadata) {
+ if (mMetadataType[portIndex] == kMetadataBufferTypeGrallocSource) {
+ allottedSize = sizeof(VideoGrallocMetadata);
+ } else if (mMetadataType[portIndex] == kMetadataBufferTypeANWBuffer) {
+ allottedSize = sizeof(VideoNativeMetadata);
+ } else if (mMetadataType[portIndex] == kMetadataBufferTypeNativeHandleSource) {
+ allottedSize = sizeof(VideoNativeHandleMetadata);
+ } else {
+ return BAD_VALUE;
+ }
+ } else {
+ // NULL params is allowed only in metadata mode.
+ if (paramsPointer == nullptr) {
+ ALOGE("b/25884056");
+ return BAD_VALUE;
+ }
+ allottedSize = paramsSize;
+ }
+
+ bool isOutputGraphicMetadata = (portIndex == kPortIndexOutput) &&
+ (mMetadataType[portIndex] == kMetadataBufferTypeGrallocSource ||
+ mMetadataType[portIndex] == kMetadataBufferTypeANWBuffer);
+
+ uint32_t requiresAllocateBufferBit =
+ (portIndex == kPortIndexInput)
+ ? kRequiresAllocateBufferOnInputPorts
+ : kRequiresAllocateBufferOnOutputPorts;
+
+ // we use useBuffer for output metadata regardless of quirks
+ if (!isOutputGraphicMetadata && (mQuirks & requiresAllocateBufferBit)) {
+ // metadata buffers are not connected cross process; only copy if not meta.
+ buffer_meta = new BufferMeta(
+ params, hParams, portIndex, !isMetadata /* copy */, NULL /* data */);
+
+ err = OMX_AllocateBuffer(
+ mHandle, &header, portIndex, buffer_meta, allottedSize);
+
+ if (err != OMX_ErrorNone) {
+ CLOG_ERROR(allocateBuffer, err,
+ SIMPLE_BUFFER(portIndex, (size_t)allottedSize,
+ paramsPointer));
+ }
+ } else {
+ OMX_U8 *data = NULL;
+
+ // metadata buffers are not connected cross process
+ // use a backup buffer instead of the actual buffer
+ if (isMetadata) {
+ data = new (std::nothrow) OMX_U8[allottedSize];
+ if (data == NULL) {
+ return NO_MEMORY;
+ }
+ memset(data, 0, allottedSize);
+
+ buffer_meta = new BufferMeta(
+ params, hParams, portIndex, false /* copy */, data);
+ } else {
+ data = static_cast<OMX_U8 *>(paramsPointer);
+
+ buffer_meta = new BufferMeta(
+ params, hParams, portIndex, false /* copy */, NULL);
+ }
+
+ err = OMX_UseBuffer(
+ mHandle, &header, portIndex, buffer_meta,
+ allottedSize, data);
+
+ if (err != OMX_ErrorNone) {
+ CLOG_ERROR(useBuffer, err, SIMPLE_BUFFER(
+ portIndex, (size_t)allottedSize, data));
+ }
+ }
if (err != OMX_ErrorNone) {
- CLOG_ERROR(useBuffer, err, SIMPLE_BUFFER(
- portIndex, (size_t)allottedSize, data));
-
delete buffer_meta;
buffer_meta = NULL;
@@ -836,19 +1164,19 @@
addActiveBuffer(portIndex, *buffer);
- sp<GraphicBufferSource> bufferSource(getGraphicBufferSource());
+ sp<IOMXBufferSource> bufferSource(getBufferSource());
if (bufferSource != NULL && portIndex == kPortIndexInput) {
- bufferSource->addCodecBuffer(header);
+ bufferSource->onInputBufferAdded(*buffer);
}
CLOG_BUFFER(useBuffer, NEW_BUFFER_FMT(
- *buffer, portIndex, "%u(%zu)@%p", allottedSize, params->size(), params->pointer()));
+ *buffer, portIndex, "%u(%zu)@%p", allottedSize, paramsSize, paramsPointer));
return OK;
}
status_t OMXNodeInstance::useGraphicBuffer2_l(
OMX_U32 portIndex, const sp<GraphicBuffer>& graphicBuffer,
- OMX::buffer_id *buffer) {
+ IOMX::buffer_id *buffer) {
if (graphicBuffer == NULL || buffer == NULL) {
ALOGE("b/25884056");
return BAD_VALUE;
@@ -902,14 +1230,20 @@
// XXX: This function is here for backwards compatibility. Once the OMX
// implementations have been updated this can be removed and useGraphicBuffer2
// can be renamed to useGraphicBuffer.
-status_t OMXNodeInstance::useGraphicBuffer(
+status_t OMXNodeInstance::useGraphicBuffer_l(
OMX_U32 portIndex, const sp<GraphicBuffer>& graphicBuffer,
- OMX::buffer_id *buffer) {
+ IOMX::buffer_id *buffer) {
if (graphicBuffer == NULL || buffer == NULL) {
ALOGE("b/25884056");
return BAD_VALUE;
}
- Mutex::Autolock autoLock(mLock);
+
+ // First, see if we're in metadata mode. We could be running an experiment to simulate
+ // legacy behavior (preallocated buffers) on devices that supports meta.
+ if (mMetadataType[portIndex] != kMetadataBufferTypeInvalid) {
+ return useGraphicBufferWithMetadata_l(
+ portIndex, graphicBuffer, buffer);
+ }
// See if the newer version of the extension is present.
OMX_INDEXTYPE index;
@@ -966,9 +1300,32 @@
return OK;
}
+status_t OMXNodeInstance::useGraphicBufferWithMetadata_l(
+ OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
+ IOMX::buffer_id *buffer) {
+ if (portIndex != kPortIndexOutput) {
+ return BAD_VALUE;
+ }
+
+ if (mMetadataType[portIndex] != kMetadataBufferTypeGrallocSource &&
+ mMetadataType[portIndex] != kMetadataBufferTypeANWBuffer) {
+ return BAD_VALUE;
+ }
+
+ status_t err = useBuffer_l(portIndex, NULL, NULL, buffer);
+ if (err != OK) {
+ return err;
+ }
+
+ OMX_BUFFERHEADERTYPE *header = findBufferHeader(*buffer, portIndex);
+
+ return updateGraphicBufferInMeta_l(portIndex, graphicBuffer, *buffer, header);
+
+}
+
status_t OMXNodeInstance::updateGraphicBufferInMeta_l(
OMX_U32 portIndex, const sp<GraphicBuffer>& graphicBuffer,
- OMX::buffer_id buffer, OMX_BUFFERHEADERTYPE *header, bool updateCodecBuffer) {
+ IOMX::buffer_id buffer, OMX_BUFFERHEADERTYPE *header) {
// No need to check |graphicBuffer| since NULL is valid for it as below.
if (header == NULL) {
ALOGE("b/25884056");
@@ -980,14 +1337,9 @@
}
BufferMeta *bufferMeta = (BufferMeta *)(header->pAppPrivate);
- sp<ABuffer> data = bufferMeta->getBuffer(
- header, !updateCodecBuffer /* backup */, false /* limit */);
+ sp<ABuffer> data = bufferMeta->getBuffer(header, false /* limit */);
bufferMeta->setGraphicBuffer(graphicBuffer);
MetadataBufferType metaType = mMetadataType[portIndex];
- // we use gralloc source only in the codec buffers
- if (metaType == kMetadataBufferTypeGrallocSource && !updateCodecBuffer) {
- metaType = kMetadataBufferTypeANWBuffer;
- }
if (metaType == kMetadataBufferTypeGrallocSource
&& data->capacity() >= sizeof(VideoGrallocMetadata)) {
VideoGrallocMetadata &metadata = *(VideoGrallocMetadata *)(data->data());
@@ -1011,21 +1363,9 @@
return OK;
}
-status_t OMXNodeInstance::updateGraphicBufferInMeta(
- OMX_U32 portIndex, const sp<GraphicBuffer>& graphicBuffer,
- OMX::buffer_id buffer) {
- Mutex::Autolock autoLock(mLock);
- OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer, portIndex);
- // update backup buffer for input, codec buffer for output
- return updateGraphicBufferInMeta_l(
- portIndex, graphicBuffer, buffer, header,
- true /* updateCodecBuffer */);
-}
-
-status_t OMXNodeInstance::updateNativeHandleInMeta(
- OMX_U32 portIndex, const sp<NativeHandle>& nativeHandle, OMX::buffer_id buffer) {
- Mutex::Autolock autoLock(mLock);
- OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer, portIndex);
+status_t OMXNodeInstance::updateNativeHandleInMeta_l(
+ OMX_U32 portIndex, const sp<NativeHandle>& nativeHandle,
+ IOMX::buffer_id buffer, OMX_BUFFERHEADERTYPE *header) {
// No need to check |nativeHandle| since NULL is valid for it as below.
if (header == NULL) {
ALOGE("b/25884056");
@@ -1037,9 +1377,7 @@
}
BufferMeta *bufferMeta = (BufferMeta *)(header->pAppPrivate);
- // update backup buffer
- sp<ABuffer> data = bufferMeta->getBuffer(
- header, false /* backup */, false /* limit */);
+ sp<ABuffer> data = bufferMeta->getBuffer(header, false /* limit */);
bufferMeta->setNativeHandle(nativeHandle);
if (mMetadataType[portIndex] == kMetadataBufferTypeNativeHandleSource
&& data->capacity() >= sizeof(VideoNativeHandleMetadata)) {
@@ -1059,32 +1397,23 @@
return OK;
}
-status_t OMXNodeInstance::createGraphicBufferSource(
- OMX_U32 portIndex, const sp<IGraphicBufferConsumer> &bufferConsumer, MetadataBufferType *type) {
+status_t OMXNodeInstance::setInputSurface(
+ const sp<IOMXBufferSource> &bufferSource) {
+ Mutex::Autolock autolock(mLock);
+
status_t err;
// only allow graphic source on input port, when there are no allocated buffers yet
- if (portIndex != kPortIndexInput) {
- android_errorWriteLog(0x534e4554, "29422020");
- return BAD_VALUE;
- } else if (mNumPortBuffers[portIndex] > 0) {
+ if (mNumPortBuffers[kPortIndexInput] > 0) {
android_errorWriteLog(0x534e4554, "29422020");
return INVALID_OPERATION;
}
- const sp<GraphicBufferSource> surfaceCheck = getGraphicBufferSource();
- if (surfaceCheck != NULL) {
- if (portIndex < NELEM(mMetadataType) && type != NULL) {
- *type = mMetadataType[portIndex];
- }
+ if (getBufferSource() != NULL) {
return ALREADY_EXISTS;
}
- // Input buffers will hold meta-data (ANativeWindowBuffer references).
- if (type != NULL) {
- *type = kMetadataBufferTypeANWBuffer;
- }
- err = storeMetaDataInBuffers_l(portIndex, OMX_TRUE, type);
+ err = storeMetaDataInBuffers_l(kPortIndexInput, OMX_TRUE, NULL);
if (err != OK) {
return err;
}
@@ -1093,13 +1422,13 @@
// codec was configured.
OMX_PARAM_PORTDEFINITIONTYPE def;
InitOMXParams(&def);
- def.nPortIndex = portIndex;
+ def.nPortIndex = kPortIndexInput;
OMX_ERRORTYPE oerr = OMX_GetParameter(
mHandle, OMX_IndexParamPortDefinition, &def);
if (oerr != OMX_ErrorNone) {
OMX_INDEXTYPE index = OMX_IndexParamPortDefinition;
- CLOG_ERROR(getParameter, oerr, "%s(%#x): %s:%u",
- asString(index), index, portString(portIndex), portIndex);
+ CLOG_ERROR(getParameter, oerr, "%s(%#x): %s:%u", asString(index),
+ index, portString(kPortIndexInput), kPortIndexInput);
return UNKNOWN_ERROR;
}
@@ -1110,105 +1439,20 @@
return INVALID_OPERATION;
}
- uint32_t usageBits;
- oerr = OMX_GetParameter(
- mHandle, (OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits, &usageBits);
- if (oerr != OMX_ErrorNone) {
- usageBits = 0;
- }
-
- sp<GraphicBufferSource> bufferSource = new GraphicBufferSource(this,
- def.format.video.nFrameWidth,
- def.format.video.nFrameHeight,
- def.nBufferCountActual,
- usageBits,
- bufferConsumer);
-
- if ((err = bufferSource->initCheck()) != OK) {
- return err;
- }
- setGraphicBufferSource(bufferSource);
-
- return OK;
-}
-
-status_t OMXNodeInstance::createInputSurface(
- OMX_U32 portIndex, android_dataspace dataSpace,
- sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
- if (bufferProducer == NULL) {
- ALOGE("b/25884056");
+ if (def.format.video.nFrameWidth == 0
+ || def.format.video.nFrameHeight == 0) {
+ ALOGE("Invalid video dimension %ux%u",
+ def.format.video.nFrameWidth,
+ def.format.video.nFrameHeight);
return BAD_VALUE;
}
- Mutex::Autolock autolock(mLock);
- status_t err = createGraphicBufferSource(portIndex, NULL /* bufferConsumer */, type);
-
- if (err != OK) {
- return err;
- }
-
- mGraphicBufferSource->setDefaultDataSpace(dataSpace);
-
- *bufferProducer = mGraphicBufferSource->getIGraphicBufferProducer();
+ setBufferSource(bufferSource);
return OK;
}
-//static
-status_t OMXNodeInstance::createPersistentInputSurface(
- sp<IGraphicBufferProducer> *bufferProducer,
- sp<IGraphicBufferConsumer> *bufferConsumer) {
- if (bufferProducer == NULL || bufferConsumer == NULL) {
- ALOGE("b/25884056");
- return BAD_VALUE;
- }
- String8 name("GraphicBufferSource");
-
- sp<IGraphicBufferProducer> producer;
- sp<IGraphicBufferConsumer> consumer;
- BufferQueue::createBufferQueue(&producer, &consumer);
- consumer->setConsumerName(name);
- consumer->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER);
-
- sp<BufferQueue::ProxyConsumerListener> proxy =
- new BufferQueue::ProxyConsumerListener(NULL);
- status_t err = consumer->consumerConnect(proxy, false);
- if (err != NO_ERROR) {
- ALOGE("Error connecting to BufferQueue: %s (%d)",
- strerror(-err), err);
- return err;
- }
-
- *bufferProducer = producer;
- *bufferConsumer = consumer;
-
- return OK;
-}
-
-status_t OMXNodeInstance::setInputSurface(
- OMX_U32 portIndex, const sp<IGraphicBufferConsumer> &bufferConsumer,
- MetadataBufferType *type) {
- Mutex::Autolock autolock(mLock);
- return createGraphicBufferSource(portIndex, bufferConsumer, type);
-}
-
-void OMXNodeInstance::signalEvent(OMX_EVENTTYPE event, OMX_U32 arg1, OMX_U32 arg2) {
- mOwner->OnEvent(mNodeID, event, arg1, arg2, NULL);
-}
-
-status_t OMXNodeInstance::signalEndOfInputStream() {
- // For non-Surface input, the MediaCodec should convert the call to a
- // pair of requests (dequeue input buffer, queue input buffer with EOS
- // flag set). Seems easier than doing the equivalent from here.
- sp<GraphicBufferSource> bufferSource(getGraphicBufferSource());
- if (bufferSource == NULL) {
- CLOGW("signalEndOfInputStream can only be used with Surface input");
- return INVALID_OPERATION;
- }
- return bufferSource->signalEndOfInputStream();
-}
-
status_t OMXNodeInstance::allocateSecureBuffer(
- OMX_U32 portIndex, size_t size, OMX::buffer_id *buffer,
+ OMX_U32 portIndex, size_t size, IOMX::buffer_id *buffer,
void **buffer_data, sp<NativeHandle> *native_handle) {
if (buffer == NULL || buffer_data == NULL || native_handle == NULL) {
ALOGE("b/25884056");
@@ -1223,7 +1467,12 @@
Mutex::Autolock autoLock(mLock);
- BufferMeta *buffer_meta = new BufferMeta(size, portIndex);
+ if (!mSailed) {
+ ALOGE("b/35467458");
+ android_errorWriteLog(0x534e4554, "35467458");
+ return BAD_VALUE;
+ }
+ BufferMeta *buffer_meta = new BufferMeta(portIndex);
OMX_BUFFERHEADERTYPE *header;
@@ -1254,9 +1503,9 @@
addActiveBuffer(portIndex, *buffer);
- sp<GraphicBufferSource> bufferSource(getGraphicBufferSource());
+ sp<IOMXBufferSource> bufferSource(getBufferSource());
if (bufferSource != NULL && portIndex == kPortIndexInput) {
- bufferSource->addCodecBuffer(header);
+ bufferSource->onInputBufferAdded(*buffer);
}
CLOG_BUFFER(allocateSecureBuffer, NEW_BUFFER_FMT(
*buffer, portIndex, "%zu@%p:%p", size, *buffer_data,
@@ -1265,62 +1514,8 @@
return OK;
}
-status_t OMXNodeInstance::allocateBufferWithBackup(
- OMX_U32 portIndex, const sp<IMemory> ¶ms,
- OMX::buffer_id *buffer, OMX_U32 allottedSize) {
- if (params == NULL || buffer == NULL) {
- ALOGE("b/25884056");
- return BAD_VALUE;
- }
-
- Mutex::Autolock autoLock(mLock);
- if (allottedSize > params->size() || portIndex >= NELEM(mNumPortBuffers)) {
- return BAD_VALUE;
- }
-
- // metadata buffers are not connected cross process; only copy if not meta
- bool copy = mMetadataType[portIndex] == kMetadataBufferTypeInvalid;
-
- BufferMeta *buffer_meta = new BufferMeta(
- params, portIndex,
- (portIndex == kPortIndexInput) && copy /* copyToOmx */,
- (portIndex == kPortIndexOutput) && copy /* copyFromOmx */,
- NULL /* data */);
-
- OMX_BUFFERHEADERTYPE *header;
-
- OMX_ERRORTYPE err = OMX_AllocateBuffer(
- mHandle, &header, portIndex, buffer_meta, allottedSize);
- if (err != OMX_ErrorNone) {
- CLOG_ERROR(allocateBufferWithBackup, err,
- SIMPLE_BUFFER(portIndex, (size_t)allottedSize, params->pointer()));
- delete buffer_meta;
- buffer_meta = NULL;
-
- *buffer = 0;
-
- return StatusFromOMXError(err);
- }
-
- CHECK_EQ(header->pAppPrivate, buffer_meta);
-
- *buffer = makeBufferID(header);
-
- addActiveBuffer(portIndex, *buffer);
-
- sp<GraphicBufferSource> bufferSource(getGraphicBufferSource());
- if (bufferSource != NULL && portIndex == kPortIndexInput) {
- bufferSource->addCodecBuffer(header);
- }
-
- CLOG_BUFFER(allocateBufferWithBackup, NEW_BUFFER_FMT(*buffer, portIndex, "%zu@%p :> %u@%p",
- params->size(), params->pointer(), allottedSize, header->pBuffer));
-
- return OK;
-}
-
status_t OMXNodeInstance::freeBuffer(
- OMX_U32 portIndex, OMX::buffer_id buffer) {
+ OMX_U32 portIndex, IOMX::buffer_id buffer) {
Mutex::Autolock autoLock(mLock);
CLOG_BUFFER(freeBuffer, "%s:%u %#x", portString(portIndex), portIndex, buffer);
@@ -1343,7 +1538,8 @@
return StatusFromOMXError(err);
}
-status_t OMXNodeInstance::fillBuffer(OMX::buffer_id buffer, int fenceFd) {
+status_t OMXNodeInstance::fillBuffer(
+ IOMX::buffer_id buffer, const OMXBuffer &omxBuffer, int fenceFd) {
Mutex::Autolock autoLock(mLock);
OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer, kPortIndexOutput);
@@ -1351,6 +1547,20 @@
ALOGE("b/25884056");
return BAD_VALUE;
}
+
+ if (omxBuffer.mBufferType == OMXBuffer::kBufferTypeANWBuffer) {
+ status_t err = updateGraphicBufferInMeta_l(
+ kPortIndexOutput, omxBuffer.mGraphicBuffer, buffer, header);
+
+ if (err != OK) {
+ CLOG_ERROR(fillBuffer, err, FULL_BUFFER(
+ (intptr_t)header->pBuffer, header, fenceFd));
+ return err;
+ }
+ } else if (omxBuffer.mBufferType != OMXBuffer::kBufferTypePreset) {
+ return BAD_VALUE;
+ }
+
header->nFilledLen = 0;
header->nOffset = 0;
header->nFlags = 0;
@@ -1378,13 +1588,38 @@
}
status_t OMXNodeInstance::emptyBuffer(
- OMX::buffer_id buffer,
- OMX_U32 rangeOffset, OMX_U32 rangeLength,
+ buffer_id buffer, const OMXBuffer &omxBuffer,
OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
Mutex::Autolock autoLock(mLock);
+ switch (omxBuffer.mBufferType) {
+ case OMXBuffer::kBufferTypePreset:
+ return emptyBuffer_l(
+ buffer, omxBuffer.mRangeOffset, omxBuffer.mRangeLength,
+ flags, timestamp, fenceFd);
+
+ case OMXBuffer::kBufferTypeANWBuffer:
+ return emptyGraphicBuffer_l(
+ buffer, omxBuffer.mGraphicBuffer, flags, timestamp, fenceFd);
+
+ case OMXBuffer::kBufferTypeNativeHandle:
+ return emptyNativeHandleBuffer_l(
+ buffer, omxBuffer.mNativeHandle, flags, timestamp, fenceFd);
+
+ default:
+ break;
+ }
+
+ return BAD_VALUE;
+}
+
+status_t OMXNodeInstance::emptyBuffer_l(
+ IOMX::buffer_id buffer,
+ OMX_U32 rangeOffset, OMX_U32 rangeLength,
+ OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
+
// no emptybuffer if using input surface
- if (getGraphicBufferSource() != NULL) {
+ if (getBufferSource() != NULL) {
android_errorWriteLog(0x534e4554, "29422020");
return INVALID_OPERATION;
}
@@ -1530,25 +1765,25 @@
}
// like emptyBuffer, but the data is already in header->pBuffer
-status_t OMXNodeInstance::emptyGraphicBuffer(
- OMX_BUFFERHEADERTYPE *header, const sp<GraphicBuffer> &graphicBuffer,
+status_t OMXNodeInstance::emptyGraphicBuffer_l(
+ IOMX::buffer_id buffer, const sp<GraphicBuffer> &graphicBuffer,
OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
+ OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer, kPortIndexInput);
if (header == NULL) {
ALOGE("b/25884056");
return BAD_VALUE;
}
- Mutex::Autolock autoLock(mLock);
- OMX::buffer_id buffer = findBufferID(header);
status_t err = updateGraphicBufferInMeta_l(
- kPortIndexInput, graphicBuffer, buffer, header,
- true /* updateCodecBuffer */);
+ kPortIndexInput, graphicBuffer, buffer, header);
if (err != OK) {
CLOG_ERROR(emptyGraphicBuffer, err, FULL_BUFFER(
(intptr_t)header->pBuffer, header, fenceFd));
return err;
}
+ int64_t codecTimeUs = getCodecTimestamp(timestamp);
+
header->nOffset = 0;
if (graphicBuffer == NULL) {
header->nFilledLen = 0;
@@ -1557,9 +1792,100 @@
} else {
header->nFilledLen = sizeof(VideoNativeMetadata);
}
+ return emptyBuffer_l(header, flags, codecTimeUs, (intptr_t)header->pBuffer, fenceFd);
+}
+
+status_t OMXNodeInstance::setMaxPtsGapUs(const void *params, size_t size) {
+ if (params == NULL || size != sizeof(OMX_PARAM_U32TYPE)) {
+ CLOG_ERROR(setMaxPtsGapUs, BAD_VALUE, "invalid params (%p,%zu)", params, size);
+ return BAD_VALUE;
+ }
+
+ mMaxTimestampGapUs = (int64_t)((OMX_PARAM_U32TYPE*)params)->nU32;
+
+ return OK;
+}
+
+int64_t OMXNodeInstance::getCodecTimestamp(OMX_TICKS timestamp) {
+ int64_t originalTimeUs = timestamp;
+
+ if (mMaxTimestampGapUs > 0ll) {
+ /* Cap timestamp gap between adjacent frames to specified max
+ *
+ * In the scenario of cast mirroring, encoding could be suspended for
+ * prolonged periods. Limiting the pts gap to workaround the problem
+ * where encoder's rate control logic produces huge frames after a
+ * long period of suspension.
+ */
+ if (mPrevOriginalTimeUs >= 0ll) {
+ int64_t timestampGapUs = originalTimeUs - mPrevOriginalTimeUs;
+ timestamp = (timestampGapUs < mMaxTimestampGapUs ?
+ timestampGapUs : mMaxTimestampGapUs) + mPrevModifiedTimeUs;
+ }
+ ALOGV("IN timestamp: %lld -> %lld",
+ static_cast<long long>(originalTimeUs),
+ static_cast<long long>(timestamp));
+ }
+
+ mPrevOriginalTimeUs = originalTimeUs;
+ mPrevModifiedTimeUs = timestamp;
+
+ if (mMaxTimestampGapUs > 0ll && !mRestorePtsFailed) {
+ mOriginalTimeUs.add(timestamp, originalTimeUs);
+ }
+
+ return timestamp;
+}
+
+status_t OMXNodeInstance::emptyNativeHandleBuffer_l(
+ IOMX::buffer_id buffer, const sp<NativeHandle> &nativeHandle,
+ OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
+ OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer, kPortIndexInput);
+ if (header == NULL) {
+ ALOGE("b/25884056");
+ return BAD_VALUE;
+ }
+
+ status_t err = updateNativeHandleInMeta_l(
+ kPortIndexInput, nativeHandle, buffer, header);
+ if (err != OK) {
+ CLOG_ERROR(emptyNativeHandleBuffer_l, err, FULL_BUFFER(
+ (intptr_t)header->pBuffer, header, fenceFd));
+ return err;
+ }
+
+ header->nOffset = 0;
+ header->nFilledLen = (nativeHandle == NULL) ? 0 : sizeof(VideoNativeMetadata);
+
return emptyBuffer_l(header, flags, timestamp, (intptr_t)header->pBuffer, fenceFd);
}
+void OMXNodeInstance::codecBufferFilled(omx_message &msg) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mMaxTimestampGapUs <= 0ll || mRestorePtsFailed) {
+ return;
+ }
+
+ OMX_U32 &flags = msg.u.extended_buffer_data.flags;
+ OMX_TICKS ×tamp = msg.u.extended_buffer_data.timestamp;
+
+ if (!(flags & OMX_BUFFERFLAG_CODECCONFIG)) {
+ ssize_t index = mOriginalTimeUs.indexOfKey(timestamp);
+ if (index >= 0) {
+ ALOGV("OUT timestamp: %lld -> %lld",
+ static_cast<long long>(timestamp),
+ static_cast<long long>(mOriginalTimeUs[index]));
+ timestamp = mOriginalTimeUs[index];
+ mOriginalTimeUs.removeItemsAt(index);
+ } else {
+ // giving up the effort as encoder doesn't appear to preserve pts
+ ALOGW("giving up limiting timestamp gap (pts = %lld)", timestamp);
+ mRestorePtsFailed = true;
+ }
+ }
+}
+
status_t OMXNodeInstance::getExtensionIndex(
const char *parameterName, OMX_INDEXTYPE *index) {
Mutex::Autolock autoLock(mLock);
@@ -1570,133 +1896,22 @@
return StatusFromOMXError(err);
}
-inline static const char *asString(IOMX::InternalOptionType i, const char *def = "??") {
- switch (i) {
- case IOMX::INTERNAL_OPTION_SUSPEND: return "SUSPEND";
- case IOMX::INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY:
- return "REPEAT_PREVIOUS_FRAME_DELAY";
- case IOMX::INTERNAL_OPTION_MAX_TIMESTAMP_GAP: return "MAX_TIMESTAMP_GAP";
- case IOMX::INTERNAL_OPTION_MAX_FPS: return "MAX_FPS";
- case IOMX::INTERNAL_OPTION_START_TIME: return "START_TIME";
- case IOMX::INTERNAL_OPTION_TIME_LAPSE: return "TIME_LAPSE";
- case IOMX::INTERNAL_OPTION_TIME_OFFSET: return "TIME_OFFSET";
- default: return def;
- }
+status_t OMXNodeInstance::dispatchMessage(const omx_message &msg) {
+ mDispatcher->post(msg, true /*realTime*/);
+ return OK;
}
-template<typename T>
-static bool getInternalOption(
- const void *data, size_t size, T *out) {
- if (size != sizeof(T)) {
- return false;
+status_t OMXNodeInstance::setQuirks(OMX_U32 quirks) {
+ if (quirks & ~kQuirksMask) {
+ return BAD_VALUE;
}
- *out = *(T*)data;
- return true;
-}
-status_t OMXNodeInstance::setInternalOption(
- OMX_U32 portIndex,
- IOMX::InternalOptionType type,
- const void *data,
- size_t size) {
- CLOG_CONFIG(setInternalOption, "%s(%d): %s:%u %zu@%p",
- asString(type), type, portString(portIndex), portIndex, size, data);
- switch (type) {
- case IOMX::INTERNAL_OPTION_SUSPEND:
- case IOMX::INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY:
- case IOMX::INTERNAL_OPTION_MAX_TIMESTAMP_GAP:
- case IOMX::INTERNAL_OPTION_MAX_FPS:
- case IOMX::INTERNAL_OPTION_START_TIME:
- case IOMX::INTERNAL_OPTION_TIME_LAPSE:
- case IOMX::INTERNAL_OPTION_TIME_OFFSET:
- case IOMX::INTERNAL_OPTION_COLOR_ASPECTS:
- {
- const sp<GraphicBufferSource> &bufferSource =
- getGraphicBufferSource();
+ mQuirks = quirks;
- if (bufferSource == NULL || portIndex != kPortIndexInput) {
- CLOGW("setInternalOption is only for Surface input");
- return ERROR_UNSUPPORTED;
- }
-
- if (type == IOMX::INTERNAL_OPTION_SUSPEND) {
- bool suspend;
- if (!getInternalOption(data, size, &suspend)) {
- return INVALID_OPERATION;
- }
-
- CLOG_CONFIG(setInternalOption, "suspend=%d", suspend);
- bufferSource->suspend(suspend);
- } else if (type == IOMX::INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY) {
- int64_t delayUs;
- if (!getInternalOption(data, size, &delayUs)) {
- return INVALID_OPERATION;
- }
-
- CLOG_CONFIG(setInternalOption, "delayUs=%lld", (long long)delayUs);
- return bufferSource->setRepeatPreviousFrameDelayUs(delayUs);
- } else if (type == IOMX::INTERNAL_OPTION_TIME_OFFSET) {
- int64_t timeOffsetUs;
- if (!getInternalOption(data, size, &timeOffsetUs)) {
- return INVALID_OPERATION;
- }
- CLOG_CONFIG(setInternalOption, "bufferOffsetUs=%lld", (long long)timeOffsetUs);
- return bufferSource->setInputBufferTimeOffset(timeOffsetUs);
- } else if (type == IOMX::INTERNAL_OPTION_MAX_TIMESTAMP_GAP) {
- int64_t maxGapUs;
- if (!getInternalOption(data, size, &maxGapUs)) {
- return INVALID_OPERATION;
- }
-
- CLOG_CONFIG(setInternalOption, "gapUs=%lld", (long long)maxGapUs);
- return bufferSource->setMaxTimestampGapUs(maxGapUs);
- } else if (type == IOMX::INTERNAL_OPTION_MAX_FPS) {
- float maxFps;
- if (!getInternalOption(data, size, &maxFps)) {
- return INVALID_OPERATION;
- }
-
- CLOG_CONFIG(setInternalOption, "maxFps=%f", maxFps);
- return bufferSource->setMaxFps(maxFps);
- } else if (type == IOMX::INTERNAL_OPTION_START_TIME) {
- int64_t skipFramesBeforeUs;
- if (!getInternalOption(data, size, &skipFramesBeforeUs)) {
- return INVALID_OPERATION;
- }
-
- CLOG_CONFIG(setInternalOption, "beforeUs=%lld", (long long)skipFramesBeforeUs);
- bufferSource->setSkipFramesBeforeUs(skipFramesBeforeUs);
- } else if (type == IOMX::INTERNAL_OPTION_TIME_LAPSE) {
- GraphicBufferSource::TimeLapseConfig config;
- if (!getInternalOption(data, size, &config)) {
- return INVALID_OPERATION;
- }
-
- CLOG_CONFIG(setInternalOption, "perFrameUs=%lld perCaptureUs=%lld",
- (long long)config.mTimePerFrameUs, (long long)config.mTimePerCaptureUs);
-
- return bufferSource->setTimeLapseConfig(config);
- } else if (type == IOMX::INTERNAL_OPTION_COLOR_ASPECTS) {
- ColorAspects aspects;
- if (!getInternalOption(data, size, &aspects)) {
- return INVALID_OPERATION;
- }
-
- CLOG_CONFIG(setInternalOption, "setting color aspects");
- bufferSource->setColorAspects(aspects);
- }
-
- return OK;
- }
-
- default:
- return ERROR_UNSUPPORTED;
- }
+ return OK;
}
bool OMXNodeInstance::handleMessage(omx_message &msg) {
- const sp<GraphicBufferSource>& bufferSource(getGraphicBufferSource());
-
if (msg.type == omx_message::FILL_BUFFER_DONE) {
OMX_BUFFERHEADERTYPE *buffer =
findBufferHeader(msg.u.extended_buffer_data.buffer, kPortIndexOutput);
@@ -1726,12 +1941,8 @@
}
buffer_meta->CopyFromOMX(buffer);
- if (bufferSource != NULL) {
- // fix up the buffer info (especially timestamp) if needed
- bufferSource->codecBufferFilled(buffer);
-
- msg.u.extended_buffer_data.timestamp = buffer->nTimeStamp;
- }
+ // fix up the buffer info (especially timestamp) if needed
+ codecBufferFilled(msg);
} else if (msg.type == omx_message::EMPTY_BUFFER_DONE) {
OMX_BUFFERHEADERTYPE *buffer =
findBufferHeader(msg.u.buffer_data.buffer, kPortIndexInput);
@@ -1747,20 +1958,100 @@
EBD, WITH_STATS(EMPTY_BUFFER(msg.u.buffer_data.buffer, buffer, msg.fenceFd)));
}
+ const sp<IOMXBufferSource> bufferSource(getBufferSource());
+
if (bufferSource != NULL) {
- // This is one of the buffers used exclusively by
- // GraphicBufferSource.
+ // This is one of the buffers used exclusively by IOMXBufferSource.
// Don't dispatch a message back to ACodec, since it doesn't
// know that anyone asked to have the buffer emptied and will
// be very confused.
- bufferSource->codecBufferEmptied(buffer, msg.fenceFd);
+ bufferSource->onInputBufferEmptied(
+ msg.u.buffer_data.buffer, OMXFenceParcelable(msg.fenceFd));
return true;
}
+ } else if (msg.type == omx_message::EVENT &&
+ msg.u.event_data.event == OMX_EventDataSpaceChanged) {
+ handleDataSpaceChanged(msg);
}
return false;
}
+bool OMXNodeInstance::handleDataSpaceChanged(omx_message &msg) {
+ android_dataspace dataSpace = (android_dataspace) msg.u.event_data.data1;
+ android_dataspace origDataSpace = dataSpace;
+
+ if (!ColorUtils::convertDataSpaceToV0(dataSpace)) {
+ // Do not process the data space change, don't notify client either
+ return true;
+ }
+
+ android_pixel_format pixelFormat = (android_pixel_format)msg.u.event_data.data3;
+
+ ColorAspects requestedAspects = ColorUtils::unpackToColorAspects(msg.u.event_data.data2);
+ ColorAspects aspects = requestedAspects; // initially requested aspects
+
+ // request color aspects to encode
+ OMX_INDEXTYPE index;
+ status_t err = getExtensionIndex(
+ "OMX.google.android.index.describeColorAspects", &index);
+ if (err == OK) {
+ // V0 dataspace
+ DescribeColorAspectsParams params;
+ InitOMXParams(¶ms);
+ params.nPortIndex = kPortIndexInput;
+ params.nDataSpace = origDataSpace;
+ params.nPixelFormat = pixelFormat;
+ params.bDataSpaceChanged = OMX_TRUE;
+ params.sAspects = requestedAspects;
+
+ err = getConfig(index, ¶ms, sizeof(params));
+ if (err == OK) {
+ aspects = params.sAspects;
+ ALOGD("Codec resolved it to (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
+ params.sAspects.mRange, asString(params.sAspects.mRange),
+ params.sAspects.mPrimaries, asString(params.sAspects.mPrimaries),
+ params.sAspects.mMatrixCoeffs, asString(params.sAspects.mMatrixCoeffs),
+ params.sAspects.mTransfer, asString(params.sAspects.mTransfer),
+ err, asString(err));
+ } else {
+ params.sAspects = aspects;
+ err = OK;
+ }
+ params.bDataSpaceChanged = OMX_FALSE;
+ for (int triesLeft = 2; --triesLeft >= 0; ) {
+ status_t err = setConfig(index, ¶ms, sizeof(params));
+ if (err == OK) {
+ err = getConfig(index, ¶ms, sizeof(params));
+ }
+ if (err != OK || !ColorUtils::checkIfAspectsChangedAndUnspecifyThem(
+ params.sAspects, aspects)) {
+ // if we can't set or get color aspects, still communicate dataspace to client
+ break;
+ }
+
+ ALOGW_IF(triesLeft == 0, "Codec repeatedly changed requested ColorAspects.");
+ }
+ }
+
+ ALOGV("Set color aspects to (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
+ aspects.mRange, asString(aspects.mRange),
+ aspects.mPrimaries, asString(aspects.mPrimaries),
+ aspects.mMatrixCoeffs, asString(aspects.mMatrixCoeffs),
+ aspects.mTransfer, asString(aspects.mTransfer),
+ err, asString(err));
+
+ // signal client that the dataspace has changed; this will update the output format
+ // TODO: we should tie this to an output buffer somehow, and signal the change
+ // just before the output buffer is returned to the client, but there are many
+ // ways this could fail (e.g. flushing), and we are not yet supporting this scenario.
+
+ msg.u.event_data.data1 = (OMX_U32) dataSpace;
+ msg.u.event_data.data2 = (OMX_U32) ColorUtils::packToU32(aspects);
+
+ return false;
+}
+
void OMXNodeInstance::onMessages(std::list<omx_message> &messages) {
for (std::list<omx_message>::iterator it = messages.begin(); it != messages.end(); ) {
if (handleMessage(*it)) {
@@ -1775,15 +2066,11 @@
}
}
-void OMXNodeInstance::onObserverDied(OMXMaster *master) {
+void OMXNodeInstance::onObserverDied() {
ALOGE("!!! Observer died. Quickly, do something, ... anything...");
// Try to force shutdown of the node and hope for the best.
- freeNode(master);
-}
-
-void OMXNodeInstance::onGetHandleFailed() {
- delete this;
+ freeNode();
}
// OMXNodeInstance::OnEvent calls OMX::OnEvent, which then calls here.
@@ -1827,13 +2114,13 @@
CLOGI_(level, onEvent, "%s(%x), %s(%x), %s(%x)",
asString(event), event, arg1String, arg1, arg2String, arg2);
- const sp<GraphicBufferSource>& bufferSource(getGraphicBufferSource());
+ const sp<IOMXBufferSource> bufferSource(getBufferSource());
if (bufferSource != NULL
&& event == OMX_EventCmdComplete
&& arg1 == OMX_CommandStateSet
&& arg2 == OMX_StateExecuting) {
- bufferSource->omxExecuting();
+ bufferSource->onOmxExecuting();
}
// allow configuration if we return to the loaded state
@@ -1860,8 +2147,39 @@
if (instance->mDying) {
return OMX_ErrorNone;
}
- return instance->owner()->OnEvent(
- instance->nodeID(), eEvent, nData1, nData2, pEventData);
+
+ instance->onEvent(eEvent, nData1, nData2);
+
+ // output rendered events are not processed as regular events until they hit the observer
+ if (eEvent == OMX_EventOutputRendered) {
+ if (pEventData == NULL) {
+ return OMX_ErrorBadParameter;
+ }
+
+ // process data from array
+ OMX_VIDEO_RENDEREVENTTYPE *renderData = (OMX_VIDEO_RENDEREVENTTYPE *)pEventData;
+ for (size_t i = 0; i < nData1; ++i) {
+ omx_message msg;
+ msg.type = omx_message::FRAME_RENDERED;
+ msg.fenceFd = -1;
+ msg.u.render_data.timestamp = renderData[i].nMediaTimeUs;
+ msg.u.render_data.nanoTime = renderData[i].nSystemTimeNs;
+
+ instance->mDispatcher->post(msg, false /* realTime */);
+ }
+ return OMX_ErrorNone;
+ }
+
+ omx_message msg;
+ msg.type = omx_message::EVENT;
+ msg.fenceFd = -1;
+ msg.u.event_data.event = eEvent;
+ msg.u.event_data.data1 = nData1;
+ msg.u.event_data.data2 = nData2;
+
+ instance->mDispatcher->post(msg, true /* realTime */);
+
+ return OMX_ErrorNone;
}
// static
@@ -1878,8 +2196,14 @@
return OMX_ErrorNone;
}
int fenceFd = instance->retrieveFenceFromMeta_l(pBuffer, kPortIndexOutput);
- return instance->owner()->OnEmptyBufferDone(instance->nodeID(),
- instance->findBufferID(pBuffer), pBuffer, fenceFd);
+
+ omx_message msg;
+ msg.type = omx_message::EMPTY_BUFFER_DONE;
+ msg.fenceFd = fenceFd;
+ msg.u.buffer_data.buffer = instance->findBufferID(pBuffer);
+ instance->mDispatcher->post(msg);
+
+ return OMX_ErrorNone;
}
// static
@@ -1896,11 +2220,21 @@
return OMX_ErrorNone;
}
int fenceFd = instance->retrieveFenceFromMeta_l(pBuffer, kPortIndexOutput);
- return instance->owner()->OnFillBufferDone(instance->nodeID(),
- instance->findBufferID(pBuffer), pBuffer, fenceFd);
+
+ omx_message msg;
+ msg.type = omx_message::FILL_BUFFER_DONE;
+ msg.fenceFd = fenceFd;
+ msg.u.extended_buffer_data.buffer = instance->findBufferID(pBuffer);
+ msg.u.extended_buffer_data.range_offset = pBuffer->nOffset;
+ msg.u.extended_buffer_data.range_length = pBuffer->nFilledLen;
+ msg.u.extended_buffer_data.flags = pBuffer->nFlags;
+ msg.u.extended_buffer_data.timestamp = pBuffer->nTimeStamp;
+ instance->mDispatcher->post(msg);
+
+ return OMX_ErrorNone;
}
-void OMXNodeInstance::addActiveBuffer(OMX_U32 portIndex, OMX::buffer_id id) {
+void OMXNodeInstance::addActiveBuffer(OMX_U32 portIndex, IOMX::buffer_id id) {
ActiveBuffer active;
active.mPortIndex = portIndex;
active.mID = id;
@@ -1912,7 +2246,7 @@
}
void OMXNodeInstance::removeActiveBuffer(
- OMX_U32 portIndex, OMX::buffer_id id) {
+ OMX_U32 portIndex, IOMX::buffer_id id) {
for (size_t i = 0; i < mActiveBuffers.size(); ++i) {
if (mActiveBuffers[i].mPortIndex == portIndex
&& mActiveBuffers[i].mID == id) {
@@ -1937,17 +2271,17 @@
}
}
-OMX::buffer_id OMXNodeInstance::makeBufferID(OMX_BUFFERHEADERTYPE *bufferHeader) {
+IOMX::buffer_id OMXNodeInstance::makeBufferID(OMX_BUFFERHEADERTYPE *bufferHeader) {
if (bufferHeader == NULL) {
return 0;
}
Mutex::Autolock autoLock(mBufferIDLock);
- OMX::buffer_id buffer;
+ IOMX::buffer_id buffer;
do { // handle the very unlikely case of ID overflow
if (++mBufferIDCount == 0) {
++mBufferIDCount;
}
- buffer = (OMX::buffer_id)mBufferIDCount;
+ buffer = (IOMX::buffer_id)mBufferIDCount;
} while (mBufferIDToBufferHeader.indexOfKey(buffer) >= 0);
mBufferIDToBufferHeader.add(buffer, bufferHeader);
mBufferHeaderToBufferID.add(bufferHeader, buffer);
@@ -1955,7 +2289,7 @@
}
OMX_BUFFERHEADERTYPE *OMXNodeInstance::findBufferHeader(
- OMX::buffer_id buffer, OMX_U32 portIndex) {
+ IOMX::buffer_id buffer, OMX_U32 portIndex) {
if (buffer == 0) {
return NULL;
}
@@ -1976,7 +2310,7 @@
return header;
}
-OMX::buffer_id OMXNodeInstance::findBufferID(OMX_BUFFERHEADERTYPE *bufferHeader) {
+IOMX::buffer_id OMXNodeInstance::findBufferID(OMX_BUFFERHEADERTYPE *bufferHeader) {
if (bufferHeader == NULL) {
return 0;
}
@@ -1989,7 +2323,7 @@
return mBufferHeaderToBufferID.valueAt(index);
}
-void OMXNodeInstance::invalidateBufferID(OMX::buffer_id buffer) {
+void OMXNodeInstance::invalidateBufferID(IOMX::buffer_id buffer) {
if (buffer == 0) {
return;
}
diff --git a/media/libstagefright/omx/OMXUtils.cpp b/media/libstagefright/omx/OMXUtils.cpp
index c5879b8..ee6d1d5 100644
--- a/media/libstagefright/omx/OMXUtils.cpp
+++ b/media/libstagefright/omx/OMXUtils.cpp
@@ -32,9 +32,13 @@
switch (err) {
case OMX_ErrorNone:
return OK;
+ case OMX_ErrorNoMore:
+ return NOT_ENOUGH_DATA;
case OMX_ErrorUnsupportedSetting:
case OMX_ErrorUnsupportedIndex:
return ERROR_UNSUPPORTED; // this is a media specific error
+ case OMX_ErrorBadParameter:
+ return BAD_VALUE;
case OMX_ErrorInsufficientResources:
return NO_MEMORY;
case OMX_ErrorInvalidComponentName:
@@ -178,7 +182,7 @@
: kMimeToRole[i].decoderRole;
}
-status_t SetComponentRole(const sp<IOMX> &omx, IOMX::node_id node, const char *role) {
+status_t SetComponentRole(const sp<IOMXNode> &omxNode, const char *role) {
OMX_PARAM_COMPONENTROLETYPE roleParams;
InitOMXParams(&roleParams);
@@ -187,8 +191,8 @@
roleParams.cRole[OMX_MAX_STRINGNAME_SIZE - 1] = '\0';
- return omx->setParameter(
- node, OMX_IndexParamStandardComponentRole,
+ return omxNode->setParameter(
+ OMX_IndexParamStandardComponentRole,
&roleParams, sizeof(roleParams));
}
@@ -305,24 +309,24 @@
}
bool DescribeColorFormat(
- const sp<IOMX> &omx, IOMX::node_id node,
+ const sp<IOMXNode> &omxNode,
DescribeColorFormat2Params &describeParams)
{
OMX_INDEXTYPE describeColorFormatIndex;
- if (omx->getExtensionIndex(
- node, "OMX.google.android.index.describeColorFormat",
+ if (omxNode->getExtensionIndex(
+ "OMX.google.android.index.describeColorFormat",
&describeColorFormatIndex) == OK) {
DescribeColorFormatParams describeParamsV1(describeParams);
- if (omx->getParameter(
- node, describeColorFormatIndex,
+ if (omxNode->getParameter(
+ describeColorFormatIndex,
&describeParamsV1, sizeof(describeParamsV1)) == OK) {
describeParams.initFromV1(describeParamsV1);
return describeParams.sMediaImage.mType != MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
}
- } else if (omx->getExtensionIndex(
- node, "OMX.google.android.index.describeColorFormat2", &describeColorFormatIndex) == OK
- && omx->getParameter(
- node, describeColorFormatIndex, &describeParams, sizeof(describeParams)) == OK) {
+ } else if (omxNode->getExtensionIndex(
+ "OMX.google.android.index.describeColorFormat2", &describeColorFormatIndex) == OK
+ && omxNode->getParameter(
+ describeColorFormatIndex, &describeParams, sizeof(describeParams)) == OK) {
return describeParams.sMediaImage.mType != MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
}
@@ -331,7 +335,7 @@
// static
bool IsFlexibleColorFormat(
- const sp<IOMX> &omx, IOMX::node_id node,
+ const sp<IOMXNode> &omxNode,
uint32_t colorFormat, bool usingNativeBuffers, OMX_U32 *flexibleEquivalent) {
DescribeColorFormat2Params describeParams;
InitOMXParams(&describeParams);
@@ -345,7 +349,7 @@
CHECK(flexibleEquivalent != NULL);
- if (!DescribeColorFormat(omx, node, describeParams)) {
+ if (!DescribeColorFormat(omxNode, describeParams)) {
return false;
}
diff --git a/media/libstagefright/omx/OMXUtils.h b/media/libstagefright/omx/OMXUtils.h
index 315f118..401d64b 100644
--- a/media/libstagefright/omx/OMXUtils.h
+++ b/media/libstagefright/omx/OMXUtils.h
@@ -39,18 +39,42 @@
status_t StatusFromOMXError(OMX_ERRORTYPE err);
const char *GetComponentRole(bool isEncoder, const char *mime);
-status_t SetComponentRole(const sp<IOMX> &omx, IOMX::node_id node, const char *role);
+status_t SetComponentRole(const sp<IOMXNode> &omxNode, const char *role);
struct DescribeColorFormat2Params;
bool IsFlexibleColorFormat(
- const sp<IOMX> &omx, IOMX::node_id node,
- uint32_t colorFormat, bool usingNativeBuffers, OMX_U32 *flexibleEquivalent);
+ const sp<IOMXNode> &omxNode, uint32_t colorFormat,
+ bool usingNativeBuffers, OMX_U32 *flexibleEquivalent);
bool DescribeDefaultColorFormat(DescribeColorFormat2Params &describeParams);
bool DescribeColorFormat(
- const sp<IOMX> &omx, IOMX::node_id node,
+ const sp<IOMXNode> &omxNode,
DescribeColorFormat2Params &describeParams);
+inline static const char *asString(MetadataBufferType i, const char *def = "??") {
+ using namespace android;
+ switch (i) {
+ case kMetadataBufferTypeCameraSource: return "CameraSource";
+ case kMetadataBufferTypeGrallocSource: return "GrallocSource";
+ case kMetadataBufferTypeANWBuffer: return "ANWBuffer";
+ case kMetadataBufferTypeNativeHandleSource: return "NativeHandleSource";
+ case kMetadataBufferTypeInvalid: return "Invalid";
+ default: return def;
+ }
+}
+
+inline static const char *asString(IOMX::PortMode mode, const char *def = "??") {
+ using namespace android;
+ switch (mode) {
+ case IOMX::kPortModePresetByteBuffer: return "PresetByteBuffer";
+ case IOMX::kPortModePresetANWBuffer: return "PresetANWBuffer";
+ case IOMX::kPortModePresetSecureBuffer: return "PresetSecureBuffer";
+ case IOMX::kPortModeDynamicANWBuffer: return "DynamicANWBuffer";
+ case IOMX::kPortModeDynamicNativeHandle:return "DynamicNativeHandle";
+ default: return def;
+ }
+}
+
} // namespace android
#endif
diff --git a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
index 7c975f7..761b425 100644
--- a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
+++ b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
@@ -77,19 +77,34 @@
switch (index) {
case OMX_IndexParamPortDefinition:
{
- portIndex = ((OMX_PARAM_PORTDEFINITIONTYPE *)params)->nPortIndex;
+ const OMX_PARAM_PORTDEFINITIONTYPE *portDefs =
+ (const OMX_PARAM_PORTDEFINITIONTYPE *) params;
+ if (!isValidOMXParam(portDefs)) {
+ return false;
+ }
+ portIndex = portDefs->nPortIndex;
break;
}
case OMX_IndexParamAudioPcm:
{
- portIndex = ((OMX_AUDIO_PARAM_PCMMODETYPE *)params)->nPortIndex;
+ const OMX_AUDIO_PARAM_PCMMODETYPE *pcmMode =
+ (const OMX_AUDIO_PARAM_PCMMODETYPE *) params;
+ if (!isValidOMXParam(pcmMode)) {
+ return false;
+ }
+ portIndex = pcmMode->nPortIndex;
break;
}
case OMX_IndexParamAudioAac:
{
- portIndex = ((OMX_AUDIO_PARAM_AACPROFILETYPE *)params)->nPortIndex;
+ const OMX_AUDIO_PARAM_AACPROFILETYPE *aacMode =
+ (const OMX_AUDIO_PARAM_AACPROFILETYPE *) params;
+ if (!isValidOMXParam(aacMode)) {
+ return false;
+ }
+ portIndex = aacMode->nPortIndex;
break;
}
diff --git a/media/libstagefright/omx/SoftOMXPlugin.cpp b/media/libstagefright/omx/SoftOMXPlugin.cpp
old mode 100755
new mode 100644
index 0f9c00c..a773ca2
--- a/media/libstagefright/omx/SoftOMXPlugin.cpp
+++ b/media/libstagefright/omx/SoftOMXPlugin.cpp
@@ -56,6 +56,7 @@
{ "OMX.google.vp8.decoder", "vpxdec", "video_decoder.vp8" },
{ "OMX.google.vp9.decoder", "vpxdec", "video_decoder.vp9" },
{ "OMX.google.vp8.encoder", "vpxenc", "video_encoder.vp8" },
+ { "OMX.google.vp9.encoder", "vpxenc", "video_encoder.vp9" },
{ "OMX.google.raw.decoder", "rawdec", "audio_decoder.raw" },
{ "OMX.google.flac.encoder", "flacenc", "audio_encoder.flac" },
{ "OMX.google.gsm.decoder", "gsmdec", "audio_decoder.gsm" },
diff --git a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
index 409cef7..920dd18 100644
--- a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
@@ -62,6 +62,7 @@
mCropWidth(width),
mCropHeight(height),
mOutputPortSettingsChange(NONE),
+ mUpdateColorAspects(false),
mMinInputBufferSize(384), // arbitrary, using one uncompressed macroblock
mMinCompressionRatio(1), // max input size is normally the output size
mComponentRole(componentRole),
@@ -430,10 +431,6 @@
return OMX_ErrorBadPortIndex;
}
- if (formatParams->nIndex != 0) {
- return OMX_ErrorNoMore;
- }
-
if (formatParams->nPortIndex == kInputPortIndex) {
if (formatParams->eCompressionFormat != mCodingType
|| formatParams->eColorFormat != OMX_COLOR_FormatUnused) {
diff --git a/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
index 0f9c118..2aa88af 100644
--- a/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
@@ -30,8 +30,11 @@
#include <media/stagefright/foundation/AUtils.h>
#include <media/stagefright/MediaDefs.h>
-#include <ui/GraphicBuffer.h>
+#include <ui/Fence.h>
#include <ui/GraphicBufferMapper.h>
+#include <ui/Rect.h>
+
+#include <hardware/gralloc.h>
#include <OMX_IndexExt.h>
@@ -480,18 +483,25 @@
unsigned green = src[greenOffset];
unsigned blue = src[blueOffset];
- // using ITU-R BT.601 conversion matrix
+ // Using ITU-R BT.601-7 (03/2011)
+ // 2.5.1: Ey' = ( 0.299*R + 0.587*G + 0.114*B)
+ // 2.5.2: ECr' = ( 0.701*R - 0.587*G - 0.114*B) / 1.402
+ // ECb' = (-0.299*R - 0.587*G + 0.886*B) / 1.772
+ // 2.5.3: Y = 219 * Ey' + 16
+ // Cr = 224 * ECr' + 128
+ // Cb = 224 * ECb' + 128
+
unsigned luma =
- ((red * 66 + green * 129 + blue * 25) >> 8) + 16;
+ ((red * 65 + green * 129 + blue * 25 + 128) >> 8) + 16;
dstY[x] = luma;
if ((x & 1) == 0 && (y & 1) == 0) {
unsigned U =
- ((-red * 38 - green * 74 + blue * 112) >> 8) + 128;
+ ((-red * 38 - green * 74 + blue * 112 + 128) >> 8) + 128;
unsigned V =
- ((red * 112 - green * 94 - blue * 18) >> 8) + 128;
+ ((red * 112 - green * 94 - blue * 18 + 128) >> 8) + 128;
dstU[x >> 1] = U;
dstV[x >> 1] = V;
@@ -541,6 +551,7 @@
srcVStride = buffer->height;
// convert stride from pixels to bytes
if (format != HAL_PIXEL_FORMAT_YV12 &&
+ format != HAL_PIXEL_FORMAT_YCrCb_420_SP &&
format != HAL_PIXEL_FORMAT_YCbCr_420_888) {
// TODO do we need to support other formats?
srcStride *= 4;
@@ -603,26 +614,24 @@
switch (format) {
case HAL_PIXEL_FORMAT_YV12: // YCrCb / YVU planar
- // convert to flex YUV
ycbcr.y = bits;
ycbcr.cr = (uint8_t *)bits + srcStride * srcVStride;
ycbcr.cb = (uint8_t *)ycbcr.cr + (srcStride >> 1) * (srcVStride >> 1);
ycbcr.chroma_step = 1;
- ycbcr.cstride = srcVStride >> 1;
- ycbcr.ystride = srcVStride;
+ ycbcr.cstride = srcStride >> 1;
+ ycbcr.ystride = srcStride;
ConvertFlexYUVToPlanar(dst, dstStride, dstVStride, &ycbcr, width, height);
break;
case HAL_PIXEL_FORMAT_YCrCb_420_SP: // YCrCb / YVU semiplanar, NV21
- // convert to flex YUV
ycbcr.y = bits;
ycbcr.cr = (uint8_t *)bits + srcStride * srcVStride;
ycbcr.cb = (uint8_t *)ycbcr.cr + 1;
ycbcr.chroma_step = 2;
- ycbcr.cstride = srcVStride;
- ycbcr.ystride = srcVStride;
+ ycbcr.cstride = srcStride;
+ ycbcr.ystride = srcStride;
ConvertFlexYUVToPlanar(dst, dstStride, dstVStride, &ycbcr, width, height);
break;
- case HAL_PIXEL_FORMAT_YCbCr_420_888:
+ case HAL_PIXEL_FORMAT_YCbCr_420_888: // YCbCr / YUV planar
ConvertFlexYUVToPlanar(dst, dstStride, dstVStride, &ycbcr, width, height);
break;
case HAL_PIXEL_FORMAT_RGBX_8888:
diff --git a/media/libstagefright/omx/tests/Android.bp b/media/libstagefright/omx/tests/Android.bp
index e480737..46428e3 100644
--- a/media/libstagefright/omx/tests/Android.bp
+++ b/media/libstagefright/omx/tests/Android.bp
@@ -11,6 +11,12 @@
"libutils",
"liblog",
"libstagefright_foundation",
+ "libcutils",
+ "libhidlbase",
+ "libhidlmemory",
+ "android.hidl.allocator@1.0",
+ "android.hidl.memory@1.0",
+ "android.hardware.media.omx@1.0",
],
include_dirs: [
diff --git a/media/libstagefright/omx/tests/OMXHarness.cpp b/media/libstagefright/omx/tests/OMXHarness.cpp
index 50bb0de..fcc44d8 100644
--- a/media/libstagefright/omx/tests/OMXHarness.cpp
+++ b/media/libstagefright/omx/tests/OMXHarness.cpp
@@ -38,13 +38,35 @@
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/SimpleDecodingSource.h>
+#include <media/OMXBuffer.h>
+#include <android/hardware/media/omx/1.0/IOmx.h>
+#include <media/omx/1.0/WOmx.h>
#define DEFAULT_TIMEOUT 500000
namespace android {
+/////////////////////////////////////////////////////////////////////
+
+struct Harness::CodecObserver : public BnOMXObserver {
+ CodecObserver(const sp<Harness> &harness, int32_t gen)
+ : mHarness(harness), mGeneration(gen) {}
+
+ void onMessages(const std::list<omx_message> &messages) override;
+
+private:
+ sp<Harness> mHarness;
+ int32_t mGeneration;
+};
+
+void Harness::CodecObserver::onMessages(const std::list<omx_message> &messages) {
+ mHarness->handleMessages(mGeneration, messages);
+}
+
+/////////////////////////////////////////////////////////////////////
+
Harness::Harness()
- : mInitCheck(NO_INIT) {
+ : mInitCheck(NO_INIT), mUseTreble(false) {
mInitCheck = initOMX();
}
@@ -56,26 +78,36 @@
}
status_t Harness::initOMX() {
- sp<IServiceManager> sm = defaultServiceManager();
- sp<IBinder> binder = sm->getService(String16("media.codec"));
- sp<IMediaCodecService> service = interface_cast<IMediaCodecService>(binder);
- mOMX = service->getOMX();
+ if (property_get_bool("persist.media.treble_omx", true)) {
+ using namespace ::android::hardware::media::omx::V1_0;
+ sp<IOmx> tOmx = IOmx::getService();
+ if (tOmx == nullptr) {
+ return NO_INIT;
+ }
+ mOMX = new utils::LWOmx(tOmx);
+ mUseTreble = true;
+ } else {
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<IBinder> binder = sm->getService(String16("media.codec"));
+ sp<IMediaCodecService> service = interface_cast<IMediaCodecService>(binder);
+ mOMX = service->getOMX();
+ mUseTreble = false;
+ }
return mOMX != 0 ? OK : NO_INIT;
}
-void Harness::onMessages(const std::list<omx_message> &messages) {
+void Harness::handleMessages(int32_t gen, const std::list<omx_message> &messages) {
Mutex::Autolock autoLock(mLock);
for (std::list<omx_message>::const_iterator it = messages.cbegin(); it != messages.cend(); ) {
mMessageQueue.push_back(*it++);
+ mLastMsgGeneration = gen;
}
mMessageAddedCondition.signal();
}
-status_t Harness::dequeueMessageForNode(
- IOMX::node_id node, omx_message *msg, int64_t timeoutUs) {
- return dequeueMessageForNodeIgnoringBuffers(
- node, NULL, NULL, msg, timeoutUs);
+status_t Harness::dequeueMessageForNode(omx_message *msg, int64_t timeoutUs) {
+ return dequeueMessageForNodeIgnoringBuffers(NULL, NULL, msg, timeoutUs);
}
// static
@@ -120,7 +152,6 @@
}
status_t Harness::dequeueMessageForNodeIgnoringBuffers(
- IOMX::node_id node,
Vector<Buffer> *inputBuffers,
Vector<Buffer> *outputBuffers,
omx_message *msg, int64_t timeoutUs) {
@@ -128,21 +159,22 @@
for (;;) {
Mutex::Autolock autoLock(mLock);
+ // Messages are queued in batches, if the last batch queued is
+ // from a node that already expired, discard those messages.
+ if (mLastMsgGeneration < mCurGeneration) {
+ mMessageQueue.clear();
+ }
List<omx_message>::iterator it = mMessageQueue.begin();
while (it != mMessageQueue.end()) {
- if ((*it).node == node) {
- if (handleBufferMessage(*it, inputBuffers, outputBuffers)) {
- it = mMessageQueue.erase(it);
- continue;
- }
-
- *msg = *it;
- mMessageQueue.erase(it);
-
- return OK;
+ if (handleBufferMessage(*it, inputBuffers, outputBuffers)) {
+ it = mMessageQueue.erase(it);
+ continue;
}
- ++it;
+ *msg = *it;
+ mMessageQueue.erase(it);
+
+ return OK;
}
status_t err = (timeoutUs < 0)
@@ -158,16 +190,15 @@
}
status_t Harness::getPortDefinition(
- IOMX::node_id node, OMX_U32 portIndex,
- OMX_PARAM_PORTDEFINITIONTYPE *def) {
+ OMX_U32 portIndex, OMX_PARAM_PORTDEFINITIONTYPE *def) {
def->nSize = sizeof(*def);
def->nVersion.s.nVersionMajor = 1;
def->nVersion.s.nVersionMinor = 0;
def->nVersion.s.nRevision = 0;
def->nVersion.s.nStep = 0;
def->nPortIndex = portIndex;
- return mOMX->getParameter(
- node, OMX_IndexParamPortDefinition, def, sizeof(*def));
+ return mOMXNode->getParameter(
+ OMX_IndexParamPortDefinition, def, sizeof(*def));
}
#define EXPECT(condition, info) \
@@ -179,24 +210,37 @@
EXPECT((err) == OK, info " failed")
status_t Harness::allocatePortBuffers(
- const sp<MemoryDealer> &dealer,
- IOMX::node_id node, OMX_U32 portIndex,
- Vector<Buffer> *buffers) {
+ OMX_U32 portIndex, Vector<Buffer> *buffers) {
buffers->clear();
OMX_PARAM_PORTDEFINITIONTYPE def;
- status_t err = getPortDefinition(node, portIndex, &def);
+ status_t err = getPortDefinition(portIndex, &def);
EXPECT_SUCCESS(err, "getPortDefinition");
for (OMX_U32 i = 0; i < def.nBufferCountActual; ++i) {
Buffer buffer;
- buffer.mMemory = dealer->allocate(def.nBufferSize);
buffer.mFlags = 0;
- CHECK(buffer.mMemory != NULL);
+ if (mUseTreble) {
+ bool success;
+ auto transStatus = mAllocator->allocate(def.nBufferSize,
+ [&success, &buffer](
+ bool s,
+ hidl_memory const& m) {
+ success = s;
+ buffer.mHidlMemory = m;
+ });
+ EXPECT(transStatus.isOk(),
+ "Cannot call allocator");
+ EXPECT(success,
+ "Cannot allocate memory");
+ err = mOMXNode->useBuffer(portIndex, buffer.mHidlMemory, &buffer.mID);
+ } else {
+ buffer.mMemory = mDealer->allocate(def.nBufferSize);
+ CHECK(buffer.mMemory != NULL);
+ err = mOMXNode->useBuffer(portIndex, buffer.mMemory, &buffer.mID);
+ }
- err = mOMX->allocateBufferWithBackup(
- node, portIndex, buffer.mMemory, &buffer.mID, buffer.mMemory->size());
- EXPECT_SUCCESS(err, "allocateBuffer");
+ EXPECT_SUCCESS(err, "useBuffer");
buffers->push(buffer);
}
@@ -204,7 +248,7 @@
return OK;
}
-status_t Harness::setRole(IOMX::node_id node, const char *role) {
+status_t Harness::setRole(const char *role) {
OMX_PARAM_COMPONENTROLETYPE params;
params.nSize = sizeof(params);
params.nVersion.s.nVersionMajor = 1;
@@ -214,31 +258,31 @@
strncpy((char *)params.cRole, role, OMX_MAX_STRINGNAME_SIZE - 1);
params.cRole[OMX_MAX_STRINGNAME_SIZE - 1] = '\0';
- return mOMX->setParameter(
- node, OMX_IndexParamStandardComponentRole,
+ return mOMXNode->setParameter(
+ OMX_IndexParamStandardComponentRole,
¶ms, sizeof(params));
}
struct NodeReaper {
- NodeReaper(const sp<Harness> &harness, IOMX::node_id node)
+ NodeReaper(const sp<Harness> &harness, const sp<IOMXNode> &omxNode)
: mHarness(harness),
- mNode(node) {
+ mOMXNode(omxNode) {
}
~NodeReaper() {
- if (mNode != 0) {
- mHarness->mOMX->freeNode(mNode);
- mNode = 0;
+ if (mOMXNode != 0) {
+ mOMXNode->freeNode();
+ mOMXNode = NULL;
}
}
void disarm() {
- mNode = 0;
+ mOMXNode = NULL;
}
private:
sp<Harness> mHarness;
- IOMX::node_id mNode;
+ sp<IOMXNode> mOMXNode;
NodeReaper(const NodeReaper &);
NodeReaper &operator=(const NodeReaper &);
@@ -263,24 +307,30 @@
return OK;
}
- sp<MemoryDealer> dealer = new MemoryDealer(16 * 1024 * 1024, "OMXHarness");
- IOMX::node_id node;
+ if (mUseTreble) {
+ mAllocator = IAllocator::getService("ashmem");
+ EXPECT(mAllocator != nullptr,
+ "Cannot obtain hidl AshmemAllocator");
+ } else {
+ mDealer = new MemoryDealer(16 * 1024 * 1024, "OMXHarness");
+ }
- status_t err =
- mOMX->allocateNode(componentName, this, NULL, &node);
+ sp<CodecObserver> observer = new CodecObserver(this, ++mCurGeneration);
+
+ status_t err = mOMX->allocateNode(componentName, observer, &mOMXNode);
EXPECT_SUCCESS(err, "allocateNode");
- NodeReaper reaper(this, node);
+ NodeReaper reaper(this, mOMXNode);
- err = setRole(node, componentRole);
+ err = setRole(componentRole);
EXPECT_SUCCESS(err, "setRole");
// Initiate transition Loaded->Idle
- err = mOMX->sendCommand(node, OMX_CommandStateSet, OMX_StateIdle);
+ err = mOMXNode->sendCommand(OMX_CommandStateSet, OMX_StateIdle);
EXPECT_SUCCESS(err, "sendCommand(go-to-Idle)");
omx_message msg;
- err = dequeueMessageForNode(node, &msg, DEFAULT_TIMEOUT);
+ err = dequeueMessageForNode(&msg, DEFAULT_TIMEOUT);
// Make sure node doesn't just transition to idle before we are done
// allocating all input and output buffers.
EXPECT(err == TIMED_OUT,
@@ -289,17 +339,17 @@
// Now allocate buffers.
Vector<Buffer> inputBuffers;
- err = allocatePortBuffers(dealer, node, 0, &inputBuffers);
+ err = allocatePortBuffers(0, &inputBuffers);
EXPECT_SUCCESS(err, "allocatePortBuffers(input)");
- err = dequeueMessageForNode(node, &msg, DEFAULT_TIMEOUT);
+ err = dequeueMessageForNode(&msg, DEFAULT_TIMEOUT);
CHECK_EQ(err, (status_t)TIMED_OUT);
Vector<Buffer> outputBuffers;
- err = allocatePortBuffers(dealer, node, 1, &outputBuffers);
+ err = allocatePortBuffers(1, &outputBuffers);
EXPECT_SUCCESS(err, "allocatePortBuffers(output)");
- err = dequeueMessageForNode(node, &msg, DEFAULT_TIMEOUT);
+ err = dequeueMessageForNode(&msg, DEFAULT_TIMEOUT);
EXPECT(err == OK
&& msg.type == omx_message::EVENT
&& msg.u.event_data.event == OMX_EventCmdComplete
@@ -309,10 +359,10 @@
"after all input and output buffers were allocated.");
// Initiate transition Idle->Executing
- err = mOMX->sendCommand(node, OMX_CommandStateSet, OMX_StateExecuting);
+ err = mOMXNode->sendCommand(OMX_CommandStateSet, OMX_StateExecuting);
EXPECT_SUCCESS(err, "sendCommand(go-to-Executing)");
- err = dequeueMessageForNode(node, &msg, DEFAULT_TIMEOUT);
+ err = dequeueMessageForNode(&msg, DEFAULT_TIMEOUT);
EXPECT(err == OK
&& msg.type == omx_message::EVENT
&& msg.u.event_data.event == OMX_EventCmdComplete
@@ -322,17 +372,17 @@
"executing state.");
for (size_t i = 0; i < outputBuffers.size(); ++i) {
- err = mOMX->fillBuffer(node, outputBuffers[i].mID);
+ err = mOMXNode->fillBuffer(outputBuffers[i].mID, OMXBuffer::sPreset);
EXPECT_SUCCESS(err, "fillBuffer");
outputBuffers.editItemAt(i).mFlags |= kBufferBusy;
}
- err = mOMX->sendCommand(node, OMX_CommandFlush, 1);
+ err = mOMXNode->sendCommand(OMX_CommandFlush, 1);
EXPECT_SUCCESS(err, "sendCommand(flush-output-port)");
err = dequeueMessageForNodeIgnoringBuffers(
- node, &inputBuffers, &outputBuffers, &msg, DEFAULT_TIMEOUT);
+ &inputBuffers, &outputBuffers, &msg, DEFAULT_TIMEOUT);
EXPECT(err == OK
&& msg.type == omx_message::EVENT
&& msg.u.event_data.event == OMX_EventCmdComplete
@@ -347,18 +397,18 @@
}
for (size_t i = 0; i < outputBuffers.size(); ++i) {
- err = mOMX->fillBuffer(node, outputBuffers[i].mID);
+ err = mOMXNode->fillBuffer(outputBuffers[i].mID, OMXBuffer::sPreset);
EXPECT_SUCCESS(err, "fillBuffer");
outputBuffers.editItemAt(i).mFlags |= kBufferBusy;
}
// Initiate transition Executing->Idle
- err = mOMX->sendCommand(node, OMX_CommandStateSet, OMX_StateIdle);
+ err = mOMXNode->sendCommand(OMX_CommandStateSet, OMX_StateIdle);
EXPECT_SUCCESS(err, "sendCommand(go-to-Idle)");
err = dequeueMessageForNodeIgnoringBuffers(
- node, &inputBuffers, &outputBuffers, &msg, DEFAULT_TIMEOUT);
+ &inputBuffers, &outputBuffers, &msg, DEFAULT_TIMEOUT);
EXPECT(err == OK
&& msg.type == omx_message::EVENT
&& msg.u.event_data.event == OMX_EventCmdComplete
@@ -382,28 +432,28 @@
}
// Initiate transition Idle->Loaded
- err = mOMX->sendCommand(node, OMX_CommandStateSet, OMX_StateLoaded);
+ err = mOMXNode->sendCommand(OMX_CommandStateSet, OMX_StateLoaded);
EXPECT_SUCCESS(err, "sendCommand(go-to-Loaded)");
// Make sure node doesn't just transition to loaded before we are done
// freeing all input and output buffers.
- err = dequeueMessageForNode(node, &msg, DEFAULT_TIMEOUT);
+ err = dequeueMessageForNode(&msg, DEFAULT_TIMEOUT);
CHECK_EQ(err, (status_t)TIMED_OUT);
for (size_t i = 0; i < inputBuffers.size(); ++i) {
- err = mOMX->freeBuffer(node, 0, inputBuffers[i].mID);
+ err = mOMXNode->freeBuffer(0, inputBuffers[i].mID);
EXPECT_SUCCESS(err, "freeBuffer");
}
- err = dequeueMessageForNode(node, &msg, DEFAULT_TIMEOUT);
+ err = dequeueMessageForNode(&msg, DEFAULT_TIMEOUT);
CHECK_EQ(err, (status_t)TIMED_OUT);
for (size_t i = 0; i < outputBuffers.size(); ++i) {
- err = mOMX->freeBuffer(node, 1, outputBuffers[i].mID);
+ err = mOMXNode->freeBuffer(1, outputBuffers[i].mID);
EXPECT_SUCCESS(err, "freeBuffer");
}
- err = dequeueMessageForNode(node, &msg, DEFAULT_TIMEOUT);
+ err = dequeueMessageForNode(&msg, DEFAULT_TIMEOUT);
EXPECT(err == OK
&& msg.type == omx_message::EVENT
&& msg.u.event_data.event == OMX_EventCmdComplete
@@ -412,12 +462,12 @@
"Component did not properly transition to from idle to "
"loaded state after freeing all input and output buffers.");
- err = mOMX->freeNode(node);
+ err = mOMXNode->freeNode();
EXPECT_SUCCESS(err, "freeNode");
reaper.disarm();
- node = 0;
+ mOMXNode = NULL;
return OK;
}
@@ -784,7 +834,6 @@
using namespace android;
android::ProcessState::self()->startThreadPool();
- DataSource::RegisterDefaultSniffers();
const char *me = argv[0];
diff --git a/media/libstagefright/omx/tests/OMXHarness.h b/media/libstagefright/omx/tests/OMXHarness.h
index 1ebf3aa..4fc0f79 100644
--- a/media/libstagefright/omx/tests/OMXHarness.h
+++ b/media/libstagefright/omx/tests/OMXHarness.h
@@ -23,19 +23,25 @@
#include <utils/Vector.h>
#include <utils/threads.h>
+#include <binder/MemoryDealer.h>
+#include <android/hidl/allocator/1.0/IAllocator.h>
+#include <android/hidl/memory/1.0/IMemory.h>
#include <OMX_Component.h>
namespace android {
class MemoryDealer;
-struct Harness : public BnOMXObserver {
+struct Harness : public RefBase {
+ typedef hidl::memory::V1_0::IMemory TMemory;
+ typedef hardware::hidl_memory hidl_memory;
enum BufferFlags {
kBufferBusy = 1
};
struct Buffer {
IOMX::buffer_id mID;
sp<IMemory> mMemory;
+ hidl_memory mHidlMemory;
uint32_t mFlags;
};
@@ -43,25 +49,20 @@
status_t initCheck() const;
- status_t dequeueMessageForNode(
- IOMX::node_id node, omx_message *msg, int64_t timeoutUs = -1);
+ status_t dequeueMessageForNode(omx_message *msg, int64_t timeoutUs = -1);
status_t dequeueMessageForNodeIgnoringBuffers(
- IOMX::node_id node,
Vector<Buffer> *inputBuffers,
Vector<Buffer> *outputBuffers,
omx_message *msg, int64_t timeoutUs = -1);
status_t getPortDefinition(
- IOMX::node_id node, OMX_U32 portIndex,
- OMX_PARAM_PORTDEFINITIONTYPE *def);
+ OMX_U32 portIndex, OMX_PARAM_PORTDEFINITIONTYPE *def);
status_t allocatePortBuffers(
- const sp<MemoryDealer> &dealer,
- IOMX::node_id node, OMX_U32 portIndex,
- Vector<Buffer> *buffers);
+ OMX_U32 portIndex, Vector<Buffer> *buffers);
- status_t setRole(IOMX::node_id node, const char *role);
+ status_t setRole(const char *role);
status_t testStateTransitions(
const char *componentName, const char *componentRole);
@@ -74,20 +75,27 @@
status_t testAll();
- virtual void onMessages(const std::list<omx_message> &messages);
-
protected:
virtual ~Harness();
private:
+ typedef hidl::allocator::V1_0::IAllocator IAllocator;
+
friend struct NodeReaper;
+ struct CodecObserver;
Mutex mLock;
status_t mInitCheck;
sp<IOMX> mOMX;
+ sp<IOMXNode> mOMXNode;
List<omx_message> mMessageQueue;
Condition mMessageAddedCondition;
+ int32_t mLastMsgGeneration;
+ int32_t mCurGeneration;
+ bool mUseTreble;
+ sp<MemoryDealer> mDealer;
+ sp<IAllocator> mAllocator;
status_t initOMX();
@@ -96,6 +104,8 @@
Vector<Buffer> *inputBuffers,
Vector<Buffer> *outputBuffers);
+ void handleMessages(int32_t gen, const std::list<omx_message> &messages);
+
Harness(const Harness &);
Harness &operator=(const Harness &);
};
diff --git a/media/libstagefright/rtsp/APacketSource.cpp b/media/libstagefright/rtsp/APacketSource.cpp
index cfafaa7..8ba9e02 100644
--- a/media/libstagefright/rtsp/APacketSource.cpp
+++ b/media/libstagefright/rtsp/APacketSource.cpp
@@ -418,7 +418,7 @@
if (sessionDesc->getDurationUs(&durationUs)) {
mFormat->setInt64(kKeyDuration, durationUs);
} else {
- mFormat->setInt64(kKeyDuration, 60 * 60 * 1000000ll);
+ mFormat->setInt64(kKeyDuration, -1ll);
}
mInitCheck = OK;
diff --git a/media/libstagefright/rtsp/ARTPSource.cpp b/media/libstagefright/rtsp/ARTPSource.cpp
index 576a0a4..4827cd2 100644
--- a/media/libstagefright/rtsp/ARTPSource.cpp
+++ b/media/libstagefright/rtsp/ARTPSource.cpp
@@ -43,7 +43,10 @@
const sp<AMessage> ¬ify)
: mID(id),
mHighestSeqNumber(0),
+ mPrevExpected(0),
+ mBaseSeqNumber(0),
mNumBuffersReceived(0),
+ mPrevNumBuffersReceived(0),
mLastNTPTime(0),
mLastNTPTimeUpdateUs(0),
mIssueFIRRequests(false),
@@ -107,6 +110,7 @@
if (mNumBuffersReceived++ == 0) {
mHighestSeqNumber = seqNum;
+ mBaseSeqNumber = seqNum;
mQueue.push_back(buffer);
return true;
}
@@ -226,6 +230,22 @@
return;
}
+ uint8_t fraction = 0;
+
+ // According to appendix A.3 in RFC 3550
+ uint32_t expected = mHighestSeqNumber - mBaseSeqNumber + 1;
+ int64_t intervalExpected = expected - mPrevExpected;
+ int64_t intervalReceived = mNumBuffersReceived - mPrevNumBuffersReceived;
+ int64_t intervalPacketLost = intervalExpected - intervalReceived;
+
+ if (intervalExpected > 0 && intervalPacketLost > 0) {
+ fraction = (intervalPacketLost << 8) / intervalExpected;
+ }
+
+ mPrevExpected = expected;
+ mPrevNumBuffersReceived = mNumBuffersReceived;
+ int32_t cumulativePacketLost = (int32_t)expected - mNumBuffersReceived;
+
uint8_t *data = buffer->data() + buffer->size();
data[0] = 0x80 | 1;
@@ -242,11 +262,11 @@
data[10] = (mID >> 8) & 0xff;
data[11] = mID & 0xff;
- data[12] = 0x00; // fraction lost
+ data[12] = fraction; // fraction lost
- data[13] = 0x00; // cumulative lost
- data[14] = 0x00;
- data[15] = 0x00;
+ data[13] = cumulativePacketLost >> 16; // cumulative lost
+ data[14] = (cumulativePacketLost >> 8) & 0xff;
+ data[15] = cumulativePacketLost & 0xff;
data[16] = mHighestSeqNumber >> 24;
data[17] = (mHighestSeqNumber >> 16) & 0xff;
diff --git a/media/libstagefright/rtsp/ARTPSource.h b/media/libstagefright/rtsp/ARTPSource.h
index b70f94e..f44e83f 100644
--- a/media/libstagefright/rtsp/ARTPSource.h
+++ b/media/libstagefright/rtsp/ARTPSource.h
@@ -49,7 +49,10 @@
private:
uint32_t mID;
uint32_t mHighestSeqNumber;
+ uint32_t mPrevExpected;
+ uint32_t mBaseSeqNumber;
int32_t mNumBuffersReceived;
+ int32_t mPrevNumBuffersReceived;
List<sp<ABuffer> > mQueue;
sp<ARTPAssembler> mAssembler;
diff --git a/media/libstagefright/rtsp/ASessionDescription.cpp b/media/libstagefright/rtsp/ASessionDescription.cpp
index 8b0331a..325084c 100644
--- a/media/libstagefright/rtsp/ASessionDescription.cpp
+++ b/media/libstagefright/rtsp/ASessionDescription.cpp
@@ -17,7 +17,6 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "ASessionDescription"
#include <utils/Log.h>
-#include <cutils/log.h>
#include "ASessionDescription.h"
@@ -212,7 +211,7 @@
*PT = x;
- char key[32];
+ char key[20];
snprintf(key, sizeof(key), "a=rtpmap:%lu", x);
if (findAttribute(index, key, desc)) {
snprintf(key, sizeof(key), "a=fmtp:%lu", x);
@@ -231,11 +230,8 @@
*width = 0;
*height = 0;
- char key[33];
+ char key[20];
snprintf(key, sizeof(key), "a=framesize:%lu", PT);
- if (PT > 9999999) {
- android_errorWriteLog(0x534e4554, "25747670");
- }
AString value;
if (!findAttribute(index, key, &value)) {
return false;
diff --git a/media/libstagefright/rtsp/Android.bp b/media/libstagefright/rtsp/Android.bp
index b3b8334..debd07e 100644
--- a/media/libstagefright/rtsp/Android.bp
+++ b/media/libstagefright/rtsp/Android.bp
@@ -45,6 +45,10 @@
"signed-integer-overflow",
"unsigned-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
}
@@ -84,5 +88,9 @@
misc_undefined: [
"signed-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
}
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index 76e2e6e..5505aa4 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -25,6 +25,7 @@
#endif
#include <utils/Log.h>
+#include <cutils/properties.h> // for property_get
#include "APacketSource.h"
#include "ARTPConnection.h"
@@ -807,11 +808,7 @@
result = UNKNOWN_ERROR;
} else {
parsePlayResponse(response);
-
- sp<AMessage> timeout = new AMessage('tiou', this);
- mCheckTimeoutGeneration++;
- timeout->setInt32("tioucheck", mCheckTimeoutGeneration);
- timeout->post(kStartupTimeoutUs);
+ postTimeout();
}
}
@@ -1153,10 +1150,7 @@
// Post new timeout in order to make sure to use
// fake timestamps if no new Sender Reports arrive
- sp<AMessage> timeout = new AMessage('tiou', this);
- mCheckTimeoutGeneration++;
- timeout->setInt32("tioucheck", mCheckTimeoutGeneration);
- timeout->post(kStartupTimeoutUs);
+ postTimeout();
}
}
@@ -1248,10 +1242,7 @@
// Start new timeoutgeneration to avoid getting timeout
// before PLAY response arrive
- sp<AMessage> timeout = new AMessage('tiou', this);
- mCheckTimeoutGeneration++;
- timeout->setInt32("tioucheck", mCheckTimeoutGeneration);
- timeout->post(kStartupTimeoutUs);
+ postTimeout();
int64_t timeUs;
CHECK(msg->findInt64("time", &timeUs));
@@ -1305,10 +1296,7 @@
// Post new timeout in order to make sure to use
// fake timestamps if no new Sender Reports arrive
- sp<AMessage> timeout = new AMessage('tiou', this);
- mCheckTimeoutGeneration++;
- timeout->setInt32("tioucheck", mCheckTimeoutGeneration);
- timeout->post(kStartupTimeoutUs);
+ postTimeout();
ssize_t i = response->mHeaders.indexOfKey("rtp-info");
CHECK_GE(i, 0);
@@ -1964,6 +1952,16 @@
msg->post();
}
+ void postTimeout() {
+ sp<AMessage> timeout = new AMessage('tiou', this);
+ mCheckTimeoutGeneration++;
+ timeout->setInt32("tioucheck", mCheckTimeoutGeneration);
+
+ int64_t startupTimeoutUs;
+ startupTimeoutUs = property_get_int64("media.rtsp.timeout-us", kStartupTimeoutUs);
+ timeout->post(startupTimeoutUs);
+ }
+
DISALLOW_EVIL_CONSTRUCTORS(MyHandler);
};
diff --git a/media/libstagefright/rtsp/rtp_test.cpp b/media/libstagefright/rtsp/rtp_test.cpp
index 24f529b..e612a8d 100644
--- a/media/libstagefright/rtsp/rtp_test.cpp
+++ b/media/libstagefright/rtsp/rtp_test.cpp
@@ -37,8 +37,6 @@
int main(int argc, char **argv) {
android::ProcessState::self()->startThreadPool();
- DataSource::RegisterDefaultSniffers();
-
const char *rtpFilename = NULL;
const char *rtcpFilename = NULL;
diff --git a/media/libstagefright/tests/Android.bp b/media/libstagefright/tests/Android.bp
index f3362bb..35119c2 100644
--- a/media/libstagefright/tests/Android.bp
+++ b/media/libstagefright/tests/Android.bp
@@ -40,33 +40,6 @@
}
cc_test {
- name: "Utils_test",
-
- srcs: ["Utils_test.cpp"],
-
- shared_libs: [
- "libcutils",
- "liblog",
- "libmedia",
- "libstagefright",
- "libstagefright_foundation",
- "libstagefright_omx",
- ],
-
- include_dirs: [
- "frameworks/av/include",
- "frameworks/av/media/libstagefright",
- "frameworks/av/media/libstagefright/include",
- "frameworks/native/include/media/openmax",
- ],
-
- cflags: [
- "-Werror",
- "-Wall",
- ],
-}
-
-cc_test {
name: "MediaCodecListOverrides_test",
srcs: ["MediaCodecListOverrides_test.cpp"],
diff --git a/media/libstagefright/tests/SurfaceMediaSource_test.cpp b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
index d419133..7c464ff 100644
--- a/media/libstagefright/tests/SurfaceMediaSource_test.cpp
+++ b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
@@ -466,7 +466,7 @@
mr->setVideoSource(videoSource);
mr->setOutputFormat(outputFormat);
mr->setVideoEncoder(videoEncoder);
- mr->setOutputFile(fd, 0, 0);
+ mr->setOutputFile(fd);
mr->setVideoSize(width, height);
mr->setVideoFrameRate(fps);
mr->prepare();
@@ -510,7 +510,7 @@
// Fill the buffer with the a checkerboard pattern
uint8_t* img = NULL;
- sp<GraphicBuffer> buf(new GraphicBuffer(anb, false));
+ sp<GraphicBuffer> buf(GraphicBuffer::from(anb));
buf->lock(GRALLOC_USAGE_SW_WRITE_OFTEN, (void**)(&img));
SurfaceMediaSourceTest::fillYV12Buffer(img, width, height, buf->getStride());
buf->unlock();
@@ -527,7 +527,7 @@
ASSERT_TRUE(anb != NULL);
// We do not fill the buffer in. Just queue it back.
- sp<GraphicBuffer> buf(new GraphicBuffer(anb, false));
+ sp<GraphicBuffer> buf(GraphicBuffer::from(anb));
ASSERT_EQ(NO_ERROR, mANW->queueBuffer(mANW.get(), buf->getNativeBuffer(),
-1));
}
diff --git a/media/libstagefright/tests/Utils_test.cpp b/media/libstagefright/tests/Utils_test.cpp
deleted file mode 100644
index d736501..0000000
--- a/media/libstagefright/tests/Utils_test.cpp
+++ /dev/null
@@ -1,284 +0,0 @@
-/*
- * Copyright 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "Utils_test"
-
-#include <gtest/gtest.h>
-#include <utils/String8.h>
-#include <utils/Errors.h>
-#include <fcntl.h>
-#include <unistd.h>
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AStringUtils.h>
-#include <media/stagefright/foundation/AUtils.h>
-#include <media/stagefright/Utils.h>
-
-namespace android {
-
-class UtilsTest : public ::testing::Test {
-};
-
-TEST_F(UtilsTest, TestStringUtils) {
- ASSERT_EQ(AStringUtils::Compare("Audio", "AudioExt", 5, false), 0);
- ASSERT_EQ(AStringUtils::Compare("Audio", "audiOExt", 5, true), 0);
- ASSERT_NE(AStringUtils::Compare("Audio", "audioExt", 5, false), 0);
- ASSERT_NE(AStringUtils::Compare("Audio", "AudiOExt", 5, false), 0);
-
- ASSERT_LT(AStringUtils::Compare("Audio", "AudioExt", 7, false), 0);
- ASSERT_LT(AStringUtils::Compare("Audio", "audiOExt", 7, true), 0);
-
- ASSERT_GT(AStringUtils::Compare("AudioExt", "Audio", 7, false), 0);
- ASSERT_GT(AStringUtils::Compare("audiOext", "Audio", 7, true), 0);
-
- ASSERT_LT(AStringUtils::Compare("Audio", "Video", 5, false), 0);
- ASSERT_LT(AStringUtils::Compare("Audio1", "Audio2", 6, false), 0);
- ASSERT_LT(AStringUtils::Compare("audio", "VIDEO", 5, true), 0);
- ASSERT_LT(AStringUtils::Compare("audio1", "AUDIO2", 6, true), 0);
-
- ASSERT_GT(AStringUtils::Compare("Video", "Audio", 5, false), 0);
- ASSERT_GT(AStringUtils::Compare("Audio2", "Audio1", 6, false), 0);
- ASSERT_GT(AStringUtils::Compare("VIDEO", "audio", 5, true), 0);
- ASSERT_GT(AStringUtils::Compare("AUDIO2", "audio1", 6, true), 0);
-
- ASSERT_TRUE(AStringUtils::MatchesGlob("AudioA", 5, "AudioB", 5, false));
- ASSERT_FALSE(AStringUtils::MatchesGlob("AudioA", 6, "AudioA", 5, false));
- ASSERT_FALSE(AStringUtils::MatchesGlob("AudioA", 5, "AudioA", 6, false));
- ASSERT_FALSE(AStringUtils::MatchesGlob("AudioA", 5, "audiOB", 5, false));
- ASSERT_TRUE(AStringUtils::MatchesGlob("AudioA", 5, "audiOB", 5, true));
- ASSERT_FALSE(AStringUtils::MatchesGlob("AudioA", 6, "AudioA", 5, true));
- ASSERT_FALSE(AStringUtils::MatchesGlob("AudioA", 5, "AudioA", 6, true));
-
- ASSERT_TRUE(AStringUtils::MatchesGlob("*1", 1, "String8", 6, true));
- ASSERT_TRUE(AStringUtils::MatchesGlob("*1", 1, "String8", 6, false));
- ASSERT_TRUE(AStringUtils::MatchesGlob("*1", 1, "String8", 0, true));
- ASSERT_TRUE(AStringUtils::MatchesGlob("*1", 1, "String8", 0, false));
-
- ASSERT_TRUE(AStringUtils::MatchesGlob("*ring1", 5, "String8", 6, false));
- ASSERT_TRUE(AStringUtils::MatchesGlob("*ring2", 5, "STRING8", 6, true));
- ASSERT_FALSE(AStringUtils::MatchesGlob("*ring4", 5, "StRing8", 6, false));
- ASSERT_FALSE(AStringUtils::MatchesGlob("*ring5", 5, "StrinG8", 6, false));
- ASSERT_FALSE(AStringUtils::MatchesGlob("*ring8", 5, "String8", 7, false));
- ASSERT_FALSE(AStringUtils::MatchesGlob("*ring8", 5, "String8", 7, true));
-
- ASSERT_TRUE(AStringUtils::MatchesGlob("Str*1", 4, "String8", 6, false));
- ASSERT_TRUE(AStringUtils::MatchesGlob("Str*2", 4, "STRING8", 6, true));
- ASSERT_FALSE(AStringUtils::MatchesGlob("Str*3", 4, "string8", 6, false));
- ASSERT_FALSE(AStringUtils::MatchesGlob("Str*4", 4, "StRing8", 6, false));
- ASSERT_FALSE(AStringUtils::MatchesGlob("Str*5", 4, "AString8", 7, false));
- ASSERT_FALSE(AStringUtils::MatchesGlob("Str*6", 4, "AString8", 7, true));
-
- ASSERT_TRUE(AStringUtils::MatchesGlob("Str*ng1", 6, "String8", 6, false));
- ASSERT_FALSE(AStringUtils::MatchesGlob("Str*ng2", 6, "string8", 6, false));
- ASSERT_FALSE(AStringUtils::MatchesGlob("Str*ng3", 6, "StRing8", 6, false));
- ASSERT_FALSE(AStringUtils::MatchesGlob("Str*ng4", 6, "StriNg8", 6, false));
- ASSERT_FALSE(AStringUtils::MatchesGlob("Str*ng5", 6, "StrinG8", 6, false));
- ASSERT_TRUE(AStringUtils::MatchesGlob("Str*ng6", 6, "STRING8", 6, true));
- ASSERT_FALSE(AStringUtils::MatchesGlob("Str*ng8", 6, "AString8", 7, false));
- ASSERT_FALSE(AStringUtils::MatchesGlob("Str*ng1", 6, "String16", 7, false));
- ASSERT_TRUE(AStringUtils::MatchesGlob("Str*ing9", 7, "String8", 6, false));
- ASSERT_FALSE(AStringUtils::MatchesGlob("Str*ringA", 8, "String8", 6, false));
- ASSERT_FALSE(AStringUtils::MatchesGlob("Str*ng8", 6, "AString8", 7, true));
- ASSERT_FALSE(AStringUtils::MatchesGlob("Str*ng1", 6, "String16", 7, true));
- ASSERT_TRUE(AStringUtils::MatchesGlob("Str*ing9", 7, "STRING8", 6, true));
- ASSERT_FALSE(AStringUtils::MatchesGlob("Str*ringA", 8, "String8", 6, true));
-
- ASSERT_TRUE(AStringUtils::MatchesGlob("*str*str1", 8, "bestrestroom", 9, false));
- ASSERT_TRUE(AStringUtils::MatchesGlob("*str*str1", 8, "bestrestrestroom", 13, false));
- ASSERT_FALSE(AStringUtils::MatchesGlob("*str*stro", 8, "bestrestrestroom", 14, false));
- ASSERT_TRUE(AStringUtils::MatchesGlob("*str*str*1", 9, "bestrestrestroom", 14, false));
- ASSERT_TRUE(AStringUtils::MatchesGlob("*str*str1", 8, "beSTReSTRoom", 9, true));
- ASSERT_TRUE(AStringUtils::MatchesGlob("*str*str1", 8, "beSTRestreSTRoom", 13, true));
- ASSERT_FALSE(AStringUtils::MatchesGlob("*str*stro", 8, "bestreSTReSTRoom", 14, true));
- ASSERT_TRUE(AStringUtils::MatchesGlob("*str*str*1", 9, "bestreSTReSTRoom", 14, true));
-}
-
-TEST_F(UtilsTest, TestDebug) {
-#define LVL(x) (ADebug::Level)(x)
- ASSERT_EQ(ADebug::GetLevelFromSettingsString("video", "", LVL(5)), LVL(5));
- ASSERT_EQ(ADebug::GetLevelFromSettingsString("video", " \t \n ", LVL(2)), LVL(2));
- ASSERT_EQ(ADebug::GetLevelFromSettingsString("video", "3", LVL(5)), LVL(3));
- ASSERT_EQ(ADebug::GetLevelFromSettingsString("video", "3:*deo", LVL(5)), LVL(3));
- ASSERT_EQ(ADebug::GetLevelFromSettingsString(
- "video", "\t\n 3 \t\n:\t\n video \t\n", LVL(5)), LVL(3));
- ASSERT_EQ(ADebug::GetLevelFromSettingsString("video", "3:*deo,2:vid*", LVL(5)), LVL(2));
- ASSERT_EQ(ADebug::GetLevelFromSettingsString(
- "avideo", "\t\n 3 \t\n:\t\n avideo \t\n,\t\n 2 \t\n:\t\n video \t\n", LVL(5)), LVL(3));
- ASSERT_EQ(ADebug::GetLevelFromSettingsString(
- "audio.omx", "4:*omx,3:*d*o*,2:audio*", LVL(5)), LVL(2));
- ASSERT_EQ(ADebug::GetLevelFromSettingsString(
- "video.omx", "4:*omx,3:*d*o*,2:audio*", LVL(5)), LVL(3));
- ASSERT_EQ(ADebug::GetLevelFromSettingsString("video", "4:*omx,3:*d*o*,2:audio*", LVL(5)), LVL(3));
- ASSERT_EQ(ADebug::GetLevelFromSettingsString("omx", "4:*omx,3:*d*o*,2:audio*", LVL(5)), LVL(4));
-#undef LVL
-}
-
-TEST_F(UtilsTest, TestFourCC) {
- ASSERT_EQ(FOURCC('s', 't', 'm' , 'u'), 'stmu');
-}
-
-TEST_F(UtilsTest, TestMathTemplates) {
- ASSERT_EQ(divRound(-10, -4), 3);
- ASSERT_EQ(divRound(-11, -4), 3);
- ASSERT_EQ(divRound(-12, -4), 3);
- ASSERT_EQ(divRound(-13, -4), 3);
- ASSERT_EQ(divRound(-14, -4), 4);
-
- ASSERT_EQ(divRound(10, -4), -3);
- ASSERT_EQ(divRound(11, -4), -3);
- ASSERT_EQ(divRound(12, -4), -3);
- ASSERT_EQ(divRound(13, -4), -3);
- ASSERT_EQ(divRound(14, -4), -4);
-
- ASSERT_EQ(divRound(-10, 4), -3);
- ASSERT_EQ(divRound(-11, 4), -3);
- ASSERT_EQ(divRound(-12, 4), -3);
- ASSERT_EQ(divRound(-13, 4), -3);
- ASSERT_EQ(divRound(-14, 4), -4);
-
- ASSERT_EQ(divRound(10, 4), 3);
- ASSERT_EQ(divRound(11, 4), 3);
- ASSERT_EQ(divRound(12, 4), 3);
- ASSERT_EQ(divRound(13, 4), 3);
- ASSERT_EQ(divRound(14, 4), 4);
-
- ASSERT_EQ(divUp(-11, -4), 3);
- ASSERT_EQ(divUp(-12, -4), 3);
- ASSERT_EQ(divUp(-13, -4), 4);
-
- ASSERT_EQ(divUp(11, -4), -2);
- ASSERT_EQ(divUp(12, -4), -3);
- ASSERT_EQ(divUp(13, -4), -3);
-
- ASSERT_EQ(divUp(-11, 4), -2);
- ASSERT_EQ(divUp(-12, 4), -3);
- ASSERT_EQ(divUp(-13, 4), -3);
-
- ASSERT_EQ(divUp(11, 4), 3);
- ASSERT_EQ(divUp(12, 4), 3);
- ASSERT_EQ(divUp(13, 4), 4);
-
- ASSERT_EQ(align(11, 4), 12);
- ASSERT_EQ(align(12, 4), 12);
- ASSERT_EQ(align(13, 4), 16);
- ASSERT_EQ(align(11, 8), 16);
- ASSERT_EQ(align(11, 2), 12);
- ASSERT_EQ(align(11, 1), 11);
-
- ASSERT_EQ(abs(5L), 5L);
- ASSERT_EQ(abs(-25), 25);
-
- ASSERT_EQ(min(5.6f, 6.0f), 5.6f);
- ASSERT_EQ(min(6.0f, 5.6f), 5.6f);
- ASSERT_EQ(min(-4.3, 8.6), -4.3);
- ASSERT_EQ(min(8.6, -4.3), -4.3);
-
- ASSERT_EQ(max(5.6f, 6.0f), 6.0f);
- ASSERT_EQ(max(6.0f, 5.6f), 6.0f);
- ASSERT_EQ(max(-4.3, 8.6), 8.6);
- ASSERT_EQ(max(8.6, -4.3), 8.6);
-
- ASSERT_FALSE(isInRange(-43, 86u, -44));
- ASSERT_TRUE(isInRange(-43, 87u, -43));
- ASSERT_TRUE(isInRange(-43, 88u, -1));
- ASSERT_TRUE(isInRange(-43, 89u, 0));
- ASSERT_TRUE(isInRange(-43, 90u, 46));
- ASSERT_FALSE(isInRange(-43, 91u, 48));
- ASSERT_FALSE(isInRange(-43, 92u, 50));
-
- ASSERT_FALSE(isInRange(43, 86u, 42));
- ASSERT_TRUE(isInRange(43, 87u, 43));
- ASSERT_TRUE(isInRange(43, 88u, 44));
- ASSERT_TRUE(isInRange(43, 89u, 131));
- ASSERT_FALSE(isInRange(43, 90u, 133));
- ASSERT_FALSE(isInRange(43, 91u, 135));
-
- ASSERT_FALSE(isInRange(43u, 86u, 42u));
- ASSERT_TRUE(isInRange(43u, 85u, 43u));
- ASSERT_TRUE(isInRange(43u, 84u, 44u));
- ASSERT_TRUE(isInRange(43u, 83u, 125u));
- ASSERT_FALSE(isInRange(43u, 82u, 125u));
- ASSERT_FALSE(isInRange(43u, 81u, 125u));
-
- ASSERT_FALSE(isInRange(-43, ~0u, 43));
- ASSERT_FALSE(isInRange(-43, ~0u, 44));
- ASSERT_FALSE(isInRange(-43, ~0u, ~0));
- ASSERT_FALSE(isInRange(-43, ~0u, 41));
- ASSERT_FALSE(isInRange(-43, ~0u, 40));
-
- ASSERT_FALSE(isInRange(43u, ~0u, 43u));
- ASSERT_FALSE(isInRange(43u, ~0u, 41u));
- ASSERT_FALSE(isInRange(43u, ~0u, 40u));
- ASSERT_FALSE(isInRange(43u, ~0u, ~0u));
-
- ASSERT_FALSE(isInRange(-43, 86u, -44, 0u));
- ASSERT_FALSE(isInRange(-43, 86u, -44, 1u));
- ASSERT_FALSE(isInRange(-43, 86u, -44, 2u));
- ASSERT_FALSE(isInRange(-43, 86u, -44, ~0u));
- ASSERT_TRUE(isInRange(-43, 87u, -43, 0u));
- ASSERT_TRUE(isInRange(-43, 87u, -43, 1u));
- ASSERT_TRUE(isInRange(-43, 87u, -43, 86u));
- ASSERT_TRUE(isInRange(-43, 87u, -43, 87u));
- ASSERT_FALSE(isInRange(-43, 87u, -43, 88u));
- ASSERT_FALSE(isInRange(-43, 87u, -43, ~0u));
- ASSERT_TRUE(isInRange(-43, 88u, -1, 0u));
- ASSERT_TRUE(isInRange(-43, 88u, -1, 45u));
- ASSERT_TRUE(isInRange(-43, 88u, -1, 46u));
- ASSERT_FALSE(isInRange(-43, 88u, -1, 47u));
- ASSERT_FALSE(isInRange(-43, 88u, -1, ~3u));
- ASSERT_TRUE(isInRange(-43, 90u, 46, 0u));
- ASSERT_TRUE(isInRange(-43, 90u, 46, 1u));
- ASSERT_FALSE(isInRange(-43, 90u, 46, 2u));
- ASSERT_FALSE(isInRange(-43, 91u, 48, 0u));
- ASSERT_FALSE(isInRange(-43, 91u, 48, 2u));
- ASSERT_FALSE(isInRange(-43, 91u, 48, ~6u));
- ASSERT_FALSE(isInRange(-43, 92u, 50, 0u));
- ASSERT_FALSE(isInRange(-43, 92u, 50, 1u));
-
- ASSERT_FALSE(isInRange(43u, 86u, 42u, 0u));
- ASSERT_FALSE(isInRange(43u, 86u, 42u, 1u));
- ASSERT_FALSE(isInRange(43u, 86u, 42u, 2u));
- ASSERT_FALSE(isInRange(43u, 86u, 42u, ~0u));
- ASSERT_TRUE(isInRange(43u, 87u, 43u, 0u));
- ASSERT_TRUE(isInRange(43u, 87u, 43u, 1u));
- ASSERT_TRUE(isInRange(43u, 87u, 43u, 86u));
- ASSERT_TRUE(isInRange(43u, 87u, 43u, 87u));
- ASSERT_FALSE(isInRange(43u, 87u, 43u, 88u));
- ASSERT_FALSE(isInRange(43u, 87u, 43u, ~0u));
- ASSERT_TRUE(isInRange(43u, 88u, 60u, 0u));
- ASSERT_TRUE(isInRange(43u, 88u, 60u, 70u));
- ASSERT_TRUE(isInRange(43u, 88u, 60u, 71u));
- ASSERT_FALSE(isInRange(43u, 88u, 60u, 72u));
- ASSERT_FALSE(isInRange(43u, 88u, 60u, ~3u));
- ASSERT_TRUE(isInRange(43u, 90u, 132u, 0u));
- ASSERT_TRUE(isInRange(43u, 90u, 132u, 1u));
- ASSERT_FALSE(isInRange(43u, 90u, 132u, 2u));
- ASSERT_FALSE(isInRange(43u, 91u, 134u, 0u));
- ASSERT_FALSE(isInRange(43u, 91u, 134u, 2u));
- ASSERT_FALSE(isInRange(43u, 91u, 134u, ~6u));
- ASSERT_FALSE(isInRange(43u, 92u, 136u, 0u));
- ASSERT_FALSE(isInRange(43u, 92u, 136u, 1u));
-
- ASSERT_EQ(periodicError(124, 100), 24);
- ASSERT_EQ(periodicError(288, 100), 12);
- ASSERT_EQ(periodicError(-345, 100), 45);
- ASSERT_EQ(periodicError(-493, 100), 7);
- ASSERT_EQ(periodicError(-550, 100), 50);
- ASSERT_EQ(periodicError(-600, 100), 0);
-}
-
-} // namespace android
diff --git a/media/libstagefright/timedtext/Android.bp b/media/libstagefright/timedtext/Android.bp
index e19ca96..a5ad6c6 100644
--- a/media/libstagefright/timedtext/Android.bp
+++ b/media/libstagefright/timedtext/Android.bp
@@ -13,6 +13,10 @@
misc_undefined: [
"signed-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
include_dirs: [
diff --git a/media/libstagefright/webm/Android.bp b/media/libstagefright/webm/Android.bp
index 3972878..f968788 100644
--- a/media/libstagefright/webm/Android.bp
+++ b/media/libstagefright/webm/Android.bp
@@ -13,6 +13,10 @@
"signed-integer-overflow",
"unsigned-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
srcs: [
diff --git a/media/libstagefright/webm/WebmFrameThread.cpp b/media/libstagefright/webm/WebmFrameThread.cpp
index c33e0c7..71bfbc9 100644
--- a/media/libstagefright/webm/WebmFrameThread.cpp
+++ b/media/libstagefright/webm/WebmFrameThread.cpp
@@ -37,17 +37,23 @@
}
status_t WebmFrameThread::start() {
+ status_t err = OK;
pthread_attr_t attr;
pthread_attr_init(&attr);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
- pthread_create(&mThread, &attr, WebmFrameThread::wrap, this);
+ if ((err = pthread_create(&mThread, &attr, WebmFrameThread::wrap, this))) {
+ mThread = 0;
+ }
pthread_attr_destroy(&attr);
- return OK;
+ return err;
}
status_t WebmFrameThread::stop() {
- void *status;
- pthread_join(mThread, &status);
+ void *status = nullptr;
+ if (mThread) {
+ pthread_join(mThread, &status);
+ mThread = 0;
+ }
return (status_t)(intptr_t)status;
}
diff --git a/media/libstagefright/wifi-display/Android.bp b/media/libstagefright/wifi-display/Android.bp
index 402c6c1..be23359 100644
--- a/media/libstagefright/wifi-display/Android.bp
+++ b/media/libstagefright/wifi-display/Android.bp
@@ -42,5 +42,9 @@
misc_undefined: [
"signed-integer-overflow",
],
+ cfi: true,
+ diag: {
+ cfi: true,
+ },
},
}
diff --git a/media/libstagefright/wifi-display/MediaSender.cpp b/media/libstagefright/wifi-display/MediaSender.cpp
index 9b0af33..cc412f5 100644
--- a/media/libstagefright/wifi-display/MediaSender.cpp
+++ b/media/libstagefright/wifi-display/MediaSender.cpp
@@ -423,9 +423,11 @@
CHECK_GE((int32_t)accessUnit->size(), rangeLength);
sp<GraphicBuffer> grbuf(new GraphicBuffer(
- rangeOffset + rangeLength, 1, HAL_PIXEL_FORMAT_Y8,
- GRALLOC_USAGE_HW_VIDEO_ENCODER, rangeOffset + rangeLength,
- handle, false));
+ rangeOffset + rangeLength /* width */, 1 /* height */,
+ HAL_PIXEL_FORMAT_Y8, 1 /* layerCount */,
+ GRALLOC_USAGE_HW_VIDEO_ENCODER,
+ rangeOffset + rangeLength /* stride */, handle,
+ false /* keepOwnership */));
err = mHDCP->encryptNative(
grbuf, rangeOffset, rangeLength,
diff --git a/media/libstagefright/wifi-display/source/Converter.cpp b/media/libstagefright/wifi-display/source/Converter.cpp
index 471152e..273af18 100644
--- a/media/libstagefright/wifi-display/source/Converter.cpp
+++ b/media/libstagefright/wifi-display/source/Converter.cpp
@@ -26,6 +26,7 @@
#include <cutils/properties.h>
#include <gui/Surface.h>
#include <media/ICrypto.h>
+#include <media/MediaCodecBuffer.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -720,7 +721,7 @@
#endif
sp<ABuffer> buffer;
- sp<ABuffer> outbuf = mEncoderOutputBuffers.itemAt(bufferIndex);
+ sp<MediaCodecBuffer> outbuf = mEncoderOutputBuffers.itemAt(bufferIndex);
if (outbuf->meta()->findPointer("handle", (void**)&handle) &&
handle != NULL) {
diff --git a/media/libstagefright/wifi-display/source/Converter.h b/media/libstagefright/wifi-display/source/Converter.h
index b182990..ad95ab5 100644
--- a/media/libstagefright/wifi-display/source/Converter.h
+++ b/media/libstagefright/wifi-display/source/Converter.h
@@ -25,6 +25,7 @@
struct ABuffer;
class IGraphicBufferProducer;
struct MediaCodec;
+class MediaCodecBuffer;
#define ENABLE_SILENCE_DETECTION 0
@@ -106,8 +107,8 @@
sp<IGraphicBufferProducer> mGraphicBufferProducer;
- Vector<sp<ABuffer> > mEncoderInputBuffers;
- Vector<sp<ABuffer> > mEncoderOutputBuffers;
+ Vector<sp<MediaCodecBuffer> > mEncoderInputBuffers;
+ Vector<sp<MediaCodecBuffer> > mEncoderOutputBuffers;
List<size_t> mAvailEncoderInputIndices;
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.cpp b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
index 3587cb9..f1ecca0 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.cpp
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
@@ -36,7 +36,6 @@
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/AudioSource.h>
-#include <media/stagefright/DataSource.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MediaSource.h>
@@ -748,8 +747,6 @@
status_t WifiDisplaySource::PlaybackSession::setupMediaPacketizer(
bool enableAudio, bool enableVideo) {
- DataSource::RegisterDefaultSniffers();
-
mExtractor = new NuMediaExtractor;
status_t err = mExtractor->setDataSource(
diff --git a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
index 191db93..4695e5d 100644
--- a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
+++ b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
@@ -911,10 +911,8 @@
bool supportsPCM = (modes & 2) != 0; // LPCM 2ch 48kHz
- char val[PROPERTY_VALUE_MAX];
if (supportsPCM
- && property_get("media.wfd.use-pcm-audio", val, NULL)
- && (!strcasecmp("true", val) || !strcmp("1", val))) {
+ && property_get_bool("media.wfd.use-pcm-audio", false)) {
ALOGI("Using PCM audio.");
mUsingPCMAudio = true;
} else if (supportsAAC) {
diff --git a/media/mediaserver/Android.mk b/media/mediaserver/Android.mk
index ffbfcbb..f7597db 100644
--- a/media/mediaserver/Android.mk
+++ b/media/mediaserver/Android.mk
@@ -12,24 +12,24 @@
include $(CLEAR_VARS)
LOCAL_SRC_FILES:= \
- main_mediaserver.cpp
+ main_mediaserver.cpp
LOCAL_SHARED_LIBRARIES := \
- libresourcemanagerservice \
- liblog \
- libcutils \
- libmediaplayerservice \
- libutils \
- libbinder \
- libicuuc \
+ libresourcemanagerservice \
+ liblog \
+ libmediaplayerservice \
+ libutils \
+ libbinder \
+ libicuuc \
+ android.hardware.media.omx@1.0 \
LOCAL_STATIC_LIBRARIES := \
libicuandroid_utils \
libregistermsext
LOCAL_C_INCLUDES := \
- frameworks/av/media/libmediaplayerservice \
- frameworks/av/services/mediaresourcemanager \
+ frameworks/av/media/libmediaplayerservice \
+ frameworks/av/services/mediaresourcemanager \
LOCAL_MODULE:= mediaserver
LOCAL_32_BIT_ONLY := true
diff --git a/media/mtp/Android.bp b/media/mtp/Android.bp
index 5d5ae49..543ad5c 100644
--- a/media/mtp/Android.bp
+++ b/media/mtp/Android.bp
@@ -17,13 +17,13 @@
cc_library_shared {
name: "libmtp",
srcs: [
- "AsyncIO.cpp",
"MtpDataPacket.cpp",
"MtpDebug.cpp",
"MtpDevHandle.cpp",
"MtpDevice.cpp",
"MtpDeviceInfo.cpp",
"MtpEventPacket.cpp",
+ "MtpFfsCompatHandle.cpp",
"MtpFfsHandle.cpp",
"MtpObjectInfo.cpp",
"MtpPacket.cpp",
@@ -35,6 +35,7 @@
"MtpStorageInfo.cpp",
"MtpStringBuffer.cpp",
"MtpUtils.cpp",
+ "PosixAsyncIO.cpp",
],
export_include_dirs: ["."],
cflags: [
@@ -45,6 +46,7 @@
"-Werror",
],
shared_libs: [
+ "libasyncio",
"libbase",
"libutils",
"liblog",
diff --git a/media/mtp/AsyncIO.cpp b/media/mtp/AsyncIO.cpp
deleted file mode 100644
index e77ad38..0000000
--- a/media/mtp/AsyncIO.cpp
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <android-base/logging.h>
-#include <condition_variable>
-#include <memory>
-#include <mutex>
-#include <queue>
-
-#include "AsyncIO.h"
-
-namespace {
-
-void read_func(struct aiocb *aiocbp) {
- aiocbp->ret = TEMP_FAILURE_RETRY(pread(aiocbp->aio_fildes,
- aiocbp->aio_buf, aiocbp->aio_nbytes, aiocbp->aio_offset));
- if (aiocbp->ret == -1) aiocbp->error = errno;
-}
-
-void write_func(struct aiocb *aiocbp) {
- aiocbp->ret = TEMP_FAILURE_RETRY(pwrite(aiocbp->aio_fildes,
- aiocbp->aio_buf, aiocbp->aio_nbytes, aiocbp->aio_offset));
- if (aiocbp->ret == -1) aiocbp->error = errno;
-}
-
-void splice_read_func(struct aiocb *aiocbp) {
- loff_t long_offset = aiocbp->aio_offset;
- aiocbp->ret = TEMP_FAILURE_RETRY(splice(aiocbp->aio_fildes,
- &long_offset, aiocbp->aio_sink,
- NULL, aiocbp->aio_nbytes, 0));
- if (aiocbp->ret == -1) aiocbp->error = errno;
-}
-
-void splice_write_func(struct aiocb *aiocbp) {
- loff_t long_offset = aiocbp->aio_offset;
- aiocbp->ret = TEMP_FAILURE_RETRY(splice(aiocbp->aio_fildes, NULL,
- aiocbp->aio_sink, &long_offset,
- aiocbp->aio_nbytes, 0));
- if (aiocbp->ret == -1) aiocbp->error = errno;
-}
-
-std::queue<std::unique_ptr<struct aiocb>> queue;
-std::mutex queue_lock;
-std::condition_variable queue_cond;
-std::condition_variable write_cond;
-int done = 1;
-void splice_write_pool_func(int) {
- while(1) {
- std::unique_lock<std::mutex> lk(queue_lock);
- queue_cond.wait(lk, []{return !queue.empty() || done;});
- if (queue.empty() && done) {
- return;
- }
- std::unique_ptr<struct aiocb> aiocbp = std::move(queue.front());
- queue.pop();
- lk.unlock();
- write_cond.notify_one();
- splice_write_func(aiocbp.get());
- close(aiocbp->aio_fildes);
- }
-}
-
-void write_pool_func(int) {
- while(1) {
- std::unique_lock<std::mutex> lk(queue_lock);
- queue_cond.wait(lk, []{return !queue.empty() || done;});
- if (queue.empty() && done) {
- return;
- }
- std::unique_ptr<struct aiocb> aiocbp = std::move(queue.front());
- queue.pop();
- lk.unlock();
- write_cond.notify_one();
- aiocbp->ret = TEMP_FAILURE_RETRY(pwrite(aiocbp->aio_fildes,
- aiocbp->aio_pool_buf.get(), aiocbp->aio_nbytes, aiocbp->aio_offset));
- if (aiocbp->ret == -1) aiocbp->error = errno;
- }
-}
-
-constexpr int NUM_THREADS = 1;
-constexpr int MAX_QUEUE_SIZE = 10;
-std::thread pool[NUM_THREADS];
-
-} // end anonymous namespace
-
-void aio_pool_init(void(f)(int)) {
- CHECK(done == 1);
- done = 0;
- for (int i = 0; i < NUM_THREADS; i++) {
- pool[i] = std::thread(f, i);
- }
-}
-
-void aio_pool_splice_init() {
- aio_pool_init(splice_write_pool_func);
-}
-
-void aio_pool_write_init() {
- aio_pool_init(write_pool_func);
-}
-
-void aio_pool_end() {
- done = 1;
- for (int i = 0; i < NUM_THREADS; i++) {
- std::unique_lock<std::mutex> lk(queue_lock);
- lk.unlock();
- queue_cond.notify_one();
- }
-
- for (int i = 0; i < NUM_THREADS; i++) {
- pool[i].join();
- }
-}
-
-// used for both writes and splices depending on which init was used before.
-int aio_pool_write(struct aiocb *aiocbp) {
- std::unique_lock<std::mutex> lk(queue_lock);
- write_cond.wait(lk, []{return queue.size() < MAX_QUEUE_SIZE;});
- queue.push(std::unique_ptr<struct aiocb>(aiocbp));
- lk.unlock();
- queue_cond.notify_one();
- return 0;
-}
-
-int aio_read(struct aiocb *aiocbp) {
- aiocbp->thread = std::thread(read_func, aiocbp);
- return 0;
-}
-
-int aio_write(struct aiocb *aiocbp) {
- aiocbp->thread = std::thread(write_func, aiocbp);
- return 0;
-}
-
-int aio_splice_read(struct aiocb *aiocbp) {
- aiocbp->thread = std::thread(splice_read_func, aiocbp);
- return 0;
-}
-
-int aio_splice_write(struct aiocb *aiocbp) {
- aiocbp->thread = std::thread(splice_write_func, aiocbp);
- return 0;
-}
-
-int aio_error(const struct aiocb *aiocbp) {
- return aiocbp->error;
-}
-
-ssize_t aio_return(struct aiocb *aiocbp) {
- return aiocbp->ret;
-}
-
-int aio_suspend(struct aiocb *aiocbp[], int n,
- const struct timespec *) {
- for (int i = 0; i < n; i++) {
- aiocbp[i]->thread.join();
- }
- return 0;
-}
-
-int aio_cancel(int, struct aiocb *) {
- // Not implemented
- return -1;
-}
-
diff --git a/media/mtp/AsyncIO.h b/media/mtp/AsyncIO.h
deleted file mode 100644
index f7515a2..0000000
--- a/media/mtp/AsyncIO.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef _ASYNCIO_H
-#define _ASYNCIO_H
-
-#include <fcntl.h>
-#include <linux/aio_abi.h>
-#include <memory>
-#include <signal.h>
-#include <sys/cdefs.h>
-#include <sys/types.h>
-#include <time.h>
-#include <thread>
-#include <unistd.h>
-
-/**
- * Provides a subset of POSIX aio operations, as well
- * as similar operations with splice and threadpools.
- */
-
-struct aiocb {
- int aio_fildes; // Assumed to be the source for splices
- void *aio_buf; // Unused for splices
-
- // Used for threadpool operations only, freed automatically
- std::unique_ptr<char[]> aio_pool_buf;
-
- off_t aio_offset;
- size_t aio_nbytes;
-
- int aio_sink; // Unused for non splice r/w
-
- // Used internally
- std::thread thread;
- ssize_t ret;
- int error;
-};
-
-// Submit a request for IO to be completed
-int aio_read(struct aiocb *);
-int aio_write(struct aiocb *);
-int aio_splice_read(struct aiocb *);
-int aio_splice_write(struct aiocb *);
-
-// Suspend current thread until given IO is complete, at which point
-// its return value and any errors can be accessed
-int aio_suspend(struct aiocb *[], int, const struct timespec *);
-int aio_error(const struct aiocb *);
-ssize_t aio_return(struct aiocb *);
-int aio_cancel(int, struct aiocb *);
-
-// Initialize a threadpool to perform IO. Only one pool can be
-// running at a time.
-void aio_pool_write_init();
-void aio_pool_splice_init();
-// Suspend current thread until all queued work is complete, then ends the threadpool
-void aio_pool_end();
-// Submit IO work for the threadpool to complete. Memory associated with the work is
-// freed automatically when the work is complete.
-int aio_pool_write(struct aiocb *);
-
-#endif // ASYNCIO_H
-
diff --git a/media/mtp/IMtpHandle.h b/media/mtp/IMtpHandle.h
index 9185255..c65bdd0 100644
--- a/media/mtp/IMtpHandle.h
+++ b/media/mtp/IMtpHandle.h
@@ -18,16 +18,16 @@
#include <linux/usb/f_mtp.h>
-constexpr char FFS_MTP_EP0[] = "/dev/usb-ffs/mtp/ep0";
+namespace android {
class IMtpHandle {
public:
// Return number of bytes read/written, or -1 and errno is set
- virtual int read(void *data, int len) = 0;
- virtual int write(const void *data, int len) = 0;
+ virtual int read(void *data, size_t len) = 0;
+ virtual int write(const void *data, size_t len) = 0;
// Return 0 if send/receive is successful, or -1 and errno is set
- virtual int receiveFile(mtp_file_range mfr) = 0;
+ virtual int receiveFile(mtp_file_range mfr, bool zero_packet) = 0;
virtual int sendFile(mtp_file_range mfr) = 0;
virtual int sendEvent(mtp_event me) = 0;
@@ -40,8 +40,7 @@
virtual ~IMtpHandle() {}
};
-IMtpHandle *get_ffs_handle();
-IMtpHandle *get_mtp_handle();
+}
#endif // _IMTP_HANDLE_H
diff --git a/media/mtp/MtpDataPacket.cpp b/media/mtp/MtpDataPacket.cpp
index 40f4cea..d1c71d7 100644
--- a/media/mtp/MtpDataPacket.cpp
+++ b/media/mtp/MtpDataPacket.cpp
@@ -520,7 +520,7 @@
// Wait for result of readDataAsync
int MtpDataPacket::readDataWait(struct usb_device *device) {
- struct usb_request *req = usb_request_wait(device);
+ struct usb_request *req = usb_request_wait(device, -1);
return (req ? req->actual_length : -1);
}
diff --git a/media/mtp/MtpDataPacket.h b/media/mtp/MtpDataPacket.h
index a449d6f..1ddb821 100644
--- a/media/mtp/MtpDataPacket.h
+++ b/media/mtp/MtpDataPacket.h
@@ -20,12 +20,12 @@
#include "MtpPacket.h"
#include "mtp.h"
-class IMtpHandle;
struct usb_device;
struct usb_request;
namespace android {
+class IMtpHandle;
class MtpStringBuffer;
class MtpDataPacket : public MtpPacket {
diff --git a/media/mtp/MtpDevHandle.cpp b/media/mtp/MtpDevHandle.cpp
index afc0525..6aa57ac 100644
--- a/media/mtp/MtpDevHandle.cpp
+++ b/media/mtp/MtpDevHandle.cpp
@@ -14,61 +14,41 @@
* limitations under the License.
*/
-#include <utils/Log.h>
-#include <fcntl.h>
-#include <sys/stat.h>
+#include <android-base/logging.h>
#include <cutils/properties.h>
#include <dirent.h>
#include <errno.h>
+#include <fcntl.h>
#include <linux/usb/ch9.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/endian.h>
#include <unistd.h>
-#include <android-base/logging.h>
-#include <android-base/unique_fd.h>
-#include "IMtpHandle.h"
+#include "MtpDevHandle.h"
+
+namespace android {
constexpr char mtp_dev_path[] = "/dev/mtp_usb";
-class MtpDevHandle : public IMtpHandle {
-private:
- android::base::unique_fd mFd;
-
-public:
- MtpDevHandle();
- ~MtpDevHandle();
- int read(void *data, int len);
- int write(const void *data, int len);
-
- int receiveFile(mtp_file_range mfr);
- int sendFile(mtp_file_range mfr);
- int sendEvent(mtp_event me);
-
- int start();
- void close();
-
- int configure(bool ptp);
-};
-
MtpDevHandle::MtpDevHandle()
: mFd(-1) {};
MtpDevHandle::~MtpDevHandle() {}
-int MtpDevHandle::read(void *data, int len) {
+int MtpDevHandle::read(void *data, size_t len) {
return ::read(mFd, data, len);
}
-int MtpDevHandle::write(const void *data, int len) {
+int MtpDevHandle::write(const void *data, size_t len) {
return ::write(mFd, data, len);
}
-int MtpDevHandle::receiveFile(mtp_file_range mfr) {
+int MtpDevHandle::receiveFile(mtp_file_range mfr, bool) {
return ioctl(mFd, MTP_RECEIVE_FILE, reinterpret_cast<unsigned long>(&mfr));
}
@@ -81,7 +61,7 @@
}
int MtpDevHandle::start() {
- mFd = android::base::unique_fd(TEMP_FAILURE_RETRY(open(mtp_dev_path, O_RDWR)));
+ mFd.reset(TEMP_FAILURE_RETRY(open(mtp_dev_path, O_RDWR)));
if (mFd == -1) return -1;
return 0;
}
@@ -95,6 +75,4 @@
return 0;
}
-IMtpHandle *get_mtp_handle() {
- return new MtpDevHandle();
-}
+} // namespace android
diff --git a/media/mtp/MtpDevHandle.h b/media/mtp/MtpDevHandle.h
new file mode 100644
index 0000000..b0480ed
--- /dev/null
+++ b/media/mtp/MtpDevHandle.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _MTP_DEV_HANDLE_H
+#define _MTP_DEV_HANDLE_H
+
+#include <android-base/unique_fd.h>
+#include "IMtpHandle.h"
+
+namespace android {
+
+class MtpDevHandle : public IMtpHandle {
+private:
+ android::base::unique_fd mFd;
+
+public:
+ MtpDevHandle();
+ ~MtpDevHandle();
+ int read(void *data, size_t len);
+ int write(const void *data, size_t len);
+
+ int receiveFile(mtp_file_range mfr, bool);
+ int sendFile(mtp_file_range mfr);
+ int sendEvent(mtp_event me);
+
+ int start();
+ void close();
+
+ int configure(bool ptp);
+};
+
+} // namespace android
+
+#endif // _MTP_FFS_HANDLE_H
diff --git a/media/mtp/MtpDevice.cpp b/media/mtp/MtpDevice.cpp
index 52ea363..0bf7854 100644
--- a/media/mtp/MtpDevice.cpp
+++ b/media/mtp/MtpDevice.cpp
@@ -39,6 +39,12 @@
namespace android {
+namespace {
+
+static constexpr int USB_CONTROL_TRANSFER_TIMEOUT_MS = 200;
+
+} // namespace
+
#if 0
static bool isMtpDevice(uint16_t vendor, uint16_t product) {
// Sandisk Sansa Fuze
@@ -84,15 +90,18 @@
interface->bInterfaceSubClass == 1 && // Still Image Capture
interface->bInterfaceProtocol == 1) // Picture Transfer Protocol (PIMA 15470)
{
- char* manufacturerName = usb_device_get_manufacturer_name(device);
- char* productName = usb_device_get_product_name(device);
+ char* manufacturerName = usb_device_get_manufacturer_name(device,
+ USB_CONTROL_TRANSFER_TIMEOUT_MS);
+ char* productName = usb_device_get_product_name(device,
+ USB_CONTROL_TRANSFER_TIMEOUT_MS);
ALOGD("Found camera: \"%s\" \"%s\"\n", manufacturerName, productName);
free(manufacturerName);
free(productName);
} else if (interface->bInterfaceClass == 0xFF &&
interface->bInterfaceSubClass == 0xFF &&
interface->bInterfaceProtocol == 0) {
- char* interfaceName = usb_device_get_string(device, interface->iInterface);
+ char* interfaceName = usb_device_get_string(device, interface->iInterface,
+ USB_CONTROL_TRANSFER_TIMEOUT_MS);
if (!interfaceName) {
continue;
} else if (strcmp(interfaceName, "MTP")) {
@@ -102,8 +111,10 @@
free(interfaceName);
// Looks like an android style MTP device
- char* manufacturerName = usb_device_get_manufacturer_name(device);
- char* productName = usb_device_get_product_name(device);
+ char* manufacturerName = usb_device_get_manufacturer_name(device,
+ USB_CONTROL_TRANSFER_TIMEOUT_MS);
+ char* productName = usb_device_get_product_name(device,
+ USB_CONTROL_TRANSFER_TIMEOUT_MS);
ALOGD("Found MTP device: \"%s\" \"%s\"\n", manufacturerName, productName);
free(manufacturerName);
free(productName);
diff --git a/media/mtp/MtpDevice.h b/media/mtp/MtpDevice.h
index c84c842..a9a3e0e 100644
--- a/media/mtp/MtpDevice.h
+++ b/media/mtp/MtpDevice.h
@@ -107,7 +107,7 @@
bool sendObject(MtpObjectHandle handle, int size, int srcFD);
bool deleteObject(MtpObjectHandle handle);
MtpObjectHandle getParent(MtpObjectHandle handle);
- MtpObjectHandle getStorageID(MtpObjectHandle handle);
+ MtpStorageID getStorageID(MtpObjectHandle handle);
MtpObjectPropertyList* getObjectPropsSupported(MtpObjectFormat format);
diff --git a/media/mtp/MtpDeviceInfo.cpp b/media/mtp/MtpDeviceInfo.cpp
index 3e1dff7..3d5cb06 100644
--- a/media/mtp/MtpDeviceInfo.cpp
+++ b/media/mtp/MtpDeviceInfo.cpp
@@ -69,6 +69,7 @@
if (!packet.getString(string)) return false;
mVendorExtensionDesc = strdup((const char *)string);
+ if (!mVendorExtensionDesc) return false;
if (!packet.getUInt16(mFunctionalMode)) return false;
mOperations = packet.getAUInt16();
@@ -84,12 +85,16 @@
if (!packet.getString(string)) return false;
mManufacturer = strdup((const char *)string);
+ if (!mManufacturer) return false;
if (!packet.getString(string)) return false;
mModel = strdup((const char *)string);
+ if (!mModel) return false;
if (!packet.getString(string)) return false;
mVersion = strdup((const char *)string);
+ if (!mVersion) return false;
if (!packet.getString(string)) return false;
mSerial = strdup((const char *)string);
+ if (!mSerial) return false;
return true;
}
diff --git a/media/mtp/MtpEventPacket.cpp b/media/mtp/MtpEventPacket.cpp
index fbee72f..921ecbd 100644
--- a/media/mtp/MtpEventPacket.cpp
+++ b/media/mtp/MtpEventPacket.cpp
@@ -62,7 +62,7 @@
}
int MtpEventPacket::readResponse(struct usb_device *device) {
- struct usb_request* const req = usb_request_wait(device);
+ struct usb_request* const req = usb_request_wait(device, -1);
if (req) {
mPacketSize = req->actual_length;
return req->actual_length;
diff --git a/media/mtp/MtpFfsCompatHandle.cpp b/media/mtp/MtpFfsCompatHandle.cpp
new file mode 100644
index 0000000..3dd73f3
--- /dev/null
+++ b/media/mtp/MtpFfsCompatHandle.cpp
@@ -0,0 +1,342 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android-base/logging.h>
+#include <android-base/properties.h>
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/functionfs.h>
+#include <mutex>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/endian.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "PosixAsyncIO.h"
+#include "MtpFfsCompatHandle.h"
+#include "mtp.h"
+
+#define FUNCTIONFS_ENDPOINT_ALLOC _IOR('g', 231, __u32)
+
+namespace {
+
+// Must be divisible by all max packet size values
+constexpr int MAX_FILE_CHUNK_SIZE = 3145728;
+
+// Safe values since some devices cannot handle large DMAs
+// To get good performance, override these with
+// higher values per device using the properties
+// sys.usb.ffs.max_read and sys.usb.ffs.max_write
+constexpr int USB_FFS_MAX_WRITE = MTP_BUFFER_SIZE;
+constexpr int USB_FFS_MAX_READ = MTP_BUFFER_SIZE;
+
+static_assert(USB_FFS_MAX_WRITE > 0, "Max r/w values must be > 0!");
+static_assert(USB_FFS_MAX_READ > 0, "Max r/w values must be > 0!");
+
+constexpr unsigned int MAX_MTP_FILE_SIZE = 0xFFFFFFFF;
+
+constexpr size_t ENDPOINT_ALLOC_RETRIES = 10;
+
+} // anonymous namespace
+
+namespace android {
+
+MtpFfsCompatHandle::MtpFfsCompatHandle() :
+ mMaxWrite(USB_FFS_MAX_WRITE),
+ mMaxRead(USB_FFS_MAX_READ) {}
+
+MtpFfsCompatHandle::~MtpFfsCompatHandle() {}
+
+int MtpFfsCompatHandle::writeHandle(int fd, const void* data, size_t len) {
+ int ret = 0;
+ const char* buf = static_cast<const char*>(data);
+ while (len > 0) {
+ int write_len = std::min(mMaxWrite, len);
+ int n = TEMP_FAILURE_RETRY(::write(fd, buf, write_len));
+
+ if (n < 0) {
+ PLOG(ERROR) << "write ERROR: fd = " << fd << ", n = " << n;
+ return -1;
+ } else if (n < write_len) {
+ errno = EIO;
+ PLOG(ERROR) << "less written than expected";
+ return -1;
+ }
+ buf += n;
+ len -= n;
+ ret += n;
+ }
+ return ret;
+}
+
+int MtpFfsCompatHandle::readHandle(int fd, void* data, size_t len) {
+ int ret = 0;
+ char* buf = static_cast<char*>(data);
+ while (len > 0) {
+ int read_len = std::min(mMaxRead, len);
+ int n = TEMP_FAILURE_RETRY(::read(fd, buf, read_len));
+ if (n < 0) {
+ PLOG(ERROR) << "read ERROR: fd = " << fd << ", n = " << n;
+ return -1;
+ }
+ ret += n;
+ if (n < read_len) // done reading early
+ break;
+ buf += n;
+ len -= n;
+ }
+ return ret;
+}
+
+int MtpFfsCompatHandle::start() {
+ mLock.lock();
+
+ if (!openEndpoints())
+ return -1;
+
+ for (unsigned i = 0; i < NUM_IO_BUFS; i++) {
+ mIobuf[i].bufs.resize(MAX_FILE_CHUNK_SIZE);
+ posix_madvise(mIobuf[i].bufs.data(), MAX_FILE_CHUNK_SIZE,
+ POSIX_MADV_SEQUENTIAL | POSIX_MADV_WILLNEED);
+ }
+
+ // Get device specific r/w size
+ mMaxWrite = android::base::GetIntProperty("sys.usb.ffs.max_write", USB_FFS_MAX_WRITE);
+ mMaxRead = android::base::GetIntProperty("sys.usb.ffs.max_read", USB_FFS_MAX_READ);
+
+ size_t attempts = 0;
+ while (mMaxWrite >= USB_FFS_MAX_WRITE && mMaxRead >= USB_FFS_MAX_READ &&
+ attempts < ENDPOINT_ALLOC_RETRIES) {
+ // If larger contiguous chunks of memory aren't available, attempt to try
+ // smaller allocations.
+ if (ioctl(mBulkIn, FUNCTIONFS_ENDPOINT_ALLOC, static_cast<__u32>(mMaxWrite)) ||
+ ioctl(mBulkOut, FUNCTIONFS_ENDPOINT_ALLOC, static_cast<__u32>(mMaxRead))) {
+ if (errno == ENODEV) {
+ // Driver hasn't enabled endpoints yet.
+ std::this_thread::sleep_for(std::chrono::milliseconds(100));
+ attempts += 1;
+ continue;
+ }
+ mMaxWrite /= 2;
+ mMaxRead /=2;
+ } else {
+ return 0;
+ }
+ }
+ // Try to start MtpServer anyway, with the smallest max r/w values
+ mMaxWrite = USB_FFS_MAX_WRITE;
+ mMaxRead = USB_FFS_MAX_READ;
+ PLOG(ERROR) << "Functionfs could not allocate any memory!";
+ return 0;
+}
+
+int MtpFfsCompatHandle::read(void* data, size_t len) {
+ return readHandle(mBulkOut, data, len);
+}
+
+int MtpFfsCompatHandle::write(const void* data, size_t len) {
+ return writeHandle(mBulkIn, data, len);
+}
+
+int MtpFfsCompatHandle::receiveFile(mtp_file_range mfr, bool zero_packet) {
+ // When receiving files, the incoming length is given in 32 bits.
+ // A >4G file is given as 0xFFFFFFFF
+ uint32_t file_length = mfr.length;
+ uint64_t offset = mfr.offset;
+ int packet_size = getPacketSize(mBulkOut);
+
+ unsigned char *data = mIobuf[0].bufs.data();
+ unsigned char *data2 = mIobuf[1].bufs.data();
+
+ struct aiocb aio;
+ aio.aio_fildes = mfr.fd;
+ aio.aio_buf = nullptr;
+ struct aiocb *aiol[] = {&aio};
+ int ret = -1;
+ size_t length;
+ bool read = false;
+ bool write = false;
+
+ posix_fadvise(mfr.fd, 0, 0, POSIX_FADV_SEQUENTIAL | POSIX_FADV_NOREUSE);
+
+ // Break down the file into pieces that fit in buffers
+ while (file_length > 0 || write) {
+ if (file_length > 0) {
+ length = std::min(static_cast<uint32_t>(MAX_FILE_CHUNK_SIZE), file_length);
+
+ // Read data from USB, handle errors after waiting for write thread.
+ ret = readHandle(mBulkOut, data, length);
+
+ if (file_length != MAX_MTP_FILE_SIZE && ret < static_cast<int>(length)) {
+ ret = -1;
+ errno = EIO;
+ }
+ read = true;
+ }
+
+ if (write) {
+ // get the return status of the last write request
+ aio_suspend(aiol, 1, nullptr);
+
+ int written = aio_return(&aio);
+ if (written == -1) {
+ errno = aio_error(&aio);
+ return -1;
+ }
+ if (static_cast<size_t>(written) < aio.aio_nbytes) {
+ errno = EIO;
+ return -1;
+ }
+ write = false;
+ }
+
+ // If there was an error reading above
+ if (ret == -1) {
+ return -1;
+ }
+
+ if (read) {
+ if (file_length == MAX_MTP_FILE_SIZE) {
+ // For larger files, receive until a short packet is received.
+ if (static_cast<size_t>(ret) < length) {
+ file_length = 0;
+ }
+ } else {
+ file_length -= ret;
+ }
+ // Enqueue a new write request
+ aio_prepare(&aio, data, length, offset);
+ aio_write(&aio);
+
+ offset += ret;
+ std::swap(data, data2);
+
+ write = true;
+ read = false;
+ }
+ }
+ // Receive an empty packet if size is a multiple of the endpoint size.
+ if (ret % packet_size == 0 || zero_packet) {
+ if (TEMP_FAILURE_RETRY(::read(mBulkOut, data, packet_size)) != 0) {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+int MtpFfsCompatHandle::sendFile(mtp_file_range mfr) {
+ uint64_t file_length = mfr.length;
+ uint32_t given_length = std::min(static_cast<uint64_t>(MAX_MTP_FILE_SIZE),
+ file_length + sizeof(mtp_data_header));
+ uint64_t offset = mfr.offset;
+ int packet_size = getPacketSize(mBulkIn);
+
+ // If file_length is larger than a size_t, truncating would produce the wrong comparison.
+ // Instead, promote the left side to 64 bits, then truncate the small result.
+ int init_read_len = std::min(
+ static_cast<uint64_t>(packet_size - sizeof(mtp_data_header)), file_length);
+
+ unsigned char *data = mIobuf[0].bufs.data();
+ unsigned char *data2 = mIobuf[1].bufs.data();
+
+ posix_fadvise(mfr.fd, 0, 0, POSIX_FADV_SEQUENTIAL | POSIX_FADV_NOREUSE);
+
+ struct aiocb aio;
+ aio.aio_fildes = mfr.fd;
+ struct aiocb *aiol[] = {&aio};
+ int ret, length;
+ int error = 0;
+ bool read = false;
+ bool write = false;
+
+ // Send the header data
+ mtp_data_header *header = reinterpret_cast<mtp_data_header*>(data);
+ header->length = htole32(given_length);
+ header->type = htole16(2); /* data packet */
+ header->command = htole16(mfr.command);
+ header->transaction_id = htole32(mfr.transaction_id);
+
+ // Some hosts don't support header/data separation even though MTP allows it
+ // Handle by filling first packet with initial file data
+ if (TEMP_FAILURE_RETRY(pread(mfr.fd, reinterpret_cast<char*>(data) +
+ sizeof(mtp_data_header), init_read_len, offset))
+ != init_read_len) return -1;
+ if (writeHandle(mBulkIn, data, sizeof(mtp_data_header) + init_read_len) == -1) return -1;
+ file_length -= init_read_len;
+ offset += init_read_len;
+ ret = init_read_len + sizeof(mtp_data_header);
+
+ // Break down the file into pieces that fit in buffers
+ while (file_length > 0) {
+ if (read) {
+ // Wait for the previous read to finish
+ aio_suspend(aiol, 1, nullptr);
+ ret = aio_return(&aio);
+ if (ret == -1) {
+ errno = aio_error(&aio);
+ return -1;
+ }
+ if (static_cast<size_t>(ret) < aio.aio_nbytes) {
+ errno = EIO;
+ return -1;
+ }
+
+ file_length -= ret;
+ offset += ret;
+ std::swap(data, data2);
+ read = false;
+ write = true;
+ }
+
+ if (error == -1) {
+ return -1;
+ }
+
+ if (file_length > 0) {
+ length = std::min(static_cast<uint64_t>(MAX_FILE_CHUNK_SIZE), file_length);
+ // Queue up another read
+ aio_prepare(&aio, data, length, offset);
+ aio_read(&aio);
+ read = true;
+ }
+
+ if (write) {
+ if (writeHandle(mBulkIn, data2, ret) == -1) {
+ error = -1;
+ }
+ write = false;
+ }
+ }
+
+ if (ret % packet_size == 0) {
+ // If the last packet wasn't short, send a final empty packet
+ if (TEMP_FAILURE_RETRY(::write(mBulkIn, data, 0)) != 0) {
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+} // namespace android
+
diff --git a/media/mtp/MtpFfsCompatHandle.h b/media/mtp/MtpFfsCompatHandle.h
new file mode 100644
index 0000000..cd61482
--- /dev/null
+++ b/media/mtp/MtpFfsCompatHandle.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _MTP_FFS_COMPAT_HANDLE_H
+#define _MTP_FFS_COMPAT_HANDLE_H
+
+#include <MtpFfsHandle.h>
+
+namespace android {
+
+template <class T> class MtpFfsHandleTest;
+
+class MtpFfsCompatHandle : public MtpFfsHandle {
+ template <class T> friend class android::MtpFfsHandleTest;
+private:
+ int writeHandle(int fd, const void *data, size_t len);
+ int readHandle(int fd, void *data, size_t len);
+
+ size_t mMaxWrite;
+ size_t mMaxRead;
+
+public:
+ int read(void* data, size_t len) override;
+ int write(const void* data, size_t len) override;
+ int receiveFile(mtp_file_range mfr, bool zero_packet) override;
+ int sendFile(mtp_file_range mfr) override;
+
+ /**
+ * Open ffs endpoints and allocate necessary kernel and user memory.
+ * Will sleep until endpoints are enabled, for up to 1 second.
+ */
+ int start() override;
+
+ MtpFfsCompatHandle();
+ ~MtpFfsCompatHandle();
+};
+
+} // namespace android
+
+#endif // _MTP_FFS_COMPAT_HANDLE_H
+
diff --git a/media/mtp/MtpFfsHandle.cpp b/media/mtp/MtpFfsHandle.cpp
index 35dd10f..89b20e5 100644
--- a/media/mtp/MtpFfsHandle.cpp
+++ b/media/mtp/MtpFfsHandle.cpp
@@ -16,32 +16,29 @@
#include <android-base/logging.h>
#include <android-base/properties.h>
+#include <asyncio/AsyncIO.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/usb/ch9.h>
#include <linux/usb/functionfs.h>
-#include <mutex>
+#include <memory>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/endian.h>
+#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
+#include <sys/poll.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
-#include <vector>
-#include "AsyncIO.h"
+#include "PosixAsyncIO.h"
#include "MtpFfsHandle.h"
#include "mtp.h"
-#define cpu_to_le16(x) htole16(x)
-#define cpu_to_le32(x) htole32(x)
-
-#define FUNCTIONFS_ENDPOINT_ALLOC _IOR('g', 231, __u32)
-
namespace {
constexpr char FFS_MTP_EP_IN[] = "/dev/usb-ffs/mtp/ep1";
@@ -51,23 +48,18 @@
constexpr int MAX_PACKET_SIZE_FS = 64;
constexpr int MAX_PACKET_SIZE_HS = 512;
constexpr int MAX_PACKET_SIZE_SS = 1024;
+constexpr int MAX_PACKET_SIZE_EV = 28;
-// Must be divisible by all max packet size values
-constexpr int MAX_FILE_CHUNK_SIZE = 3145728;
+constexpr unsigned AIO_BUFS_MAX = 128;
+constexpr unsigned AIO_BUF_LEN = 16384;
-// Safe values since some devices cannot handle large DMAs
-// To get good performance, override these with
-// higher values per device using the properties
-// sys.usb.ffs.max_read and sys.usb.ffs.max_write
-constexpr int USB_FFS_MAX_WRITE = MTP_BUFFER_SIZE;
-constexpr int USB_FFS_MAX_READ = MTP_BUFFER_SIZE;
+constexpr unsigned FFS_NUM_EVENTS = 5;
-static_assert(USB_FFS_MAX_WRITE > 0, "Max r/w values must be > 0!");
-static_assert(USB_FFS_MAX_READ > 0, "Max r/w values must be > 0!");
+constexpr unsigned MAX_FILE_CHUNK_SIZE = AIO_BUFS_MAX * AIO_BUF_LEN;
-constexpr unsigned int MAX_MTP_FILE_SIZE = 0xFFFFFFFF;
+constexpr uint32_t MAX_MTP_FILE_SIZE = 0xFFFFFFFF;
-constexpr size_t ENDPOINT_ALLOC_RETRIES = 10;
+struct timespec ZERO_TIMEOUT = { 0, 0 };
struct func_desc {
struct usb_interface_descriptor intf;
@@ -143,12 +135,12 @@
.wMaxPacketSize = MAX_PACKET_SIZE_FS,
};
-const struct usb_endpoint_descriptor_no_audio fs_intr = {
+const struct usb_endpoint_descriptor_no_audio intr = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = 3 | USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
- .wMaxPacketSize = MAX_PACKET_SIZE_FS,
+ .wMaxPacketSize = MAX_PACKET_SIZE_EV,
.bInterval = 6,
};
@@ -168,15 +160,6 @@
.wMaxPacketSize = MAX_PACKET_SIZE_HS,
};
-const struct usb_endpoint_descriptor_no_audio hs_intr = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
- .bEndpointAddress = 3 | USB_DIR_IN,
- .bmAttributes = USB_ENDPOINT_XFER_INT,
- .wMaxPacketSize = MAX_PACKET_SIZE_HS,
- .bInterval = 6,
-};
-
const struct usb_endpoint_descriptor_no_audio ss_sink = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -193,15 +176,6 @@
.wMaxPacketSize = MAX_PACKET_SIZE_SS,
};
-const struct usb_endpoint_descriptor_no_audio ss_intr = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
- .bEndpointAddress = 3 | USB_DIR_IN,
- .bmAttributes = USB_ENDPOINT_XFER_INT,
- .wMaxPacketSize = MAX_PACKET_SIZE_SS,
- .bInterval = 6,
-};
-
const struct usb_ss_ep_comp_descriptor ss_sink_comp = {
.bLength = sizeof(ss_sink_comp),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
@@ -223,14 +197,14 @@
.intf = mtp_interface_desc,
.sink = fs_sink,
.source = fs_source,
- .intr = fs_intr,
+ .intr = intr,
};
const struct func_desc mtp_hs_descriptors = {
.intf = mtp_interface_desc,
.sink = hs_sink,
.source = hs_source,
- .intr = hs_intr,
+ .intr = intr,
};
const struct ss_func_desc mtp_ss_descriptors = {
@@ -239,7 +213,7 @@
.sink_comp = ss_sink_comp,
.source = ss_source,
.source_comp = ss_source_comp,
- .intr = ss_intr,
+ .intr = intr,
.intr_comp = ss_intr_comp,
};
@@ -247,14 +221,14 @@
.intf = ptp_interface_desc,
.sink = fs_sink,
.source = fs_source,
- .intr = fs_intr,
+ .intr = intr,
};
const struct func_desc ptp_hs_descriptors = {
.intf = ptp_interface_desc,
.sink = hs_sink,
.source = hs_source,
- .intr = hs_intr,
+ .intr = intr,
};
const struct ss_func_desc ptp_ss_descriptors = {
@@ -263,7 +237,7 @@
.sink_comp = ss_sink_comp,
.source = ss_source,
.source_comp = ss_source_comp,
- .intr = ss_intr,
+ .intr = intr,
.intr_comp = ss_intr_comp,
};
@@ -276,24 +250,37 @@
} __attribute__((packed)) lang0;
} __attribute__((packed)) strings = {
.header = {
- .magic = cpu_to_le32(FUNCTIONFS_STRINGS_MAGIC),
- .length = cpu_to_le32(sizeof(strings)),
- .str_count = cpu_to_le32(1),
- .lang_count = cpu_to_le32(1),
+ .magic = htole32(FUNCTIONFS_STRINGS_MAGIC),
+ .length = htole32(sizeof(strings)),
+ .str_count = htole32(1),
+ .lang_count = htole32(1),
},
.lang0 = {
- .code = cpu_to_le16(0x0409),
+ .code = htole16(0x0409),
.str1 = STR_INTERFACE,
},
};
+struct mtp_device_status {
+ uint16_t wLength;
+ uint16_t wCode;
+};
+
} // anonymous namespace
namespace android {
-MtpFfsHandle::MtpFfsHandle() :
- mMaxWrite(USB_FFS_MAX_WRITE),
- mMaxRead(USB_FFS_MAX_READ) {}
+int MtpFfsHandle::getPacketSize(int ffs_fd) {
+ struct usb_endpoint_descriptor desc;
+ if (ioctl(ffs_fd, FUNCTIONFS_ENDPOINT_DESC, reinterpret_cast<unsigned long>(&desc))) {
+ PLOG(ERROR) << "Could not get FFS bulk-in descriptor";
+ return MAX_PACKET_SIZE_HS;
+ } else {
+ return desc.wMaxPacketSize;
+ }
+}
+
+MtpFfsHandle::MtpFfsHandle() {}
MtpFfsHandle::~MtpFfsHandle() {}
@@ -303,13 +290,51 @@
mBulkOut.reset();
}
+bool MtpFfsHandle::openEndpoints() {
+ if (mBulkIn < 0) {
+ mBulkIn.reset(TEMP_FAILURE_RETRY(open(FFS_MTP_EP_IN, O_RDWR)));
+ if (mBulkIn < 0) {
+ PLOG(ERROR) << FFS_MTP_EP_IN << ": cannot open bulk in ep";
+ return false;
+ }
+ }
+
+ if (mBulkOut < 0) {
+ mBulkOut.reset(TEMP_FAILURE_RETRY(open(FFS_MTP_EP_OUT, O_RDWR)));
+ if (mBulkOut < 0) {
+ PLOG(ERROR) << FFS_MTP_EP_OUT << ": cannot open bulk out ep";
+ return false;
+ }
+ }
+
+ if (mIntr < 0) {
+ mIntr.reset(TEMP_FAILURE_RETRY(open(FFS_MTP_EP_INTR, O_RDWR)));
+ if (mIntr < 0) {
+ PLOG(ERROR) << FFS_MTP_EP_INTR << ": cannot open intr ep";
+ return false;
+ }
+ }
+ return true;
+}
+
+void MtpFfsHandle::advise(int fd) {
+ for (unsigned i = 0; i < NUM_IO_BUFS; i++) {
+ if (posix_madvise(mIobuf[i].bufs.data(), MAX_FILE_CHUNK_SIZE,
+ POSIX_MADV_SEQUENTIAL | POSIX_MADV_WILLNEED) < 0)
+ PLOG(ERROR) << "Failed to madvise";
+ }
+ if (posix_fadvise(fd, 0, 0,
+ POSIX_FADV_SEQUENTIAL | POSIX_FADV_NOREUSE | POSIX_FADV_WILLNEED) < 0)
+ PLOG(ERROR) << "Failed to fadvise";
+}
+
bool MtpFfsHandle::initFunctionfs() {
ssize_t ret;
struct desc_v1 v1_descriptor;
struct desc_v2 v2_descriptor;
- v2_descriptor.header.magic = cpu_to_le32(FUNCTIONFS_DESCRIPTORS_MAGIC_V2);
- v2_descriptor.header.length = cpu_to_le32(sizeof(v2_descriptor));
+ v2_descriptor.header.magic = htole32(FUNCTIONFS_DESCRIPTORS_MAGIC_V2);
+ v2_descriptor.header.length = htole32(sizeof(v2_descriptor));
v2_descriptor.header.flags = FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC |
FUNCTIONFS_HAS_SS_DESC;
v2_descriptor.fs_count = 4;
@@ -328,8 +353,8 @@
ret = TEMP_FAILURE_RETRY(::write(mControl, &v2_descriptor, sizeof(v2_descriptor)));
if (ret < 0) {
- v1_descriptor.header.magic = cpu_to_le32(FUNCTIONFS_DESCRIPTORS_MAGIC);
- v1_descriptor.header.length = cpu_to_le32(sizeof(v1_descriptor));
+ v1_descriptor.header.magic = htole32(FUNCTIONFS_DESCRIPTORS_MAGIC);
+ v1_descriptor.header.length = htole32(sizeof(v1_descriptor));
v1_descriptor.header.fs_count = 4;
v1_descriptor.header.hs_count = 4;
v1_descriptor.fs_descs = mPtp ? ptp_fs_descriptors : mtp_fs_descriptors;
@@ -347,8 +372,6 @@
goto err;
}
}
- if (mBulkIn > -1 || mBulkOut > -1 || mIntr > -1)
- LOG(WARNING) << "Endpoints were not closed before configure!";
return true;
@@ -361,136 +384,162 @@
mControl.reset();
}
-int MtpFfsHandle::writeHandle(int fd, const void* data, int len) {
- LOG(VERBOSE) << "MTP about to write fd = " << fd << ", len=" << len;
- int ret = 0;
- const char* buf = static_cast<const char*>(data);
- while (len > 0) {
- int write_len = std::min(mMaxWrite, len);
- int n = TEMP_FAILURE_RETRY(::write(fd, buf, write_len));
-
- if (n < 0) {
- PLOG(ERROR) << "write ERROR: fd = " << fd << ", n = " << n;
- return -1;
- } else if (n < write_len) {
- errno = EIO;
- PLOG(ERROR) << "less written than expected";
- return -1;
- }
- buf += n;
- len -= n;
- ret += n;
+int MtpFfsHandle::doAsync(void* data, size_t len, bool read) {
+ struct io_event ioevs[1];
+ if (len > AIO_BUF_LEN) {
+ LOG(ERROR) << "Mtp read/write too large " << len;
+ errno = EINVAL;
+ return -1;
}
+ mIobuf[0].buf[0] = reinterpret_cast<unsigned char*>(data);
+ if (iobufSubmit(&mIobuf[0], read ? mBulkOut : mBulkIn, len, read) == -1)
+ return -1;
+ int ret = waitEvents(&mIobuf[0], 1, ioevs, nullptr);
+ mIobuf[0].buf[0] = mIobuf[0].bufs.data();
return ret;
}
-int MtpFfsHandle::readHandle(int fd, void* data, int len) {
- LOG(VERBOSE) << "MTP about to read fd = " << fd << ", len=" << len;
+int MtpFfsHandle::read(void* data, size_t len) {
+ return doAsync(data, len, true);
+}
+
+int MtpFfsHandle::write(const void* data, size_t len) {
+ return doAsync(const_cast<void*>(data), len, false);
+}
+
+int MtpFfsHandle::handleEvent() {
+
+ std::vector<usb_functionfs_event> events(FFS_NUM_EVENTS);
+ usb_functionfs_event *event = events.data();
+ int nbytes = TEMP_FAILURE_RETRY(::read(mControl, event,
+ events.size() * sizeof(usb_functionfs_event)));
+ if (nbytes == -1) {
+ return -1;
+ }
int ret = 0;
- char* buf = static_cast<char*>(data);
- while (len > 0) {
- int read_len = std::min(mMaxRead, len);
- int n = TEMP_FAILURE_RETRY(::read(fd, buf, read_len));
- if (n < 0) {
- PLOG(ERROR) << "read ERROR: fd = " << fd << ", n = " << n;
- return -1;
- }
- ret += n;
- if (n < read_len) // done reading early
+ for (size_t n = nbytes / sizeof *event; n; --n, ++event) {
+ switch (event->type) {
+ case FUNCTIONFS_BIND:
+ case FUNCTIONFS_ENABLE:
+ case FUNCTIONFS_RESUME:
+ ret = 0;
+ errno = 0;
break;
- buf += n;
- len -= n;
- }
- return ret;
-}
-
-int MtpFfsHandle::spliceReadHandle(int fd, int pipe_out, int len) {
- LOG(VERBOSE) << "MTP about to splice read fd = " << fd << ", len=" << len;
- int ret = 0;
- loff_t dummyoff;
- while (len > 0) {
- int read_len = std::min(mMaxRead, len);
- dummyoff = 0;
- int n = TEMP_FAILURE_RETRY(splice(fd, &dummyoff, pipe_out, nullptr, read_len, 0));
- if (n < 0) {
- PLOG(ERROR) << "splice read ERROR: fd = " << fd << ", n = " << n;
- return -1;
- }
- ret += n;
- if (n < read_len) // done reading early
+ case FUNCTIONFS_SUSPEND:
+ case FUNCTIONFS_UNBIND:
+ case FUNCTIONFS_DISABLE:
+ errno = ESHUTDOWN;
+ ret = -1;
break;
- len -= n;
+ case FUNCTIONFS_SETUP:
+ if (handleControlRequest(&event->u.setup) == -1)
+ ret = -1;
+ break;
+ default:
+ LOG(ERROR) << "Mtp Event " << event->type << " (unknown)";
+ }
}
return ret;
}
-int MtpFfsHandle::read(void* data, int len) {
- return readHandle(mBulkOut, data, len);
-}
+int MtpFfsHandle::handleControlRequest(const struct usb_ctrlrequest *setup) {
+ uint8_t type = setup->bRequestType;
+ uint8_t code = setup->bRequest;
+ uint16_t length = setup->wLength;
+ uint16_t index = setup->wIndex;
+ uint16_t value = setup->wValue;
+ std::vector<char> buf;
+ buf.resize(length);
+ int ret = 0;
-int MtpFfsHandle::write(const void* data, int len) {
- return writeHandle(mBulkIn, data, len);
+ if (!(type & USB_DIR_IN)) {
+ if (::read(mControl, buf.data(), length) != length) {
+ PLOG(ERROR) << "Mtp error ctrlreq read data";
+ }
+ }
+
+ if ((type & USB_TYPE_MASK) == USB_TYPE_CLASS && index == 0 && value == 0) {
+ switch(code) {
+ case MTP_REQ_RESET:
+ case MTP_REQ_CANCEL:
+ errno = ECANCELED;
+ ret = -1;
+ break;
+ case MTP_REQ_GET_DEVICE_STATUS:
+ {
+ if (length < sizeof(struct mtp_device_status) + 4) {
+ errno = EINVAL;
+ return -1;
+ }
+ struct mtp_device_status *st = reinterpret_cast<struct mtp_device_status*>(buf.data());
+ st->wLength = htole16(sizeof(st));
+ if (mCanceled) {
+ st->wLength += 4;
+ st->wCode = MTP_RESPONSE_TRANSACTION_CANCELLED;
+ uint16_t *endpoints = reinterpret_cast<uint16_t*>(st + 1);
+ endpoints[0] = ioctl(mBulkIn, FUNCTIONFS_ENDPOINT_REVMAP);
+ endpoints[1] = ioctl(mBulkOut, FUNCTIONFS_ENDPOINT_REVMAP);
+ mCanceled = false;
+ } else {
+ st->wCode = MTP_RESPONSE_OK;
+ }
+ length = st->wLength;
+ break;
+ }
+ default:
+ LOG(ERROR) << "Unrecognized Mtp class request! " << code;
+ }
+ } else {
+ LOG(ERROR) << "Unrecognized request type " << type;
+ }
+
+ if (type & USB_DIR_IN) {
+ if (::write(mControl, buf.data(), length) != length) {
+ PLOG(ERROR) << "Mtp error ctrlreq write data";
+ }
+ }
+ return 0;
}
int MtpFfsHandle::start() {
mLock.lock();
- mBulkIn.reset(TEMP_FAILURE_RETRY(open(FFS_MTP_EP_IN, O_RDWR)));
- if (mBulkIn < 0) {
- PLOG(ERROR) << FFS_MTP_EP_IN << ": cannot open bulk in ep";
+ if (!openEndpoints())
return -1;
- }
- mBulkOut.reset(TEMP_FAILURE_RETRY(open(FFS_MTP_EP_OUT, O_RDWR)));
- if (mBulkOut < 0) {
- PLOG(ERROR) << FFS_MTP_EP_OUT << ": cannot open bulk out ep";
- return -1;
- }
-
- mIntr.reset(TEMP_FAILURE_RETRY(open(FFS_MTP_EP_INTR, O_RDWR)));
- if (mIntr < 0) {
- PLOG(ERROR) << FFS_MTP_EP0 << ": cannot open intr ep";
- return -1;
- }
-
- mBuffer1.resize(MAX_FILE_CHUNK_SIZE);
- mBuffer2.resize(MAX_FILE_CHUNK_SIZE);
- posix_madvise(mBuffer1.data(), MAX_FILE_CHUNK_SIZE,
- POSIX_MADV_SEQUENTIAL | POSIX_MADV_WILLNEED);
- posix_madvise(mBuffer2.data(), MAX_FILE_CHUNK_SIZE,
- POSIX_MADV_SEQUENTIAL | POSIX_MADV_WILLNEED);
-
- // Get device specific r/w size
- mMaxWrite = android::base::GetIntProperty("sys.usb.ffs.max_write", USB_FFS_MAX_WRITE);
- mMaxRead = android::base::GetIntProperty("sys.usb.ffs.max_read", USB_FFS_MAX_READ);
-
- size_t attempts = 0;
- while (mMaxWrite >= USB_FFS_MAX_WRITE && mMaxRead >= USB_FFS_MAX_READ &&
- attempts < ENDPOINT_ALLOC_RETRIES) {
- // If larger contiguous chunks of memory aren't available, attempt to try
- // smaller allocations.
- if (ioctl(mBulkIn, FUNCTIONFS_ENDPOINT_ALLOC, static_cast<__u32>(mMaxWrite)) ||
- ioctl(mBulkOut, FUNCTIONFS_ENDPOINT_ALLOC, static_cast<__u32>(mMaxRead))) {
- if (errno == ENODEV) {
- // Driver hasn't enabled endpoints yet.
- std::this_thread::sleep_for(std::chrono::milliseconds(100));
- attempts += 1;
- continue;
- }
- mMaxWrite /= 2;
- mMaxRead /=2;
- } else {
- return 0;
+ for (unsigned i = 0; i < NUM_IO_BUFS; i++) {
+ mIobuf[i].bufs.resize(MAX_FILE_CHUNK_SIZE);
+ mIobuf[i].iocb.resize(AIO_BUFS_MAX);
+ mIobuf[i].iocbs.resize(AIO_BUFS_MAX);
+ mIobuf[i].buf.resize(AIO_BUFS_MAX);
+ for (unsigned j = 0; j < AIO_BUFS_MAX; j++) {
+ mIobuf[i].buf[j] = mIobuf[i].bufs.data() + j * AIO_BUF_LEN;
+ mIobuf[i].iocb[j] = &mIobuf[i].iocbs[j];
}
}
- // Try to start MtpServer anyway, with the smallest max r/w values
- PLOG(ERROR) << "Functionfs could not allocate any memory!";
+
+ memset(&mCtx, 0, sizeof(mCtx));
+ if (io_setup(AIO_BUFS_MAX, &mCtx) < 0) {
+ PLOG(ERROR) << "unable to setup aio";
+ return -1;
+ }
+ mEventFd.reset(eventfd(0, EFD_NONBLOCK));
+ mPollFds[0].fd = mControl;
+ mPollFds[0].events = POLLIN;
+ mPollFds[1].fd = mEventFd;
+ mPollFds[1].events = POLLIN;
+
+ mCanceled = false;
return 0;
}
int MtpFfsHandle::configure(bool usePtp) {
// Wait till previous server invocation has closed
- std::lock_guard<std::mutex> lk(mLock);
+ if (!mLock.try_lock_for(std::chrono::milliseconds(1000))) {
+ LOG(ERROR) << "MtpServer was unable to get configure lock";
+ return -1;
+ }
+ int ret = 0;
// If ptp is changed, the configuration must be rewritten
if (mPtp != usePtp) {
@@ -500,205 +549,391 @@
mPtp = usePtp;
if (!initFunctionfs()) {
- return -1;
+ ret = -1;
}
- return 0;
+ mLock.unlock();
+ return ret;
}
void MtpFfsHandle::close() {
+ io_destroy(mCtx);
closeEndpoints();
mLock.unlock();
}
-/* Read from USB and write to a local file. */
-int MtpFfsHandle::receiveFile(mtp_file_range mfr) {
- // When receiving files, the incoming length is given in 32 bits.
- // A >4G file is given as 0xFFFFFFFF
- uint32_t file_length = mfr.length;
- uint64_t offset = lseek(mfr.fd, 0, SEEK_CUR);
+int MtpFfsHandle::waitEvents(struct io_buffer *buf, int min_events, struct io_event *events,
+ int *counter) {
+ int num_events = 0;
+ int ret = 0;
+ int error = 0;
- char *data = mBuffer1.data();
- char *data2 = mBuffer2.data();
+ while (num_events < min_events) {
+ if (poll(mPollFds, 2, 0) == -1) {
+ PLOG(ERROR) << "Mtp error during poll()";
+ return -1;
+ }
+ if (mPollFds[0].revents & POLLIN) {
+ mPollFds[0].revents = 0;
+ if (handleEvent() == -1) {
+ error = errno;
+ }
+ }
+ if (mPollFds[1].revents & POLLIN) {
+ mPollFds[1].revents = 0;
+ uint64_t ev_cnt = 0;
+
+ if (::read(mEventFd, &ev_cnt, sizeof(ev_cnt)) == -1) {
+ PLOG(ERROR) << "Mtp unable to read eventfd";
+ error = errno;
+ continue;
+ }
+
+ // It's possible that io_getevents will return more events than the eventFd reported,
+ // since events may appear in the time between the calls. In this case, the eventFd will
+ // show up as readable next iteration, but there will be fewer or no events to actually
+ // wait for. Thus we never want io_getevents to block.
+ int this_events = TEMP_FAILURE_RETRY(io_getevents(mCtx, 0, AIO_BUFS_MAX, events, &ZERO_TIMEOUT));
+ if (this_events == -1) {
+ PLOG(ERROR) << "Mtp error getting events";
+ error = errno;
+ }
+ // Add up the total amount of data and find errors on the way.
+ for (unsigned j = 0; j < static_cast<unsigned>(this_events); j++) {
+ if (events[j].res < 0) {
+ errno = -events[j].res;
+ PLOG(ERROR) << "Mtp got error event at " << j << " and " << buf->actual << " total";
+ error = errno;
+ }
+ ret += events[j].res;
+ }
+ num_events += this_events;
+ if (counter)
+ *counter += this_events;
+ }
+ if (error) {
+ errno = error;
+ ret = -1;
+ break;
+ }
+ }
+ return ret;
+}
+
+void MtpFfsHandle::cancelTransaction() {
+ // Device cancels by stalling both bulk endpoints.
+ if (::read(mBulkIn, nullptr, 0) != -1 || errno != EBADMSG)
+ PLOG(ERROR) << "Mtp stall failed on bulk in";
+ if (::write(mBulkOut, nullptr, 0) != -1 || errno != EBADMSG)
+ PLOG(ERROR) << "Mtp stall failed on bulk out";
+ mCanceled = true;
+ errno = ECANCELED;
+}
+
+int MtpFfsHandle::cancelEvents(struct iocb **iocb, struct io_event *events, unsigned start,
+ unsigned end) {
+ // Some manpages for io_cancel are out of date and incorrect.
+ // io_cancel will return -EINPROGRESS on success and does
+ // not place the event in the given memory. We have to use
+ // io_getevents to wait for all the events we cancelled.
+ int ret = 0;
+ unsigned num_events = 0;
+ int save_errno = errno;
+ errno = 0;
+
+ for (unsigned j = start; j < end; j++) {
+ if (io_cancel(mCtx, iocb[j], nullptr) != -1 || errno != EINPROGRESS) {
+ PLOG(ERROR) << "Mtp couldn't cancel request " << j;
+ } else {
+ num_events++;
+ }
+ }
+ if (num_events != end - start) {
+ ret = -1;
+ errno = EIO;
+ }
+ int evs = TEMP_FAILURE_RETRY(io_getevents(mCtx, num_events, AIO_BUFS_MAX, events, nullptr));
+ if (static_cast<unsigned>(evs) != num_events) {
+ PLOG(ERROR) << "Mtp couldn't cancel all requests, got " << evs;
+ ret = -1;
+ }
+
+ uint64_t ev_cnt = 0;
+ if (num_events && ::read(mEventFd, &ev_cnt, sizeof(ev_cnt)) == -1)
+ PLOG(ERROR) << "Mtp Unable to read event fd";
+
+ if (ret == 0) {
+ // Restore errno since it probably got overridden with EINPROGRESS.
+ errno = save_errno;
+ }
+ return ret;
+}
+
+int MtpFfsHandle::iobufSubmit(struct io_buffer *buf, int fd, unsigned length, bool read) {
+ int ret = 0;
+ buf->actual = AIO_BUFS_MAX;
+ for (unsigned j = 0; j < AIO_BUFS_MAX; j++) {
+ unsigned rq_length = std::min(AIO_BUF_LEN, length - AIO_BUF_LEN * j);
+ io_prep(buf->iocb[j], fd, buf->buf[j], rq_length, 0, read);
+ buf->iocb[j]->aio_flags |= IOCB_FLAG_RESFD;
+ buf->iocb[j]->aio_resfd = mEventFd;
+
+ // Not enough data, so table is truncated.
+ if (rq_length < AIO_BUF_LEN || length == AIO_BUF_LEN * (j + 1)) {
+ buf->actual = j + 1;
+ break;
+ }
+ }
+
+ ret = io_submit(mCtx, buf->actual, buf->iocb.data());
+ if (ret != static_cast<int>(buf->actual)) {
+ PLOG(ERROR) << "Mtp io_submit got " << ret << " expected " << buf->actual;
+ if (ret != -1) {
+ errno = EIO;
+ }
+ ret = -1;
+ }
+ return ret;
+}
+
+int MtpFfsHandle::receiveFile(mtp_file_range mfr, bool zero_packet) {
+ // When receiving files, the incoming length is given in 32 bits.
+ // A >=4G file is given as 0xFFFFFFFF
+ uint32_t file_length = mfr.length;
+ uint64_t offset = mfr.offset;
struct aiocb aio;
aio.aio_fildes = mfr.fd;
aio.aio_buf = nullptr;
struct aiocb *aiol[] = {&aio};
- int ret;
- size_t length;
- bool read = false;
- bool write = false;
- posix_fadvise(mfr.fd, 0, 0, POSIX_FADV_SEQUENTIAL | POSIX_FADV_NOREUSE);
+ int ret = -1;
+ unsigned i = 0;
+ size_t length;
+ struct io_event ioevs[AIO_BUFS_MAX];
+ bool has_write = false;
+ bool error = false;
+ bool write_error = false;
+ int packet_size = getPacketSize(mBulkOut);
+ bool short_packet = false;
+ advise(mfr.fd);
// Break down the file into pieces that fit in buffers
- while (file_length > 0 || write) {
+ while (file_length > 0 || has_write) {
+ // Queue an asynchronous read from USB.
if (file_length > 0) {
length = std::min(static_cast<uint32_t>(MAX_FILE_CHUNK_SIZE), file_length);
-
- // Read data from USB
- if ((ret = readHandle(mBulkOut, data, length)) == -1) {
- return -1;
- }
-
- if (file_length != MAX_MTP_FILE_SIZE && ret < static_cast<int>(length)) {
- errno = EIO;
- return -1;
- }
- read = true;
+ if (iobufSubmit(&mIobuf[i], mBulkOut, length, true) == -1)
+ error = true;
}
- if (write) {
- // get the return status of the last write request
+ // Get the return status of the last write request.
+ if (has_write) {
aio_suspend(aiol, 1, nullptr);
-
int written = aio_return(&aio);
- if (written == -1) {
- errno = aio_error(&aio);
- return -1;
- }
if (static_cast<size_t>(written) < aio.aio_nbytes) {
- errno = EIO;
- return -1;
+ errno = written == -1 ? aio_error(&aio) : EIO;
+ PLOG(ERROR) << "Mtp error writing to disk";
+ write_error = true;
}
- write = false;
+ has_write = false;
}
- if (read) {
- // Enqueue a new write request
- aio.aio_buf = data;
- aio.aio_sink = mfr.fd;
- aio.aio_offset = offset;
- aio.aio_nbytes = ret;
- aio_write(&aio);
+ if (error) {
+ return -1;
+ }
+ // Get the result of the read request, and queue a write to disk.
+ if (file_length > 0) {
+ unsigned num_events = 0;
+ ret = 0;
+ unsigned short_i = mIobuf[i].actual;
+ while (num_events < short_i) {
+ // Get all events up to the short read, if there is one.
+ // We must wait for each event since data transfer could end at any time.
+ int this_events = 0;
+ int event_ret = waitEvents(&mIobuf[i], 1, ioevs, &this_events);
+ num_events += this_events;
+
+ if (event_ret == -1) {
+ cancelEvents(mIobuf[i].iocb.data(), ioevs, num_events, mIobuf[i].actual);
+ return -1;
+ }
+ ret += event_ret;
+ for (int j = 0; j < this_events; j++) {
+ // struct io_event contains a pointer to the associated struct iocb as a __u64.
+ if (static_cast<__u64>(ioevs[j].res) <
+ reinterpret_cast<struct iocb*>(ioevs[j].obj)->aio_nbytes) {
+ // We've found a short event. Store the index since
+ // events won't necessarily arrive in the order they are queued.
+ short_i = (ioevs[j].obj - reinterpret_cast<uint64_t>(mIobuf[i].iocbs.data()))
+ / sizeof(struct iocb) + 1;
+ short_packet = true;
+ }
+ }
+ }
+ if (short_packet) {
+ if (cancelEvents(mIobuf[i].iocb.data(), ioevs, short_i, mIobuf[i].actual)) {
+ write_error = true;
+ }
+ }
if (file_length == MAX_MTP_FILE_SIZE) {
// For larger files, receive until a short packet is received.
if (static_cast<size_t>(ret) < length) {
file_length = 0;
}
+ } else if (ret < static_cast<int>(length)) {
+ // If file is less than 4G and we get a short packet, it's an error.
+ errno = EIO;
+ LOG(ERROR) << "Mtp got unexpected short packet";
+ return -1;
} else {
file_length -= ret;
}
- offset += ret;
- std::swap(data, data2);
+ if (write_error) {
+ cancelTransaction();
+ return -1;
+ }
- write = true;
- read = false;
+ // Enqueue a new write request
+ aio_prepare(&aio, mIobuf[i].bufs.data(), ret, offset);
+ aio_write(&aio);
+
+ offset += ret;
+ i = (i + 1) % NUM_IO_BUFS;
+ has_write = true;
+ }
+ }
+ if ((ret % packet_size == 0 && !short_packet) || zero_packet) {
+ // Receive an empty packet if size is a multiple of the endpoint size
+ // and we didn't already get an empty packet from the header or large file.
+ if (read(mIobuf[0].bufs.data(), packet_size) != 0) {
+ return -1;
}
}
return 0;
}
-/* Read from a local file and send over USB. */
int MtpFfsHandle::sendFile(mtp_file_range mfr) {
uint64_t file_length = mfr.length;
uint32_t given_length = std::min(static_cast<uint64_t>(MAX_MTP_FILE_SIZE),
file_length + sizeof(mtp_data_header));
uint64_t offset = mfr.offset;
- struct usb_endpoint_descriptor mBulkIn_desc;
- int packet_size;
-
- if (ioctl(mBulkIn, FUNCTIONFS_ENDPOINT_DESC, reinterpret_cast<unsigned long>(&mBulkIn_desc))) {
- PLOG(ERROR) << "Could not get FFS bulk-in descriptor";
- packet_size = MAX_PACKET_SIZE_HS;
- } else {
- packet_size = mBulkIn_desc.wMaxPacketSize;
- }
+ int packet_size = getPacketSize(mBulkIn);
// If file_length is larger than a size_t, truncating would produce the wrong comparison.
// Instead, promote the left side to 64 bits, then truncate the small result.
int init_read_len = std::min(
static_cast<uint64_t>(packet_size - sizeof(mtp_data_header)), file_length);
- char *data = mBuffer1.data();
- char *data2 = mBuffer2.data();
-
- posix_fadvise(mfr.fd, 0, 0, POSIX_FADV_SEQUENTIAL | POSIX_FADV_NOREUSE);
+ advise(mfr.fd);
struct aiocb aio;
aio.aio_fildes = mfr.fd;
struct aiocb *aiol[] = {&aio};
- int ret, length;
- bool read = false;
- bool write = false;
+ int ret = 0;
+ int length, num_read;
+ unsigned i = 0;
+ struct io_event ioevs[AIO_BUFS_MAX];
+ bool error = false;
+ bool has_write = false;
// Send the header data
- mtp_data_header *header = reinterpret_cast<mtp_data_header*>(data);
- header->length = __cpu_to_le32(given_length);
- header->type = __cpu_to_le16(2); /* data packet */
- header->command = __cpu_to_le16(mfr.command);
- header->transaction_id = __cpu_to_le32(mfr.transaction_id);
+ mtp_data_header *header = reinterpret_cast<mtp_data_header*>(mIobuf[0].bufs.data());
+ header->length = htole32(given_length);
+ header->type = htole16(2); // data packet
+ header->command = htole16(mfr.command);
+ header->transaction_id = htole32(mfr.transaction_id);
// Some hosts don't support header/data separation even though MTP allows it
// Handle by filling first packet with initial file data
- if (TEMP_FAILURE_RETRY(pread(mfr.fd, reinterpret_cast<char*>(data) +
+ if (TEMP_FAILURE_RETRY(pread(mfr.fd, mIobuf[0].bufs.data() +
sizeof(mtp_data_header), init_read_len, offset))
!= init_read_len) return -1;
- if (writeHandle(mBulkIn, data, sizeof(mtp_data_header) + init_read_len) == -1) return -1;
- if (file_length == static_cast<unsigned>(init_read_len)) return 0;
+ if (write(mIobuf[0].bufs.data(), sizeof(mtp_data_header) + init_read_len) == -1)
+ return -1;
file_length -= init_read_len;
offset += init_read_len;
- ret = 0;
+ ret = init_read_len + sizeof(mtp_data_header);
// Break down the file into pieces that fit in buffers
- while(file_length > 0) {
- if (read) {
- // Wait for the previous read to finish
- aio_suspend(aiol, 1, nullptr);
- ret = aio_return(&aio);
- if (ret == -1) {
- errno = aio_error(&aio);
- return -1;
- }
- if (static_cast<size_t>(ret) < aio.aio_nbytes) {
- errno = EIO;
- return -1;
- }
+ while(file_length > 0 || has_write) {
+ if (file_length > 0) {
+ // Queue up a read from disk.
+ length = std::min(static_cast<uint64_t>(MAX_FILE_CHUNK_SIZE), file_length);
+ aio_prepare(&aio, mIobuf[i].bufs.data(), length, offset);
+ aio_read(&aio);
+ }
- file_length -= ret;
- offset += ret;
- std::swap(data, data2);
- read = false;
- write = true;
+ if (has_write) {
+ // Wait for usb write. Cancel unwritten portion if there's an error.
+ int num_events = 0;
+ if (waitEvents(&mIobuf[(i-1)%NUM_IO_BUFS], mIobuf[(i-1)%NUM_IO_BUFS].actual, ioevs,
+ &num_events) != ret) {
+ error = true;
+ cancelEvents(mIobuf[(i-1)%NUM_IO_BUFS].iocb.data(), ioevs, num_events,
+ mIobuf[(i-1)%NUM_IO_BUFS].actual);
+ }
+ has_write = false;
}
if (file_length > 0) {
- length = std::min(static_cast<uint64_t>(MAX_FILE_CHUNK_SIZE), file_length);
- // Queue up another read
- aio.aio_buf = data;
- aio.aio_offset = offset;
- aio.aio_nbytes = length;
- aio_read(&aio);
- read = true;
- }
-
- if (write) {
- if (writeHandle(mBulkIn, data2, ret) == -1)
+ // Wait for the previous read to finish
+ aio_suspend(aiol, 1, nullptr);
+ num_read = aio_return(&aio);
+ if (static_cast<size_t>(num_read) < aio.aio_nbytes) {
+ errno = num_read == -1 ? aio_error(&aio) : EIO;
+ PLOG(ERROR) << "Mtp error reading from disk";
+ cancelTransaction();
return -1;
- write = false;
+ }
+
+ file_length -= num_read;
+ offset += num_read;
+
+ if (error) {
+ return -1;
+ }
+
+ // Queue up a write to usb.
+ if (iobufSubmit(&mIobuf[i], mBulkIn, num_read, false) == -1) {
+ return -1;
+ }
+ has_write = true;
+ ret = num_read;
+ }
+
+ i = (i + 1) % NUM_IO_BUFS;
+ }
+
+ if (ret % packet_size == 0) {
+ // If the last packet wasn't short, send a final empty packet
+ if (write(mIobuf[0].bufs.data(), 0) != 0) {
+ return -1;
}
}
-
- if (given_length == MAX_MTP_FILE_SIZE && ret % packet_size == 0) {
- // If the last packet wasn't short, send a final empty packet
- if (writeHandle(mBulkIn, data, 0) == -1) return -1;
- }
-
return 0;
}
int MtpFfsHandle::sendEvent(mtp_event me) {
+ // Mimic the behavior of f_mtp by sending the event async.
+ // Events aren't critical to the connection, so we don't need to check the return value.
+ char *temp = new char[me.length];
+ memcpy(temp, me.data, me.length);
+ me.data = temp;
+ std::thread t([this, me]() { return this->doSendEvent(me); });
+ t.detach();
+ return 0;
+}
+
+void MtpFfsHandle::doSendEvent(mtp_event me) {
unsigned length = me.length;
- int ret = writeHandle(mIntr, me.data, length);
- return static_cast<unsigned>(ret) == length ? 0 : -1;
+ int ret = ::write(mIntr, me.data, length);
+ if (static_cast<unsigned>(ret) != length)
+ PLOG(ERROR) << "Mtp error sending event thread!";
+ delete[] reinterpret_cast<char*>(me.data);
}
} // namespace android
-IMtpHandle *get_ffs_handle() {
- return new android::MtpFfsHandle();
-}
-
diff --git a/media/mtp/MtpFfsHandle.h b/media/mtp/MtpFfsHandle.h
index b4d5a97..2f90bd1 100644
--- a/media/mtp/MtpFfsHandle.h
+++ b/media/mtp/MtpFfsHandle.h
@@ -18,25 +18,51 @@
#define _MTP_FFS_HANDLE_H
#include <android-base/unique_fd.h>
+#include <linux/aio_abi.h>
+#include <mutex>
+#include <sys/poll.h>
+#include <time.h>
+#include <thread>
+#include <vector>
+
#include <IMtpHandle.h>
namespace android {
-class MtpFfsHandleTest;
+constexpr char FFS_MTP_EP0[] = "/dev/usb-ffs/mtp/ep0";
+
+constexpr int NUM_IO_BUFS = 2;
+
+struct io_buffer {
+ std::vector<struct iocb> iocbs; // Holds memory for all iocbs. Not used directly.
+ std::vector<struct iocb*> iocb; // Pointers to individual iocbs, for syscalls
+ std::vector<unsigned char> bufs; // A large buffer, used with filesystem io
+ std::vector<unsigned char*> buf; // Pointers within the larger buffer, for syscalls
+ unsigned actual; // The number of buffers submitted for this request
+};
+
+template <class T> class MtpFfsHandleTest;
class MtpFfsHandle : public IMtpHandle {
- friend class android::MtpFfsHandleTest;
-private:
- int writeHandle(int fd, const void *data, int len);
- int readHandle(int fd, void *data, int len);
- int spliceReadHandle(int fd, int fd_out, int len);
+ template <class T> friend class android::MtpFfsHandleTest;
+protected:
bool initFunctionfs();
void closeConfig();
void closeEndpoints();
+ void advise(int fd);
+ int handleControlRequest(const struct usb_ctrlrequest *request);
+ int doAsync(void* data, size_t len, bool read);
+ int handleEvent();
+ void cancelTransaction();
+ void doSendEvent(mtp_event me);
+ bool openEndpoints();
+
+ static int getPacketSize(int ffs_fd);
bool mPtp;
+ bool mCanceled;
- std::mutex mLock;
+ std::timed_mutex mLock; // protects configure() vs main loop
android::base::unique_fd mControl;
// "in" from the host's perspective => sink for mtp server
@@ -45,28 +71,35 @@
android::base::unique_fd mBulkOut;
android::base::unique_fd mIntr;
- int mMaxWrite;
- int mMaxRead;
+ aio_context_t mCtx;
- std::vector<char> mBuffer1;
- std::vector<char> mBuffer2;
+ android::base::unique_fd mEventFd;
+ struct pollfd mPollFds[2];
+
+ struct io_buffer mIobuf[NUM_IO_BUFS];
+
+ // Submit an io request of given length. Return amount submitted or -1.
+ int iobufSubmit(struct io_buffer *buf, int fd, unsigned length, bool read);
+
+ // Cancel submitted requests from start to end in the given array. Return 0 or -1.
+ int cancelEvents(struct iocb **iocb, struct io_event *events, unsigned start, unsigned end);
+
+ // Wait for at minimum the given number of events. Returns the amount of data in the returned
+ // events. Increments counter by the number of events returned.
+ int waitEvents(struct io_buffer *buf, int min_events, struct io_event *events, int *counter);
public:
- int read(void *data, int len);
- int write(const void *data, int len);
+ int read(void *data, size_t len) override;
+ int write(const void *data, size_t len) override;
- int receiveFile(mtp_file_range mfr);
- int sendFile(mtp_file_range mfr);
- int sendEvent(mtp_event me);
+ int receiveFile(mtp_file_range mfr, bool zero_packet) override;
+ int sendFile(mtp_file_range mfr) override;
+ int sendEvent(mtp_event me) override;
- /**
- * Open ffs endpoints and allocate necessary kernel and user memory.
- * Will sleep until endpoints are enabled, for up to 1 second.
- */
- int start();
- void close();
+ int start() override;
+ void close() override;
- int configure(bool ptp);
+ int configure(bool ptp) override;
MtpFfsHandle();
~MtpFfsHandle();
@@ -85,5 +118,5 @@
} // namespace android
-#endif // _MTP_FF_HANDLE_H
+#endif // _MTP_FFS_HANDLE_H
diff --git a/media/mtp/MtpObjectInfo.cpp b/media/mtp/MtpObjectInfo.cpp
index 0573104..43b745f 100644
--- a/media/mtp/MtpObjectInfo.cpp
+++ b/media/mtp/MtpObjectInfo.cpp
@@ -77,6 +77,7 @@
if (!packet.getString(string)) return false;
mName = strdup((const char *)string);
+ if (!mName) return false;
if (!packet.getString(string)) return false;
if (parseDateTime((const char*)string, time))
@@ -88,6 +89,7 @@
if (!packet.getString(string)) return false;
mKeywords = strdup((const char *)string);
+ if (!mKeywords) return false;
return true;
}
diff --git a/media/mtp/MtpPacket.cpp b/media/mtp/MtpPacket.cpp
index 35ecb4f..3dd4248 100644
--- a/media/mtp/MtpPacket.cpp
+++ b/media/mtp/MtpPacket.cpp
@@ -70,8 +70,8 @@
char* bufptr = buffer;
for (size_t i = 0; i < mPacketSize; i++) {
- sprintf(bufptr, "%02X ", mBuffer[i]);
- bufptr += strlen(bufptr);
+ bufptr += snprintf(bufptr, sizeof(buffer) - (bufptr - buffer), "%02X ",
+ mBuffer[i]);
if (i % DUMP_BYTES_PER_ROW == (DUMP_BYTES_PER_ROW - 1)) {
ALOGV("%s", buffer);
bufptr = buffer;
diff --git a/media/mtp/MtpServer.cpp b/media/mtp/MtpServer.cpp
index b773e8a..e148b0c 100644
--- a/media/mtp/MtpServer.cpp
+++ b/media/mtp/MtpServer.cpp
@@ -31,6 +31,9 @@
#include "MtpDebug.h"
#include "MtpDatabase.h"
+#include "MtpDevHandle.h"
+#include "MtpFfsCompatHandle.h"
+#include "MtpFfsHandle.h"
#include "MtpObjectInfo.h"
#include "MtpProperty.h"
#include "MtpServer.h"
@@ -125,16 +128,21 @@
IMtpHandle* MtpServer::sHandle = nullptr;
int MtpServer::configure(bool usePtp) {
+ bool ffs_ok = access(FFS_MTP_EP0, W_OK) == 0;
if (sHandle == nullptr) {
- bool ffs_ok = access(FFS_MTP_EP0, W_OK) == 0;
- sHandle = ffs_ok ? get_ffs_handle() : get_mtp_handle();
+ if (ffs_ok) {
+ bool aio_compat = android::base::GetBoolProperty("sys.usb.ffs.aio_compat", false);
+ sHandle = aio_compat ? new MtpFfsCompatHandle() : new MtpFfsHandle();
+ } else {
+ sHandle = new MtpDevHandle();
+ }
}
-
- int ret = sHandle->configure(usePtp);
- if (ret) ALOGE("Failed to configure MTP driver!");
- else android::base::SetProperty("sys.usb.ffs.mtp.ready", "1");
-
- return ret;
+ if (sHandle->configure(usePtp)) {
+ ALOGE("Failed to configure Mtp driver!");
+ return -1;
+ }
+ android::base::SetProperty("sys.usb.ffs.mtp.ready", "1");
+ return 0;
}
void MtpServer::addStorage(MtpStorage* storage) {
@@ -878,6 +886,7 @@
length = fileLength - offset;
const char* filePath = (const char *)pathBuf;
+ ALOGV("sending partial %s %" PRIu64 " %" PRIu32, filePath, offset, length);
mtp_file_range mfr;
mfr.fd = open(filePath, O_RDONLY);
if (mfr.fd < 0) {
@@ -1056,23 +1065,22 @@
ALOGE("failed to write initial data");
result = MTP_RESPONSE_GENERAL_ERROR;
} else {
- if (mSendObjectFileSize - initialData > 0) {
- mfr.offset = initialData;
- if (mSendObjectFileSize == 0xFFFFFFFF) {
- // tell driver to read until it receives a short packet
- mfr.length = 0xFFFFFFFF;
- } else {
- mfr.length = mSendObjectFileSize - initialData;
- }
+ mfr.offset = initialData;
+ if (mSendObjectFileSize == 0xFFFFFFFF) {
+ // tell driver to read until it receives a short packet
+ mfr.length = 0xFFFFFFFF;
+ } else {
+ mfr.length = mSendObjectFileSize - initialData;
+ }
- mfr.command = 0;
- mfr.transaction_id = 0;
+ mfr.command = 0;
+ mfr.transaction_id = 0;
- // transfer the file
- ret = sHandle->receiveFile(mfr);
- if ((ret < 0) && (errno == ECANCELED)) {
- isCanceled = true;
- }
+ // transfer the file
+ ret = sHandle->receiveFile(mfr, mfr.length == 0 &&
+ initialData == MTP_BUFFER_SIZE - MTP_CONTAINER_HEADER_SIZE);
+ if ((ret < 0) && (errno == ECANCELED)) {
+ isCanceled = true;
}
}
@@ -1271,19 +1279,18 @@
if (ret < 0) {
ALOGE("failed to write initial data");
} else {
- if (length > 0) {
- mtp_file_range mfr;
- mfr.fd = edit->mFD;
- mfr.offset = offset;
- mfr.length = length;
- mfr.command = 0;
- mfr.transaction_id = 0;
+ mtp_file_range mfr;
+ mfr.fd = edit->mFD;
+ mfr.offset = offset;
+ mfr.length = length;
+ mfr.command = 0;
+ mfr.transaction_id = 0;
- // transfer the file
- ret = sHandle->receiveFile(mfr);
- if ((ret < 0) && (errno == ECANCELED)) {
- isCanceled = true;
- }
+ // transfer the file
+ ret = sHandle->receiveFile(mfr, mfr.length == 0 &&
+ initialData == MTP_BUFFER_SIZE - MTP_CONTAINER_HEADER_SIZE);
+ if ((ret < 0) && (errno == ECANCELED)) {
+ isCanceled = true;
}
}
if (ret < 0) {
diff --git a/media/mtp/MtpStorageInfo.cpp b/media/mtp/MtpStorageInfo.cpp
index 5d4ebbf..8801a38 100644
--- a/media/mtp/MtpStorageInfo.cpp
+++ b/media/mtp/MtpStorageInfo.cpp
@@ -58,8 +58,10 @@
if (!packet.getString(string)) return false;
mStorageDescription = strdup((const char *)string);
+ if (!mStorageDescription) return false;
if (!packet.getString(string)) return false;
mVolumeIdentifier = strdup((const char *)string);
+ if (!mVolumeIdentifier) return false;
return true;
}
diff --git a/media/mtp/PosixAsyncIO.cpp b/media/mtp/PosixAsyncIO.cpp
new file mode 100644
index 0000000..e67c568
--- /dev/null
+++ b/media/mtp/PosixAsyncIO.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android-base/logging.h>
+#include <condition_variable>
+#include <memory>
+#include <mutex>
+#include <queue>
+#include <unistd.h>
+
+#include "PosixAsyncIO.h"
+
+namespace {
+
+void read_func(struct aiocb *aiocbp) {
+ aiocbp->ret = TEMP_FAILURE_RETRY(pread(aiocbp->aio_fildes,
+ aiocbp->aio_buf, aiocbp->aio_nbytes, aiocbp->aio_offset));
+ if (aiocbp->ret == -1) aiocbp->error = errno;
+}
+
+void write_func(struct aiocb *aiocbp) {
+ aiocbp->ret = TEMP_FAILURE_RETRY(pwrite(aiocbp->aio_fildes,
+ aiocbp->aio_buf, aiocbp->aio_nbytes, aiocbp->aio_offset));
+ if (aiocbp->ret == -1) aiocbp->error = errno;
+}
+
+} // end anonymous namespace
+
+aiocb::~aiocb() {
+ CHECK(!thread.joinable());
+}
+
+int aio_read(struct aiocb *aiocbp) {
+ aiocbp->thread = std::thread(read_func, aiocbp);
+ return 0;
+}
+
+int aio_write(struct aiocb *aiocbp) {
+ aiocbp->thread = std::thread(write_func, aiocbp);
+ return 0;
+}
+
+int aio_error(const struct aiocb *aiocbp) {
+ return aiocbp->error;
+}
+
+ssize_t aio_return(struct aiocb *aiocbp) {
+ return aiocbp->ret;
+}
+
+int aio_suspend(struct aiocb *aiocbp[], int n,
+ const struct timespec *) {
+ for (int i = 0; i < n; i++) {
+ aiocbp[i]->thread.join();
+ }
+ return 0;
+}
+
+void aio_prepare(struct aiocb *aiocbp, void* buf, size_t count, off_t offset) {
+ aiocbp->aio_buf = buf;
+ aiocbp->aio_offset = offset;
+ aiocbp->aio_nbytes = count;
+}
diff --git a/media/mtp/PosixAsyncIO.h b/media/mtp/PosixAsyncIO.h
new file mode 100644
index 0000000..590aaef
--- /dev/null
+++ b/media/mtp/PosixAsyncIO.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _POSIXASYNCIO_H
+#define _POSIXASYNCIO_H
+
+#include <sys/cdefs.h>
+#include <sys/types.h>
+#include <time.h>
+#include <thread>
+#include <unistd.h>
+
+/**
+ * Provides a subset of POSIX aio operations.
+ */
+
+struct aiocb {
+ int aio_fildes;
+ void *aio_buf;
+
+ off_t aio_offset;
+ size_t aio_nbytes;
+
+ // Used internally
+ std::thread thread;
+ ssize_t ret;
+ int error;
+
+ ~aiocb();
+};
+
+// Submit a request for IO to be completed
+int aio_read(struct aiocb *);
+int aio_write(struct aiocb *);
+
+// Suspend current thread until given IO is complete, at which point
+// its return value and any errors can be accessed
+// All submitted requests must have a corresponding suspend.
+// aiocb->aio_buf must refer to valid memory until after the suspend call
+int aio_suspend(struct aiocb *[], int, const struct timespec *);
+int aio_error(const struct aiocb *);
+ssize_t aio_return(struct aiocb *);
+
+// Helper method for setting aiocb members
+void aio_prepare(struct aiocb *, void*, size_t, off_t);
+
+#endif // _POSIXASYNCIO_H
+
diff --git a/media/mtp/mtp.h b/media/mtp/mtp.h
index adfb102..644780f 100644
--- a/media/mtp/mtp.h
+++ b/media/mtp/mtp.h
@@ -493,4 +493,10 @@
#define MTP_ASSOCIATION_TYPE_UNDEFINED 0x0000
#define MTP_ASSOCIATION_TYPE_GENERIC_FOLDER 0x0001
+// MTP class requests
+#define MTP_REQ_CANCEL 0x64
+#define MTP_REQ_GET_EXT_EVENT_DATA 0x65
+#define MTP_REQ_RESET 0x66
+#define MTP_REQ_GET_DEVICE_STATUS 0x67
+
#endif // _MTP_H
diff --git a/media/mtp/tests/Android.bp b/media/mtp/tests/Android.bp
index 356406d..a0480b6 100644
--- a/media/mtp/tests/Android.bp
+++ b/media/mtp/tests/Android.bp
@@ -30,8 +30,9 @@
}
cc_test {
- name: "async_io_test",
- srcs: ["AsyncIO_test.cpp"],
+ name: "posix_async_io_test",
+ test_suites: ["device-tests"],
+ srcs: ["PosixAsyncIO_test.cpp"],
shared_libs: [
"libbase",
"libmtp",
diff --git a/media/mtp/tests/AsyncIO_test.cpp b/media/mtp/tests/AsyncIO_test.cpp
deleted file mode 100644
index b5f4538..0000000
--- a/media/mtp/tests/AsyncIO_test.cpp
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * Copyright 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#define LOG_TAG "AsyncIO_test.cpp"
-
-#include <android-base/test_utils.h>
-#include <fcntl.h>
-#include <gtest/gtest.h>
-#include <string>
-#include <unistd.h>
-#include <utils/Log.h>
-
-#include "AsyncIO.h"
-
-namespace android {
-
-constexpr int TEST_PACKET_SIZE = 512;
-constexpr int POOL_COUNT = 10;
-
-static const std::string dummyDataStr =
- "/*\n * Copyright 2015 The Android Open Source Project\n *\n * Licensed un"
- "der the Apache License, Version 2.0 (the \"License\");\n * you may not us"
- "e this file except in compliance with the License.\n * You may obtain a c"
- "opy of the License at\n *\n * http://www.apache.org/licenses/LICENSE"
- "-2.0\n *\n * Unless required by applicable law or agreed to in writing, s"
- "oftware\n * distributed under the License is distributed on an \"AS IS\" "
- "BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o"
- "r implied.\n * Se";
-
-
-class AsyncIOTest : public ::testing::Test {
-protected:
- TemporaryFile dummy_file;
-
- AsyncIOTest() {}
- ~AsyncIOTest() {}
-};
-
-TEST_F(AsyncIOTest, testRead) {
- char buf[TEST_PACKET_SIZE + 1];
- buf[TEST_PACKET_SIZE] = '\0';
- EXPECT_EQ(write(dummy_file.fd, dummyDataStr.c_str(), TEST_PACKET_SIZE), TEST_PACKET_SIZE);
- struct aiocb aio;
- struct aiocb *aiol[] = {&aio};
- aio.aio_fildes = dummy_file.fd;
- aio.aio_buf = buf;
- aio.aio_offset = 0;
- aio.aio_nbytes = TEST_PACKET_SIZE;
-
- EXPECT_EQ(aio_read(&aio), 0);
- EXPECT_EQ(aio_suspend(aiol, 1, nullptr), 0);
- EXPECT_EQ(aio_return(&aio), TEST_PACKET_SIZE);
- EXPECT_STREQ(buf, dummyDataStr.c_str());
-}
-
-TEST_F(AsyncIOTest, testWrite) {
- char buf[TEST_PACKET_SIZE + 1];
- buf[TEST_PACKET_SIZE] = '\0';
- struct aiocb aio;
- struct aiocb *aiol[] = {&aio};
- aio.aio_fildes = dummy_file.fd;
- aio.aio_buf = const_cast<char*>(dummyDataStr.c_str());
- aio.aio_offset = 0;
- aio.aio_nbytes = TEST_PACKET_SIZE;
-
- EXPECT_EQ(aio_write(&aio), 0);
- EXPECT_EQ(aio_suspend(aiol, 1, nullptr), 0);
- EXPECT_EQ(aio_return(&aio), TEST_PACKET_SIZE);
- EXPECT_EQ(read(dummy_file.fd, buf, TEST_PACKET_SIZE), TEST_PACKET_SIZE);
- EXPECT_STREQ(buf, dummyDataStr.c_str());
-}
-
-TEST_F(AsyncIOTest, testError) {
- char buf[TEST_PACKET_SIZE + 1];
- buf[TEST_PACKET_SIZE] = '\0';
- struct aiocb aio;
- struct aiocb *aiol[] = {&aio};
- aio.aio_fildes = -1;
- aio.aio_buf = const_cast<char*>(dummyDataStr.c_str());
- aio.aio_offset = 0;
- aio.aio_nbytes = TEST_PACKET_SIZE;
-
- EXPECT_EQ(aio_write(&aio), 0);
- EXPECT_EQ(aio_suspend(aiol, 1, nullptr), 0);
- EXPECT_EQ(aio_return(&aio), -1);
- EXPECT_EQ(aio_error(&aio), EBADF);
-}
-
-TEST_F(AsyncIOTest, testSpliceRead) {
- char buf[TEST_PACKET_SIZE + 1];
- buf[TEST_PACKET_SIZE] = '\0';
- int pipeFd[2];
- EXPECT_EQ(pipe(pipeFd), 0);
- EXPECT_EQ(write(dummy_file.fd, dummyDataStr.c_str(), TEST_PACKET_SIZE), TEST_PACKET_SIZE);
- struct aiocb aio;
- struct aiocb *aiol[] = {&aio};
- aio.aio_fildes = dummy_file.fd;
- aio.aio_sink = pipeFd[1];
- aio.aio_offset = 0;
- aio.aio_nbytes = TEST_PACKET_SIZE;
-
- EXPECT_EQ(aio_splice_read(&aio), 0);
- EXPECT_EQ(aio_suspend(aiol, 1, nullptr), 0);
- EXPECT_EQ(aio_return(&aio), TEST_PACKET_SIZE);
-
- EXPECT_EQ(read(pipeFd[0], buf, TEST_PACKET_SIZE), TEST_PACKET_SIZE);
- EXPECT_STREQ(buf, dummyDataStr.c_str());
-}
-
-TEST_F(AsyncIOTest, testSpliceWrite) {
- char buf[TEST_PACKET_SIZE + 1];
- buf[TEST_PACKET_SIZE] = '\0';
- int pipeFd[2];
- EXPECT_EQ(pipe(pipeFd), 0);
- EXPECT_EQ(write(pipeFd[1], dummyDataStr.c_str(), TEST_PACKET_SIZE), TEST_PACKET_SIZE);
- struct aiocb aio;
- struct aiocb *aiol[] = {&aio};
- aio.aio_fildes = pipeFd[0];
- aio.aio_sink = dummy_file.fd;
- aio.aio_offset = 0;
- aio.aio_nbytes = TEST_PACKET_SIZE;
-
- EXPECT_EQ(aio_splice_write(&aio), 0);
- EXPECT_EQ(aio_suspend(aiol, 1, nullptr), 0);
- EXPECT_EQ(aio_return(&aio), TEST_PACKET_SIZE);
- EXPECT_EQ(read(dummy_file.fd, buf, TEST_PACKET_SIZE), TEST_PACKET_SIZE);
- EXPECT_STREQ(buf, dummyDataStr.c_str());
-}
-
-TEST_F(AsyncIOTest, testPoolWrite) {
- aio_pool_write_init();
- char buf[TEST_PACKET_SIZE * POOL_COUNT + 1];
- buf[TEST_PACKET_SIZE * POOL_COUNT] = '\0';
-
- for (int i = 0; i < POOL_COUNT; i++) {
- struct aiocb *aiop = new struct aiocb;
- aiop->aio_fildes = dummy_file.fd;
- aiop->aio_pool_buf = std::unique_ptr<char[]>(new char[TEST_PACKET_SIZE]);
- memcpy(aiop->aio_pool_buf.get(), dummyDataStr.c_str(), TEST_PACKET_SIZE);
- aiop->aio_offset = i * TEST_PACKET_SIZE;
- aiop->aio_nbytes = TEST_PACKET_SIZE;
- EXPECT_EQ(aio_pool_write(aiop), 0);
- }
- aio_pool_end();
- EXPECT_EQ(read(dummy_file.fd, buf, TEST_PACKET_SIZE * POOL_COUNT), TEST_PACKET_SIZE * POOL_COUNT);
-
- std::stringstream ss;
- for (int i = 0; i < POOL_COUNT; i++)
- ss << dummyDataStr;
-
- EXPECT_STREQ(buf, ss.str().c_str());
-}
-
-TEST_F(AsyncIOTest, testSplicePoolWrite) {
- aio_pool_splice_init();
- char buf[TEST_PACKET_SIZE * POOL_COUNT + 1];
- buf[TEST_PACKET_SIZE * POOL_COUNT] = '\0';
-
- for (int i = 0; i < POOL_COUNT; i++) {
- int pipeFd[2];
- EXPECT_EQ(pipe(pipeFd), 0);
- EXPECT_EQ(write(pipeFd[1], dummyDataStr.c_str(), TEST_PACKET_SIZE), TEST_PACKET_SIZE);
- struct aiocb *aiop = new struct aiocb;
- aiop->aio_fildes = pipeFd[0];
- aiop->aio_sink = dummy_file.fd;
- aiop->aio_offset = i * TEST_PACKET_SIZE;
- aiop->aio_nbytes = TEST_PACKET_SIZE;
- EXPECT_EQ(aio_pool_write(aiop), 0);
- }
- aio_pool_end();
- EXPECT_EQ(read(dummy_file.fd, buf, TEST_PACKET_SIZE * POOL_COUNT), TEST_PACKET_SIZE * POOL_COUNT);
-
- std::stringstream ss;
- for (int i = 0; i < POOL_COUNT; i++)
- ss << dummyDataStr;
-
- EXPECT_STREQ(buf, ss.str().c_str());
-}
-
-} // namespace android
diff --git a/media/mtp/tests/MtpFfsHandle_test.cpp b/media/mtp/tests/MtpFfsHandle_test.cpp
index e575148..8d7301d 100644
--- a/media/mtp/tests/MtpFfsHandle_test.cpp
+++ b/media/mtp/tests/MtpFfsHandle_test.cpp
@@ -26,12 +26,11 @@
#include <utils/Log.h>
#include "MtpFfsHandle.h"
+#include "MtpFfsCompatHandle.h"
namespace android {
-constexpr int MAX_FILE_CHUNK_SIZE = 3 * 1024 * 1024;
-
-constexpr int TEST_PACKET_SIZE = 512;
+constexpr int TEST_PACKET_SIZE = 500;
constexpr int SMALL_MULT = 30;
constexpr int MED_MULT = 510;
@@ -43,17 +42,19 @@
"-2.0\n *\n * Unless required by applicable law or agreed to in writing, s"
"oftware\n * distributed under the License is distributed on an \"AS IS\" "
"BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o"
- "r implied.\n * Se";
+ "r im";
/**
* Functional tests for the MtpFfsHandle class. Ensures header and data integrity
* by mocking ffs endpoints as pipes to capture input / output.
*/
+template <class T>
class MtpFfsHandleTest : public ::testing::Test {
protected:
- std::unique_ptr<IMtpHandle> handle;
+ std::unique_ptr<MtpFfsHandle> handle;
// Pipes for reading endpoint data
+ android::base::unique_fd control;
android::base::unique_fd bulk_in;
android::base::unique_fd bulk_out;
android::base::unique_fd intr;
@@ -62,88 +63,144 @@
MtpFfsHandleTest() {
int fd[2];
- handle = std::unique_ptr<IMtpHandle>(get_ffs_handle());
- MtpFfsHandle *ffs_handle = static_cast<MtpFfsHandle*>(handle.get());
- EXPECT_TRUE(ffs_handle != NULL);
+ handle = std::make_unique<T>();
+
+ EXPECT_EQ(pipe(fd), 0);
+ handle->mControl.reset(fd[0]);
+ control.reset(fd[1]);
EXPECT_EQ(pipe(fd), 0);
EXPECT_EQ(fcntl(fd[0], F_SETPIPE_SZ, 1048576), 1048576);
bulk_in.reset(fd[0]);
- ffs_handle->mBulkIn.reset(fd[1]);
+ handle->mBulkIn.reset(fd[1]);
EXPECT_EQ(pipe(fd), 0);
EXPECT_EQ(fcntl(fd[0], F_SETPIPE_SZ, 1048576), 1048576);
bulk_out.reset(fd[1]);
- ffs_handle->mBulkOut.reset(fd[0]);
+ handle->mBulkOut.reset(fd[0]);
EXPECT_EQ(pipe(fd), 0);
intr.reset(fd[0]);
- ffs_handle->mIntr.reset(fd[1]);
+ handle->mIntr.reset(fd[1]);
- ffs_handle->mBuffer1.resize(MAX_FILE_CHUNK_SIZE);
- ffs_handle->mBuffer2.resize(MAX_FILE_CHUNK_SIZE);
+ handle->start();
}
- ~MtpFfsHandleTest() {}
+ ~MtpFfsHandleTest() {
+ handle->close();
+ }
};
-TEST_F(MtpFfsHandleTest, testRead) {
- EXPECT_EQ(write(bulk_out, dummyDataStr.c_str(), TEST_PACKET_SIZE), TEST_PACKET_SIZE);
+typedef ::testing::Types<MtpFfsHandle, MtpFfsCompatHandle> mtpHandles;
+TYPED_TEST_CASE(MtpFfsHandleTest, mtpHandles);
+
+TYPED_TEST(MtpFfsHandleTest, testRead) {
+ EXPECT_EQ(write(this->bulk_out, dummyDataStr.c_str(), TEST_PACKET_SIZE), TEST_PACKET_SIZE);
char buf[TEST_PACKET_SIZE + 1];
buf[TEST_PACKET_SIZE] = '\0';
- EXPECT_EQ(handle->read(buf, TEST_PACKET_SIZE), TEST_PACKET_SIZE);
+ EXPECT_EQ(this->handle->read(buf, TEST_PACKET_SIZE), TEST_PACKET_SIZE);
EXPECT_STREQ(buf, dummyDataStr.c_str());
}
-TEST_F(MtpFfsHandleTest, testWrite) {
+TYPED_TEST(MtpFfsHandleTest, testWrite) {
char buf[TEST_PACKET_SIZE + 1];
buf[TEST_PACKET_SIZE] = '\0';
- EXPECT_EQ(handle->write(dummyDataStr.c_str(), TEST_PACKET_SIZE), TEST_PACKET_SIZE);
- EXPECT_EQ(read(bulk_in, buf, TEST_PACKET_SIZE), TEST_PACKET_SIZE);
+ EXPECT_EQ(this->handle->write(dummyDataStr.c_str(), TEST_PACKET_SIZE), TEST_PACKET_SIZE);
+ EXPECT_EQ(read(this->bulk_in, buf, TEST_PACKET_SIZE), TEST_PACKET_SIZE);
EXPECT_STREQ(buf, dummyDataStr.c_str());
}
-TEST_F(MtpFfsHandleTest, testReceiveFileSmall) {
+TYPED_TEST(MtpFfsHandleTest, testReceiveFileEmpty) {
+ std::stringstream ss;
+ mtp_file_range mfr;
+ int size = 0;
+ char buf[size + 1];
+ buf[size] = '\0';
+
+ mfr.offset = 0;
+ mfr.length = size;
+ mfr.fd = this->dummy_file.fd;
+
+ EXPECT_EQ(write(this->bulk_out, ss.str().c_str(), size), size);
+ EXPECT_EQ(this->handle->receiveFile(mfr, false), 0);
+
+ EXPECT_EQ(read(this->dummy_file.fd, buf, size), size);
+}
+
+TYPED_TEST(MtpFfsHandleTest, testReceiveFileSmall) {
std::stringstream ss;
mtp_file_range mfr;
int size = TEST_PACKET_SIZE * SMALL_MULT;
char buf[size + 1];
buf[size] = '\0';
+ mfr.offset = 0;
mfr.length = size;
- mfr.fd = dummy_file.fd;
+ mfr.fd = this->dummy_file.fd;
for (int i = 0; i < SMALL_MULT; i++)
ss << dummyDataStr;
- EXPECT_EQ(write(bulk_out, ss.str().c_str(), size), size);
- EXPECT_EQ(handle->receiveFile(mfr), 0);
+ EXPECT_EQ(write(this->bulk_out, ss.str().c_str(), size), size);
+ EXPECT_EQ(this->handle->receiveFile(mfr, false), 0);
- EXPECT_EQ(read(dummy_file.fd, buf, size), size);
+ EXPECT_EQ(read(this->dummy_file.fd, buf, size), size);
EXPECT_STREQ(buf, ss.str().c_str());
}
-TEST_F(MtpFfsHandleTest, testReceiveFileMed) {
+TYPED_TEST(MtpFfsHandleTest, testReceiveFileMed) {
std::stringstream ss;
mtp_file_range mfr;
int size = TEST_PACKET_SIZE * MED_MULT;
char buf[size + 1];
buf[size] = '\0';
+ mfr.offset = 0;
mfr.length = size;
- mfr.fd = dummy_file.fd;
+ mfr.fd = this->dummy_file.fd;
for (int i = 0; i < MED_MULT; i++)
ss << dummyDataStr;
- EXPECT_EQ(write(bulk_out, ss.str().c_str(), size), size);
- EXPECT_EQ(handle->receiveFile(mfr), 0);
+ EXPECT_EQ(write(this->bulk_out, ss.str().c_str(), size), size);
+ EXPECT_EQ(this->handle->receiveFile(mfr, false), 0);
- EXPECT_EQ(read(dummy_file.fd, buf, size), size);
+ EXPECT_EQ(read(this->dummy_file.fd, buf, size), size);
EXPECT_STREQ(buf, ss.str().c_str());
}
-TEST_F(MtpFfsHandleTest, testSendFileSmall) {
+TYPED_TEST(MtpFfsHandleTest, testReceiveFileMedPartial) {
+ std::stringstream ss;
+ mtp_file_range mfr;
+ int size = TEST_PACKET_SIZE * MED_MULT;
+ char buf[size + 1];
+ buf[size] = '\0';
+
+ mfr.fd = this->dummy_file.fd;
+ for (int i = 0; i < MED_MULT; i++)
+ ss << dummyDataStr;
+
+ EXPECT_EQ(write(this->bulk_out, ss.str().c_str(), size), size);
+
+ std::random_device rd;
+ std::mt19937 gen(rd());
+ std::uniform_int_distribution<> dis(1, TEST_PACKET_SIZE);
+ int offset = 0;
+ while (offset != size) {
+ mfr.offset = offset;
+ int length = std::min(size - offset, dis(gen));
+ mfr.length = length;
+
+ EXPECT_EQ(this->handle->receiveFile(mfr, false), 0);
+ offset += length;
+ }
+
+ EXPECT_EQ(read(this->dummy_file.fd, buf, size), size);
+
+ EXPECT_STREQ(buf, ss.str().c_str());
+}
+
+TYPED_TEST(MtpFfsHandleTest, testSendFileSmall) {
std::stringstream ss;
mtp_file_range mfr;
mfr.command = 42;
@@ -154,14 +211,14 @@
buf[size + sizeof(mtp_data_header)] = '\0';
mfr.length = size;
- mfr.fd = dummy_file.fd;
+ mfr.fd = this->dummy_file.fd;
for (int i = 0; i < SMALL_MULT; i++)
ss << dummyDataStr;
- EXPECT_EQ(write(dummy_file.fd, ss.str().c_str(), size), size);
- EXPECT_EQ(handle->sendFile(mfr), 0);
+ EXPECT_EQ(write(this->dummy_file.fd, ss.str().c_str(), size), size);
+ EXPECT_EQ(this->handle->sendFile(mfr), 0);
- EXPECT_EQ(read(bulk_in, buf, size + sizeof(mtp_data_header)),
+ EXPECT_EQ(read(this->bulk_in, buf, size + sizeof(mtp_data_header)),
static_cast<long>(size + sizeof(mtp_data_header)));
struct mtp_data_header *header = reinterpret_cast<struct mtp_data_header*>(buf);
@@ -172,7 +229,7 @@
EXPECT_EQ(header->transaction_id, static_cast<unsigned int>(1337));
}
-TEST_F(MtpFfsHandleTest, testSendFileMed) {
+TYPED_TEST(MtpFfsHandleTest, testSendFileMed) {
std::stringstream ss;
mtp_file_range mfr;
mfr.command = 42;
@@ -183,14 +240,14 @@
buf[size + sizeof(mtp_data_header)] = '\0';
mfr.length = size;
- mfr.fd = dummy_file.fd;
+ mfr.fd = this->dummy_file.fd;
for (int i = 0; i < MED_MULT; i++)
ss << dummyDataStr;
- EXPECT_EQ(write(dummy_file.fd, ss.str().c_str(), size), size);
- EXPECT_EQ(handle->sendFile(mfr), 0);
+ EXPECT_EQ(write(this->dummy_file.fd, ss.str().c_str(), size), size);
+ EXPECT_EQ(this->handle->sendFile(mfr), 0);
- EXPECT_EQ(read(bulk_in, buf, size + sizeof(mtp_data_header)),
+ EXPECT_EQ(read(this->bulk_in, buf, size + sizeof(mtp_data_header)),
static_cast<long>(size + sizeof(mtp_data_header)));
struct mtp_data_header *header = reinterpret_cast<struct mtp_data_header*>(buf);
@@ -201,10 +258,10 @@
EXPECT_EQ(header->transaction_id, static_cast<unsigned int>(1337));
}
-TEST_F(MtpFfsHandleTest, testSendFileMedPartial) {
+TYPED_TEST(MtpFfsHandleTest, testSendFileMedPartial) {
std::stringstream ss;
mtp_file_range mfr;
- mfr.fd = dummy_file.fd;
+ mfr.fd = this->dummy_file.fd;
mfr.command = 42;
mfr.transaction_id = 1337;
int size = TEST_PACKET_SIZE * MED_MULT;
@@ -214,7 +271,7 @@
for (int i = 0; i < MED_MULT; i++)
ss << dummyDataStr;
- EXPECT_EQ(write(dummy_file.fd, ss.str().c_str(), size), size);
+ EXPECT_EQ(write(this->dummy_file.fd, ss.str().c_str(), size), size);
std::random_device rd;
std::mt19937 gen(rd());
@@ -225,9 +282,9 @@
int length = std::min(size - offset, dis(gen));
mfr.length = length;
char temp_buf[length + sizeof(mtp_data_header)];
- EXPECT_EQ(handle->sendFile(mfr), 0);
+ EXPECT_EQ(this->handle->sendFile(mfr), 0);
- EXPECT_EQ(read(bulk_in, temp_buf, length + sizeof(mtp_data_header)),
+ EXPECT_EQ(read(this->bulk_in, temp_buf, length + sizeof(mtp_data_header)),
static_cast<long>(length + sizeof(mtp_data_header)));
struct mtp_data_header *header = reinterpret_cast<struct mtp_data_header*>(temp_buf);
@@ -241,7 +298,7 @@
EXPECT_STREQ(buf, ss.str().c_str());
}
-TEST_F(MtpFfsHandleTest, testSendFileEmpty) {
+TYPED_TEST(MtpFfsHandleTest, testSendFileEmpty) {
mtp_file_range mfr;
mfr.command = 42;
mfr.transaction_id = 1337;
@@ -251,11 +308,11 @@
buf[size + sizeof(mtp_data_header)] = '\0';
mfr.length = size;
- mfr.fd = dummy_file.fd;
+ mfr.fd = this->dummy_file.fd;
- EXPECT_EQ(handle->sendFile(mfr), 0);
+ EXPECT_EQ(this->handle->sendFile(mfr), 0);
- EXPECT_EQ(read(bulk_in, buf, size + sizeof(mtp_data_header)),
+ EXPECT_EQ(read(this->bulk_in, buf, size + sizeof(mtp_data_header)),
static_cast<long>(size + sizeof(mtp_data_header)));
struct mtp_data_header *header = reinterpret_cast<struct mtp_data_header*>(buf);
@@ -265,15 +322,15 @@
EXPECT_EQ(header->transaction_id, static_cast<unsigned int>(1337));
}
-TEST_F(MtpFfsHandleTest, testSendEvent) {
+TYPED_TEST(MtpFfsHandleTest, testSendEvent) {
struct mtp_event event;
event.length = TEST_PACKET_SIZE;
event.data = const_cast<char*>(dummyDataStr.c_str());
char buf[TEST_PACKET_SIZE + 1];
buf[TEST_PACKET_SIZE] = '\0';
- handle->sendEvent(event);
- read(intr, buf, TEST_PACKET_SIZE);
+ this->handle->sendEvent(event);
+ read(this->intr, buf, TEST_PACKET_SIZE);
EXPECT_STREQ(buf, dummyDataStr.c_str());
}
diff --git a/media/mtp/tests/PosixAsyncIO_test.cpp b/media/mtp/tests/PosixAsyncIO_test.cpp
new file mode 100644
index 0000000..63b9a35
--- /dev/null
+++ b/media/mtp/tests/PosixAsyncIO_test.cpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_TAG "PosixAsyncIO_test.cpp"
+
+#include <android-base/test_utils.h>
+#include <fcntl.h>
+#include <gtest/gtest.h>
+#include <string>
+#include <unistd.h>
+#include <utils/Log.h>
+
+#include "PosixAsyncIO.h"
+
+namespace android {
+
+constexpr int TEST_PACKET_SIZE = 512;
+
+static const std::string dummyDataStr =
+ "/*\n * Copyright 2015 The Android Open Source Project\n *\n * Licensed un"
+ "der the Apache License, Version 2.0 (the \"License\");\n * you may not us"
+ "e this file except in compliance with the License.\n * You may obtain a c"
+ "opy of the License at\n *\n * http://www.apache.org/licenses/LICENSE"
+ "-2.0\n *\n * Unless required by applicable law or agreed to in writing, s"
+ "oftware\n * distributed under the License is distributed on an \"AS IS\" "
+ "BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o"
+ "r implied.\n * Se";
+
+
+class PosixAsyncIOTest : public ::testing::Test {
+protected:
+ TemporaryFile dummy_file;
+
+ PosixAsyncIOTest() {}
+ ~PosixAsyncIOTest() {}
+};
+
+TEST_F(PosixAsyncIOTest, testRead) {
+ char buf[TEST_PACKET_SIZE + 1];
+ buf[TEST_PACKET_SIZE] = '\0';
+ EXPECT_EQ(write(dummy_file.fd, dummyDataStr.c_str(), TEST_PACKET_SIZE), TEST_PACKET_SIZE);
+ struct aiocb aio;
+ struct aiocb *aiol[] = {&aio};
+ aio.aio_fildes = dummy_file.fd;
+ aio.aio_buf = buf;
+ aio.aio_offset = 0;
+ aio.aio_nbytes = TEST_PACKET_SIZE;
+
+ EXPECT_EQ(aio_read(&aio), 0);
+ EXPECT_EQ(aio_suspend(aiol, 1, nullptr), 0);
+ EXPECT_EQ(aio_return(&aio), TEST_PACKET_SIZE);
+ EXPECT_STREQ(buf, dummyDataStr.c_str());
+}
+
+TEST_F(PosixAsyncIOTest, testWrite) {
+ char buf[TEST_PACKET_SIZE + 1];
+ buf[TEST_PACKET_SIZE] = '\0';
+ struct aiocb aio;
+ struct aiocb *aiol[] = {&aio};
+ aio.aio_fildes = dummy_file.fd;
+ aio.aio_buf = const_cast<char*>(dummyDataStr.c_str());
+ aio.aio_offset = 0;
+ aio.aio_nbytes = TEST_PACKET_SIZE;
+
+ EXPECT_EQ(aio_write(&aio), 0);
+ EXPECT_EQ(aio_suspend(aiol, 1, nullptr), 0);
+ EXPECT_EQ(aio_return(&aio), TEST_PACKET_SIZE);
+ EXPECT_EQ(read(dummy_file.fd, buf, TEST_PACKET_SIZE), TEST_PACKET_SIZE);
+ EXPECT_STREQ(buf, dummyDataStr.c_str());
+}
+
+TEST_F(PosixAsyncIOTest, testError) {
+ char buf[TEST_PACKET_SIZE + 1];
+ buf[TEST_PACKET_SIZE] = '\0';
+ struct aiocb aio;
+ struct aiocb *aiol[] = {&aio};
+ aio.aio_fildes = -1;
+ aio.aio_buf = const_cast<char*>(dummyDataStr.c_str());
+ aio.aio_offset = 0;
+ aio.aio_nbytes = TEST_PACKET_SIZE;
+
+ EXPECT_EQ(aio_write(&aio), 0);
+ EXPECT_EQ(aio_suspend(aiol, 1, nullptr), 0);
+ EXPECT_EQ(aio_return(&aio), -1);
+ EXPECT_EQ(aio_error(&aio), EBADF);
+}
+
+} // namespace android
diff --git a/media/ndk/Android.bp b/media/ndk/Android.bp
index 1aeb38f..40974f3 100644
--- a/media/ndk/Android.bp
+++ b/media/ndk/Android.bp
@@ -48,6 +48,7 @@
include_dirs: [
"bionic/libc/private",
"frameworks/base/core/jni",
+ "frameworks/native/include/media/openmax",
"system/media/camera/include",
],
@@ -59,15 +60,22 @@
"-Wall",
],
+ static_libs: [
+ "libgrallocusage",
+ ],
+
shared_libs: [
"libbinder",
"libmedia",
+ "libmedia_jni",
"libmediadrm",
+ "libskia",
"libstagefright",
"libstagefright_foundation",
"liblog",
"libutils",
"libcutils",
+ "libandroid",
"libandroid_runtime",
"libbinder",
"libgui",
diff --git a/media/ndk/NdkImage.cpp b/media/ndk/NdkImage.cpp
index 40900ad..6d28d1b 100644
--- a/media/ndk/NdkImage.cpp
+++ b/media/ndk/NdkImage.cpp
@@ -22,6 +22,8 @@
#include "NdkImagePriv.h"
#include "NdkImageReaderPriv.h"
+#include <android_media_Utils.h>
+#include <android_runtime/android_hardware_HardwareBuffer.h>
#include <utils/Log.h>
#include "hardware/camera3.h"
@@ -29,12 +31,10 @@
#define ALIGN(x, mask) ( ((x) + (mask) - 1) & ~((mask) - 1) )
-AImage::AImage(AImageReader* reader, int32_t format,
- CpuConsumer::LockedBuffer* buffer, int64_t timestamp,
- int32_t width, int32_t height, int32_t numPlanes) :
- mReader(reader), mFormat(format),
- mBuffer(buffer), mTimestamp(timestamp),
- mWidth(width), mHeight(height), mNumPlanes(numPlanes) {
+AImage::AImage(AImageReader* reader, int32_t format, uint64_t usage, BufferItem* buffer,
+ int64_t timestamp, int32_t width, int32_t height, int32_t numPlanes) :
+ mReader(reader), mFormat(format), mUsage(usage), mBuffer(buffer), mLockedBuffer(nullptr),
+ mTimestamp(timestamp), mWidth(width), mHeight(height), mNumPlanes(numPlanes) {
}
// Can only be called by free() with mLock hold
@@ -52,7 +52,7 @@
}
void
-AImage::close() {
+AImage::close(int releaseFenceFd) {
Mutex::Autolock _l(mLock);
if (mIsClosed) {
return;
@@ -62,10 +62,11 @@
LOG_ALWAYS_FATAL("Error: AImage not closed before AImageReader close!");
return;
}
- reader->releaseImageLocked(this);
+ reader->releaseImageLocked(this, releaseFenceFd);
// Should have been set to nullptr in releaseImageLocked
// Set to nullptr here for extra safety only
mBuffer = nullptr;
+ mLockedBuffer = nullptr;
mIsClosed = true;
}
@@ -169,8 +170,77 @@
return AMEDIA_OK;
}
+media_status_t AImage::lockImage() {
+ if (mBuffer == nullptr || mBuffer->mGraphicBuffer == nullptr) {
+ LOG_ALWAYS_FATAL("%s: AImage %p has no buffer.", __FUNCTION__, this);
+ return AMEDIA_ERROR_INVALID_OBJECT;
+ }
+
+ if ((mUsage & AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN) == 0) {
+ ALOGE("%s: AImage %p does not have any software read usage bits set, usage=%" PRIu64 "",
+ __FUNCTION__, this, mUsage);
+ return AMEDIA_IMGREADER_CANNOT_LOCK_IMAGE;
+ }
+
+ if (mLockedBuffer != nullptr) {
+ // Return immediately if the image has already been locked.
+ return AMEDIA_OK;
+ }
+
+ auto lockedBuffer = std::make_unique<CpuConsumer::LockedBuffer>();
+
+ uint64_t grallocUsage = android_hardware_HardwareBuffer_convertToGrallocUsageBits(mUsage);
+
+ status_t ret =
+ lockImageFromBuffer(mBuffer, grallocUsage, mBuffer->mFence->dup(), lockedBuffer.get());
+ if (ret != OK) {
+ ALOGE("%s: AImage %p failed to lock, error=%d", __FUNCTION__, this, ret);
+ return AMEDIA_IMGREADER_CANNOT_LOCK_IMAGE;
+ }
+
+ ALOGV("%s: Successfully locked the image %p.", __FUNCTION__, this);
+ mLockedBuffer = std::move(lockedBuffer);
+
+ return AMEDIA_OK;
+}
+
+media_status_t AImage::unlockImageIfLocked(int* fenceFd) {
+ if (fenceFd == nullptr) {
+ LOG_ALWAYS_FATAL("%s: fenceFd cannot be null.", __FUNCTION__);
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+
+ if (mBuffer == nullptr || mBuffer->mGraphicBuffer == nullptr) {
+ LOG_ALWAYS_FATAL("%s: AImage %p has no buffer.", __FUNCTION__, this);
+ return AMEDIA_ERROR_INVALID_OBJECT;
+ }
+
+ if (mLockedBuffer == nullptr) {
+ // This image hasn't been locked yet, no need to unlock.
+ *fenceFd = -1;
+ return AMEDIA_OK;
+ }
+
+ // No fence by default.
+ int releaseFenceFd = -1;
+ status_t res = mBuffer->mGraphicBuffer->unlockAsync(&releaseFenceFd);
+ if (res != OK) {
+        ALOGE("%s unlock buffer failed on image %p.", __FUNCTION__, this);
+ *fenceFd = -1;
+ return AMEDIA_IMGREADER_CANNOT_UNLOCK_IMAGE;
+ }
+
+ *fenceFd = releaseFenceFd;
+ return AMEDIA_OK;
+}
+
media_status_t
AImage::getPlanePixelStride(int planeIdx, /*out*/int32_t* pixelStride) const {
+ if (mLockedBuffer == nullptr) {
+ ALOGE("%s: buffer not locked.", __FUNCTION__);
+ return AMEDIA_IMGREADER_IMAGE_NOT_LOCKED;
+ }
+
if (planeIdx < 0 || planeIdx >= mNumPlanes) {
ALOGE("Error: planeIdx %d out of bound [0,%d]",
planeIdx, mNumPlanes - 1);
@@ -183,10 +253,10 @@
ALOGE("%s: image %p has been closed!", __FUNCTION__, this);
return AMEDIA_ERROR_INVALID_OBJECT;
}
- int32_t fmt = mBuffer->flexFormat;
+ int32_t fmt = mLockedBuffer->flexFormat;
switch (fmt) {
case HAL_PIXEL_FORMAT_YCbCr_420_888:
- *pixelStride = (planeIdx == 0) ? 1 : mBuffer->chromaStep;
+ *pixelStride = (planeIdx == 0) ? 1 : mLockedBuffer->chromaStep;
return AMEDIA_OK;
case HAL_PIXEL_FORMAT_YCrCb_420_SP:
*pixelStride = (planeIdx == 0) ? 1 : 2;
@@ -226,6 +296,11 @@
media_status_t
AImage::getPlaneRowStride(int planeIdx, /*out*/int32_t* rowStride) const {
+ if (mLockedBuffer == nullptr) {
+ ALOGE("%s: buffer not locked.", __FUNCTION__);
+ return AMEDIA_IMGREADER_IMAGE_NOT_LOCKED;
+ }
+
if (planeIdx < 0 || planeIdx >= mNumPlanes) {
ALOGE("Error: planeIdx %d out of bound [0,%d]",
planeIdx, mNumPlanes - 1);
@@ -238,54 +313,58 @@
ALOGE("%s: image %p has been closed!", __FUNCTION__, this);
return AMEDIA_ERROR_INVALID_OBJECT;
}
- int32_t fmt = mBuffer->flexFormat;
+ int32_t fmt = mLockedBuffer->flexFormat;
switch (fmt) {
case HAL_PIXEL_FORMAT_YCbCr_420_888:
- *rowStride = (planeIdx == 0) ? mBuffer->stride : mBuffer->chromaStride;
+ *rowStride = (planeIdx == 0) ? mLockedBuffer->stride
+ : mLockedBuffer->chromaStride;
return AMEDIA_OK;
case HAL_PIXEL_FORMAT_YCrCb_420_SP:
- *rowStride = mBuffer->width;
+ *rowStride = mLockedBuffer->width;
return AMEDIA_OK;
case HAL_PIXEL_FORMAT_YV12:
- if (mBuffer->stride % 16) {
- ALOGE("Stride %d is not 16 pixel aligned!", mBuffer->stride);
+ if (mLockedBuffer->stride % 16) {
+ ALOGE("Stride %d is not 16 pixel aligned!", mLockedBuffer->stride);
return AMEDIA_ERROR_UNKNOWN;
}
- *rowStride = (planeIdx == 0) ? mBuffer->stride : ALIGN(mBuffer->stride / 2, 16);
+ *rowStride = (planeIdx == 0) ? mLockedBuffer->stride
+ : ALIGN(mLockedBuffer->stride / 2, 16);
return AMEDIA_OK;
case HAL_PIXEL_FORMAT_RAW10:
case HAL_PIXEL_FORMAT_RAW12:
// RAW10 and RAW12 are used for 10-bit and 12-bit raw data, they are single plane
- *rowStride = mBuffer->stride;
+ *rowStride = mLockedBuffer->stride;
return AMEDIA_OK;
case HAL_PIXEL_FORMAT_Y8:
- if (mBuffer->stride % 16) {
- ALOGE("Stride %d is not 16 pixel aligned!", mBuffer->stride);
+ if (mLockedBuffer->stride % 16) {
+ ALOGE("Stride %d is not 16 pixel aligned!",
+ mLockedBuffer->stride);
return AMEDIA_ERROR_UNKNOWN;
}
- *rowStride = mBuffer->stride;
+ *rowStride = mLockedBuffer->stride;
return AMEDIA_OK;
case HAL_PIXEL_FORMAT_Y16:
case HAL_PIXEL_FORMAT_RAW16:
// In native side, strides are specified in pixels, not in bytes.
// Single plane 16bpp bayer data. even width/height,
// row stride multiple of 16 pixels (32 bytes)
- if (mBuffer->stride % 16) {
- ALOGE("Stride %d is not 16 pixel aligned!", mBuffer->stride);
+ if (mLockedBuffer->stride % 16) {
+ ALOGE("Stride %d is not 16 pixel aligned!",
+ mLockedBuffer->stride);
return AMEDIA_ERROR_UNKNOWN;
}
- *rowStride = mBuffer->stride * 2;
+ *rowStride = mLockedBuffer->stride * 2;
return AMEDIA_OK;
case HAL_PIXEL_FORMAT_RGB_565:
- *rowStride = mBuffer->stride * 2;
+ *rowStride = mLockedBuffer->stride * 2;
return AMEDIA_OK;
case HAL_PIXEL_FORMAT_RGBA_8888:
case HAL_PIXEL_FORMAT_RGBX_8888:
- *rowStride = mBuffer->stride * 4;
+ *rowStride = mLockedBuffer->stride * 4;
return AMEDIA_OK;
case HAL_PIXEL_FORMAT_RGB_888:
// Single plane, 24bpp.
- *rowStride = mBuffer->stride * 3;
+ *rowStride = mLockedBuffer->stride * 3;
return AMEDIA_OK;
case HAL_PIXEL_FORMAT_BLOB:
case HAL_PIXEL_FORMAT_RAW_OPAQUE:
@@ -300,13 +379,13 @@
uint32_t
AImage::getJpegSize() const {
- if (mBuffer == nullptr) {
+ if (mLockedBuffer == nullptr) {
LOG_ALWAYS_FATAL("Error: buffer is null");
}
uint32_t size = 0;
- uint32_t width = mBuffer->width;
- uint8_t* jpegBuffer = mBuffer->data;
+ uint32_t width = mLockedBuffer->width;
+ uint8_t* jpegBuffer = mLockedBuffer->data;
// First check for JPEG transport header at the end of the buffer
uint8_t* header = jpegBuffer + (width - sizeof(struct camera3_jpeg_blob));
@@ -334,6 +413,11 @@
media_status_t
AImage::getPlaneData(int planeIdx,/*out*/uint8_t** data, /*out*/int* dataLength) const {
+ if (mLockedBuffer == nullptr) {
+ ALOGE("%s: buffer not locked.", __FUNCTION__);
+ return AMEDIA_IMGREADER_IMAGE_NOT_LOCKED;
+ }
+
if (planeIdx < 0 || planeIdx >= mNumPlanes) {
ALOGE("Error: planeIdx %d out of bound [0,%d]",
planeIdx, mNumPlanes - 1);
@@ -352,140 +436,154 @@
uint8_t* cr = nullptr;
uint8_t* pData = nullptr;
int bytesPerPixel = 0;
- int32_t fmt = mBuffer->flexFormat;
+ int32_t fmt = mLockedBuffer->flexFormat;
switch (fmt) {
case HAL_PIXEL_FORMAT_YCbCr_420_888:
- pData = (planeIdx == 0) ? mBuffer->data :
- (planeIdx == 1) ? mBuffer->dataCb : mBuffer->dataCr;
+ pData = (planeIdx == 0) ? mLockedBuffer->data
+ : (planeIdx == 1) ? mLockedBuffer->dataCb
+ : mLockedBuffer->dataCr;
// only map until last pixel
if (planeIdx == 0) {
- dataSize = mBuffer->stride * (mBuffer->height - 1) + mBuffer->width;
+ dataSize = mLockedBuffer->stride * (mLockedBuffer->height - 1) +
+ mLockedBuffer->width;
} else {
- dataSize = mBuffer->chromaStride * (mBuffer->height / 2 - 1) +
- mBuffer->chromaStep * (mBuffer->width / 2 - 1) + 1;
+ dataSize =
+ mLockedBuffer->chromaStride *
+ (mLockedBuffer->height / 2 - 1) +
+ mLockedBuffer->chromaStep * (mLockedBuffer->width / 2 - 1) +
+ 1;
}
break;
// NV21
case HAL_PIXEL_FORMAT_YCrCb_420_SP:
- cr = mBuffer->data + (mBuffer->stride * mBuffer->height);
+ cr = mLockedBuffer->data +
+ (mLockedBuffer->stride * mLockedBuffer->height);
cb = cr + 1;
// only map until last pixel
- ySize = mBuffer->width * (mBuffer->height - 1) + mBuffer->width;
- cSize = mBuffer->width * (mBuffer->height / 2 - 1) + mBuffer->width - 1;
-
- pData = (planeIdx == 0) ? mBuffer->data :
- (planeIdx == 1) ? cb : cr;
+ ySize = mLockedBuffer->width * (mLockedBuffer->height - 1) +
+ mLockedBuffer->width;
+ cSize = mLockedBuffer->width * (mLockedBuffer->height / 2 - 1) +
+ mLockedBuffer->width - 1;
+ pData = (planeIdx == 0) ? mLockedBuffer->data
+ : (planeIdx == 1) ? cb : cr;
dataSize = (planeIdx == 0) ? ySize : cSize;
break;
case HAL_PIXEL_FORMAT_YV12:
// Y and C stride need to be 16 pixel aligned.
- if (mBuffer->stride % 16) {
- ALOGE("Stride %d is not 16 pixel aligned!", mBuffer->stride);
+ if (mLockedBuffer->stride % 16) {
+ ALOGE("Stride %d is not 16 pixel aligned!",
+ mLockedBuffer->stride);
return AMEDIA_ERROR_UNKNOWN;
}
- ySize = mBuffer->stride * mBuffer->height;
- cStride = ALIGN(mBuffer->stride / 2, 16);
- cr = mBuffer->data + ySize;
- cSize = cStride * mBuffer->height / 2;
+ ySize = mLockedBuffer->stride * mLockedBuffer->height;
+ cStride = ALIGN(mLockedBuffer->stride / 2, 16);
+ cr = mLockedBuffer->data + ySize;
+ cSize = cStride * mLockedBuffer->height / 2;
cb = cr + cSize;
- pData = (planeIdx == 0) ? mBuffer->data :
- (planeIdx == 1) ? cb : cr;
+ pData = (planeIdx == 0) ? mLockedBuffer->data
+ : (planeIdx == 1) ? cb : cr;
dataSize = (planeIdx == 0) ? ySize : cSize;
break;
case HAL_PIXEL_FORMAT_Y8:
// Single plane, 8bpp.
- pData = mBuffer->data;
- dataSize = mBuffer->stride * mBuffer->height;
+ pData = mLockedBuffer->data;
+ dataSize = mLockedBuffer->stride * mLockedBuffer->height;
break;
case HAL_PIXEL_FORMAT_Y16:
bytesPerPixel = 2;
- pData = mBuffer->data;
- dataSize = mBuffer->stride * mBuffer->height * bytesPerPixel;
+ pData = mLockedBuffer->data;
+ dataSize =
+ mLockedBuffer->stride * mLockedBuffer->height * bytesPerPixel;
break;
case HAL_PIXEL_FORMAT_BLOB:
// Used for JPEG data, height must be 1, width == size, single plane.
- if (mBuffer->height != 1) {
- ALOGE("Jpeg should have height value one but got %d", mBuffer->height);
+ if (mLockedBuffer->height != 1) {
+ ALOGE("Jpeg should have height value one but got %d",
+ mLockedBuffer->height);
return AMEDIA_ERROR_UNKNOWN;
}
- pData = mBuffer->data;
+ pData = mLockedBuffer->data;
dataSize = getJpegSize();
break;
case HAL_PIXEL_FORMAT_RAW16:
// Single plane 16bpp bayer data.
bytesPerPixel = 2;
- pData = mBuffer->data;
- dataSize = mBuffer->stride * mBuffer->height * bytesPerPixel;
+ pData = mLockedBuffer->data;
+ dataSize =
+ mLockedBuffer->stride * mLockedBuffer->height * bytesPerPixel;
break;
case HAL_PIXEL_FORMAT_RAW_OPAQUE:
// Used for RAW_OPAQUE data, height must be 1, width == size, single plane.
- if (mBuffer->height != 1) {
- ALOGE("RAW_OPAQUE should have height value one but got %d", mBuffer->height);
+ if (mLockedBuffer->height != 1) {
+ ALOGE("RAW_OPAQUE should have height value one but got %d",
+ mLockedBuffer->height);
return AMEDIA_ERROR_UNKNOWN;
}
- pData = mBuffer->data;
- dataSize = mBuffer->width;
+ pData = mLockedBuffer->data;
+ dataSize = mLockedBuffer->width;
break;
case HAL_PIXEL_FORMAT_RAW10:
// Single plane 10bpp bayer data.
- if (mBuffer->width % 4) {
- ALOGE("Width is not multiple of 4 %d", mBuffer->width);
+ if (mLockedBuffer->width % 4) {
+ ALOGE("Width is not multiple of 4 %d", mLockedBuffer->width);
return AMEDIA_ERROR_UNKNOWN;
}
- if (mBuffer->height % 2) {
- ALOGE("Height is not multiple of 2 %d", mBuffer->height);
+ if (mLockedBuffer->height % 2) {
+ ALOGE("Height is not multiple of 2 %d", mLockedBuffer->height);
return AMEDIA_ERROR_UNKNOWN;
}
- if (mBuffer->stride < (mBuffer->width * 10 / 8)) {
+ if (mLockedBuffer->stride < (mLockedBuffer->width * 10 / 8)) {
ALOGE("stride (%d) should be at least %d",
- mBuffer->stride, mBuffer->width * 10 / 8);
+ mLockedBuffer->stride, mLockedBuffer->width * 10 / 8);
return AMEDIA_ERROR_UNKNOWN;
}
- pData = mBuffer->data;
- dataSize = mBuffer->stride * mBuffer->height;
+ pData = mLockedBuffer->data;
+ dataSize = mLockedBuffer->stride * mLockedBuffer->height;
break;
case HAL_PIXEL_FORMAT_RAW12:
// Single plane 10bpp bayer data.
- if (mBuffer->width % 4) {
- ALOGE("Width is not multiple of 4 %d", mBuffer->width);
+ if (mLockedBuffer->width % 4) {
+ ALOGE("Width is not multiple of 4 %d", mLockedBuffer->width);
return AMEDIA_ERROR_UNKNOWN;
}
- if (mBuffer->height % 2) {
- ALOGE("Height is not multiple of 2 %d", mBuffer->height);
+ if (mLockedBuffer->height % 2) {
+ ALOGE("Height is not multiple of 2 %d", mLockedBuffer->height);
return AMEDIA_ERROR_UNKNOWN;
}
- if (mBuffer->stride < (mBuffer->width * 12 / 8)) {
+ if (mLockedBuffer->stride < (mLockedBuffer->width * 12 / 8)) {
ALOGE("stride (%d) should be at least %d",
- mBuffer->stride, mBuffer->width * 12 / 8);
+ mLockedBuffer->stride, mLockedBuffer->width * 12 / 8);
return AMEDIA_ERROR_UNKNOWN;
}
- pData = mBuffer->data;
- dataSize = mBuffer->stride * mBuffer->height;
+ pData = mLockedBuffer->data;
+ dataSize = mLockedBuffer->stride * mLockedBuffer->height;
break;
case HAL_PIXEL_FORMAT_RGBA_8888:
case HAL_PIXEL_FORMAT_RGBX_8888:
// Single plane, 32bpp.
bytesPerPixel = 4;
- pData = mBuffer->data;
- dataSize = mBuffer->stride * mBuffer->height * bytesPerPixel;
+ pData = mLockedBuffer->data;
+ dataSize =
+ mLockedBuffer->stride * mLockedBuffer->height * bytesPerPixel;
break;
case HAL_PIXEL_FORMAT_RGB_565:
// Single plane, 16bpp.
bytesPerPixel = 2;
- pData = mBuffer->data;
- dataSize = mBuffer->stride * mBuffer->height * bytesPerPixel;
+ pData = mLockedBuffer->data;
+ dataSize =
+ mLockedBuffer->stride * mLockedBuffer->height * bytesPerPixel;
break;
case HAL_PIXEL_FORMAT_RGB_888:
// Single plane, 24bpp.
bytesPerPixel = 3;
- pData = mBuffer->data;
- dataSize = mBuffer->stride * mBuffer->height * bytesPerPixel;
+ pData = mLockedBuffer->data;
+ dataSize = mLockedBuffer->stride * mLockedBuffer->height * bytesPerPixel;
break;
default:
ALOGE("Pixel format: 0x%x is unsupported", fmt);
@@ -497,12 +595,31 @@
return AMEDIA_OK;
}
+media_status_t
+AImage::getHardwareBuffer(/*out*/AHardwareBuffer** buffer) const {
+ if (mBuffer == nullptr || mBuffer->mGraphicBuffer == nullptr) {
+ ALOGE("%s: AImage %p has no buffer.", __FUNCTION__, this);
+ return AMEDIA_ERROR_INVALID_OBJECT;
+ }
+
+    // TODO(jwcai): Per the Android graphics team, this should just be a static_cast.
+ *buffer = reinterpret_cast<AHardwareBuffer*>(mBuffer->mGraphicBuffer.get());
+ return AMEDIA_OK;
+}
+
EXPORT
void AImage_delete(AImage* image) {
ALOGV("%s", __FUNCTION__);
+ AImage_deleteAsync(image, -1);
+ return;
+}
+
+EXPORT
+void AImage_deleteAsync(AImage* image, int releaseFenceFd) {
+ ALOGV("%s", __FUNCTION__);
if (image != nullptr) {
image->lockReader();
- image->close();
+ image->close(releaseFenceFd);
image->unlockReader();
if (!image->isClosed()) {
LOG_ALWAYS_FATAL("Image close failed!");
@@ -602,6 +719,12 @@
__FUNCTION__, image, pixelStride);
return AMEDIA_ERROR_INVALID_PARAMETER;
}
+ media_status_t ret = const_cast<AImage*>(image)->lockImage();
+ if (ret != AMEDIA_OK) {
+ ALOGE("%s: failed to lock buffer for CPU access. image %p, error=%d.",
+ __FUNCTION__, image, ret);
+ return ret;
+ }
return image->getPlanePixelStride(planeIdx, pixelStride);
}
@@ -614,6 +737,12 @@
__FUNCTION__, image, rowStride);
return AMEDIA_ERROR_INVALID_PARAMETER;
}
+ media_status_t ret = const_cast<AImage*>(image)->lockImage();
+ if (ret != AMEDIA_OK) {
+ ALOGE("%s: failed to lock buffer for CPU access. image %p, error=%d.",
+ __FUNCTION__, image, ret);
+ return ret;
+ }
return image->getPlaneRowStride(planeIdx, rowStride);
}
@@ -627,5 +756,23 @@
__FUNCTION__, image, data, dataLength);
return AMEDIA_ERROR_INVALID_PARAMETER;
}
+ media_status_t ret = const_cast<AImage*>(image)->lockImage();
+ if (ret != AMEDIA_OK) {
+ ALOGE("%s: failed to lock buffer for CPU access. image %p, error=%d.",
+ __FUNCTION__, image, ret);
+ return ret;
+ }
return image->getPlaneData(planeIdx, data, dataLength);
}
+
+EXPORT
+media_status_t AImage_getHardwareBuffer(
+ const AImage* image, /*out*/AHardwareBuffer** buffer) {
+ ALOGV("%s", __FUNCTION__);
+
+ if (image == nullptr || buffer == nullptr) {
+ ALOGE("%s: bad argument. image %p buffer %p", __FUNCTION__, image, buffer);
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+ return image->getHardwareBuffer(buffer);
+}
diff --git a/media/ndk/NdkImagePriv.h b/media/ndk/NdkImagePriv.h
index 197cacd..e0f16da 100644
--- a/media/ndk/NdkImagePriv.h
+++ b/media/ndk/NdkImagePriv.h
@@ -21,6 +21,7 @@
#include <utils/Log.h>
#include <utils/StrongPointer.h>
+#include <gui/BufferItem.h>
#include <gui/CpuConsumer.h>
#include "NdkImageReaderPriv.h"
@@ -31,12 +32,12 @@
// TODO: this only supports ImageReader
struct AImage {
- AImage(AImageReader* reader, int32_t format,
- CpuConsumer::LockedBuffer* buffer, int64_t timestamp,
- int32_t width, int32_t height, int32_t numPlanes);
+ AImage(AImageReader* reader, int32_t format, uint64_t usage, BufferItem* buffer,
+ int64_t timestamp, int32_t width, int32_t height, int32_t numPlanes);
// free all resources while keeping object alive. Caller must obtain reader lock
- void close();
+ void close() { close(-1); }
+ void close(int releaseFenceFd);
// Remove from object memory. Must be called after close
void free();
@@ -54,9 +55,13 @@
media_status_t getNumPlanes(/*out*/int32_t* numPlanes) const;
media_status_t getTimestamp(/*out*/int64_t* timestamp) const;
+ media_status_t lockImage();
+ media_status_t unlockImageIfLocked(/*out*/int* fenceFd);
+
media_status_t getPlanePixelStride(int planeIdx, /*out*/int32_t* pixelStride) const;
media_status_t getPlaneRowStride(int planeIdx, /*out*/int32_t* rowStride) const;
media_status_t getPlaneData(int planeIdx,/*out*/uint8_t** data, /*out*/int* dataLength) const;
+ media_status_t getHardwareBuffer(/*out*/AHardwareBuffer** buffer) const;
private:
// AImage should be deleted through free() API.
@@ -69,7 +74,9 @@
// When reader is close, AImage will only accept close API call
wp<AImageReader> mReader;
const int32_t mFormat;
- CpuConsumer::LockedBuffer* mBuffer;
+ const uint64_t mUsage; // AHARDWAREBUFFER_USAGE_* flags.
+ BufferItem* mBuffer;
+ std::unique_ptr<CpuConsumer::LockedBuffer> mLockedBuffer;
const int64_t mTimestamp;
const int32_t mWidth;
const int32_t mHeight;
diff --git a/media/ndk/NdkImageReader.cpp b/media/ndk/NdkImageReader.cpp
index d470cb0..5d1a20b 100644
--- a/media/ndk/NdkImageReader.cpp
+++ b/media/ndk/NdkImageReader.cpp
@@ -22,8 +22,12 @@
#include "NdkImagePriv.h"
#include "NdkImageReaderPriv.h"
+#include <cutils/atomic.h>
#include <utils/Log.h>
+#include <android_media_Utils.h>
#include <android_runtime/android_view_Surface.h>
+#include <android_runtime/android_hardware_HardwareBuffer.h>
+#include <grallocusage/GrallocUsageConversion.h>
using namespace android;
@@ -37,6 +41,7 @@
const char* AImageReader::kCallbackFpKey = "Callback";
const char* AImageReader::kContextKey = "Context";
+const char* AImageReader::kGraphicBufferKey = "GraphicBuffer";
bool
AImageReader::isSupportedFormat(int32_t format) {
@@ -113,6 +118,45 @@
return AMEDIA_OK;
}
+void
+AImageReader::BufferRemovedListener::onBufferFreed(const wp<GraphicBuffer>& graphicBuffer) {
+ Mutex::Autolock _l(mLock);
+ sp<AImageReader> reader = mReader.promote();
+ if (reader == nullptr) {
+ ALOGW("A frame is available after AImageReader closed!");
+ return; // reader has been closed
+ }
+ if (mListener.onBufferRemoved == nullptr) {
+ return; // No callback registered
+ }
+
+ sp<GraphicBuffer> gBuffer = graphicBuffer.promote();
+ if (gBuffer == nullptr) {
+ ALOGW("A buffer being freed has gone away!");
+ return; // buffer is already destroyed
+ }
+
+ sp<AMessage> msg = new AMessage(AImageReader::kWhatBufferRemoved, reader->mHandler);
+ msg->setPointer(
+ AImageReader::kCallbackFpKey, (void*) mListener.onBufferRemoved);
+ msg->setPointer(AImageReader::kContextKey, mListener.context);
+ msg->setObject(AImageReader::kGraphicBufferKey, gBuffer);
+ msg->post();
+}
+
+media_status_t
+AImageReader::BufferRemovedListener::setBufferRemovedListener(
+ AImageReader_BufferRemovedListener* listener) {
+ Mutex::Autolock _l(mLock);
+ if (listener == nullptr) {
+ mListener.context = nullptr;
+ mListener.onBufferRemoved = nullptr;
+ } else {
+ mListener = *listener;
+ }
+ return AMEDIA_OK;
+}
+
media_status_t
AImageReader::setImageListenerLocked(AImageReader_ImageListener* listener) {
return mFrameListener->setImageListener(listener);
@@ -124,9 +168,50 @@
return setImageListenerLocked(listener);
}
+media_status_t
+AImageReader::setBufferRemovedListenerLocked(AImageReader_BufferRemovedListener* listener) {
+ return mBufferRemovedListener->setBufferRemovedListener(listener);
+}
+
+media_status_t
+AImageReader::setBufferRemovedListener(AImageReader_BufferRemovedListener* listener) {
+ Mutex::Autolock _l(mLock);
+ return setBufferRemovedListenerLocked(listener);
+}
+
void AImageReader::CallbackHandler::onMessageReceived(
const sp<AMessage> &msg) {
switch (msg->what()) {
+ case kWhatBufferRemoved:
+ {
+ AImageReader_BufferRemovedCallback onBufferRemoved;
+ void* context;
+ bool found = msg->findPointer(kCallbackFpKey, (void**) &onBufferRemoved);
+ if (!found || onBufferRemoved == nullptr) {
+ ALOGE("%s: Cannot find onBufferRemoved callback fp!", __FUNCTION__);
+ return;
+ }
+ found = msg->findPointer(kContextKey, &context);
+ if (!found) {
+ ALOGE("%s: Cannot find callback context!", __FUNCTION__);
+ return;
+ }
+ sp<RefBase> bufferToFree;
+ found = msg->findObject(kGraphicBufferKey, &bufferToFree);
+ if (!found || bufferToFree == nullptr) {
+ ALOGE("%s: Cannot find the buffer to free!", __FUNCTION__);
+ return;
+ }
+
+ // TODO(jwcai) Someone from Android graphics team stating this should just be a
+ // static_cast.
+ AHardwareBuffer* outBuffer = reinterpret_cast<AHardwareBuffer*>(bufferToFree.get());
+
+ // At this point, bufferToFree holds the last reference to the GraphicBuffer owned by
+ // this AImageReader, and the reference will be gone once this function returns.
+ (*onBufferRemoved)(context, mReader, outBuffer);
+ break;
+ }
case kWhatImageAvailable:
{
AImageReader_ImageCallback onImageAvailable;
@@ -150,53 +235,61 @@
}
}
-AImageReader::AImageReader(int32_t width, int32_t height, int32_t format, int32_t maxImages) :
- mWidth(width), mHeight(height), mFormat(format), mMaxImages(maxImages),
- mNumPlanes(getNumPlanesForFormat(format)),
- mFrameListener(new FrameListener(this)) {}
+AImageReader::AImageReader(int32_t width,
+ int32_t height,
+ int32_t format,
+ uint64_t usage,
+ int32_t maxImages)
+ : mWidth(width),
+ mHeight(height),
+ mFormat(format),
+ mUsage(usage),
+ mMaxImages(maxImages),
+ mNumPlanes(getNumPlanesForFormat(format)),
+ mFrameListener(new FrameListener(this)),
+ mBufferRemovedListener(new BufferRemovedListener(this)) {}
media_status_t
AImageReader::init() {
PublicFormat publicFormat = static_cast<PublicFormat>(mFormat);
mHalFormat = android_view_Surface_mapPublicFormatToHalFormat(publicFormat);
mHalDataSpace = android_view_Surface_mapPublicFormatToHalDataspace(publicFormat);
+ mHalUsage = android_hardware_HardwareBuffer_convertToGrallocUsageBits(mUsage);
sp<IGraphicBufferProducer> gbProducer;
sp<IGraphicBufferConsumer> gbConsumer;
BufferQueue::createBufferQueue(&gbProducer, &gbConsumer);
- sp<CpuConsumer> cpuConsumer;
- String8 consumerName = String8::format("ImageReader-%dx%df%xm%d-%d-%d",
- mWidth, mHeight, mFormat, mMaxImages, getpid(),
+ String8 consumerName = String8::format("ImageReader-%dx%df%xu%" PRIu64 "m%d-%d-%d",
+ mWidth, mHeight, mFormat, mUsage, mMaxImages, getpid(),
createProcessUniqueId());
- cpuConsumer = new CpuConsumer(gbConsumer, mMaxImages, /*controlledByApp*/true);
- if (cpuConsumer == nullptr) {
- ALOGE("Failed to allocate CpuConsumer");
+ mBufferItemConsumer =
+ new BufferItemConsumer(gbConsumer, mHalUsage, mMaxImages, /*controlledByApp*/ true);
+ if (mBufferItemConsumer == nullptr) {
+ ALOGE("Failed to allocate BufferItemConsumer");
return AMEDIA_ERROR_UNKNOWN;
}
- mCpuConsumer = cpuConsumer;
- mCpuConsumer->setName(consumerName);
mProducer = gbProducer;
-
- sp<ConsumerBase> consumer = cpuConsumer;
- consumer->setFrameAvailableListener(mFrameListener);
+ mBufferItemConsumer->setName(consumerName);
+ mBufferItemConsumer->setFrameAvailableListener(mFrameListener);
+ mBufferItemConsumer->setBufferFreedListener(mBufferRemovedListener);
status_t res;
- res = cpuConsumer->setDefaultBufferSize(mWidth, mHeight);
+ res = mBufferItemConsumer->setDefaultBufferSize(mWidth, mHeight);
if (res != OK) {
- ALOGE("Failed to set CpuConsumer buffer size");
+ ALOGE("Failed to set BufferItemConsumer buffer size");
return AMEDIA_ERROR_UNKNOWN;
}
- res = cpuConsumer->setDefaultBufferFormat(mHalFormat);
+ res = mBufferItemConsumer->setDefaultBufferFormat(mHalFormat);
if (res != OK) {
- ALOGE("Failed to set CpuConsumer buffer format");
+ ALOGE("Failed to set BufferItemConsumer buffer format");
return AMEDIA_ERROR_UNKNOWN;
}
- res = cpuConsumer->setDefaultBufferDataSpace(mHalDataSpace);
+ res = mBufferItemConsumer->setDefaultBufferDataSpace(mHalDataSpace);
if (res != OK) {
- ALOGE("Failed to set CpuConsumer buffer dataSpace");
+ ALOGE("Failed to set BufferItemConsumer buffer dataSpace");
return AMEDIA_ERROR_UNKNOWN;
}
@@ -208,7 +301,7 @@
mWindow = static_cast<ANativeWindow*>(mSurface.get());
for (int i = 0; i < mMaxImages; i++) {
- CpuConsumer::LockedBuffer* buffer = new CpuConsumer::LockedBuffer;
+ BufferItem* buffer = new BufferItem;
mBuffers.push_back(buffer);
}
@@ -233,6 +326,9 @@
AImageReader_ImageListener nullListener = {nullptr, nullptr};
setImageListenerLocked(&nullListener);
+ AImageReader_BufferRemovedListener nullBufferRemovedListener = {nullptr, nullptr};
+ setBufferRemovedListenerLocked(&nullBufferRemovedListener);
+
if (mCbLooper != nullptr) {
mCbLooper->unregisterHandler(mHandler->id());
mCbLooper->stop();
@@ -247,133 +343,154 @@
image->close();
}
- // Delete LockedBuffers
+ // Delete Buffer Items
for (auto it = mBuffers.begin();
it != mBuffers.end(); it++) {
delete *it;
}
- if (mCpuConsumer != nullptr) {
- mCpuConsumer->abandon();
- mCpuConsumer->setFrameAvailableListener(nullptr);
+ if (mBufferItemConsumer != nullptr) {
+ mBufferItemConsumer->abandon();
+ mBufferItemConsumer->setFrameAvailableListener(nullptr);
}
}
media_status_t
-AImageReader::acquireCpuConsumerImageLocked(/*out*/AImage** image) {
+AImageReader::acquireImageLocked(/*out*/AImage** image, /*out*/int* acquireFenceFd) {
*image = nullptr;
- CpuConsumer::LockedBuffer* buffer = getLockedBufferLocked();
+ BufferItem* buffer = getBufferItemLocked();
if (buffer == nullptr) {
ALOGW("Unable to acquire a lockedBuffer, very likely client tries to lock more than"
" maxImages buffers");
return AMEDIA_IMGREADER_MAX_IMAGES_ACQUIRED;
}
- status_t res = mCpuConsumer->lockNextBuffer(buffer);
+ // When the output parameter fence is not NULL, we are acquiring the image asynchronously.
+ bool waitForFence = acquireFenceFd == nullptr;
+ status_t res = mBufferItemConsumer->acquireBuffer(buffer, 0, waitForFence);
+
if (res != NO_ERROR) {
- returnLockedBufferLocked(buffer);
- if (res != BAD_VALUE /*no buffers*/) {
- if (res == NOT_ENOUGH_DATA) {
+ returnBufferItemLocked(buffer);
+ if (res != BufferQueue::NO_BUFFER_AVAILABLE) {
+ if (res == INVALID_OPERATION) {
return AMEDIA_IMGREADER_MAX_IMAGES_ACQUIRED;
} else {
- ALOGE("%s Fail to lockNextBuffer with error: %d ",
- __FUNCTION__, res);
+ ALOGE("%s: Acquire image failed with some unknown error: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
return AMEDIA_ERROR_UNKNOWN;
}
}
return AMEDIA_IMGREADER_NO_BUFFER_AVAILABLE;
}
- if (buffer->flexFormat == HAL_PIXEL_FORMAT_YCrCb_420_SP) {
- ALOGE("NV21 format is not supported by AImageReader");
- return AMEDIA_ERROR_UNSUPPORTED;
- }
+ const int bufferWidth = getBufferWidth(buffer);
+ const int bufferHeight = getBufferHeight(buffer);
+ const int bufferFmt = buffer->mGraphicBuffer->getPixelFormat();
+ const int bufferUsage = buffer->mGraphicBuffer->getUsage();
- // Check if the left-top corner of the crop rect is origin, we currently assume this point is
- // zero, will revist this once this assumption turns out problematic.
- Point lt = buffer->crop.leftTop();
- if (lt.x != 0 || lt.y != 0) {
- ALOGE("crop left top corner [%d, %d] need to be at origin", lt.x, lt.y);
- return AMEDIA_ERROR_UNKNOWN;
- }
+ const int readerWidth = mWidth;
+ const int readerHeight = mHeight;
+ const int readerFmt = mHalFormat;
+ const int readerUsage = mHalUsage;
- // Check if the producer buffer configurations match what ImageReader configured.
- int outputWidth = getBufferWidth(buffer);
- int outputHeight = getBufferHeight(buffer);
-
- int readerFmt = mHalFormat;
- int readerWidth = mWidth;
- int readerHeight = mHeight;
-
- if ((buffer->format != HAL_PIXEL_FORMAT_BLOB) && (readerFmt != HAL_PIXEL_FORMAT_BLOB) &&
- (readerWidth != outputWidth || readerHeight != outputHeight)) {
- ALOGW("%s: Producer buffer size: %dx%d, doesn't match AImageReader configured size: %dx%d",
- __FUNCTION__, outputWidth, outputHeight, readerWidth, readerHeight);
- }
-
- int bufFmt = buffer->format;
- if (readerFmt == HAL_PIXEL_FORMAT_YCbCr_420_888) {
- bufFmt = buffer->flexFormat;
- }
-
- if (readerFmt != bufFmt) {
- if (readerFmt == HAL_PIXEL_FORMAT_YCbCr_420_888 && (bufFmt ==
- HAL_PIXEL_FORMAT_YCrCb_420_SP || bufFmt == HAL_PIXEL_FORMAT_YV12)) {
- // Special casing for when producer switches to a format compatible with flexible YUV
- // (HAL_PIXEL_FORMAT_YCbCr_420_888).
- mHalFormat = bufFmt;
- ALOGD("%s: Overriding buffer format YUV_420_888 to %x.", __FUNCTION__, bufFmt);
- } else {
- // Return the buffer to the queue.
- mCpuConsumer->unlockBuffer(*buffer);
- returnLockedBufferLocked(buffer);
-
- ALOGE("Producer output buffer format: 0x%x, ImageReader configured format: 0x%x",
- buffer->format, readerFmt);
-
+ // Check if the producer buffer configurations match what AImageReader configured. Add some
+ // extra checks for non-opaque formats.
+ if (!isFormatOpaque(readerFmt)) {
+ // Check if the left-top corner of the crop rect is origin, we currently assume this point
+ // is zero, will revisit this once this assumption turns out problematic.
+ Point lt = buffer->mCrop.leftTop();
+ if (lt.x != 0 || lt.y != 0) {
+ ALOGE("Crop left top corner [%d, %d] not at origin", lt.x, lt.y);
return AMEDIA_ERROR_UNKNOWN;
}
+
+ // Check if the producer buffer configurations match what ImageReader configured.
+ ALOGV_IF(readerWidth != bufferWidth || readerHeight != bufferHeight,
+ "%s: Buffer size: %dx%d, doesn't match AImageReader configured size: %dx%d",
+ __FUNCTION__, bufferWidth, bufferHeight, readerWidth, readerHeight);
+
+ // Check if the buffer usage is a super set of reader's usage bits, aka all usage bits that
+ // ImageReader requested has been supported from the producer side.
+ ALOGD_IF((readerUsage | bufferUsage) != bufferUsage,
+ "%s: Producer buffer usage: %x, doesn't cover all usage bits AImageReader "
+ "configured: %x",
+ __FUNCTION__, bufferUsage, readerUsage);
+
+ if (readerFmt != bufferFmt) {
+ if (readerFmt == HAL_PIXEL_FORMAT_YCbCr_420_888 && isPossiblyYUV(bufferFmt)) {
+ // Special casing for when producer switches to a format compatible with flexible
+ // YUV.
+ mHalFormat = bufferFmt;
+ ALOGD("%s: Overriding buffer format YUV_420_888 to 0x%x.", __FUNCTION__, bufferFmt);
+ } else {
+ // Return the buffer to the queue. No need to provide fence, as this buffer wasn't
+ // used anywhere yet.
+ mBufferItemConsumer->releaseBuffer(*buffer);
+ returnBufferItemLocked(buffer);
+
+ ALOGE("%s: Output buffer format: 0x%x, ImageReader configured format: 0x%x",
+ __FUNCTION__, bufferFmt, readerFmt);
+
+ return AMEDIA_ERROR_UNKNOWN;
+ }
+ }
}
if (mHalFormat == HAL_PIXEL_FORMAT_BLOB) {
- *image = new AImage(this, mFormat, buffer, buffer->timestamp,
- readerWidth, readerHeight, mNumPlanes);
+ *image = new AImage(this, mFormat, mUsage, buffer, buffer->mTimestamp,
+ readerWidth, readerHeight, mNumPlanes);
} else {
- *image = new AImage(this, mFormat, buffer, buffer->timestamp,
- outputWidth, outputHeight, mNumPlanes);
+ *image = new AImage(this, mFormat, mUsage, buffer, buffer->mTimestamp,
+ bufferWidth, bufferHeight, mNumPlanes);
}
mAcquiredImages.push_back(*image);
+
+ // When the output parameter fence is not NULL, we are acquiring the image asynchronously.
+ if (acquireFenceFd != nullptr) {
+ *acquireFenceFd = buffer->mFence->dup();
+ }
+
return AMEDIA_OK;
}
-CpuConsumer::LockedBuffer*
-AImageReader::getLockedBufferLocked() {
+BufferItem*
+AImageReader::getBufferItemLocked() {
if (mBuffers.empty()) {
return nullptr;
}
- // Return a LockedBuffer pointer and remove it from the list
+ // Return a BufferItem pointer and remove it from the list
auto it = mBuffers.begin();
- CpuConsumer::LockedBuffer* buffer = *it;
+ BufferItem* buffer = *it;
mBuffers.erase(it);
return buffer;
}
void
-AImageReader::returnLockedBufferLocked(CpuConsumer::LockedBuffer* buffer) {
+AImageReader::returnBufferItemLocked(BufferItem* buffer) {
mBuffers.push_back(buffer);
}
void
-AImageReader::releaseImageLocked(AImage* image) {
- CpuConsumer::LockedBuffer* buffer = image->mBuffer;
+AImageReader::releaseImageLocked(AImage* image, int releaseFenceFd) {
+ BufferItem* buffer = image->mBuffer;
if (buffer == nullptr) {
// This should not happen, but is not fatal
ALOGW("AImage %p has no buffer!", image);
return;
}
- mCpuConsumer->unlockBuffer(*buffer);
- returnLockedBufferLocked(buffer);
+ int unlockFenceFd = -1;
+ media_status_t ret = image->unlockImageIfLocked(&unlockFenceFd);
+ if (ret < 0) {
+ ALOGW("%s: AImage %p is cannot be unlocked.", __FUNCTION__, image);
+ return;
+ }
+
+ sp<Fence> unlockFence = unlockFenceFd > 0 ? new Fence(unlockFenceFd) : Fence::NO_FENCE;
+ sp<Fence> releaseFence = releaseFenceFd > 0 ? new Fence(releaseFenceFd) : Fence::NO_FENCE;
+ sp<Fence> bufferFence = Fence::merge("AImageReader", unlockFence, releaseFence);
+ mBufferItemConsumer->releaseBuffer(*buffer, bufferFence);
+ returnBufferItemLocked(buffer);
image->mBuffer = nullptr;
bool found = false;
@@ -394,33 +511,35 @@
}
int
-AImageReader::getBufferWidth(CpuConsumer::LockedBuffer* buffer) {
- if (buffer == nullptr) return -1;
+AImageReader::getBufferWidth(BufferItem* buffer) {
+ if (buffer == NULL) return -1;
- if (!buffer->crop.isEmpty()) {
- return buffer->crop.getWidth();
+ if (!buffer->mCrop.isEmpty()) {
+ return buffer->mCrop.getWidth();
}
- return buffer->width;
+
+ return buffer->mGraphicBuffer->getWidth();
}
int
-AImageReader::getBufferHeight(CpuConsumer::LockedBuffer* buffer) {
- if (buffer == nullptr) return -1;
+AImageReader::getBufferHeight(BufferItem* buffer) {
+ if (buffer == NULL) return -1;
- if (!buffer->crop.isEmpty()) {
- return buffer->crop.getHeight();
+ if (!buffer->mCrop.isEmpty()) {
+ return buffer->mCrop.getHeight();
}
- return buffer->height;
+
+ return buffer->mGraphicBuffer->getHeight();
}
media_status_t
-AImageReader::acquireNextImage(/*out*/AImage** image) {
+AImageReader::acquireNextImage(/*out*/AImage** image, /*out*/int* acquireFenceFd) {
Mutex::Autolock _l(mLock);
- return acquireCpuConsumerImageLocked(image);
+ return acquireImageLocked(image, acquireFenceFd);
}
media_status_t
-AImageReader::acquireLatestImage(/*out*/AImage** image) {
+AImageReader::acquireLatestImage(/*out*/AImage** image, /*out*/int* acquireFenceFd) {
if (image == nullptr) {
return AMEDIA_ERROR_INVALID_PARAMETER;
}
@@ -428,17 +547,26 @@
*image = nullptr;
AImage* prevImage = nullptr;
AImage* nextImage = nullptr;
- media_status_t ret = acquireCpuConsumerImageLocked(&prevImage);
+ media_status_t ret = acquireImageLocked(&prevImage, acquireFenceFd);
if (prevImage == nullptr) {
return ret;
}
for (;;) {
- ret = acquireCpuConsumerImageLocked(&nextImage);
+ ret = acquireImageLocked(&nextImage, acquireFenceFd);
if (nextImage == nullptr) {
*image = prevImage;
return AMEDIA_OK;
}
- prevImage->close();
+
+ if (acquireFenceFd == nullptr) {
+ // No need for release fence here since the prevImage is unused and acquireImageLocked
+ // has already waited for acquired fence to be signaled.
+ prevImage->close();
+ } else {
+ // Use the acquire fence as release fence, so that producer can wait before trying to
+ // refill the buffer.
+ prevImage->close(*acquireFenceFd);
+ }
prevImage->free();
prevImage = nextImage;
nextImage = nullptr;
@@ -450,6 +578,15 @@
int32_t width, int32_t height, int32_t format, int32_t maxImages,
/*out*/AImageReader** reader) {
ALOGV("%s", __FUNCTION__);
+ return AImageReader_newWithUsage(
+ width, height, format, AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN, maxImages, reader);
+}
+
+EXPORT
+media_status_t AImageReader_newWithUsage(
+ int32_t width, int32_t height, int32_t format, uint64_t usage,
+ int32_t maxImages, /*out*/ AImageReader** reader) {
+ ALOGV("%s", __FUNCTION__);
if (width < 1 || height < 1) {
ALOGE("%s: image dimension must be positive: w:%d h:%d",
@@ -463,6 +600,12 @@
return AMEDIA_ERROR_INVALID_PARAMETER;
}
+ if (maxImages > BufferQueueDefs::NUM_BUFFER_SLOTS) {
+ ALOGE("%s: max outstanding image count (%d) cannot be larget than %d.",
+ __FUNCTION__, maxImages, BufferQueueDefs::NUM_BUFFER_SLOTS);
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+
if (!AImageReader::isSupportedFormat(format)) {
ALOGE("%s: format %d is not supported by AImageReader",
__FUNCTION__, format);
@@ -474,8 +617,8 @@
return AMEDIA_ERROR_INVALID_PARAMETER;
}
- //*reader = new AImageReader(width, height, format, maxImages);
- AImageReader* tmpReader = new AImageReader(width, height, format, maxImages);
+ AImageReader* tmpReader = new AImageReader(
+ width, height, format, usage, maxImages);
if (tmpReader == nullptr) {
ALOGE("%s: AImageReader allocation failed", __FUNCTION__);
return AMEDIA_ERROR_UNKNOWN;
@@ -563,23 +706,37 @@
EXPORT
media_status_t AImageReader_acquireNextImage(AImageReader* reader, /*out*/AImage** image) {
ALOGV("%s", __FUNCTION__);
- if (reader == nullptr || image == nullptr) {
- ALOGE("%s: invalid argument. reader %p, maxImages %p",
- __FUNCTION__, reader, image);
- return AMEDIA_ERROR_INVALID_PARAMETER;
- }
- return reader->acquireNextImage(image);
+ return AImageReader_acquireNextImageAsync(reader, image, nullptr);
}
EXPORT
media_status_t AImageReader_acquireLatestImage(AImageReader* reader, /*out*/AImage** image) {
ALOGV("%s", __FUNCTION__);
+ return AImageReader_acquireLatestImageAsync(reader, image, nullptr);
+}
+
+EXPORT
+media_status_t AImageReader_acquireNextImageAsync(
+ AImageReader* reader, /*out*/AImage** image, /*out*/int* acquireFenceFd) {
+ ALOGV("%s", __FUNCTION__);
if (reader == nullptr || image == nullptr) {
- ALOGE("%s: invalid argument. reader %p, maxImages %p",
+ ALOGE("%s: invalid argument. reader %p, image %p",
__FUNCTION__, reader, image);
return AMEDIA_ERROR_INVALID_PARAMETER;
}
- return reader->acquireLatestImage(image);
+ return reader->acquireNextImage(image, acquireFenceFd);
+}
+
+EXPORT
+media_status_t AImageReader_acquireLatestImageAsync(
+ AImageReader* reader, /*out*/AImage** image, /*out*/int* acquireFenceFd) {
+ ALOGV("%s", __FUNCTION__);
+ if (reader == nullptr || image == nullptr) {
+ ALOGE("%s: invalid argument. reader %p, image %p",
+ __FUNCTION__, reader, image);
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+ return reader->acquireLatestImage(image, acquireFenceFd);
}
EXPORT
@@ -594,3 +751,16 @@
reader->setImageListener(listener);
return AMEDIA_OK;
}
+
+EXPORT
+media_status_t AImageReader_setBufferRemovedListener(
+ AImageReader* reader, AImageReader_BufferRemovedListener* listener) {
+ ALOGV("%s", __FUNCTION__);
+ if (reader == nullptr) {
+ ALOGE("%s: invalid argument! reader %p", __FUNCTION__, reader);
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+
+ reader->setBufferRemovedListener(listener);
+ return AMEDIA_OK;
+}
diff --git a/media/ndk/NdkImageReaderPriv.h b/media/ndk/NdkImageReaderPriv.h
index f4c1187..989c1fd 100644
--- a/media/ndk/NdkImageReaderPriv.h
+++ b/media/ndk/NdkImageReaderPriv.h
@@ -25,7 +25,8 @@
#include <utils/Mutex.h>
#include <utils/StrongPointer.h>
-#include <gui/CpuConsumer.h>
+#include <gui/BufferItem.h>
+#include <gui/BufferItemConsumer.h>
#include <gui/Surface.h>
#include <media/stagefright/foundation/ALooper.h>
@@ -48,11 +49,14 @@
struct AImageReader : public RefBase {
public:
-
static bool isSupportedFormat(int32_t format);
static int getNumPlanesForFormat(int32_t format);
- AImageReader(int32_t width, int32_t height, int32_t format, int32_t maxImages);
+ AImageReader(int32_t width,
+ int32_t height,
+ int32_t format,
+ uint64_t usage,
+ int32_t maxImages);
~AImageReader();
// Initialize AImageReader; an uninitialized or failed-to-initialize AImageReader
@@ -60,9 +64,10 @@
media_status_t init();
media_status_t setImageListener(AImageReader_ImageListener* listener);
+ media_status_t setBufferRemovedListener(AImageReader_BufferRemovedListener* listener);
- media_status_t acquireNextImage(/*out*/AImage** image);
- media_status_t acquireLatestImage(/*out*/AImage** image);
+ media_status_t acquireNextImage(/*out*/AImage** image, /*out*/int* fenceFd);
+ media_status_t acquireLatestImage(/*out*/AImage** image, /*out*/int* fenceFd);
ANativeWindow* getWindow() const { return mWindow.get(); };
int32_t getWidth() const { return mWidth; };
@@ -70,29 +75,33 @@
int32_t getFormat() const { return mFormat; };
int32_t getMaxImages() const { return mMaxImages; };
-
private:
friend struct AImage; // for grabbing reader lock
- media_status_t acquireCpuConsumerImageLocked(/*out*/AImage** image);
- CpuConsumer::LockedBuffer* getLockedBufferLocked();
- void returnLockedBufferLocked(CpuConsumer::LockedBuffer* buffer);
+ BufferItem* getBufferItemLocked();
+ void returnBufferItemLocked(BufferItem* buffer);
+
+ // Called by AImageReader_acquireXXX to acquire a Buffer and setup AImage.
+ media_status_t acquireImageLocked(/*out*/AImage** image, /*out*/int* fenceFd);
// Called by AImage to close image
- void releaseImageLocked(AImage* image);
+ void releaseImageLocked(AImage* image, int releaseFenceFd);
- static int getBufferWidth(CpuConsumer::LockedBuffer* buffer);
- static int getBufferHeight(CpuConsumer::LockedBuffer* buffer);
+ static int getBufferWidth(BufferItem* buffer);
+ static int getBufferHeight(BufferItem* buffer);
media_status_t setImageListenerLocked(AImageReader_ImageListener* listener);
+ media_status_t setBufferRemovedListenerLocked(AImageReader_BufferRemovedListener* listener);
// definition of handler and message
enum {
- kWhatImageAvailable
+ kWhatBufferRemoved,
+ kWhatImageAvailable,
};
static const char* kCallbackFpKey;
static const char* kContextKey;
+ static const char* kGraphicBufferKey;
class CallbackHandler : public AHandler {
public:
CallbackHandler(AImageReader* reader) : mReader(reader) {}
@@ -102,12 +111,15 @@
};
sp<CallbackHandler> mHandler;
sp<ALooper> mCbLooper; // Looper thread where callbacks actually happen on
+ List<BufferItem*> mBuffers;
- List<CpuConsumer::LockedBuffer*> mBuffers;
const int32_t mWidth;
const int32_t mHeight;
const int32_t mFormat;
+ const uint64_t mUsage; // AHARDWAREBUFFER_USAGE_* flags.
const int32_t mMaxImages;
+
+ // TODO(jwcai) Seems completely unused in AImageReader class.
const int32_t mNumPlanes;
struct FrameListener : public ConsumerBase::FrameAvailableListener {
@@ -125,12 +137,28 @@
};
sp<FrameListener> mFrameListener;
+ struct BufferRemovedListener : public BufferItemConsumer::BufferFreedListener {
+ public:
+ explicit BufferRemovedListener(AImageReader* parent) : mReader(parent) {}
+
+ void onBufferFreed(const wp<GraphicBuffer>& graphicBuffer) override;
+
+ media_status_t setBufferRemovedListener(AImageReader_BufferRemovedListener* listener);
+
+ private:
+ AImageReader_BufferRemovedListener mListener = {nullptr, nullptr};
+ wp<AImageReader> mReader;
+ Mutex mLock;
+ };
+ sp<BufferRemovedListener> mBufferRemovedListener;
+
int mHalFormat;
android_dataspace mHalDataSpace;
+ uint64_t mHalUsage;
sp<IGraphicBufferProducer> mProducer;
sp<Surface> mSurface;
- sp<CpuConsumer> mCpuConsumer;
+ sp<BufferItemConsumer> mBufferItemConsumer;
sp<ANativeWindow> mWindow;
List<AImage*> mAcquiredImages;
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index 6229982..128edba 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -30,10 +30,12 @@
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/PersistentSurface.h>
#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/MediaErrors.h>
+#include <media/MediaCodecBuffer.h>
+#include <android/native_window.h>
using namespace android;
@@ -54,6 +56,18 @@
kWhatStopActivityNotifications,
};
+struct AMediaCodecPersistentSurface : public Surface {
+ sp<PersistentSurface> mPersistentSurface;
+ AMediaCodecPersistentSurface(
+ const sp<IGraphicBufferProducer>& igbp,
+ const sp<PersistentSurface>& ps)
+ : Surface(igbp) {
+ mPersistentSurface = ps;
+ }
+ virtual ~AMediaCodecPersistentSurface() {
+ //mPersistentSurface ref will be let go off here
+ }
+};
class CodecHandler: public AHandler {
private:
@@ -268,13 +282,17 @@
EXPORT
uint8_t* AMediaCodec_getInputBuffer(AMediaCodec *mData, size_t idx, size_t *out_size) {
- android::Vector<android::sp<android::ABuffer> > abufs;
+ android::Vector<android::sp<android::MediaCodecBuffer> > abufs;
if (mData->mCodec->getInputBuffers(&abufs) == 0) {
size_t n = abufs.size();
if (idx >= n) {
ALOGE("buffer index %zu out of range", idx);
return NULL;
}
+ if (abufs[idx] == NULL) {
+ ALOGE("buffer index %zu is NULL", idx);
+ return NULL;
+ }
if (out_size != NULL) {
*out_size = abufs[idx]->capacity();
}
@@ -286,7 +304,7 @@
EXPORT
uint8_t* AMediaCodec_getOutputBuffer(AMediaCodec *mData, size_t idx, size_t *out_size) {
- android::Vector<android::sp<android::ABuffer> > abufs;
+ android::Vector<android::sp<android::MediaCodecBuffer> > abufs;
if (mData->mCodec->getOutputBuffers(&abufs) == 0) {
size_t n = abufs.size();
if (idx >= n) {
@@ -373,6 +391,94 @@
return translate_error(mData->mCodec->setSurface(surface));
}
+EXPORT
+media_status_t AMediaCodec_createInputSurface(AMediaCodec *mData, ANativeWindow **surface) {
+ if (surface == NULL || mData == NULL) {
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+ *surface = NULL;
+
+ sp<IGraphicBufferProducer> igbp = NULL;
+ status_t err = mData->mCodec->createInputSurface(&igbp);
+ if (err != NO_ERROR) {
+ return translate_error(err);
+ }
+
+ *surface = new Surface(igbp);
+ ANativeWindow_acquire(*surface);
+ return AMEDIA_OK;
+}
+
+EXPORT
+media_status_t AMediaCodec_createPersistentInputSurface(ANativeWindow **surface) {
+ if (surface == NULL) {
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+ *surface = NULL;
+
+ sp<PersistentSurface> ps = MediaCodec::CreatePersistentInputSurface();
+ if (ps == NULL) {
+ return AMEDIA_ERROR_UNKNOWN;
+ }
+
+ sp<IGraphicBufferProducer> igbp = ps->getBufferProducer();
+ if (igbp == NULL) {
+ return AMEDIA_ERROR_UNKNOWN;
+ }
+
+ *surface = new AMediaCodecPersistentSurface(igbp, ps);
+ ANativeWindow_acquire(*surface);
+
+ return AMEDIA_OK;
+}
+
+EXPORT
+media_status_t AMediaCodec_setInputSurface(
+ AMediaCodec *mData, ANativeWindow *surface) {
+
+ if (surface == NULL || mData == NULL) {
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+
+ AMediaCodecPersistentSurface *aMediaPersistentSurface =
+ static_cast<AMediaCodecPersistentSurface *>(surface);
+ if (aMediaPersistentSurface->mPersistentSurface == NULL) {
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+
+ return translate_error(mData->mCodec->setInputSurface(
+ aMediaPersistentSurface->mPersistentSurface));
+}
+
+EXPORT
+media_status_t AMediaCodec_setParameters(
+ AMediaCodec *mData, const AMediaFormat* params) {
+ if (params == NULL || mData == NULL) {
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+ sp<AMessage> nativeParams;
+ AMediaFormat_getFormat(params, &nativeParams);
+ ALOGV("setParameters: %s", nativeParams->debugString(0).c_str());
+
+ return translate_error(mData->mCodec->setParameters(nativeParams));
+}
+
+EXPORT
+media_status_t AMediaCodec_signalEndOfInputStream(AMediaCodec *mData) {
+
+ if (mData == NULL) {
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+
+ status_t err = mData->mCodec->signalEndOfInputStream();
+ if (err == INVALID_OPERATION) {
+ return AMEDIA_ERROR_INVALID_OPERATION;
+ }
+
+ return translate_error(err);
+
+}
+
//EXPORT
media_status_t AMediaCodec_setNotificationCallback(AMediaCodec *mData, OnCodecEvent callback,
void *userdata) {
diff --git a/media/ndk/NdkMediaDrm.cpp b/media/ndk/NdkMediaDrm.cpp
index 2c20e51..51143ac 100644
--- a/media/ndk/NdkMediaDrm.cpp
+++ b/media/ndk/NdkMediaDrm.cpp
@@ -99,11 +99,12 @@
break;
default:
ALOGE("Invalid event DrmPlugin::EventType %d, ignored", (int)eventType);
- return;
+ goto cleanup;
}
(*mListener)(mObj, &sessionId, ndkEventType, extra, data, dataSize);
+ cleanup:
delete [] sessionId.ptr;
delete [] data;
}
@@ -171,7 +172,8 @@
return NULL;
}
- status_t err = drm->createPlugin(uuid);
+ String8 nullPackageName;
+ status_t err = drm->createPlugin(uuid, nullPackageName);
if (err != OK) {
return NULL;
diff --git a/media/ndk/NdkMediaMuxer.cpp b/media/ndk/NdkMediaMuxer.cpp
index 9bdda75..80a4391 100644
--- a/media/ndk/NdkMediaMuxer.cpp
+++ b/media/ndk/NdkMediaMuxer.cpp
@@ -81,7 +81,8 @@
ssize_t AMediaMuxer_addTrack(AMediaMuxer *muxer, const AMediaFormat *format) {
sp<AMessage> msg;
AMediaFormat_getFormat(format, &msg);
- return translate_error(muxer->mImpl->addTrack(msg));
+ ssize_t ret = muxer->mImpl->addTrack(msg);
+ return (ret >= 0) ? ret : translate_error(ret);
}
EXPORT
diff --git a/media/ndk/include/media/NdkImage.h b/media/ndk/include/media/NdkImage.h
index 15eae40..d7443be 100644
--- a/media/ndk/include/media/NdkImage.h
+++ b/media/ndk/include/media/NdkImage.h
@@ -15,7 +15,7 @@
*/
/**
- * @addtogroup Media Camera
+ * @addtogroup Media
* @{
*/
@@ -40,6 +40,10 @@
#include "NdkMediaError.h"
+#if __ANDROID_API__ >= 26
+#include <android/hardware_buffer.h>
+#endif /* __ANDROID_API__ >= 26 */
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -132,7 +136,7 @@
* <p>
* Corresponding formats:
* <ul>
- * <li>AHardwareBuffer: AHARDWAREBUFFER_FORMAT_R16G16B16A16_SFLOAT</li>
+ * <li>AHardwareBuffer: AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT</li>
* <li>Vulkan: VK_FORMAT_R16G16B16A16_SFLOAT</li>
* <li>OpenGL ES: GL_RGBA16F</li>
* </ul>
@@ -646,7 +650,9 @@
* <li>{@link AMEDIA_ERROR_UNSUPPORTED} if pixel stride is undefined for the format of input
* image.</li>
* <li>{@link AMEDIA_ERROR_INVALID_OBJECT} if the {@link AImageReader} generated this
- * image has been deleted.</li></ul>
+ * image has been deleted.</li>
+ * <li>{@link AMEDIA_IMGREADER_CANNOT_LOCK_IMAGE} if the {@link AImage} cannot be locked
+ * for CPU access.</li></ul>
*/
media_status_t AImage_getPlanePixelStride(
const AImage* image, int planeIdx, /*out*/int32_t* pixelStride);
@@ -671,7 +677,9 @@
* <li>{@link AMEDIA_ERROR_UNSUPPORTED} if row stride is undefined for the format of input
* image.</li>
* <li>{@link AMEDIA_ERROR_INVALID_OBJECT} if the {@link AImageReader} generated this
- * image has been deleted.</li></ul>
+ * image has been deleted.</li>
+ * <li>{@link AMEDIA_IMGREADER_CANNOT_LOCK_IMAGE} if the {@link AImage} cannot be locked
+ * for CPU access.</li></ul>
*/
media_status_t AImage_getPlaneRowStride(
const AImage* image, int planeIdx, /*out*/int32_t* rowStride);
@@ -693,7 +701,9 @@
* <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if image, data or dataLength is NULL, or
* planeIdx is out of the range of [0, numOfPlanes - 1].</li>
* <li>{@link AMEDIA_ERROR_INVALID_OBJECT} if the {@link AImageReader} generated this
- * image has been deleted.</li></ul>
+ * image has been deleted.</li>
+ * <li>{@link AMEDIA_IMGREADER_CANNOT_LOCK_IMAGE} if the {@link AImage} cannot be locked
+ * for CPU access.</li></ul>
*/
media_status_t AImage_getPlaneData(
const AImage* image, int planeIdx,
@@ -701,6 +711,53 @@
#endif /* __ANDROID_API__ >= 24 */
+#if __ANDROID_API__ >= 26
+
+/**
+ * Return the image back to the system and delete the AImage object from memory asynchronously.
+ *
+ * <p>Similar to {@link AImage_delete}, do NOT use the image pointer after this method returns.
+ * However, the caller can still hold on to the {@link AHardwareBuffer} returned from this image and
+ * signal the release of the hardware buffer back to the {@link AImageReader}'s queue using
+ * releaseFenceFd.</p>
+ *
+ * @param image The {@link AImage} to be deleted.
+ * @param releaseFenceFd A sync fence fd defined in {@link sync.h}, which signals the release of
+ * underlying {@link AHardwareBuffer}.
+ *
+ * @see sync.h
+ */
+void AImage_deleteAsync(AImage* image, int releaseFenceFd);
+
+/**
+ * Get the hardware buffer handle of the input image intended for GPU and/or hardware access.
+ *
+ * <p>Note that no reference on the returned {@link AHardwareBuffer} handle is acquired
+ * automatically. Once the {@link AImage} or the parent {@link AImageReader} is deleted, the
+ * {@link AHardwareBuffer} handle from previous {@link AImage_getHardwareBuffer} becomes
+ * invalid.</p>
+ *
+ * <p>If the caller ever needs to hold on a reference to the {@link AHardwareBuffer} handle after
+ * the {@link AImage} or the parent {@link AImageReader} is deleted, it must call {@link
+ * AHardwareBuffer_acquire} to acquire an extra reference, and call {@link AHardwareBuffer_release}
+ * once it has finished using it in order to properly deallocate the underlying memory managed by
+ * {@link AHardwareBuffer}. If the caller has acquired extra reference on an {@link AHardwareBuffer}
+ * returned from this function, it must also listen to {@link onBufferFreed} callback to be
+ * notified when the buffer is no longer used by {@link AImageReader}.</p>
+ *
+ * @param image the {@link AImage} of interest.
+ * @param outBuffer The memory area pointed to by buffer will contain the acquired AHardwareBuffer
+ * handle.
+ * @return <ul>
+ * <li>{@link AMEDIA_OK} if the method call succeeds.</li>
+ * <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if image or buffer is NULL</li></ul>
+ *
+ * @see AImageReader_ImageCallback
+ */
+media_status_t AImage_getHardwareBuffer(const AImage* image, /*out*/AHardwareBuffer** buffer);
+
+#endif /* __ANDROID_API__ >= 26 */
+
#ifdef __cplusplus
} // extern "C"
#endif
diff --git a/media/ndk/include/media/NdkImageReader.h b/media/ndk/include/media/NdkImageReader.h
index 8d72c28..59ae507 100644
--- a/media/ndk/include/media/NdkImageReader.h
+++ b/media/ndk/include/media/NdkImageReader.h
@@ -15,7 +15,7 @@
*/
/**
- * @addtogroup Media Camera
+ * @addtogroup Media
* @{
*/
@@ -300,6 +300,143 @@
#endif /* __ANDROID_API__ >= 24 */
+#if __ANDROID_API__ >= 26
+
+/**
+ * AImageReader constructor similar to {@link AImageReader_new} that takes an additional parameter
+ * for the consumer usage. All other parameters and the return values are identical to those passed
+ * to {@link AImageReader_new}.
+ *
+ * @param usage specifies how the consumer will access the AImage, using combination of the
+ * AHARDWAREBUFFER_USAGE flags described in {@link hardware_buffer.h}.
+ * Passing {@link AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN} is equivalent to calling
+ * {@link AImageReader_new} with the same parameters.
+ *
+ * Note that not all format and usage flag combinations are supported by the {@link AImageReader}.
+ * Below are the combinations supported by the {@link AImageReader}.
+ * <table>
+ * <tr>
+ * <th>Format</th>
+ * <th>Compatible usage flags</th>
+ * </tr>
+ * <tr>
+ * <td>non-{@link AIMAGE_FORMAT_PRIVATE PRIVATE} formats defined in {@link AImage.h}
+ * </td>
+ * <td>{@link AHARDWAREBUFFER_USAGE_CPU_READ_RARELY} or
+ * {@link AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN}</td>
+ * </tr>
+ * <tr>
+ * <td>{@link AIMAGE_FORMAT_RGBA_8888}</td>
+ * <td>{@link AHARDWAREBUFFER_USAGE_VIDEO_ENCODE} or
+ * {@link AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE}, or combined</td>
+ * </tr>
+ * </table>
+ *
+ * @see AImage
+ * @see AImageReader_new
+ * @see AHardwareBuffer
+ */
+media_status_t AImageReader_newWithUsage(
+ int32_t width, int32_t height, int32_t format, uint64_t usage, int32_t maxImages,
+ /*out*/ AImageReader** reader);
+
+/**
+ * Acquire the next {@link AImage} from the image reader's queue asynchronously.
+ *
+ * <p>AImageReader acquire method similar to {@link AImageReader_acquireNextImage} that takes an
+ * additional parameter for the sync fence. All other parameters and the return values are
+ * identical to those passed to {@link AImageReader_acquireNextImage}.</p>
+ *
+ * @param acquireFenceFd A sync fence fd defined in {@link sync.h}, which is used to signal when the
+ * buffer is ready to consume. When synchronization fence is not needed, fence will be set
+ * to -1 and the {@link AImage} returned is ready for use immediately. Otherwise, user shall
+ * use syscalls such as {@code poll()}, {@code epoll()}, {@code select()} to wait for the
+ * fence fd to change status before attempting to access the {@link AImage} returned.
+ *
+ * @see sync.h
+ * @see sync_get_fence_info
+ */
+media_status_t AImageReader_acquireNextImageAsync(
+ AImageReader* reader, /*out*/AImage** image, /*out*/int* acquireFenceFd);
+
+/**
+ * Acquire the latest {@link AImage} from the image reader's queue asynchronously, dropping older
+ * images.
+ *
+ * <p>AImageReader acquire method similar to {@link AImageReader_acquireLatestImage} that takes an
+ * additional parameter for the sync fence. All other parameters and the return values are
+ * identical to those passed to {@link AImageReader_acquireLatestImage}.</p>
+ *
+ * @param acquireFenceFd A sync fence fd defined in {@link sync.h}, which is used to signal when the
+ * buffer is ready to consume. When synchronization fence is not needed, fence will be set
+ * to -1 and the {@link AImage} returned is ready for use immediately. Otherwise, user shall
+ * use syscalls such as {@code poll()}, {@code epoll()}, {@code select()} to wait for the
+ * fence fd to change status before attempting to access the {@link AImage} returned.
+ *
+ * @see sync.h
+ * @see sync_get_fence_info
+ */
+media_status_t AImageReader_acquireLatestImageAsync(
+ AImageReader* reader, /*out*/AImage** image, /*out*/int* acquireFenceFd);
+/**
+ * The definition of {@link AImageReader} buffer removed callback.
+ *
+ * @param context The optional application context provided by user in
+ * {@link AImageReader_setBufferRemovedListener}.
+ * @param reader The {@link AImageReader} of interest.
+ * @param buffer The {@link AHardwareBuffer} that is being removed from this image reader.
+ */
+typedef void (*AImageReader_BufferRemovedCallback)(void* context,
+ AImageReader* reader,
+ AHardwareBuffer* buffer);
+
+typedef struct AImageReader_BufferRemovedListener {
+ /// optional application context.
+ void* context;
+
+ /**
+ * This callback is called when an old {@link AHardwareBuffer} is about to be removed from the
+ * image reader.
+ *
+ * <p>Note that registering this callback is optional unless the user holds on extra reference
+ * to {@link AHardwareBuffer} returned from {@link AImage_getHardwareBuffer} by calling {@link
+ * AHardwareBuffer_acquire} or creating external graphic objects, such as EglImage, from it.</p>
+ *
+ * <p>If the callback is registered, the {@link AImageReader} will hold on the last of its
+ * references to the {@link AHardwareBuffer} until this callback returns. User can use the
+ * callback to get notified that it becomes the last owner of the buffer. It is up to the user
+ * to decide to either 1) immediately release all of its references to the buffer; or 2) keep
+ * using the buffer and release it in future. Note that, if option 2 is used, the user of this API
+ * is responsible to deallocate the buffer properly by calling {@link AHardwareBuffer_release}.
+ * </p>
+ *
+ * @see AHardwareBuffer_release
+ * @see AImage_getHardwareBuffer
+ */
+ AImageReader_BufferRemovedCallback onBufferRemoved;
+} AImageReader_BufferRemovedListener;
+
+/**
+ * Set the onBufferRemoved listener of this image reader.
+ *
+ * <p>Note that calling this method will replace previously registered listeners.</p>
+ *
+ * @param reader The image reader of interest.
+ * @param listener the {@link AImageReader_BufferRemovedListener} to be registered. Set this to
+ * NULL if application no longer needs to listen to buffer removed events.
+ *
+ * @return <ul>
+ * <li>{@link AMEDIA_OK} if the method call succeeds.</li>
+ * <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if reader is NULL.</li></ul>
+ *
+ * @see AImage_getHardwareBuffer
+ */
+media_status_t AImageReader_setBufferRemovedListener(
+ AImageReader* reader, AImageReader_BufferRemovedListener* listener);
+
+#endif /* __ANDROID_API__ >= 26 */
+
+
#ifdef __cplusplus
} // extern "C"
#endif
diff --git a/media/ndk/include/media/NdkMediaCodec.h b/media/ndk/include/media/NdkMediaCodec.h
index 436e477..7e7e81e 100644
--- a/media/ndk/include/media/NdkMediaCodec.h
+++ b/media/ndk/include/media/NdkMediaCodec.h
@@ -30,8 +30,6 @@
#include <stdint.h>
#include <sys/cdefs.h>
-#include <android/native_window.h>
-
#include "NdkMediaCrypto.h"
#include "NdkMediaError.h"
#include "NdkMediaFormat.h"
@@ -40,6 +38,8 @@
extern "C" {
#endif
+struct ANativeWindow;
+
#if __ANDROID_API__ >= 21
struct AMediaCodec;
@@ -208,6 +208,78 @@
media_status_t AMediaCodec_releaseOutputBufferAtTime(
AMediaCodec *mData, size_t idx, int64_t timestampNs);
+/**
+ * Creates a Surface that can be used as the input to encoder, in place of input buffers
+ *
+ * This can only be called after the codec has been configured via
+ * AMediaCodec_configure(..); and before AMediaCodec_start() has been called.
+ *
+ * The application is responsible for releasing the surface by calling
+ * ANativeWindow_release() when done.
+ *
+ * For more details, see the Java documentation for MediaCodec.createInputSurface.
+ */
+media_status_t AMediaCodec_createInputSurface(
+ AMediaCodec *mData, ANativeWindow **surface);
+
+/**
+ * Creates a persistent Surface that can be used as the input to encoder
+ *
+ * Persistent surface can be reused by MediaCodec instances and can be set
+ * on a new instance via AMediaCodec_setInputSurface().
+ * A persistent surface can be connected to at most one instance of MediaCodec
+ * at any point in time.
+ *
+ * The application is responsible for releasing the surface by calling
+ * ANativeWindow_release() when done.
+ *
+ * For more details, see the Java documentation for MediaCodec.createPersistentInputSurface.
+ */
+media_status_t AMediaCodec_createPersistentInputSurface(
+ ANativeWindow **surface);
+
+/**
+ * Set a persistent-surface that can be used as the input to encoder, in place of input buffers
+ *
+ * The surface provided *must* be a persistent surface created via
+ * AMediaCodec_createPersistentInputSurface()
+ * This can only be called after the codec has been configured by calling
+ * AMediaCodec_configure(..); and before AMediaCodec_start() has been called.
+ *
+ * For more details, see the Java documentation for MediaCodec.setInputSurface.
+ */
+media_status_t AMediaCodec_setInputSurface(
+ AMediaCodec *mData, ANativeWindow *surface);
+
+/**
+ * Signal additional parameters to the codec instance.
+ *
+ * Parameters can be communicated only when the codec is running, i.e
+ * after AMediaCodec_start() has been called.
+ *
+ * NOTE: Some of these parameter changes may silently fail to apply.
+ */
+media_status_t AMediaCodec_setParameters(
+ AMediaCodec *mData, const AMediaFormat* params);
+
+/**
+ * Signals end-of-stream on input. Equivalent to submitting an empty buffer with
+ * AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM set.
+ *
+ * Returns AMEDIA_ERROR_INVALID_OPERATION when used with an encoder not in executing state
+ * or not receiving input from a Surface created by AMediaCodec_createInputSurface or
+ * AMediaCodec_createPersistentInputSurface.
+ *
+ * Returns the previous codec error if one exists.
+ *
+ * Returns AMEDIA_OK when completed successfully.
+ *
+ * For more details, see the Java documentation for MediaCodec.signalEndOfInputStream.
+ */
+media_status_t AMediaCodec_signalEndOfInputStream(AMediaCodec *mData);
+
+
+
typedef enum {
AMEDIACODECRYPTOINFO_MODE_CLEAR = 0,
AMEDIACODECRYPTOINFO_MODE_AES_CTR = 1,
diff --git a/media/ndk/include/media/NdkMediaDrm.h b/media/ndk/include/media/NdkMediaDrm.h
index 9dd6283..cba4380 100644
--- a/media/ndk/include/media/NdkMediaDrm.h
+++ b/media/ndk/include/media/NdkMediaDrm.h
@@ -159,8 +159,7 @@
* to obtain or release keys used to decrypt encrypted content.
* AMediaDrm_getKeyRequest is used to obtain an opaque key request byte array that
* is delivered to the license server. The opaque key request byte array is
- * returned in KeyRequest.data. The recommended URL to deliver the key request to
- * is returned in KeyRequest.defaultUrl.
+ * returned in KeyRequest.data.
*
* After the app has received the key request response from the server,
* it should deliver to the response to the DRM engine plugin using the method
diff --git a/media/ndk/include/media/NdkMediaError.h b/media/ndk/include/media/NdkMediaError.h
index fb00b1d..1b51364 100644
--- a/media/ndk/include/media/NdkMediaError.h
+++ b/media/ndk/include/media/NdkMediaError.h
@@ -45,6 +45,7 @@
AMEDIA_ERROR_UNSUPPORTED = AMEDIA_ERROR_BASE - 2,
AMEDIA_ERROR_INVALID_OBJECT = AMEDIA_ERROR_BASE - 3,
AMEDIA_ERROR_INVALID_PARAMETER = AMEDIA_ERROR_BASE - 4,
+ AMEDIA_ERROR_INVALID_OPERATION = AMEDIA_ERROR_BASE - 5,
AMEDIA_DRM_ERROR_BASE = -20000,
AMEDIA_DRM_NOT_PROVISIONED = AMEDIA_DRM_ERROR_BASE - 1,
@@ -60,6 +61,9 @@
AMEDIA_IMGREADER_ERROR_BASE = -30000,
AMEDIA_IMGREADER_NO_BUFFER_AVAILABLE = AMEDIA_IMGREADER_ERROR_BASE - 1,
AMEDIA_IMGREADER_MAX_IMAGES_ACQUIRED = AMEDIA_IMGREADER_ERROR_BASE - 2,
+ AMEDIA_IMGREADER_CANNOT_LOCK_IMAGE = AMEDIA_IMGREADER_ERROR_BASE - 3,
+ AMEDIA_IMGREADER_CANNOT_UNLOCK_IMAGE = AMEDIA_IMGREADER_ERROR_BASE - 4,
+ AMEDIA_IMGREADER_IMAGE_NOT_LOCKED = AMEDIA_IMGREADER_ERROR_BASE - 5,
} media_status_t;
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index 7db4d06..d7ad370 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -1,7 +1,9 @@
LIBMEDIANDK {
global:
AImageReader_acquireLatestImage; # introduced=24
+ AImageReader_acquireLatestImageAsync; # introduced=26
AImageReader_acquireNextImage; # introduced=24
+ AImageReader_acquireNextImageAsync; # introduced=26
AImageReader_delete; # introduced=24
AImageReader_getFormat; # introduced=24
AImageReader_getHeight; # introduced=24
@@ -9,10 +11,14 @@
AImageReader_getWidth; # introduced=24
AImageReader_getWindow; # introduced=24
AImageReader_new; # introduced=24
+ AImageReader_newWithUsage; # introduced=26
+ AImageReader_setBufferRemovedListener; # introduced=26
AImageReader_setImageListener; # introduced=24
AImage_delete; # introduced=24
+ AImage_deleteAsync; # introduced=26
AImage_getCropRect; # introduced=24
AImage_getFormat; # introduced=24
+ AImage_getHardwareBuffer; # introduced=26
AImage_getHeight; # introduced=24
AImage_getNumberOfPlanes; # introduced=24
AImage_getPlaneData; # introduced=24
@@ -69,6 +75,11 @@
AMediaCodec_releaseOutputBuffer;
AMediaCodec_releaseOutputBufferAtTime;
AMediaCodec_setOutputSurface; # introduced=24
+ AMediaCodec_setParameters; # introduced=26
+ AMediaCodec_setInputSurface; # introduced=26
+ AMediaCodec_createInputSurface; # introduced=26
+ AMediaCodec_signalEndOfInputStream; # introduced=26
+ AMediaCodec_createPersistentInputSurface; # introduced=26
AMediaCodec_start;
AMediaCodec_stop;
AMediaCrypto_delete;
diff --git a/media/utils/BatteryNotifier.cpp b/media/utils/BatteryNotifier.cpp
index 341d391..09bc042 100644
--- a/media/utils/BatteryNotifier.cpp
+++ b/media/utils/BatteryNotifier.cpp
@@ -29,7 +29,7 @@
BatteryNotifier::getInstance().onBatteryStatServiceDied();
}
-BatteryNotifier::BatteryNotifier() : mVideoRefCount(0), mAudioRefCount(0) {}
+BatteryNotifier::BatteryNotifier() {}
BatteryNotifier::~BatteryNotifier() {
Mutex::Autolock _l(mLock);
@@ -38,77 +38,83 @@
}
}
-void BatteryNotifier::noteStartVideo() {
+void BatteryNotifier::noteStartVideo(uid_t uid) {
Mutex::Autolock _l(mLock);
sp<IBatteryStats> batteryService = getBatteryService_l();
- if (mVideoRefCount == 0 && batteryService != nullptr) {
- batteryService->noteStartVideo(AID_MEDIA);
+ if (mVideoRefCounts[uid] == 0 && batteryService != nullptr) {
+ batteryService->noteStartVideo(uid);
}
- mVideoRefCount++;
+ mVideoRefCounts[uid]++;
}
-void BatteryNotifier::noteStopVideo() {
+void BatteryNotifier::noteStopVideo(uid_t uid) {
Mutex::Autolock _l(mLock);
- if (mVideoRefCount == 0) {
- ALOGW("%s: video refcount is broken.", __FUNCTION__);
+ if (mVideoRefCounts.find(uid) == mVideoRefCounts.end()) {
+ ALOGW("%s: video refcount is broken for uid(%d).", __FUNCTION__, (int)uid);
return;
}
sp<IBatteryStats> batteryService = getBatteryService_l();
- mVideoRefCount--;
- if (mVideoRefCount == 0 && batteryService != nullptr) {
- batteryService->noteStopVideo(AID_MEDIA);
+ mVideoRefCounts[uid]--;
+ if (mVideoRefCounts[uid] == 0) {
+ if (batteryService != nullptr) {
+ batteryService->noteStopVideo(uid);
+ }
+ mVideoRefCounts.erase(uid);
}
}
void BatteryNotifier::noteResetVideo() {
Mutex::Autolock _l(mLock);
sp<IBatteryStats> batteryService = getBatteryService_l();
- mVideoRefCount = 0;
+ mVideoRefCounts.clear();
if (batteryService != nullptr) {
batteryService->noteResetVideo();
}
}
-void BatteryNotifier::noteStartAudio() {
+void BatteryNotifier::noteStartAudio(uid_t uid) {
Mutex::Autolock _l(mLock);
sp<IBatteryStats> batteryService = getBatteryService_l();
- if (mAudioRefCount == 0 && batteryService != nullptr) {
- batteryService->noteStartAudio(AID_AUDIOSERVER);
+ if (mAudioRefCounts[uid] == 0 && batteryService != nullptr) {
+ batteryService->noteStartAudio(uid);
}
- mAudioRefCount++;
+ mAudioRefCounts[uid]++;
}
-void BatteryNotifier::noteStopAudio() {
+void BatteryNotifier::noteStopAudio(uid_t uid) {
Mutex::Autolock _l(mLock);
- if (mAudioRefCount == 0) {
- ALOGW("%s: audio refcount is broken.", __FUNCTION__);
+ if (mAudioRefCounts.find(uid) == mAudioRefCounts.end()) {
+ ALOGW("%s: audio refcount is broken for uid(%d).", __FUNCTION__, (int)uid);
return;
}
sp<IBatteryStats> batteryService = getBatteryService_l();
- mAudioRefCount--;
- if (mAudioRefCount == 0 && batteryService != nullptr) {
- batteryService->noteStopAudio(AID_AUDIOSERVER);
+ mAudioRefCounts[uid]--;
+ if (mAudioRefCounts[uid] == 0) {
+ if (batteryService != nullptr) {
+ batteryService->noteStopAudio(uid);
+ }
+ mAudioRefCounts.erase(uid);
}
}
void BatteryNotifier::noteResetAudio() {
Mutex::Autolock _l(mLock);
sp<IBatteryStats> batteryService = getBatteryService_l();
- mAudioRefCount = 0;
+ mAudioRefCounts.clear();
if (batteryService != nullptr) {
batteryService->noteResetAudio();
}
}
-void BatteryNotifier::noteFlashlightOn(const String8& id, int uid) {
+void BatteryNotifier::noteFlashlightOn(const String8& id, uid_t uid) {
Mutex::Autolock _l(mLock);
sp<IBatteryStats> batteryService = getBatteryService_l();
- std::pair<String8, int> k = std::make_pair(id, uid);
+ std::pair<String8, uid_t> k = std::make_pair(id, uid);
if (!mFlashlightState[k]) {
mFlashlightState[k] = true;
if (batteryService != nullptr) {
@@ -117,11 +123,11 @@
}
}
-void BatteryNotifier::noteFlashlightOff(const String8& id, int uid) {
+void BatteryNotifier::noteFlashlightOff(const String8& id, uid_t uid) {
Mutex::Autolock _l(mLock);
sp<IBatteryStats> batteryService = getBatteryService_l();
- std::pair<String8, int> k = std::make_pair(id, uid);
+ std::pair<String8, uid_t> k = std::make_pair(id, uid);
if (mFlashlightState[k]) {
mFlashlightState[k] = false;
if (batteryService != nullptr) {
@@ -139,10 +145,10 @@
}
}
-void BatteryNotifier::noteStartCamera(const String8& id, int uid) {
+void BatteryNotifier::noteStartCamera(const String8& id, uid_t uid) {
Mutex::Autolock _l(mLock);
sp<IBatteryStats> batteryService = getBatteryService_l();
- std::pair<String8, int> k = std::make_pair(id, uid);
+ std::pair<String8, uid_t> k = std::make_pair(id, uid);
if (!mCameraState[k]) {
mCameraState[k] = true;
if (batteryService != nullptr) {
@@ -151,10 +157,10 @@
}
}
-void BatteryNotifier::noteStopCamera(const String8& id, int uid) {
+void BatteryNotifier::noteStopCamera(const String8& id, uid_t uid) {
Mutex::Autolock _l(mLock);
sp<IBatteryStats> batteryService = getBatteryService_l();
- std::pair<String8, int> k = std::make_pair(id, uid);
+ std::pair<String8, uid_t> k = std::make_pair(id, uid);
if (mCameraState[k]) {
mCameraState[k] = false;
if (batteryService != nullptr) {
@@ -176,7 +182,7 @@
Mutex::Autolock _l(mLock);
mBatteryStatService.clear();
mDeathNotifier.clear();
- // Do not reset mVideoRefCount and mAudioRefCount here. The ref
+ // Do not reset mVideoRefCounts and mAudioRefCounts here. The ref
// counting is independent of the battery service availability.
// We need this if battery service becomes available after media
// started.
@@ -205,11 +211,13 @@
// Notify start now if mediaserver or audioserver is already started.
// 1) mediaserver and audioserver is started before batterystats service
// 2) batterystats server may have crashed.
- if (mVideoRefCount > 0) {
- mBatteryStatService->noteStartVideo(AID_MEDIA);
+ std::map<uid_t, int>::iterator it = mVideoRefCounts.begin();
+ for (; it != mVideoRefCounts.end(); ++it) {
+ mBatteryStatService->noteStartVideo(it->first);
}
- if (mAudioRefCount > 0) {
- mBatteryStatService->noteStartAudio(AID_AUDIOSERVER);
+ it = mAudioRefCounts.begin();
+ for (; it != mAudioRefCounts.end(); ++it) {
+ mBatteryStatService->noteStartAudio(it->first);
}
// TODO: Notify for camera and flashlight state as well?
}
diff --git a/media/utils/ISchedulingPolicyService.cpp b/media/utils/ISchedulingPolicyService.cpp
index f5bfe20..22fbc97 100644
--- a/media/utils/ISchedulingPolicyService.cpp
+++ b/media/utils/ISchedulingPolicyService.cpp
@@ -37,13 +37,15 @@
{
}
- virtual int requestPriority(int32_t pid, int32_t tid, int32_t prio, bool asynchronous)
+ virtual int requestPriority(int32_t pid, int32_t tid,
+ int32_t prio, bool isForApp, bool asynchronous)
{
Parcel data, reply;
data.writeInterfaceToken(ISchedulingPolicyService::getInterfaceDescriptor());
data.writeInt32(pid);
data.writeInt32(tid);
data.writeInt32(prio);
+ data.writeBool(isForApp);
uint32_t flags = asynchronous ? IBinder::FLAG_ONEWAY : 0;
status_t status = remote()->transact(REQUEST_PRIORITY_TRANSACTION, data, &reply, flags);
if (status != NO_ERROR) {
diff --git a/media/utils/ISchedulingPolicyService.h b/media/utils/ISchedulingPolicyService.h
index b94b191..1015677 100644
--- a/media/utils/ISchedulingPolicyService.h
+++ b/media/utils/ISchedulingPolicyService.h
@@ -27,7 +27,7 @@
DECLARE_META_INTERFACE(SchedulingPolicyService);
virtual int requestPriority(/*pid_t*/int32_t pid, /*pid_t*/int32_t tid,
- int32_t prio, bool asynchronous) = 0;
+ int32_t prio, bool isForApp, bool asynchronous) = 0;
};
diff --git a/media/utils/SchedulingPolicyService.cpp b/media/utils/SchedulingPolicyService.cpp
index 17ee9bc..d7055ef 100644
--- a/media/utils/SchedulingPolicyService.cpp
+++ b/media/utils/SchedulingPolicyService.cpp
@@ -28,7 +28,7 @@
static const String16 _scheduling_policy("scheduling_policy");
static Mutex sMutex;
-int requestPriority(pid_t pid, pid_t tid, int32_t prio, bool asynchronous)
+int requestPriority(pid_t pid, pid_t tid, int32_t prio, bool isForApp, bool asynchronous)
{
// FIXME merge duplicated code related to service lookup, caching, and error recovery
int ret;
@@ -47,7 +47,7 @@
sSchedulingPolicyService = sps;
sMutex.unlock();
}
- ret = sps->requestPriority(pid, tid, prio, asynchronous);
+ ret = sps->requestPriority(pid, tid, prio, isForApp, asynchronous);
if (ret != DEAD_OBJECT) {
break;
}
diff --git a/media/utils/include/mediautils/BatteryNotifier.h b/media/utils/include/mediautils/BatteryNotifier.h
index 49048042..a4e42ad 100644
--- a/media/utils/include/mediautils/BatteryNotifier.h
+++ b/media/utils/include/mediautils/BatteryNotifier.h
@@ -37,17 +37,17 @@
public:
~BatteryNotifier();
- void noteStartVideo();
- void noteStopVideo();
+ void noteStartVideo(uid_t uid);
+ void noteStopVideo(uid_t uid);
void noteResetVideo();
- void noteStartAudio();
- void noteStopAudio();
+ void noteStartAudio(uid_t uid);
+ void noteStopAudio(uid_t uid);
void noteResetAudio();
- void noteFlashlightOn(const String8& id, int uid);
- void noteFlashlightOff(const String8& id, int uid);
+ void noteFlashlightOn(const String8& id, uid_t uid);
+ void noteFlashlightOff(const String8& id, uid_t uid);
void noteResetFlashlight();
- void noteStartCamera(const String8& id, int uid);
- void noteStopCamera(const String8& id, int uid);
+ void noteStartCamera(const String8& id, uid_t uid);
+ void noteStopCamera(const String8& id, uid_t uid);
void noteResetCamera();
private:
@@ -58,10 +58,10 @@
};
Mutex mLock;
- int mVideoRefCount;
- int mAudioRefCount;
- std::map<std::pair<String8, int>, bool> mFlashlightState;
- std::map<std::pair<String8, int>, bool> mCameraState;
+ std::map<uid_t, int> mVideoRefCounts;
+ std::map<uid_t, int> mAudioRefCounts;
+ std::map<std::pair<String8, uid_t>, bool> mFlashlightState;
+ std::map<std::pair<String8, uid_t>, bool> mCameraState;
sp<IBatteryStats> mBatteryStatService;
sp<DeathNotifier> mDeathNotifier;
diff --git a/media/utils/include/mediautils/SchedulingPolicyService.h b/media/utils/include/mediautils/SchedulingPolicyService.h
index a9870d4..47d8734 100644
--- a/media/utils/include/mediautils/SchedulingPolicyService.h
+++ b/media/utils/include/mediautils/SchedulingPolicyService.h
@@ -24,7 +24,7 @@
// The asynchronous parameter should be 'true' to return immediately,
// after the request is enqueued but not necessarily executed.
// The default value 'false' means to return after request has been enqueued and executed.
-int requestPriority(pid_t pid, pid_t tid, int32_t prio, bool asynchronous = false);
+int requestPriority(pid_t pid, pid_t tid, int32_t prio, bool isForApp, bool asynchronous = false);
} // namespace android
diff --git a/media/vndk/Android.bp b/media/vndk/Android.bp
new file mode 100644
index 0000000..a233d6c
--- /dev/null
+++ b/media/vndk/Android.bp
@@ -0,0 +1,4 @@
+subdirs = [
+ "*",
+]
+
diff --git a/media/vndk/xmlparser/1.0/Android.bp b/media/vndk/xmlparser/1.0/Android.bp
new file mode 100644
index 0000000..c48703c
--- /dev/null
+++ b/media/vndk/xmlparser/1.0/Android.bp
@@ -0,0 +1,37 @@
+cc_library_shared {
+
+ name: "libstagefright_xmlparser@1.0",
+
+ srcs: [
+ "MediaCodecsXmlParser.cpp",
+ ],
+
+ include_dirs: [
+ "frameworks/av/media/libstagefright",
+ "frameworks/av/include",
+ ],
+
+ shared_libs: [
+ "libexpat",
+ "libutils",
+ "liblog",
+ "libcutils",
+ "libstagefright_foundation",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+
+ clang: true,
+
+ sanitize: {
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ },
+
+}
+
diff --git a/media/vndk/xmlparser/1.0/MediaCodecsXmlParser.cpp b/media/vndk/xmlparser/1.0/MediaCodecsXmlParser.cpp
new file mode 100644
index 0000000..84e5514
--- /dev/null
+++ b/media/vndk/xmlparser/1.0/MediaCodecsXmlParser.cpp
@@ -0,0 +1,862 @@
+/*
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaCodecsXmlParser"
+#include <utils/Log.h>
+
+#include <media/vndk/xmlparser/1.0/MediaCodecsXmlParser.h>
+
+#include <media/MediaCodecInfo.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/MediaErrors.h>
+
+#include <sys/stat.h>
+
+#include <expat.h>
+#include <string>
+
+#define MEDIA_CODECS_CONFIG_FILE_PATH_MAX_LENGTH 256
+
+namespace android {
+
+namespace { // Local variables and functions
+
+const char *kProfilingResults =
+ "/data/misc/media/media_codecs_profiling_results.xml";
+
+// Treblized media codec list will be located in /odm/etc or /vendor/etc.
+const char *kConfigLocationList[] =
+ {"/odm/etc", "/vendor/etc", "/etc"};
+constexpr int kConfigLocationListSize =
+ (sizeof(kConfigLocationList) / sizeof(kConfigLocationList[0]));
+
+// Searches kConfigLocationList in order ("/odm/etc", "/vendor/etc", "/etc")
+// for a regular file named |file_name|. On success stores the first matching
+// full path in |*out_path| and returns true. Returns false when no location
+// has the file; note |*out_path| is still clobbered in that case (it holds
+// the last path probed).
+bool findMediaCodecListFileFullPath(
+ const char *file_name, std::string *out_path) {
+ for (int i = 0; i < kConfigLocationListSize; i++) {
+ *out_path = std::string(kConfigLocationList[i]) + "/" + file_name;
+ struct stat file_stat;
+ // Only accept regular files — directories/symlink targets of other
+ // kinds are rejected by S_ISREG.
+ if (stat(out_path->c_str(), &file_stat) == 0 &&
+ S_ISREG(file_stat.st_mode)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// Find the TypeInfo entry in |codecInfo| whose mName equals |typeName|.
+// Returns codecInfo.mTypes.end() when no such type exists.
+std::vector<TypeInfo>::iterator findTypeInfo(
+ CodecInfo &codecInfo, const AString &typeName) {
+ return std::find_if(
+ codecInfo.mTypes.begin(), codecInfo.mTypes.end(),
+ // Capture by const reference: the string is only read by the
+ // predicate, so there is no need to copy the AString into the
+ // closure on every call.
+ [&typeName](const auto &typeInfo) {
+ return typeInfo.mName == typeName;
+ });
+}
+
+// Convert a string into a boolean value.
+// "true"/"yes"/"y" (case-insensitive) are true; otherwise the string is
+// true iff it is a non-empty, fully-numeric string with a nonzero value.
+// NOTE: strtoul() accepts a leading '-' (the result wraps to a large
+// unsigned value), so a string like "-1" also parses as true here.
+bool ParseBoolean(const char *s) {
+ if (!strcasecmp(s, "true") || !strcasecmp(s, "yes") || !strcasecmp(s, "y")) {
+ return true;
+ }
+ char *end;
+ unsigned long res = strtoul(s, &end, 10);
+ // *s != '\0' rejects the empty string; *end == '\0' requires the whole
+ // string to have been consumed as a number.
+ return *s != '\0' && *end == '\0' && res > 0;
+}
+
+} // unnamed namespace
+
+// Parses, in order: the mandatory media_codecs.xml, the optional
+// media_codecs_performance.xml, and the optional on-device profiling
+// results. Only the first file is required — parse errors in the two
+// optional files are ignored (see the ignore_errors argument).
+MediaCodecsXmlParser::MediaCodecsXmlParser() :
+ mInitCheck(NO_INIT),
+ mUpdate(false) {
+ std::string config_file_path;
+ if (findMediaCodecListFileFullPath(
+ "media_codecs.xml", &config_file_path)) {
+ parseTopLevelXMLFile(config_file_path.c_str(), false);
+ } else {
+ mInitCheck = NAME_NOT_FOUND;
+ }
+ if (findMediaCodecListFileFullPath(
+ "media_codecs_performance.xml", &config_file_path)) {
+ parseTopLevelXMLFile(config_file_path.c_str(), true);
+ }
+ // Profiling results live at a fixed path; errors are best-effort too.
+ parseTopLevelXMLFile(kProfilingResults, true);
+}
+
+// Parses one top-level codec-list XML file.
+// |codecs_xml| is the full path to the file; |ignore_errors| makes parse
+// failures non-fatal (mInitCheck is reset to OK and previously parsed data
+// is kept). On a fatal failure all accumulated codec info is discarded.
+// Side effect: mHrefBase is set to the file's directory (with trailing
+// '/') so that <Include href="..."> resolves relative to it.
+void MediaCodecsXmlParser::parseTopLevelXMLFile(
+ const char *codecs_xml, bool ignore_errors) {
+ // get href_base
+ const char *href_base_end = strrchr(codecs_xml, '/');
+ if (href_base_end != NULL) {
+ mHrefBase = AString(codecs_xml, href_base_end - codecs_xml + 1);
+ }
+
+ // Note: this clears any error recorded by a previous top-level parse,
+ // so each file starts from a clean state.
+ mInitCheck = OK; // keeping this here for safety
+ mCurrentSection = SECTION_TOPLEVEL;
+ mDepth = 0;
+
+ parseXMLFile(codecs_xml);
+
+ if (mInitCheck != OK) {
+ if (ignore_errors) {
+ mInitCheck = OK;
+ return;
+ }
+ mCodecInfos.clear();
+ return;
+ }
+}
+
+MediaCodecsXmlParser::~MediaCodecsXmlParser() {
+}
+
+// Returns OK when at least the mandatory media_codecs.xml parsed cleanly,
+// or the first error recorded during construction.
+status_t MediaCodecsXmlParser::initCheck() const {
+ return mInitCheck;
+}
+
+// Streams |path| through an expat parser in BUFF_SIZE chunks. The element
+// handlers update this object's state; any error they record in mInitCheck
+// stops the loop. Errors are reported via mInitCheck, not a return value.
+void MediaCodecsXmlParser::parseXMLFile(const char *path) {
+ FILE *file = fopen(path, "r");
+
+ if (file == NULL) {
+ ALOGW("unable to open media codecs configuration xml file: %s", path);
+ mInitCheck = NAME_NOT_FOUND;
+ return;
+ }
+
+ ALOGV("Start parsing %s", path);
+ XML_Parser parser = ::XML_ParserCreate(NULL);
+ CHECK(parser != NULL);
+
+ ::XML_SetUserData(parser, this);
+ ::XML_SetElementHandler(
+ parser, StartElementHandlerWrapper, EndElementHandlerWrapper);
+
+ const size_t BUFF_SIZE = 512;
+ while (mInitCheck == OK) {
+ void *buff = ::XML_GetBuffer(parser, BUFF_SIZE);
+ if (buff == NULL) {
+ ALOGE("failed in call to XML_GetBuffer()");
+ mInitCheck = UNKNOWN_ERROR;
+ break;
+ }
+
+ // fread() returns a size_t and never a negative value, so the old
+ // "bytes_read < 0" test could never fire; a short read must be
+ // distinguished from a read error with ferror().
+ size_t bytes_read = ::fread(buff, 1, BUFF_SIZE, file);
+ if (bytes_read < BUFF_SIZE && ::ferror(file)) {
+ ALOGE("failed in call to read");
+ mInitCheck = ERROR_IO;
+ break;
+ }
+
+ // A zero-length read signals EOF; pass isFinal accordingly.
+ XML_Status status = ::XML_ParseBuffer(
+ parser, (int)bytes_read, bytes_read == 0);
+ if (status != XML_STATUS_OK) {
+ ALOGE("malformed (%s)", ::XML_ErrorString(::XML_GetErrorCode(parser)));
+ mInitCheck = ERROR_MALFORMED;
+ break;
+ }
+
+ if (bytes_read == 0) {
+ break;
+ }
+ }
+
+ ::XML_ParserFree(parser);
+
+ fclose(file);
+ file = NULL;
+}
+
+// Trampolines registered with expat via XML_SetElementHandler. |me| is the
+// 'this' pointer installed with XML_SetUserData in parseXMLFile().
+// static
+void MediaCodecsXmlParser::StartElementHandlerWrapper(
+ void *me, const char *name, const char **attrs) {
+ static_cast<MediaCodecsXmlParser *>(me)->startElementHandler(name, attrs);
+}
+
+// static
+void MediaCodecsXmlParser::EndElementHandlerWrapper(void *me, const char *name) {
+ static_cast<MediaCodecsXmlParser *>(me)->endElementHandler(name);
+}
+
+// Handles an <Include href="..."> element: validates the target file name
+// and recursively parses it. |attrs| is the expat attribute array
+// (name/value pairs, NULL-terminated). Returns -EINVAL on a malformed
+// element or include name; otherwise returns mInitCheck as left by the
+// nested parse.
+status_t MediaCodecsXmlParser::includeXMLFile(const char **attrs) {
+ const char *href = NULL;
+ size_t i = 0;
+ while (attrs[i] != NULL) {
+ if (!strcmp(attrs[i], "href")) {
+ if (attrs[i + 1] == NULL) {
+ return -EINVAL;
+ }
+ href = attrs[i + 1];
+ ++i;
+ } else {
+ ALOGE("includeXMLFile: unrecognized attribute: %s", attrs[i]);
+ return -EINVAL;
+ }
+ ++i;
+ }
+
+ if (href == NULL) {
+ // <Include> with no href attribute. Without this guard the
+ // validation loop below would dereference a NULL pointer.
+ ALOGE("includeXMLFile: no 'href' attribute");
+ return -EINVAL;
+ }
+
+ // For security reasons and for simplicity, file names can only contain
+ // [a-zA-Z0-9_.] and must start with media_codecs_ and end with .xml
+ for (i = 0; href[i] != '\0'; i++) {
+ if (href[i] == '.' || href[i] == '_' ||
+ (href[i] >= '0' && href[i] <= '9') ||
+ (href[i] >= 'A' && href[i] <= 'Z') ||
+ (href[i] >= 'a' && href[i] <= 'z')) {
+ continue;
+ }
+ ALOGE("invalid include file name: %s", href);
+ return -EINVAL;
+ }
+
+ AString filename = href;
+ if (!filename.startsWith("media_codecs_") ||
+ !filename.endsWith(".xml")) {
+ ALOGE("invalid include file name: %s", href);
+ return -EINVAL;
+ }
+ // Resolve relative to the directory of the including file.
+ filename.insert(mHrefBase, 0);
+
+ parseXMLFile(filename.c_str());
+ return mInitCheck;
+}
+
+// expat start-of-element callback. Implements the section state machine:
+// mCurrentSection selects how each tag is interpreted, and the matching
+// add*FromAttributes() handler records its attributes. Any error is stored
+// in mInitCheck, which short-circuits all further callbacks.
+void MediaCodecsXmlParser::startElementHandler(
+ const char *name, const char **attrs) {
+ if (mInitCheck != OK) {
+ return;
+ }
+
+ // Set to false below when we reach the DECODER/ENCODER cases, so the
+ // TYPE cases (reached by fall-through) know whether the Limit/Feature
+ // tag appeared directly inside a MediaCodec rather than inside a Type.
+ bool inType = true;
+
+ // <Include> is legal in any section; the current section is pushed so
+ // endElementHandler can restore it when the include closes.
+ if (!strcmp(name, "Include")) {
+ mInitCheck = includeXMLFile(attrs);
+ if (mInitCheck == OK) {
+ mPastSections.push(mCurrentSection);
+ mCurrentSection = SECTION_INCLUDE;
+ }
+ ++mDepth;
+ return;
+ }
+
+ switch (mCurrentSection) {
+ case SECTION_TOPLEVEL:
+ {
+ if (!strcmp(name, "Decoders")) {
+ mCurrentSection = SECTION_DECODERS;
+ } else if (!strcmp(name, "Encoders")) {
+ mCurrentSection = SECTION_ENCODERS;
+ } else if (!strcmp(name, "Settings")) {
+ mCurrentSection = SECTION_SETTINGS;
+ }
+ break;
+ }
+
+ case SECTION_SETTINGS:
+ {
+ if (!strcmp(name, "Setting")) {
+ mInitCheck = addSettingFromAttributes(attrs);
+ }
+ break;
+ }
+
+ case SECTION_DECODERS:
+ {
+ if (!strcmp(name, "MediaCodec")) {
+ mInitCheck =
+ addMediaCodecFromAttributes(false /* encoder */, attrs);
+
+ mCurrentSection = SECTION_DECODER;
+ }
+ break;
+ }
+
+ case SECTION_ENCODERS:
+ {
+ if (!strcmp(name, "MediaCodec")) {
+ mInitCheck =
+ addMediaCodecFromAttributes(true /* encoder */, attrs);
+
+ mCurrentSection = SECTION_ENCODER;
+ }
+ break;
+ }
+
+ case SECTION_DECODER:
+ case SECTION_ENCODER:
+ {
+ if (!strcmp(name, "Quirk")) {
+ mInitCheck = addQuirk(attrs);
+ } else if (!strcmp(name, "Type")) {
+ mInitCheck = addTypeFromAttributes(attrs, (mCurrentSection == SECTION_ENCODER));
+ mCurrentSection =
+ (mCurrentSection == SECTION_DECODER
+ ? SECTION_DECODER_TYPE : SECTION_ENCODER_TYPE);
+ }
+ }
+ inType = false;
+ // fall through
+
+ case SECTION_DECODER_TYPE:
+ case SECTION_ENCODER_TYPE:
+ {
+ // ignore limits and features specified outside of type
+ bool outside = !inType && mCurrentType == mCodecInfos[mCurrentName].mTypes.end();
+ if (outside && (!strcmp(name, "Limit") || !strcmp(name, "Feature"))) {
+ ALOGW("ignoring %s specified outside of a Type", name);
+ } else if (!strcmp(name, "Limit")) {
+ mInitCheck = addLimit(attrs);
+ } else if (!strcmp(name, "Feature")) {
+ mInitCheck = addFeature(attrs);
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ ++mDepth;
+}
+
+// expat end-of-element callback: pops the section state machine back to the
+// enclosing section when the tag that opened the current section closes.
+// Mirrors the transitions made in startElementHandler().
+void MediaCodecsXmlParser::endElementHandler(const char *name) {
+ if (mInitCheck != OK) {
+ return;
+ }
+
+ switch (mCurrentSection) {
+ case SECTION_SETTINGS:
+ {
+ if (!strcmp(name, "Settings")) {
+ mCurrentSection = SECTION_TOPLEVEL;
+ }
+ break;
+ }
+
+ case SECTION_DECODERS:
+ {
+ if (!strcmp(name, "Decoders")) {
+ mCurrentSection = SECTION_TOPLEVEL;
+ }
+ break;
+ }
+
+ case SECTION_ENCODERS:
+ {
+ if (!strcmp(name, "Encoders")) {
+ mCurrentSection = SECTION_TOPLEVEL;
+ }
+ break;
+ }
+
+ case SECTION_DECODER_TYPE:
+ case SECTION_ENCODER_TYPE:
+ {
+ if (!strcmp(name, "Type")) {
+ mCurrentSection =
+ (mCurrentSection == SECTION_DECODER_TYPE
+ ? SECTION_DECODER : SECTION_ENCODER);
+
+ // Leaving a <Type>: there is no current type any more;
+ // mTypes.end() is the "no current type" sentinel.
+ mCurrentType = mCodecInfos[mCurrentName].mTypes.end();
+ }
+ break;
+ }
+
+ case SECTION_DECODER:
+ {
+ if (!strcmp(name, "MediaCodec")) {
+ mCurrentSection = SECTION_DECODERS;
+ mCurrentName.clear();
+ }
+ break;
+ }
+
+ case SECTION_ENCODER:
+ {
+ if (!strcmp(name, "MediaCodec")) {
+ mCurrentSection = SECTION_ENCODERS;
+ mCurrentName.clear();
+ }
+ break;
+ }
+
+ case SECTION_INCLUDE:
+ {
+ // Restore the section that was active when the <Include> opened.
+ if (!strcmp(name, "Include") && mPastSections.size() > 0) {
+ mCurrentSection = mPastSections.top();
+ mPastSections.pop();
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ --mDepth;
+}
+
+// Handles a <Setting name=... value=... [update=...]> element inside
+// <Settings>, storing the pair in mGlobalSettings. The "update" attribute
+// must match whether the setting already exists: update="true" may only
+// modify an existing setting, and its absence may only create a new one.
+// Returns -EINVAL on any malformed or inconsistent element.
+status_t MediaCodecsXmlParser::addSettingFromAttributes(const char **attrs) {
+ const char *name = NULL;
+ const char *value = NULL;
+ const char *update = NULL;
+
+ size_t i = 0;
+ while (attrs[i] != NULL) {
+ if (!strcmp(attrs[i], "name")) {
+ if (attrs[i + 1] == NULL) {
+ ALOGE("addSettingFromAttributes: name is null");
+ return -EINVAL;
+ }
+ name = attrs[i + 1];
+ ++i;
+ } else if (!strcmp(attrs[i], "value")) {
+ if (attrs[i + 1] == NULL) {
+ ALOGE("addSettingFromAttributes: value is null");
+ return -EINVAL;
+ }
+ value = attrs[i + 1];
+ ++i;
+ } else if (!strcmp(attrs[i], "update")) {
+ if (attrs[i + 1] == NULL) {
+ ALOGE("addSettingFromAttributes: update is null");
+ return -EINVAL;
+ }
+ update = attrs[i + 1];
+ ++i;
+ } else {
+ ALOGE("addSettingFromAttributes: unrecognized attribute: %s", attrs[i]);
+ return -EINVAL;
+ }
+
+ ++i;
+ }
+
+ if (name == NULL || value == NULL) {
+ ALOGE("addSettingFromAttributes: name or value unspecified");
+ return -EINVAL;
+ }
+
+ // NOTE: this check also rejects re-defining an existing setting without
+ // update="true", even though the log message only mentions one direction.
+ mUpdate = (update != NULL) && ParseBoolean(update);
+ if (mUpdate != (mGlobalSettings.count(name) > 0)) {
+ ALOGE("addSettingFromAttributes: updating non-existing setting");
+ return -EINVAL;
+ }
+ mGlobalSettings[name] = value;
+
+ return OK;
+}
+
+// Handles a <MediaCodec name=... [type=...] [update=...]> element.
+// |encoder| tells whether we are inside <Encoders> or <Decoders>.
+// With update="true" the named codec (and type, if given) must already
+// exist and becomes current; otherwise a fresh CodecInfo entry is created.
+// Returns -EINVAL on malformed or inconsistent input.
+status_t MediaCodecsXmlParser::addMediaCodecFromAttributes(
+ bool encoder, const char **attrs) {
+ const char *name = NULL;
+ const char *type = NULL;
+ const char *update = NULL;
+
+ size_t i = 0;
+ while (attrs[i] != NULL) {
+ if (!strcmp(attrs[i], "name")) {
+ if (attrs[i + 1] == NULL) {
+ ALOGE("addMediaCodecFromAttributes: name is null");
+ return -EINVAL;
+ }
+ name = attrs[i + 1];
+ ++i;
+ } else if (!strcmp(attrs[i], "type")) {
+ if (attrs[i + 1] == NULL) {
+ ALOGE("addMediaCodecFromAttributes: type is null");
+ return -EINVAL;
+ }
+ type = attrs[i + 1];
+ ++i;
+ } else if (!strcmp(attrs[i], "update")) {
+ if (attrs[i + 1] == NULL) {
+ ALOGE("addMediaCodecFromAttributes: update is null");
+ return -EINVAL;
+ }
+ update = attrs[i + 1];
+ ++i;
+ } else {
+ ALOGE("addMediaCodecFromAttributes: unrecognized attribute: %s", attrs[i]);
+ return -EINVAL;
+ }
+
+ ++i;
+ }
+
+ if (name == NULL) {
+ ALOGE("addMediaCodecFromAttributes: name not found");
+ return -EINVAL;
+ }
+
+ mUpdate = (update != NULL) && ParseBoolean(update);
+ if (mUpdate != (mCodecInfos.count(name) > 0)) {
+ ALOGE("addMediaCodecFromAttributes: updating non-existing codec or vice versa");
+ return -EINVAL;
+ }
+
+ CodecInfo *info = &mCodecInfos[name];
+ if (mUpdate) {
+ // existing codec
+ mCurrentName = name;
+ mCurrentType = info->mTypes.begin();
+ if (type != NULL) {
+ // existing type
+ mCurrentType = findTypeInfo(*info, type);
+ if (mCurrentType == info->mTypes.end()) {
+ ALOGE("addMediaCodecFromAttributes: updating non-existing type");
+ return -EINVAL;
+ }
+ }
+ } else {
+ // new codec
+ mCurrentName = name;
+ mQuirks[name].clear();
+ info->mTypes.clear();
+ if (type != NULL) {
+ info->mTypes.emplace_back();
+ mCurrentType = --info->mTypes.end();
+ mCurrentType->mName = type;
+ } else {
+ // The "type" attribute is optional. Previously a NULL |type| was
+ // assigned to the new entry's AString name, which crashes; leave
+ // the type list empty instead. addLimit()/addFeature() already
+ // treat mTypes.end() as "no current type".
+ mCurrentType = info->mTypes.end();
+ }
+ info->mIsEncoder = encoder;
+ }
+
+ return OK;
+}
+
+// Handles a <Quirk name=...> element inside a <MediaCodec>, appending the
+// quirk name to the current codec's quirk list. The only legal attribute
+// is "name"; anything else (or a missing name) yields -EINVAL.
+status_t MediaCodecsXmlParser::addQuirk(const char **attrs) {
+ const char *quirkName = NULL;
+
+ // attrs is a NULL-terminated list of name/value pairs.
+ for (size_t i = 0; attrs[i] != NULL; i += 2) {
+ if (strcmp(attrs[i], "name") != 0) {
+ ALOGE("addQuirk: unrecognized attribute: %s", attrs[i]);
+ return -EINVAL;
+ }
+ if (attrs[i + 1] == NULL) {
+ ALOGE("addQuirk: name is null");
+ return -EINVAL;
+ }
+ quirkName = attrs[i + 1];
+ }
+
+ if (quirkName == NULL) {
+ ALOGE("addQuirk: name not found");
+ return -EINVAL;
+ }
+
+ mQuirks[mCurrentName].emplace_back(quirkName);
+ return OK;
+}
+
+// Handles a <Type name=... [update=...]> element inside a <MediaCodec>.
+// Without mUpdate, the type must be new and is appended to the codec's
+// type list; with mUpdate it must already exist and becomes current.
+// |encoder| re-stamps the codec's encoder/decoder flag. Returns -EINVAL
+// on malformed or inconsistent input.
+status_t MediaCodecsXmlParser::addTypeFromAttributes(const char **attrs, bool encoder) {
+ const char *name = NULL;
+ const char *update = NULL;
+
+ size_t i = 0;
+ while (attrs[i] != NULL) {
+ if (!strcmp(attrs[i], "name")) {
+ if (attrs[i + 1] == NULL) {
+ ALOGE("addTypeFromAttributes: name is null");
+ return -EINVAL;
+ }
+ name = attrs[i + 1];
+ ++i;
+ } else if (!strcmp(attrs[i], "update")) {
+ if (attrs[i + 1] == NULL) {
+ ALOGE("addTypeFromAttributes: update is null");
+ return -EINVAL;
+ }
+ update = attrs[i + 1];
+ ++i;
+ } else {
+ ALOGE("addTypeFromAttributes: unrecognized attribute: %s", attrs[i]);
+ return -EINVAL;
+ }
+
+ ++i;
+ }
+
+ if (name == NULL) {
+ return -EINVAL;
+ }
+
+ CodecInfo *info = &mCodecInfos[mCurrentName];
+ info->mIsEncoder = encoder;
+ mCurrentType = findTypeInfo(*info, name);
+ if (!mUpdate) {
+ if (mCurrentType != info->mTypes.end()) {
+ ALOGE("addTypeFromAttributes: re-defining existing type without update");
+ return -EINVAL;
+ }
+ // NOTE: the new entry's mName is not assigned here; the parsed
+ // |name| only served to check for duplicates — TODO confirm this
+ // is intended, as findTypeInfo() will not match this entry later.
+ info->mTypes.emplace_back();
+ mCurrentType = --info->mTypes.end();
+ } else if (mCurrentType == info->mTypes.end()) {
+ ALOGE("addTypeFromAttributes: updating non-existing type");
+ return -EINVAL;
+ }
+
+ return OK;
+}
+
+// Error helpers for addLimit(): each logs a specific class of problem with
+// a <Limit> element and returns -EINVAL so the caller can propagate it.
+
+// Logs that the limit has (found=true) or lacks (found=false) an attribute
+// it must not / must have.
+static status_t limitFoundMissingAttr(const AString &name, const char *attr, bool found = true) {
+ ALOGE("limit '%s' with %s'%s' attribute", name.c_str(),
+ (found ? "" : "no "), attr);
+ return -EINVAL;
+}
+
+// Logs a free-form problem description for the limit.
+static status_t limitError(const AString &name, const char *msg) {
+ ALOGE("limit '%s' %s", name.c_str(), msg);
+ return -EINVAL;
+}
+
+// Logs that an attribute of the limit carries an invalid value.
+static status_t limitInvalidAttr(const AString &name, const char *attr, const AString &value) {
+ ALOGE("limit '%s' with invalid '%s' attribute (%s)", name.c_str(),
+ attr, value.c_str());
+ return -EINVAL;
+}
+
+// Handles a <Limit> element inside a <Type>. Collects the recognized
+// attributes into an AMessage, validates the combination of attributes
+// allowed for each limit name, and stores normalized entries ("<name>-range",
+// "max-<name>", etc.) into the current type's mDetails map. A Limit seen
+// while there is no current type is ignored with a warning.
+status_t MediaCodecsXmlParser::addLimit(const char **attrs) {
+ sp<AMessage> msg = new AMessage();
+
+ size_t i = 0;
+ while (attrs[i] != NULL) {
+ if (attrs[i + 1] == NULL) {
+ ALOGE("addLimit: limit is not given");
+ return -EINVAL;
+ }
+
+ // attributes with values
+ if (!strcmp(attrs[i], "name")
+ || !strcmp(attrs[i], "default")
+ || !strcmp(attrs[i], "in")
+ || !strcmp(attrs[i], "max")
+ || !strcmp(attrs[i], "min")
+ || !strcmp(attrs[i], "range")
+ || !strcmp(attrs[i], "ranges")
+ || !strcmp(attrs[i], "scale")
+ || !strcmp(attrs[i], "value")) {
+ msg->setString(attrs[i], attrs[i + 1]);
+ ++i;
+ } else {
+ ALOGE("addLimit: unrecognized limit: %s", attrs[i]);
+ return -EINVAL;
+ }
+ ++i;
+ }
+
+ AString name;
+ if (!msg->findString("name", &name)) {
+ ALOGE("limit with no 'name' attribute");
+ return -EINVAL;
+ }
+
+ // size, blocks, bitrate, frame-rate, blocks-per-second, aspect-ratio,
+ // measured-frame-rate, measured-blocks-per-second: range
+ // quality: range + default + [scale]
+ // complexity: range + default
+ bool found;
+ if (mCurrentType == mCodecInfos[mCurrentName].mTypes.end()) {
+ ALOGW("ignoring null type");
+ return OK;
+ }
+
+ if (name == "aspect-ratio" || name == "bitrate" || name == "block-count"
+ || name == "blocks-per-second" || name == "complexity"
+ || name == "frame-rate" || name == "quality" || name == "size"
+ || name == "measured-blocks-per-second" || name.startsWith("measured-frame-rate-")) {
+ // Range-style limits: accept min+max, a single value, or an
+ // explicit range — but only one of those forms at a time.
+ AString min, max;
+ if (msg->findString("min", &min) && msg->findString("max", &max)) {
+ min.append("-");
+ min.append(max);
+ if (msg->contains("range") || msg->contains("value")) {
+ return limitError(name, "has 'min' and 'max' as well as 'range' or "
+ "'value' attributes");
+ }
+ msg->setString("range", min);
+ } else if (msg->contains("min") || msg->contains("max")) {
+ return limitError(name, "has only 'min' or 'max' attribute");
+ } else if (msg->findString("value", &max)) {
+ // A single value becomes the degenerate range "value-value".
+ min = max;
+ min.append("-");
+ min.append(max);
+ if (msg->contains("range")) {
+ return limitError(name, "has both 'range' and 'value' attributes");
+ }
+ msg->setString("range", min);
+ }
+
+ AString range, scale = "linear", def, in_;
+ if (!msg->findString("range", &range)) {
+ return limitError(name, "with no 'range', 'value' or 'min'/'max' attributes");
+ }
+
+ // XOR checks: these attributes are mandatory for exactly the limit
+ // names listed and forbidden for every other name.
+ if ((name == "quality" || name == "complexity") ^
+ (found = msg->findString("default", &def))) {
+ return limitFoundMissingAttr(name, "default", found);
+ }
+ if (name != "quality" && msg->findString("scale", &scale)) {
+ return limitFoundMissingAttr(name, "scale");
+ }
+ if ((name == "aspect-ratio") ^ (found = msg->findString("in", &in_))) {
+ return limitFoundMissingAttr(name, "in", found);
+ }
+
+ if (name == "aspect-ratio") {
+ if (!(in_ == "pixels") && !(in_ == "blocks")) {
+ return limitInvalidAttr(name, "in", in_);
+ }
+ in_.erase(5, 1); // (pixel|block)-aspect-ratio
+ in_.append("-");
+ in_.append(name);
+ name = in_;
+ }
+ if (name == "quality") {
+ mCurrentType->mDetails["quality-scale"] = scale;
+ }
+ if (name == "quality" || name == "complexity") {
+ AString tag = name;
+ tag.append("-default");
+ mCurrentType->mDetails[tag] = def;
+ }
+ AString tag = name;
+ tag.append("-range");
+ mCurrentType->mDetails[tag] = range;
+ } else {
+ // Scalar-style limits: exactly one of max / value / ranges is
+ // required depending on the limit name; everything else is illegal.
+ AString max, value, ranges;
+ if (msg->contains("default")) {
+ return limitFoundMissingAttr(name, "default");
+ } else if (msg->contains("in")) {
+ return limitFoundMissingAttr(name, "in");
+ } else if ((name == "channel-count" || name == "concurrent-instances") ^
+ (found = msg->findString("max", &max))) {
+ return limitFoundMissingAttr(name, "max", found);
+ } else if (msg->contains("min")) {
+ return limitFoundMissingAttr(name, "min");
+ } else if (msg->contains("range")) {
+ return limitFoundMissingAttr(name, "range");
+ } else if ((name == "sample-rate") ^
+ (found = msg->findString("ranges", &ranges))) {
+ return limitFoundMissingAttr(name, "ranges", found);
+ } else if (msg->contains("scale")) {
+ return limitFoundMissingAttr(name, "scale");
+ } else if ((name == "alignment" || name == "block-size") ^
+ (found = msg->findString("value", &value))) {
+ return limitFoundMissingAttr(name, "value", found);
+ }
+
+ if (max.size()) {
+ AString tag = "max-";
+ tag.append(name);
+ mCurrentType->mDetails[tag] = max;
+ } else if (value.size()) {
+ mCurrentType->mDetails[name] = value;
+ } else if (ranges.size()) {
+ AString tag = name;
+ tag.append("-ranges");
+ mCurrentType->mDetails[tag] = ranges;
+ } else {
+ ALOGW("Ignoring unrecognized limit '%s'", name.c_str());
+ }
+ }
+
+ return OK;
+}
+
+// Handles a <Feature> element inside a <Type>. A feature either carries a
+// string "value", or is a boolean derived from its "optional"/"required"
+// attributes (which must not contradict each other). A Feature seen while
+// there is no current type is ignored with a warning. Returns -EINVAL on
+// malformed input.
+status_t MediaCodecsXmlParser::addFeature(const char **attrs) {
+ size_t i = 0;
+ const char *name = NULL;
+ int32_t optional = -1; // -1 = attribute not present
+ int32_t required = -1; // -1 = attribute not present
+ const char *value = NULL;
+
+ while (attrs[i] != NULL) {
+ if (attrs[i + 1] == NULL) {
+ ALOGE("addFeature: feature is not given");
+ return -EINVAL;
+ }
+
+ // attributes with values
+ if (!strcmp(attrs[i], "name")) {
+ name = attrs[i + 1];
+ ++i;
+ } else if (!strcmp(attrs[i], "optional") || !strcmp(attrs[i], "required")) {
+ // Renamed from 'value' to avoid shadowing the outer string
+ // 'value' variable above.
+ int boolValue = (int)ParseBoolean(attrs[i + 1]);
+ if (!strcmp(attrs[i], "optional")) {
+ optional = boolValue;
+ } else {
+ required = boolValue;
+ }
+ ++i;
+ } else if (!strcmp(attrs[i], "value")) {
+ value = attrs[i + 1];
+ ++i;
+ } else {
+ ALOGE("addFeature: unrecognized attribute: %s", attrs[i]);
+ return -EINVAL;
+ }
+ ++i;
+ }
+ if (name == NULL) {
+ ALOGE("feature with no 'name' attribute");
+ return -EINVAL;
+ }
+
+ // optional == required is only contradictory when both were specified.
+ if (optional == required && optional != -1) {
+ ALOGE("feature '%s' is both/neither optional and required", name);
+ return -EINVAL;
+ }
+
+ if (mCurrentType == mCodecInfos[mCurrentName].mTypes.end()) {
+ ALOGW("ignoring null type");
+ return OK;
+ }
+ if (value != NULL) {
+ mCurrentType->mStringFeatures[name] = value;
+ } else {
+ mCurrentType->mBoolFeatures[name] = (required == 1) || (optional == 0);
+ }
+ return OK;
+}
+
+// Copies all parsed global <Settings> entries into |*settings|, replacing
+// its previous contents.
+void MediaCodecsXmlParser::getGlobalSettings(
+ std::map<AString, AString> *settings) const {
+ settings->clear();
+ settings->insert(mGlobalSettings.begin(), mGlobalSettings.end());
+}
+
+// Copies the CodecInfo for codec |name| into |*info|.
+// Returns NAME_NOT_FOUND when no such codec was parsed.
+status_t MediaCodecsXmlParser::getCodecInfo(const char *name, CodecInfo *info) const {
+ if (mCodecInfos.count(name) == 0) {
+ ALOGE("Codec not found with name '%s'", name);
+ return NAME_NOT_FOUND;
+ }
+ *info = mCodecInfos.at(name);
+ return OK;
+}
+
+// Copies the quirk list for codec |name| into |*quirks| (replacing its
+// contents). Returns NAME_NOT_FOUND when no such codec was parsed.
+status_t MediaCodecsXmlParser::getQuirks(const char *name, std::vector<AString> *quirks) const {
+ if (mQuirks.count(name) == 0) {
+ ALOGE("Codec not found with name '%s'", name);
+ return NAME_NOT_FOUND;
+ }
+ quirks->clear();
+ quirks->insert(quirks->end(), mQuirks.at(name).begin(), mQuirks.at(name).end());
+ return OK;
+}
+
+} // namespace android
diff --git a/media/vndk/xmlparser/Android.bp b/media/vndk/xmlparser/Android.bp
new file mode 100644
index 0000000..a233d6c
--- /dev/null
+++ b/media/vndk/xmlparser/Android.bp
@@ -0,0 +1,4 @@
+subdirs = [
+ "*",
+]
+
diff --git a/radio/IRadio.cpp b/radio/IRadio.cpp
index e6dbdc3..72f3b68 100644
--- a/radio/IRadio.cpp
+++ b/radio/IRadio.cpp
@@ -113,7 +113,7 @@
if (status == NO_ERROR) {
status = (status_t)reply.readInt32();
if (status == NO_ERROR) {
- int muteread = reply.readInt32();
+ int32_t muteread = reply.readInt32();
*mute = muteread != 0;
}
}
@@ -146,12 +146,12 @@
return status;
}
- virtual status_t tune(unsigned int channel, unsigned int subChannel)
+ virtual status_t tune(uint32_t channel, uint32_t subChannel)
{
Parcel data, reply;
data.writeInterfaceToken(IRadio::getInterfaceDescriptor());
- data.writeInt32(channel);
- data.writeInt32(subChannel);
+ data.writeUint32(channel);
+ data.writeUint32(subChannel);
status_t status = remote()->transact(TUNE, data, &reply);
if (status == NO_ERROR) {
status = (status_t)reply.readInt32();
@@ -173,7 +173,7 @@
virtual status_t getProgramInformation(struct radio_program_info *info)
{
Parcel data, reply;
- if (info == NULL) {
+ if (info == nullptr || info->metadata == nullptr) {
return BAD_VALUE;
}
radio_metadata_t *metadata = info->metadata;
@@ -183,22 +183,19 @@
status = (status_t)reply.readInt32();
if (status == NO_ERROR) {
reply.read(info, sizeof(struct radio_program_info));
+ // restore local metadata pointer
info->metadata = metadata;
- if (metadata == NULL) {
- return status;
+
+ uint32_t metadataSize = reply.readUint32();
+ if (metadataSize != 0) {
+ radio_metadata_t *newMetadata = (radio_metadata_t *)malloc(metadataSize);
+ if (newMetadata == NULL) {
+ return NO_MEMORY;
+ }
+ reply.read(newMetadata, metadataSize);
+ status = radio_metadata_add_metadata(&info->metadata, newMetadata);
+ free(newMetadata);
}
- size_t size = (size_t)reply.readInt32();
- if (size == 0) {
- return status;
- }
- metadata =
- (radio_metadata_t *)calloc(size / sizeof(unsigned int), sizeof(unsigned int));
- if (metadata == NULL) {
- return NO_MEMORY;
- }
- reply.read(metadata, size);
- status = radio_metadata_add_metadata(&info->metadata, metadata);
- free(metadata);
}
}
return status;
@@ -289,8 +286,8 @@
}
case TUNE: {
CHECK_INTERFACE(IRadio, data, reply);
- unsigned int channel = (unsigned int)data.readInt32();
- unsigned int subChannel = (unsigned int)data.readInt32();
+ uint32_t channel = data.readUint32();
+ uint32_t subChannel = data.readUint32();
status_t status = tune(channel, subChannel);
reply->writeInt32(status);
return NO_ERROR;
@@ -310,13 +307,12 @@
reply->writeInt32(status);
if (status == NO_ERROR) {
reply->write(&info, sizeof(struct radio_program_info));
- int count = radio_metadata_get_count(info.metadata);
- if (count > 0) {
+ if (radio_metadata_get_count(info.metadata) > 0) {
size_t size = radio_metadata_get_size(info.metadata);
- reply->writeInt32(size);
+ reply->writeUint32((uint32_t)size);
reply->write(info.metadata, size);
} else {
- reply->writeInt32(0);
+ reply->writeUint32(0);
}
}
return NO_ERROR;
diff --git a/radio/IRadioService.cpp b/radio/IRadioService.cpp
index be7d21e..72e3a61 100644
--- a/radio/IRadioService.cpp
+++ b/radio/IRadioService.cpp
@@ -16,8 +16,7 @@
*/
#define LOG_TAG "BpRadioService"
-//
-#define LOG_NDEBUG 0
+//#define LOG_NDEBUG 0
#include <utils/Log.h>
#include <utils/Errors.h>
@@ -58,12 +57,12 @@
}
Parcel data, reply;
data.writeInterfaceToken(IRadioService::getInterfaceDescriptor());
- unsigned int numModulesReq = (properties == NULL) ? 0 : *numModules;
+ uint32_t numModulesReq = (properties == NULL) ? 0 : *numModules;
data.writeInt32(numModulesReq);
status_t status = remote()->transact(LIST_MODULES, data, &reply);
if (status == NO_ERROR) {
status = (status_t)reply.readInt32();
- *numModules = (unsigned int)reply.readInt32();
+ *numModules = (uint32_t)reply.readInt32();
}
ALOGV("listModules() status %d got *numModules %d", status, *numModules);
if (status == NO_ERROR) {
@@ -120,11 +119,11 @@
switch(code) {
case LIST_MODULES: {
CHECK_INTERFACE(IRadioService, data, reply);
- unsigned int numModulesReq = data.readInt32();
+ uint32_t numModulesReq = data.readInt32();
if (numModulesReq > MAX_ITEMS_PER_LIST) {
numModulesReq = MAX_ITEMS_PER_LIST;
}
- unsigned int numModules = numModulesReq;
+ uint32_t numModules = numModulesReq;
struct radio_properties *properties =
(struct radio_properties *)calloc(numModulesReq,
sizeof(struct radio_properties));
diff --git a/radio/Radio.cpp b/radio/Radio.cpp
index 57c7e89..9ddd221 100644
--- a/radio/Radio.cpp
+++ b/radio/Radio.cpp
@@ -240,20 +240,31 @@
return;
}
+ // The event layout in shared memory is:
+ // sizeof(struct radio_event) bytes : the event itself
+ // 4 bytes : metadata size or 0
+ // N bytes : metadata if present
struct radio_event *event = (struct radio_event *)eventMemory->pointer();
+ uint32_t metadataOffset = sizeof(struct radio_event) + sizeof(uint32_t);
+ uint32_t metadataSize = *(uint32_t *)((uint8_t *)event + metadataOffset - sizeof(uint32_t));
+
// restore local metadata pointer from offset
switch (event->type) {
case RADIO_EVENT_TUNED:
case RADIO_EVENT_AF_SWITCH:
- if (event->info.metadata != NULL) {
+ if (metadataSize != 0) {
event->info.metadata =
- (radio_metadata_t *)((char *)event + (size_t)event->info.metadata);
+ (radio_metadata_t *)((uint8_t *)event + metadataOffset);
+ } else {
+ event->info.metadata = 0;
}
break;
case RADIO_EVENT_METADATA:
- if (event->metadata != NULL) {
+ if (metadataSize != 0) {
event->metadata =
- (radio_metadata_t *)((char *)event + (size_t)event->metadata);
+ (radio_metadata_t *)((uint8_t *)event + metadataOffset);
+ } else {
+ event->metadata = 0;
}
break;
default:
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 42b139a..d0454d4 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -28,20 +28,19 @@
AudioStreamOut.cpp \
SpdifStreamOut.cpp \
Effects.cpp \
- AudioMixer.cpp.arm \
- BufferProviders.cpp \
PatchPanel.cpp \
- StateQueue.cpp
+ StateQueue.cpp \
+ BufLog.cpp \
+ TypedLogger.cpp
LOCAL_C_INCLUDES := \
frameworks/av/services/audiopolicy \
frameworks/av/services/medialog \
- external/sonic \
- $(call include-path-for, audio-effects) \
$(call include-path-for, audio-utils)
LOCAL_SHARED_LIBRARIES := \
- libaudioresampler \
+ libaudiohal \
+ libaudioprocessing \
libaudiospdif \
libaudioutils \
libcutils \
@@ -52,18 +51,14 @@
libmedialogservice \
libmediautils \
libnbaio \
- libhardware \
- libhardware_legacy \
- libeffects \
libpowermanager \
libserviceutility \
- libsonic \
libmediautils \
- libmemunreachable
+ libmemunreachable \
+ libmedia_helper
LOCAL_STATIC_LIBRARIES := \
libcpustats \
- libmedia_helper
LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
@@ -89,59 +84,4 @@
include $(BUILD_SHARED_LIBRARY)
-#
-# build audio resampler test tool
-#
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= \
- test-resample.cpp \
-
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-utils)
-
-LOCAL_STATIC_LIBRARIES := \
- libsndfile
-
-LOCAL_SHARED_LIBRARIES := \
- libaudioresampler \
- libaudioutils \
- libdl \
- libcutils \
- libutils \
- liblog
-
-LOCAL_MODULE:= test-resample
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_CFLAGS := -Werror -Wall
-
-include $(BUILD_EXECUTABLE)
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= \
- AudioResampler.cpp.arm \
- AudioResamplerCubic.cpp.arm \
- AudioResamplerSinc.cpp.arm \
- AudioResamplerDyn.cpp.arm
-
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-utils)
-
-LOCAL_SHARED_LIBRARIES := \
- libcutils \
- libdl \
- liblog
-
-LOCAL_MODULE := libaudioresampler
-
-LOCAL_CFLAGS := -Werror -Wall
-
-# uncomment to disable NEON on architectures that actually do support NEON, for benchmarking
-#LOCAL_CFLAGS += -DUSE_NEON=false
-
-include $(BUILD_SHARED_LIBRARY)
-
include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index fcb6992..2c33fc2 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -31,6 +31,11 @@
#include <utils/Log.h>
#include <utils/Trace.h>
#include <binder/Parcel.h>
+#include <media/audiohal/DeviceHalInterface.h>
+#include <media/audiohal/DevicesFactoryHalInterface.h>
+#include <media/audiohal/EffectsFactoryHalInterface.h>
+#include <media/AudioParameter.h>
+#include <media/TypeConverter.h>
#include <memunreachable/memunreachable.h>
#include <utils/String16.h>
#include <utils/threads.h>
@@ -39,18 +44,15 @@
#include <cutils/properties.h>
#include <system/audio.h>
-#include <hardware/audio.h>
-#include "AudioMixer.h"
#include "AudioFlinger.h"
#include "ServiceUtilities.h"
#include <media/AudioResamplerPublic.h>
-#include <media/EffectsFactoryApi.h>
-#include <audio_effects/effect_visualizer.h>
-#include <audio_effects/effect_ns.h>
-#include <audio_effects/effect_aec.h>
+#include <system/audio_effects/effect_visualizer.h>
+#include <system/audio_effects/effect_ns.h>
+#include <system/audio_effects/effect_aec.h>
#include <audio_utils/primitives.h>
@@ -64,6 +66,11 @@
#include <mediautils/BatteryNotifier.h>
#include <private/android_filesystem_config.h>
+//#define BUFLOG_NDEBUG 0
+#include <BufLog.h>
+
+#include "TypedLogger.h"
+
// ----------------------------------------------------------------------------
// Note: the following macro is used for extremely verbose logging message. In
@@ -84,12 +91,14 @@
static const char kDeadlockedString[] = "AudioFlinger may be deadlocked\n";
static const char kHardwareLockedString[] = "Hardware lock is taken\n";
static const char kClientLockedString[] = "Client lock is taken\n";
+static const char kNoEffectsFactory[] = "Effects Factory is absent\n";
nsecs_t AudioFlinger::mStandbyTimeInNsecs = kDefaultStandbyTimeInNsecs;
uint32_t AudioFlinger::mScreenState;
+
#ifdef TEE_SINK
bool AudioFlinger::mTeeSinkInputEnabled = false;
bool AudioFlinger::mTeeSinkOutputEnabled = false;
@@ -104,75 +113,38 @@
// we define a minimum time during which a global effect is considered enabled.
static const nsecs_t kMinGlobalEffectEnabletimeNs = seconds(7200);
-// ----------------------------------------------------------------------------
+Mutex gLock;
+wp<AudioFlinger> gAudioFlinger;
-const char *formatToString(audio_format_t format) {
- switch (audio_get_main_format(format)) {
- case AUDIO_FORMAT_PCM:
- switch (format) {
- case AUDIO_FORMAT_PCM_16_BIT: return "pcm16";
- case AUDIO_FORMAT_PCM_8_BIT: return "pcm8";
- case AUDIO_FORMAT_PCM_32_BIT: return "pcm32";
- case AUDIO_FORMAT_PCM_8_24_BIT: return "pcm8.24";
- case AUDIO_FORMAT_PCM_FLOAT: return "pcmfloat";
- case AUDIO_FORMAT_PCM_24_BIT_PACKED: return "pcm24";
- default:
- break;
- }
- break;
- case AUDIO_FORMAT_MP3: return "mp3";
- case AUDIO_FORMAT_AMR_NB: return "amr-nb";
- case AUDIO_FORMAT_AMR_WB: return "amr-wb";
- case AUDIO_FORMAT_AAC: return "aac";
- case AUDIO_FORMAT_HE_AAC_V1: return "he-aac-v1";
- case AUDIO_FORMAT_HE_AAC_V2: return "he-aac-v2";
- case AUDIO_FORMAT_VORBIS: return "vorbis";
- case AUDIO_FORMAT_OPUS: return "opus";
- case AUDIO_FORMAT_AC3: return "ac-3";
- case AUDIO_FORMAT_E_AC3: return "e-ac-3";
- case AUDIO_FORMAT_IEC61937: return "iec61937";
- case AUDIO_FORMAT_DTS: return "dts";
- case AUDIO_FORMAT_DTS_HD: return "dts-hd";
- case AUDIO_FORMAT_DOLBY_TRUEHD: return "dolby-truehd";
- default:
- break;
+// Keep a strong reference to media.log service around forever.
+// The service is within our parent process so it can never die in a way that we could observe.
+// These two variables are const after initialization.
+static sp<IBinder> sMediaLogServiceAsBinder;
+static sp<IMediaLogService> sMediaLogService;
+
+static pthread_once_t sMediaLogOnce = PTHREAD_ONCE_INIT;
+
+static void sMediaLogInit()
+{
+ sMediaLogServiceAsBinder = defaultServiceManager()->getService(String16("media.log"));
+ if (sMediaLogServiceAsBinder != 0) {
+ sMediaLogService = interface_cast<IMediaLogService>(sMediaLogServiceAsBinder);
}
- return "unknown";
}
-static int load_audio_interface(const char *if_name, audio_hw_device_t **dev)
-{
- const hw_module_t *mod;
- int rc;
+// ----------------------------------------------------------------------------
- rc = hw_get_module_by_class(AUDIO_HARDWARE_MODULE_ID, if_name, &mod);
- ALOGE_IF(rc, "%s couldn't load audio hw module %s.%s (%s)", __func__,
- AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
- if (rc) {
- goto out;
- }
- rc = audio_hw_device_open(mod, dev);
- ALOGE_IF(rc, "%s couldn't open audio hw device in %s.%s (%s)", __func__,
- AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
- if (rc) {
- goto out;
- }
- if ((*dev)->common.version < AUDIO_DEVICE_API_VERSION_MIN) {
- ALOGE("%s wrong audio hw device version %04x", __func__, (*dev)->common.version);
- rc = BAD_VALUE;
- goto out;
- }
- return 0;
-
-out:
- *dev = NULL;
- return rc;
+std::string formatToString(audio_format_t format) {
+ std::string result;
+ FormatConverter::toString(format, result);
+ return result;
}
// ----------------------------------------------------------------------------
AudioFlinger::AudioFlinger()
: BnAudioFlinger(),
+ mMediaLogNotifier(new AudioFlinger::MediaLogNotifier()),
mPrimaryHardwareDev(NULL),
mAudioHwDevs(NULL),
mHardwareStatus(AUDIO_HW_IDLE),
@@ -197,6 +169,7 @@
if (doLog) {
mLogMemoryDealer = new MemoryDealer(kLogMemorySize, "LogWriters",
MemoryHeapBase::READ_ONLY);
+ (void) pthread_once(&sMediaLogOnce, sMediaLogInit);
}
// reset battery stats.
@@ -204,6 +177,11 @@
// in bad state, reset the state upon service start.
BatteryNotifier::getInstance().noteResetAudio();
+ mDevicesFactoryHal = DevicesFactoryHalInterface::create();
+ mEffectsFactoryHal = EffectsFactoryHalInterface::create();
+
+ mMediaLogNotifier->run("MediaLogNotifier");
+
#ifdef TEE_SINK
char value[PROPERTY_VALUE_MAX];
(void) property_get("ro.debuggable", value, "0");
@@ -247,6 +225,8 @@
mPatchPanel = new PatchPanel(this);
mMode = AUDIO_MODE_NORMAL;
+
+ gAudioFlinger = this;
}
AudioFlinger::~AudioFlinger()
@@ -262,24 +242,98 @@
for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
// no mHardwareLock needed, as there are no other references to this
- audio_hw_device_close(mAudioHwDevs.valueAt(i)->hwDevice());
delete mAudioHwDevs.valueAt(i);
}
// Tell media.log service about any old writers that still need to be unregistered
- if (mLogMemoryDealer != 0) {
- sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log"));
- if (binder != 0) {
- sp<IMediaLogService> mediaLogService(interface_cast<IMediaLogService>(binder));
- for (size_t count = mUnregisteredWriters.size(); count > 0; count--) {
- sp<IMemory> iMemory(mUnregisteredWriters.top()->getIMemory());
- mUnregisteredWriters.pop();
- mediaLogService->unregisterWriter(iMemory);
- }
+ if (sMediaLogService != 0) {
+ for (size_t count = mUnregisteredWriters.size(); count > 0; count--) {
+ sp<IMemory> iMemory(mUnregisteredWriters.top()->getIMemory());
+ mUnregisteredWriters.pop();
+ sMediaLogService->unregisterWriter(iMemory);
}
}
}
+//static
+__attribute__ ((visibility ("default")))
+status_t MmapStreamInterface::openMmapStream(MmapStreamInterface::stream_direction_t direction,
+ const audio_attributes_t *attr,
+ audio_config_base_t *config,
+ const MmapStreamInterface::Client& client,
+ audio_port_handle_t *deviceId,
+ const sp<MmapStreamCallback>& callback,
+ sp<MmapStreamInterface>& interface)
+{
+ sp<AudioFlinger> af;
+ {
+ Mutex::Autolock _l(gLock);
+ af = gAudioFlinger.promote();
+ }
+ status_t ret = NO_INIT;
+ if (af != 0) {
+ ret = af->openMmapStream(
+ direction, attr, config, client, deviceId, callback, interface);
+ }
+ return ret;
+}
+
+status_t AudioFlinger::openMmapStream(MmapStreamInterface::stream_direction_t direction,
+ const audio_attributes_t *attr,
+ audio_config_base_t *config,
+ const MmapStreamInterface::Client& client,
+ audio_port_handle_t *deviceId,
+ const sp<MmapStreamCallback>& callback,
+ sp<MmapStreamInterface>& interface)
+{
+ status_t ret = initCheck();
+ if (ret != NO_ERROR) {
+ return ret;
+ }
+
+ audio_session_t sessionId = (audio_session_t) newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
+ audio_stream_type_t streamType = AUDIO_STREAM_DEFAULT;
+ audio_io_handle_t io;
+ audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
+ if (direction == MmapStreamInterface::DIRECTION_OUTPUT) {
+ audio_config_t fullConfig = AUDIO_CONFIG_INITIALIZER;
+ fullConfig.sample_rate = config->sample_rate;
+ fullConfig.channel_mask = config->channel_mask;
+ fullConfig.format = config->format;
+ ret = AudioSystem::getOutputForAttr(attr, &io,
+ sessionId,
+ &streamType, client.clientUid,
+ &fullConfig,
+ (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_MMAP_NOIRQ |
+ AUDIO_OUTPUT_FLAG_DIRECT),
+ *deviceId, &portId);
+ } else {
+ ret = AudioSystem::getInputForAttr(attr, &io,
+ sessionId,
+ client.clientPid,
+ client.clientUid,
+ config,
+ AUDIO_INPUT_FLAG_MMAP_NOIRQ, *deviceId, &portId);
+ }
+ if (ret != NO_ERROR) {
+ return ret;
+ }
+
+ // at this stage, a MmapThread was created when openOutput() or openInput() was called by
+ // audio policy manager and we can retrieve it
+ sp<MmapThread> thread = mMmapThreads.valueFor(io);
+ if (thread != 0) {
+ interface = new MmapThreadHandle(thread);
+ thread->configure(attr, streamType, sessionId, callback, portId);
+ } else {
+ ret = NO_INIT;
+ }
+
+ ALOGV("%s done status %d portId %d", __FUNCTION__, ret, portId);
+
+ return ret;
+}
+
static const char * const audio_interfaces[] = {
AUDIO_HARDWARE_MODULE_ID_PRIMARY,
AUDIO_HARDWARE_MODULE_ID_A2DP,
@@ -301,10 +355,12 @@
// then try to find a module supporting the requested device.
for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
AudioHwDevice *audioHwDevice = mAudioHwDevs.valueAt(i);
- audio_hw_device_t *dev = audioHwDevice->hwDevice();
- if ((dev->get_supported_devices != NULL) &&
- (dev->get_supported_devices(dev) & devices) == devices)
+ sp<DeviceHalInterface> dev = audioHwDevice->hwDevice();
+ uint32_t supportedDevices;
+ if (dev->getSupportedDevices(&supportedDevices) == OK &&
+ (supportedDevices & devices) == devices) {
return audioHwDevice;
+ }
}
} else {
// check a match for the requested module handle
@@ -418,7 +474,12 @@
write(fd, result.string(), result.size());
}
- EffectDumpEffects(fd);
+ if (mEffectsFactoryHal != 0) {
+ mEffectsFactoryHal->dumpEffects(fd);
+ } else {
+ String8 result(kNoEffectsFactory);
+ write(fd, result.string(), result.size());
+ }
dumpClients(fd, args);
if (clientLocked) {
@@ -437,6 +498,11 @@
mRecordThreads.valueAt(i)->dump(fd, args);
}
+ // dump mmap threads
+ for (size_t i = 0; i < mMmapThreads.size(); i++) {
+ mMmapThreads.valueAt(i)->dump(fd, args);
+ }
+
// dump orphan effect chains
if (mOrphanEffectChains.size() != 0) {
write(fd, " Orphan Effect Chains\n", strlen(" Orphan Effect Chains\n"));
@@ -446,8 +512,8 @@
}
// dump all hardware devs
for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
- audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
- dev->dump(dev, fd);
+ sp<DeviceHalInterface> dev = mAudioHwDevs.valueAt(i)->hwDevice();
+ dev->dump(fd);
}
#ifdef TEE_SINK
@@ -457,19 +523,18 @@
}
#endif
+ BUFLOG_RESET;
+
if (locked) {
mLock.unlock();
}
// append a copy of media.log here by forwarding fd to it, but don't attempt
// to lookup the service if it's not running, as it will block for a second
- if (mLogMemoryDealer != 0) {
- sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log"));
- if (binder != 0) {
- dprintf(fd, "\nmedia.log:\n");
- Vector<String16> args;
- binder->dump(fd, args);
- }
+ if (sMediaLogServiceAsBinder != 0) {
+ dprintf(fd, "\nmedia.log:\n");
+ Vector<String16> args;
+ sMediaLogServiceAsBinder->dump(fd, args);
}
// check for optional arguments
@@ -514,16 +579,11 @@
sp<NBLog::Writer> AudioFlinger::newWriter_l(size_t size, const char *name)
{
- // If there is no memory allocated for logs, return a dummy writer that does nothing
- if (mLogMemoryDealer == 0) {
+ // If there is no memory allocated for logs, return a dummy writer that does nothing.
+ // Similarly if we can't contact the media.log service, also return a dummy writer.
+ if (mLogMemoryDealer == 0 || sMediaLogService == 0) {
return new NBLog::Writer();
}
- sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log"));
- // Similarly if we can't contact the media.log service, also return a dummy writer
- if (binder == 0) {
- return new NBLog::Writer();
- }
- sp<IMediaLogService> mediaLogService(interface_cast<IMediaLogService>(binder));
sp<IMemory> shared = mLogMemoryDealer->allocate(NBLog::Timeline::sharedSize(size));
// If allocation fails, consult the vector of previously unregistered writers
// and garbage-collect one or more them until an allocation succeeds
@@ -534,7 +594,7 @@
// Pick the oldest stale writer to garbage-collect
sp<IMemory> iMemory(mUnregisteredWriters[0]->getIMemory());
mUnregisteredWriters.removeAt(0);
- mediaLogService->unregisterWriter(iMemory);
+ sMediaLogService->unregisterWriter(iMemory);
// Now the media.log remote reference to IMemory is gone. When our last local
// reference to IMemory also drops to zero at end of this block,
// the IMemory destructor will deallocate the region from mLogMemoryDealer.
@@ -550,8 +610,11 @@
return new NBLog::Writer();
}
success:
- mediaLogService->registerWriter(shared, size, name);
- return new NBLog::Writer(size, shared);
+ NBLog::Shared *sharedRawPtr = (NBLog::Shared *) shared->pointer();
+ new((void *) sharedRawPtr) NBLog::Shared(); // placement new here, but the corresponding
+ // explicit destructor not needed since it is POD
+ sMediaLogService->registerWriter(shared, size, name);
+ return new NBLog::Writer(shared, size);
}
void AudioFlinger::unregisterWriter(const sp<NBLog::Writer>& writer)
@@ -585,7 +648,8 @@
pid_t tid,
audio_session_t *sessionId,
int clientUid,
- status_t *status)
+ status_t *status,
+ audio_port_handle_t portId)
{
sp<PlaybackThread::Track> track;
sp<TrackHandle> trackHandle;
@@ -678,7 +742,8 @@
ALOGV("createTrack() lSessionId: %d", lSessionId);
track = thread->createTrack_l(client, streamType, sampleRate, format,
- channelMask, frameCount, sharedBuffer, lSessionId, flags, tid, clientUid, &lStatus);
+ channelMask, frameCount, sharedBuffer, lSessionId, flags, tid,
+ clientUid, &lStatus, portId);
LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (track == 0));
// we don't abort yet if lStatus != NO_ERROR; there is still work to be done regardless
@@ -809,7 +874,7 @@
mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
if (dev->canSetMasterVolume()) {
- dev->hwDevice()->set_master_volume(dev->hwDevice(), value);
+ dev->hwDevice()->setMasterVolume(value);
}
mHardwareStatus = AUDIO_HW_IDLE;
}
@@ -846,9 +911,9 @@
{ // scope for the lock
AutoMutex lock(mHardwareLock);
- audio_hw_device_t *dev = mPrimaryHardwareDev->hwDevice();
+ sp<DeviceHalInterface> dev = mPrimaryHardwareDev->hwDevice();
mHardwareStatus = AUDIO_HW_SET_MODE;
- ret = dev->set_mode(dev, mode);
+ ret = dev->setMode(mode);
mHardwareStatus = AUDIO_HW_IDLE;
}
@@ -877,8 +942,8 @@
AutoMutex lock(mHardwareLock);
mHardwareStatus = AUDIO_HW_SET_MIC_MUTE;
for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
- audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
- status_t result = dev->set_mic_mute(dev, state);
+ sp<DeviceHalInterface> dev = mAudioHwDevs.valueAt(i)->hwDevice();
+ status_t result = dev->setMicMute(state);
if (result != NO_ERROR) {
ret = result;
}
@@ -898,8 +963,8 @@
AutoMutex lock(mHardwareLock);
mHardwareStatus = AUDIO_HW_GET_MIC_MUTE;
for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
- audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
- status_t result = dev->get_mic_mute(dev, &state);
+ sp<DeviceHalInterface> dev = mAudioHwDevs.valueAt(i)->hwDevice();
+ status_t result = dev->getMicMute(&state);
if (result == NO_ERROR) {
mute = mute && state;
}
@@ -931,7 +996,7 @@
mHardwareStatus = AUDIO_HW_SET_MASTER_MUTE;
if (dev->canSetMasterMute()) {
- dev->hwDevice()->set_master_mute(dev->hwDevice(), muted);
+ dev->hwDevice()->setMasterMute(muted);
}
mHardwareStatus = AUDIO_HW_IDLE;
}
@@ -940,11 +1005,9 @@
// assigned to HALs which do not have master mute support will apply master
// mute during the mix operation. Threads with HALs which do support master
// mute will simply ignore the setting.
- for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
- if (mPlaybackThreads.valueAt(i)->isDuplicating()) {
- continue;
- }
- mPlaybackThreads.valueAt(i)->setMasterMute(muted);
+ Vector<VolumeInterface *> volumeInterfaces = getAllVolumeInterfaces_l();
+ for (size_t i = 0; i < volumeInterfaces.size(); i++) {
+ volumeInterfaces[i]->setMasterMute(muted);
}
return NO_ERROR;
@@ -975,12 +1038,12 @@
status_t AudioFlinger::checkStreamType(audio_stream_type_t stream) const
{
if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
- ALOGW("setStreamVolume() invalid stream %d", stream);
+ ALOGW("checkStreamType() invalid stream %d", stream);
return BAD_VALUE;
}
pid_t caller = IPCThreadState::self()->getCallingPid();
if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT && caller != getpid_cached) {
- ALOGW("setStreamVolume() pid %d cannot use internal stream type %d", caller, stream);
+ ALOGW("checkStreamType() pid %d cannot use internal stream type %d", caller, stream);
return PERMISSION_DENIED;
}
@@ -1002,22 +1065,22 @@
ALOG_ASSERT(stream != AUDIO_STREAM_PATCH, "attempt to change AUDIO_STREAM_PATCH volume");
AutoMutex lock(mLock);
- PlaybackThread *thread = NULL;
+ Vector<VolumeInterface *> volumeInterfaces;
if (output != AUDIO_IO_HANDLE_NONE) {
- thread = checkPlaybackThread_l(output);
- if (thread == NULL) {
+ VolumeInterface *volumeInterface = getVolumeInterface_l(output);
+ if (volumeInterface == NULL) {
return BAD_VALUE;
}
+ volumeInterfaces.add(volumeInterface);
}
mStreamTypes[stream].volume = value;
- if (thread == NULL) {
- for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
- mPlaybackThreads.valueAt(i)->setStreamVolume(stream, value);
- }
- } else {
- thread->setStreamVolume(stream, value);
+ if (volumeInterfaces.size() == 0) {
+ volumeInterfaces = getAllVolumeInterfaces_l();
+ }
+ for (size_t i = 0; i < volumeInterfaces.size(); i++) {
+ volumeInterfaces[i]->setStreamVolume(stream, value);
}
return NO_ERROR;
@@ -1043,8 +1106,10 @@
AutoMutex lock(mLock);
mStreamTypes[stream].mute = muted;
- for (size_t i = 0; i < mPlaybackThreads.size(); i++)
- mPlaybackThreads.valueAt(i)->setStreamMute(stream, muted);
+ Vector<VolumeInterface *> volumeInterfaces = getAllVolumeInterfaces_l();
+ for (size_t i = 0; i < volumeInterfaces.size(); i++) {
+ volumeInterfaces[i]->setStreamMute(stream, muted);
+ }
return NO_ERROR;
}
@@ -1059,11 +1124,12 @@
AutoMutex lock(mLock);
float volume;
if (output != AUDIO_IO_HANDLE_NONE) {
- PlaybackThread *thread = checkPlaybackThread_l(output);
- if (thread == NULL) {
- return 0.0f;
+ VolumeInterface *volumeInterface = getVolumeInterface_l(output);
+ if (volumeInterface != NULL) {
+ volume = volumeInterface->streamVolume(stream);
+ } else {
+ volume = 0.0f;
}
- volume = thread->streamVolume(stream);
} else {
volume = streamVolume_l(stream);
}
@@ -1109,8 +1175,8 @@
AutoMutex lock(mHardwareLock);
mHardwareStatus = AUDIO_HW_SET_PARAMETER;
for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
- audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
- status_t result = dev->set_parameters(dev, keyValuePairs.string());
+ sp<DeviceHalInterface> dev = mAudioHwDevs.valueAt(i)->hwDevice();
+ status_t result = dev->setParameters(keyValuePairs);
// return success if at least one audio device accepts the parameters as not all
// HALs are requested to support all parameters. If no audio device supports the
// requested parameters, the last error is reported.
@@ -1123,8 +1189,8 @@
// disable AEC and NS if the device is a BT SCO headset supporting those pre processings
AudioParameter param = AudioParameter(keyValuePairs);
String8 value;
- if (param.get(String8(AUDIO_PARAMETER_KEY_BT_NREC), value) == NO_ERROR) {
- bool btNrecIsOff = (value == AUDIO_PARAMETER_VALUE_OFF);
+ if (param.get(String8(AudioParameter::keyBtNrec), value) == NO_ERROR) {
+ bool btNrecIsOff = (value == AudioParameter::valueOff);
if (mBtNrecIsOff != btNrecIsOff) {
for (size_t i = 0; i < mRecordThreads.size(); i++) {
sp<RecordThread> thread = mRecordThreads.valueAt(i);
@@ -1148,7 +1214,7 @@
}
String8 screenState;
if (param.get(String8(AudioParameter::keyScreenState), screenState) == NO_ERROR) {
- bool isOff = screenState == "off";
+ bool isOff = (screenState == AudioParameter::valueOff);
if (isOff != (AudioFlinger::mScreenState & 1)) {
AudioFlinger::mScreenState = ((AudioFlinger::mScreenState & ~1) + 2) | isOff;
}
@@ -1164,6 +1230,9 @@
thread = checkPlaybackThread_l(ioHandle);
if (thread == 0) {
thread = checkRecordThread_l(ioHandle);
+ if (thread == 0) {
+ thread = checkMmapThread_l(ioHandle);
+ }
} else if (thread == primaryPlaybackThread_l()) {
// indicate output device change to all input threads for pre processing
AudioParameter param = AudioParameter(keyValuePairs);
@@ -1191,29 +1260,31 @@
String8 out_s8;
for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
- char *s;
+ String8 s;
+ status_t result;
{
AutoMutex lock(mHardwareLock);
mHardwareStatus = AUDIO_HW_GET_PARAMETER;
- audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
- s = dev->get_parameters(dev, keys.string());
+ sp<DeviceHalInterface> dev = mAudioHwDevs.valueAt(i)->hwDevice();
+ result = dev->getParameters(keys, &s);
mHardwareStatus = AUDIO_HW_IDLE;
}
- out_s8 += String8(s ? s : "");
- free(s);
+ if (result == OK) out_s8 += s;
}
return out_s8;
}
- PlaybackThread *playbackThread = checkPlaybackThread_l(ioHandle);
- if (playbackThread != NULL) {
- return playbackThread->getParameters(keys);
+ ThreadBase *thread = (ThreadBase *)checkPlaybackThread_l(ioHandle);
+ if (thread == NULL) {
+ thread = (ThreadBase *)checkRecordThread_l(ioHandle);
+ if (thread == NULL) {
+ thread = (ThreadBase *)checkMmapThread_l(ioHandle);
+ if (thread == NULL) {
+ return String8("");
+ }
+ }
+ }
}
- RecordThread *recordThread = checkRecordThread_l(ioHandle);
- if (recordThread != NULL) {
- return recordThread->getParameters(keys);
- }
- return String8("");
+ return thread->getParameters(keys);
}
size_t AudioFlinger::getInputBufferSize(uint32_t sampleRate, audio_format_t format,
@@ -1237,14 +1308,14 @@
proposed.channel_mask = channelMask;
proposed.format = format;
- audio_hw_device_t *dev = mPrimaryHardwareDev->hwDevice();
+ sp<DeviceHalInterface> dev = mPrimaryHardwareDev->hwDevice();
size_t frames;
for (;;) {
// Note: config is currently a const parameter for get_input_buffer_size()
// but we use a copy from proposed in case config changes from the call.
config = proposed;
- frames = dev->get_input_buffer_size(dev, &config);
- if (frames != 0) {
+ status_t result = dev->getInputBufferSize(&config, &frames);
+ if (result == OK && frames != 0) {
break; // hal success, config is the result
}
// change one parameter of the configuration each iteration to a more "common" value
@@ -1291,9 +1362,9 @@
}
AutoMutex lock(mHardwareLock);
- audio_hw_device_t *dev = mPrimaryHardwareDev->hwDevice();
+ sp<DeviceHalInterface> dev = mPrimaryHardwareDev->hwDevice();
mHardwareStatus = AUDIO_HW_SET_VOICE_VOLUME;
- ret = dev->set_voice_volume(dev, value);
+ ret = dev->setVoiceVolume(value);
mHardwareStatus = AUDIO_HW_IDLE;
return ret;
@@ -1465,6 +1536,41 @@
mAudioFlinger->removeNotificationClient(mPid);
}
+// ----------------------------------------------------------------------------
+AudioFlinger::MediaLogNotifier::MediaLogNotifier()
+ : mPendingRequests(false) {}
+
+
+void AudioFlinger::MediaLogNotifier::requestMerge() {
+ AutoMutex _l(mMutex);
+ mPendingRequests = true;
+ mCond.signal();
+}
+
+bool AudioFlinger::MediaLogNotifier::threadLoop() {
+ // Should already have been checked, but just in case
+ if (sMediaLogService == 0) {
+ return false;
+ }
+ // Wait until there are pending requests
+ {
+ AutoMutex _l(mMutex);
+ mPendingRequests = false; // to ignore past requests
+ while (!mPendingRequests) {
+ mCond.wait(mMutex);
+ // TODO may also need an exitPending check
+ }
+ mPendingRequests = false;
+ }
+ // Execute the actual MediaLogService binder call and ignore extra requests for a while
+ sMediaLogService->requestMergeWakeup();
+ usleep(kPostTriggerSleepPeriod);
+ return true;
+}
+
+void AudioFlinger::requestLogMerge() {
+ mMediaLogNotifier->requestMerge();
+}
// ----------------------------------------------------------------------------
@@ -1483,7 +1589,8 @@
size_t *notificationFrames,
sp<IMemory>& cblk,
sp<IMemory>& buffers,
- status_t *status)
+ status_t *status,
+ audio_port_handle_t portId)
{
sp<RecordThread::RecordTrack> recordTrack;
sp<RecordHandle> recordHandle;
@@ -1567,7 +1674,7 @@
recordTrack = thread->createRecordTrack_l(client, sampleRate, format, channelMask,
frameCount, lSessionId, notificationFrames,
- clientUid, flags, tid, &lStatus);
+ clientUid, flags, tid, &lStatus, portId);
LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (recordTrack == 0));
if (lStatus == NO_ERROR) {
@@ -1631,16 +1738,16 @@
}
}
- audio_hw_device_t *dev;
+ sp<DeviceHalInterface> dev;
- int rc = load_audio_interface(name, &dev);
+ int rc = mDevicesFactoryHal->openDevice(name, &dev);
if (rc) {
ALOGE("loadHwModule() error %d loading module %s", rc, name);
return AUDIO_MODULE_HANDLE_NONE;
}
mHardwareStatus = AUDIO_HW_INIT;
- rc = dev->init_check(dev);
+ rc = dev->initCheck();
mHardwareStatus = AUDIO_HW_IDLE;
if (rc) {
ALOGE("loadHwModule() init check error %d for module %s", rc, name);
@@ -1658,32 +1765,26 @@
if (0 == mAudioHwDevs.size()) {
mHardwareStatus = AUDIO_HW_GET_MASTER_VOLUME;
- if (NULL != dev->get_master_volume) {
- float mv;
- if (OK == dev->get_master_volume(dev, &mv)) {
- mMasterVolume = mv;
- }
+ float mv;
+ if (OK == dev->getMasterVolume(&mv)) {
+ mMasterVolume = mv;
}
mHardwareStatus = AUDIO_HW_GET_MASTER_MUTE;
- if (NULL != dev->get_master_mute) {
- bool mm;
- if (OK == dev->get_master_mute(dev, &mm)) {
- mMasterMute = mm;
- }
+ bool mm;
+ if (OK == dev->getMasterMute(&mm)) {
+ mMasterMute = mm;
}
}
mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
- if ((NULL != dev->set_master_volume) &&
- (OK == dev->set_master_volume(dev, mMasterVolume))) {
+ if (OK == dev->setMasterVolume(mMasterVolume)) {
flags = static_cast<AudioHwDevice::Flags>(flags |
AudioHwDevice::AHWD_CAN_SET_MASTER_VOLUME);
}
mHardwareStatus = AUDIO_HW_SET_MASTER_MUTE;
- if ((NULL != dev->set_master_mute) &&
- (OK == dev->set_master_mute(dev, mMasterMute))) {
+ if (OK == dev->setMasterMute(mMasterMute)) {
flags = static_cast<AudioHwDevice::Flags>(flags |
AudioHwDevice::AHWD_CAN_SET_MASTER_MUTE);
}
@@ -1694,8 +1795,7 @@
audio_module_handle_t handle = (audio_module_handle_t) nextUniqueId(AUDIO_UNIQUE_ID_USE_MODULE);
mAudioHwDevs.add(handle, new AudioHwDevice(handle, name, dev, flags));
- ALOGI("loadHwModule() Loaded %s audio interface from %s (%s) handle %d",
- name, dev->common.module->name, dev->common.module->id, handle);
+ ALOGI("loadHwModule() Loaded %s audio interface, handle %d", name, handle);
return handle;
@@ -1745,16 +1845,18 @@
return mHwAvSyncIds.valueAt(index);
}
- audio_hw_device_t *dev = mPrimaryHardwareDev->hwDevice();
+ sp<DeviceHalInterface> dev = mPrimaryHardwareDev->hwDevice();
if (dev == NULL) {
return AUDIO_HW_SYNC_INVALID;
}
- char *reply = dev->get_parameters(dev, AUDIO_PARAMETER_HW_AV_SYNC);
- AudioParameter param = AudioParameter(String8(reply));
- free(reply);
+ String8 reply;
+ AudioParameter param;
+ if (dev->getParameters(String8(AudioParameter::keyHwAvSync), &reply) == OK) {
+ param = AudioParameter(reply);
+ }
int value;
- if (param.getInt(String8(AUDIO_PARAMETER_HW_AV_SYNC), value) != NO_ERROR) {
+ if (param.getInt(String8(AudioParameter::keyHwAvSync), value) != NO_ERROR) {
ALOGW("getAudioHwSyncForSession error getting sync for session %d", sessionId);
return AUDIO_HW_SYNC_INVALID;
}
@@ -1776,7 +1878,7 @@
uint32_t sessions = thread->hasAudioSession(sessionId);
if (sessions & ThreadBase::TRACK_SESSION) {
AudioParameter param = AudioParameter();
- param.addInt(String8(AUDIO_PARAMETER_STREAM_HW_AV_SYNC), value);
+ param.addInt(String8(AudioParameter::keyStreamHwAvSync), value);
thread->setParameters(param.toString());
break;
}
@@ -1814,7 +1916,7 @@
audio_hw_sync_t syncId = mHwAvSyncIds.valueAt(index);
ALOGV("setAudioHwSyncForSession_l found ID %d for session %d", syncId, sessionId);
AudioParameter param = AudioParameter();
- param.addInt(String8(AUDIO_PARAMETER_STREAM_HW_AV_SYNC), syncId);
+ param.addInt(String8(AudioParameter::keyStreamHwAvSync), syncId);
thread->setParameters(param.toString());
}
}
@@ -1823,7 +1925,7 @@
// ----------------------------------------------------------------------------
-sp<AudioFlinger::PlaybackThread> AudioFlinger::openOutput_l(audio_module_handle_t module,
+sp<AudioFlinger::ThreadBase> AudioFlinger::openOutput_l(audio_module_handle_t module,
audio_io_handle_t *output,
audio_config_t *config,
audio_devices_t devices,
@@ -1879,22 +1981,34 @@
mHardwareStatus = AUDIO_HW_IDLE;
if (status == NO_ERROR) {
-
- PlaybackThread *thread;
- if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
- thread = new OffloadThread(this, outputStream, *output, devices, mSystemReady);
- ALOGV("openOutput_l() created offload output: ID %d thread %p", *output, thread);
- } else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
- || !isValidPcmSinkFormat(config->format)
- || !isValidPcmSinkChannelMask(config->channel_mask)) {
- thread = new DirectOutputThread(this, outputStream, *output, devices, mSystemReady);
- ALOGV("openOutput_l() created direct output: ID %d thread %p", *output, thread);
+ if (flags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) {
+ sp<MmapPlaybackThread> thread =
+ new MmapPlaybackThread(this, *output, outHwDev, outputStream,
+ devices, AUDIO_DEVICE_NONE, mSystemReady);
+ mMmapThreads.add(*output, thread);
+ ALOGV("openOutput_l() created mmap playback thread: ID %d thread %p",
+ *output, thread.get());
+ return thread;
} else {
- thread = new MixerThread(this, outputStream, *output, devices, mSystemReady);
- ALOGV("openOutput_l() created mixer output: ID %d thread %p", *output, thread);
+ sp<PlaybackThread> thread;
+ if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
+ thread = new OffloadThread(this, outputStream, *output, devices, mSystemReady);
+ ALOGV("openOutput_l() created offload output: ID %d thread %p",
+ *output, thread.get());
+ } else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
+ || !isValidPcmSinkFormat(config->format)
+ || !isValidPcmSinkChannelMask(config->channel_mask)) {
+ thread = new DirectOutputThread(this, outputStream, *output, devices, mSystemReady);
+ ALOGV("openOutput_l() created direct output: ID %d thread %p",
+ *output, thread.get());
+ } else {
+ thread = new MixerThread(this, outputStream, *output, devices, mSystemReady);
+ ALOGV("openOutput_l() created mixer output: ID %d thread %p",
+ *output, thread.get());
+ }
+ mPlaybackThreads.add(*output, thread);
+ return thread;
}
- mPlaybackThreads.add(*output, thread);
- return thread;
}
return 0;
@@ -1908,8 +2022,9 @@
uint32_t *latencyMs,
audio_output_flags_t flags)
{
- ALOGI("openOutput(), module %d Device %x, SamplingRate %d, Format %#08x, Channels %x, flags %x",
- module,
+ ALOGI("openOutput() this %p, module %d Device %x, SamplingRate %d, Format %#08x, Channels %x, "
+ "flags %x",
+ this, module,
(devices != NULL) ? *devices : 0,
config->sample_rate,
config->format,
@@ -1922,22 +2037,28 @@
Mutex::Autolock _l(mLock);
- sp<PlaybackThread> thread = openOutput_l(module, output, config, *devices, address, flags);
+ sp<ThreadBase> thread = openOutput_l(module, output, config, *devices, address, flags);
if (thread != 0) {
- *latencyMs = thread->latency();
+ if ((flags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) == 0) {
+ PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
+ *latencyMs = playbackThread->latency();
- // notify client processes of the new output creation
- thread->ioConfigChanged(AUDIO_OUTPUT_OPENED);
+ // notify client processes of the new output creation
+ playbackThread->ioConfigChanged(AUDIO_OUTPUT_OPENED);
- // the first primary output opened designates the primary hw device
- if ((mPrimaryHardwareDev == NULL) && (flags & AUDIO_OUTPUT_FLAG_PRIMARY)) {
- ALOGI("Using module %d has the primary audio interface", module);
- mPrimaryHardwareDev = thread->getOutput()->audioHwDev;
+ // the first primary output opened designates the primary hw device
+ if ((mPrimaryHardwareDev == NULL) && (flags & AUDIO_OUTPUT_FLAG_PRIMARY)) {
+ ALOGI("Using module %d as the primary audio interface", module);
+ mPrimaryHardwareDev = playbackThread->getOutput()->audioHwDev;
- AutoMutex lock(mHardwareLock);
- mHardwareStatus = AUDIO_HW_SET_MODE;
- mPrimaryHardwareDev->hwDevice()->set_mode(mPrimaryHardwareDev->hwDevice(), mMode);
- mHardwareStatus = AUDIO_HW_IDLE;
+ AutoMutex lock(mHardwareLock);
+ mHardwareStatus = AUDIO_HW_SET_MODE;
+ mPrimaryHardwareDev->hwDevice()->setMode(mMode);
+ mHardwareStatus = AUDIO_HW_IDLE;
+ }
+ } else {
+ MmapThread *mmapThread = (MmapThread *)thread.get();
+ mmapThread->ioConfigChanged(AUDIO_OUTPUT_OPENED);
}
return NO_ERROR;
}
@@ -1976,54 +2097,68 @@
{
// keep strong reference on the playback thread so that
// it is not destroyed while exit() is executed
- sp<PlaybackThread> thread;
+ sp<PlaybackThread> playbackThread;
+ sp<MmapPlaybackThread> mmapThread;
{
Mutex::Autolock _l(mLock);
- thread = checkPlaybackThread_l(output);
- if (thread == NULL) {
- return BAD_VALUE;
- }
+ playbackThread = checkPlaybackThread_l(output);
+ if (playbackThread != NULL) {
+ ALOGV("closeOutput() %d", output);
- ALOGV("closeOutput() %d", output);
-
- if (thread->type() == ThreadBase::MIXER) {
- for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
- if (mPlaybackThreads.valueAt(i)->isDuplicating()) {
- DuplicatingThread *dupThread =
- (DuplicatingThread *)mPlaybackThreads.valueAt(i).get();
- dupThread->removeOutputTrack((MixerThread *)thread.get());
+ if (playbackThread->type() == ThreadBase::MIXER) {
+ for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+ if (mPlaybackThreads.valueAt(i)->isDuplicating()) {
+ DuplicatingThread *dupThread =
+ (DuplicatingThread *)mPlaybackThreads.valueAt(i).get();
+ dupThread->removeOutputTrack((MixerThread *)playbackThread.get());
+ }
}
}
- }
- mPlaybackThreads.removeItem(output);
- // save all effects to the default thread
- if (mPlaybackThreads.size()) {
- PlaybackThread *dstThread = checkPlaybackThread_l(mPlaybackThreads.keyAt(0));
- if (dstThread != NULL) {
- // audioflinger lock is held here so the acquisition order of thread locks does not
- // matter
- Mutex::Autolock _dl(dstThread->mLock);
- Mutex::Autolock _sl(thread->mLock);
- Vector< sp<EffectChain> > effectChains = thread->getEffectChains_l();
- for (size_t i = 0; i < effectChains.size(); i ++) {
- moveEffectChain_l(effectChains[i]->sessionId(), thread.get(), dstThread, true);
+ mPlaybackThreads.removeItem(output);
+ // save all effects to the default thread
+ if (mPlaybackThreads.size()) {
+ PlaybackThread *dstThread = checkPlaybackThread_l(mPlaybackThreads.keyAt(0));
+ if (dstThread != NULL) {
+ // audioflinger lock is held so order of thread lock acquisition doesn't matter
+ Mutex::Autolock _dl(dstThread->mLock);
+ Mutex::Autolock _sl(playbackThread->mLock);
+ Vector< sp<EffectChain> > effectChains = playbackThread->getEffectChains_l();
+ for (size_t i = 0; i < effectChains.size(); i ++) {
+ moveEffectChain_l(effectChains[i]->sessionId(), playbackThread.get(),
+ dstThread, true);
+ }
}
}
+ } else {
+ mmapThread = (MmapPlaybackThread *)checkMmapThread_l(output);
+ if (mmapThread == 0) {
+ return BAD_VALUE;
+ }
+ mMmapThreads.removeItem(output);
+ ALOGD("closing mmapThread %p", mmapThread.get());
}
const sp<AudioIoDescriptor> ioDesc = new AudioIoDescriptor();
ioDesc->mIoHandle = output;
ioConfigChanged(AUDIO_OUTPUT_CLOSED, ioDesc);
}
- thread->exit();
// The thread entity (active unit of execution) is no longer running here,
// but the ThreadBase container still exists.
- if (!thread->isDuplicating()) {
- closeOutputFinish(thread);
+ if (playbackThread != 0) {
+ playbackThread->exit();
+ if (!playbackThread->isDuplicating()) {
+ closeOutputFinish(playbackThread);
+ }
+ } else if (mmapThread != 0) {
+ ALOGD("mmapThread exit()");
+ mmapThread->exit();
+ AudioStreamOut *out = mmapThread->clearOutput();
+ ALOG_ASSERT(out != NULL, "out shouldn't be NULL");
+ // from now on thread->mOutput is NULL
+ delete out;
}
-
return NO_ERROR;
}
@@ -2032,7 +2167,6 @@
AudioStreamOut *out = thread->clearOutput();
ALOG_ASSERT(out != NULL, "out shouldn't be NULL");
// from now on thread->mOutput is NULL
- out->hwDev()->close_output_stream(out->hwDev(), out->stream);
delete out;
}
@@ -2088,7 +2222,7 @@
return BAD_VALUE;
}
- sp<RecordThread> thread = openInput_l(module, input, config, *devices, address, source, flags);
+ sp<ThreadBase> thread = openInput_l(module, input, config, *devices, address, source, flags);
if (thread != 0) {
// notify client processes of the new input creation
@@ -2098,7 +2232,7 @@
return NO_INIT;
}
-sp<AudioFlinger::RecordThread> AudioFlinger::openInput_l(audio_module_handle_t module,
+sp<AudioFlinger::ThreadBase> AudioFlinger::openInput_l(audio_module_handle_t module,
audio_io_handle_t *input,
audio_config_t *config,
audio_devices_t devices,
@@ -2127,13 +2261,14 @@
}
audio_config_t halconfig = *config;
- audio_hw_device_t *inHwHal = inHwDev->hwDevice();
- audio_stream_in_t *inStream = NULL;
- status_t status = inHwHal->open_input_stream(inHwHal, *input, devices, &halconfig,
- &inStream, flags, address.string(), source);
- ALOGV("openInput_l() openInputStream returned input %p, SamplingRate %d"
+ sp<DeviceHalInterface> inHwHal = inHwDev->hwDevice();
+ sp<StreamInHalInterface> inStream;
+ status_t status = inHwHal->openInputStream(
+ *input, devices, &halconfig, flags, address.string(), source, &inStream);
+ ALOGV("openInput_l() openInputStream returned input %p, devices %x, SamplingRate %d"
", Format %#x, Channels %x, flags %#x, status %d addr %s",
- inStream,
+ inStream.get(),
+ devices,
halconfig.sample_rate,
halconfig.format,
halconfig.channel_mask,
@@ -2150,81 +2285,90 @@
(audio_channel_count_from_in_mask(config->channel_mask) <= FCC_8)) {
// FIXME describe the change proposed by HAL (save old values so we can log them here)
ALOGV("openInput_l() reopening with proposed sampling rate and channel mask");
- inStream = NULL;
- status = inHwHal->open_input_stream(inHwHal, *input, devices, &halconfig,
- &inStream, flags, address.string(), source);
+ inStream.clear();
+ status = inHwHal->openInputStream(
+ *input, devices, &halconfig, flags, address.string(), source, &inStream);
// FIXME log this new status; HAL should not propose any further changes
}
- if (status == NO_ERROR && inStream != NULL) {
-
-#ifdef TEE_SINK
- // Try to re-use most recently used Pipe to archive a copy of input for dumpsys,
- // or (re-)create if current Pipe is idle and does not match the new format
- sp<NBAIO_Sink> teeSink;
- enum {
- TEE_SINK_NO, // don't copy input
- TEE_SINK_NEW, // copy input using a new pipe
- TEE_SINK_OLD, // copy input using an existing pipe
- } kind;
- NBAIO_Format format = Format_from_SR_C(halconfig.sample_rate,
- audio_channel_count_from_in_mask(halconfig.channel_mask), halconfig.format);
- if (!mTeeSinkInputEnabled) {
- kind = TEE_SINK_NO;
- } else if (!Format_isValid(format)) {
- kind = TEE_SINK_NO;
- } else if (mRecordTeeSink == 0) {
- kind = TEE_SINK_NEW;
- } else if (mRecordTeeSink->getStrongCount() != 1) {
- kind = TEE_SINK_NO;
- } else if (Format_isEqual(format, mRecordTeeSink->format())) {
- kind = TEE_SINK_OLD;
- } else {
- kind = TEE_SINK_NEW;
- }
- switch (kind) {
- case TEE_SINK_NEW: {
- Pipe *pipe = new Pipe(mTeeSinkInputFrames, format);
- size_t numCounterOffers = 0;
- const NBAIO_Format offers[1] = {format};
- ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers);
- ALOG_ASSERT(index == 0);
- PipeReader *pipeReader = new PipeReader(*pipe);
- numCounterOffers = 0;
- index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers);
- ALOG_ASSERT(index == 0);
- mRecordTeeSink = pipe;
- mRecordTeeSource = pipeReader;
- teeSink = pipe;
- }
- break;
- case TEE_SINK_OLD:
- teeSink = mRecordTeeSink;
- break;
- case TEE_SINK_NO:
- default:
- break;
- }
-#endif
-
+ if (status == NO_ERROR && inStream != 0) {
AudioStreamIn *inputStream = new AudioStreamIn(inHwDev, inStream, flags);
-
- // Start record thread
- // RecordThread requires both input and output device indication to forward to audio
- // pre processing modules
- sp<RecordThread> thread = new RecordThread(this,
- inputStream,
- *input,
- primaryOutputDevice_l(),
- devices,
- mSystemReady
+ if ((flags & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0) {
+ sp<MmapCaptureThread> thread =
+ new MmapCaptureThread(this, *input,
+ inHwDev, inputStream,
+ primaryOutputDevice_l(), devices, mSystemReady);
+ mMmapThreads.add(*input, thread);
+ ALOGV("openInput_l() created mmap capture thread: ID %d thread %p", *input,
+ thread.get());
+ return thread;
+ } else {
#ifdef TEE_SINK
- , teeSink
+ // Try to re-use most recently used Pipe to archive a copy of input for dumpsys,
+ // or (re-)create if current Pipe is idle and does not match the new format
+ sp<NBAIO_Sink> teeSink;
+ enum {
+ TEE_SINK_NO, // don't copy input
+ TEE_SINK_NEW, // copy input using a new pipe
+ TEE_SINK_OLD, // copy input using an existing pipe
+ } kind;
+ NBAIO_Format format = Format_from_SR_C(halconfig.sample_rate,
+ audio_channel_count_from_in_mask(halconfig.channel_mask), halconfig.format);
+ if (!mTeeSinkInputEnabled) {
+ kind = TEE_SINK_NO;
+ } else if (!Format_isValid(format)) {
+ kind = TEE_SINK_NO;
+ } else if (mRecordTeeSink == 0) {
+ kind = TEE_SINK_NEW;
+ } else if (mRecordTeeSink->getStrongCount() != 1) {
+ kind = TEE_SINK_NO;
+ } else if (Format_isEqual(format, mRecordTeeSink->format())) {
+ kind = TEE_SINK_OLD;
+ } else {
+ kind = TEE_SINK_NEW;
+ }
+ switch (kind) {
+ case TEE_SINK_NEW: {
+ Pipe *pipe = new Pipe(mTeeSinkInputFrames, format);
+ size_t numCounterOffers = 0;
+ const NBAIO_Format offers[1] = {format};
+ ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers);
+ ALOG_ASSERT(index == 0);
+ PipeReader *pipeReader = new PipeReader(*pipe);
+ numCounterOffers = 0;
+ index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers);
+ ALOG_ASSERT(index == 0);
+ mRecordTeeSink = pipe;
+ mRecordTeeSource = pipeReader;
+ teeSink = pipe;
+ }
+ break;
+ case TEE_SINK_OLD:
+ teeSink = mRecordTeeSink;
+ break;
+ case TEE_SINK_NO:
+ default:
+ break;
+ }
#endif
- );
- mRecordThreads.add(*input, thread);
- ALOGV("openInput_l() created record thread: ID %d thread %p", *input, thread.get());
- return thread;
+
+ // Start record thread
+ // RecordThread requires both input and output device indication to forward to audio
+ // pre processing modules
+ sp<RecordThread> thread = new RecordThread(this,
+ inputStream,
+ *input,
+ primaryOutputDevice_l(),
+ devices,
+ mSystemReady
+#ifdef TEE_SINK
+ , teeSink
+#endif
+ );
+ mRecordThreads.add(*input, thread);
+ ALOGV("openInput_l() created record thread: ID %d thread %p", *input, thread.get());
+ return thread;
+ }
}
*input = AUDIO_IO_HANDLE_NONE;
@@ -2240,60 +2384,73 @@
{
// keep strong reference on the record thread so that
// it is not destroyed while exit() is executed
- sp<RecordThread> thread;
+ sp<RecordThread> recordThread;
+ sp<MmapCaptureThread> mmapThread;
{
Mutex::Autolock _l(mLock);
- thread = checkRecordThread_l(input);
- if (thread == 0) {
- return BAD_VALUE;
- }
+ recordThread = checkRecordThread_l(input);
+ if (recordThread != 0) {
+ ALOGV("closeInput() %d", input);
- ALOGV("closeInput() %d", input);
-
- // If we still have effect chains, it means that a client still holds a handle
- // on at least one effect. We must either move the chain to an existing thread with the
- // same session ID or put it aside in case a new record thread is opened for a
- // new capture on the same session
- sp<EffectChain> chain;
- {
- Mutex::Autolock _sl(thread->mLock);
- Vector< sp<EffectChain> > effectChains = thread->getEffectChains_l();
- // Note: maximum one chain per record thread
- if (effectChains.size() != 0) {
- chain = effectChains[0];
- }
- }
- if (chain != 0) {
- // first check if a record thread is already opened with a client on the same session.
- // This should only happen in case of overlap between one thread tear down and the
- // creation of its replacement
- size_t i;
- for (i = 0; i < mRecordThreads.size(); i++) {
- sp<RecordThread> t = mRecordThreads.valueAt(i);
- if (t == thread) {
- continue;
- }
- if (t->hasAudioSession(chain->sessionId()) != 0) {
- Mutex::Autolock _l(t->mLock);
- ALOGV("closeInput() found thread %d for effect session %d",
- t->id(), chain->sessionId());
- t->addEffectChain_l(chain);
- break;
+ // If we still have effect chains, it means that a client still holds a handle
+ // on at least one effect. We must either move the chain to an existing thread with the
+ // same session ID or put it aside in case a new record thread is opened for a
+ // new capture on the same session
+ sp<EffectChain> chain;
+ {
+ Mutex::Autolock _sl(recordThread->mLock);
+ Vector< sp<EffectChain> > effectChains = recordThread->getEffectChains_l();
+ // Note: maximum one chain per record thread
+ if (effectChains.size() != 0) {
+ chain = effectChains[0];
}
}
- // put the chain aside if we could not find a record thread with the same session id.
- if (i == mRecordThreads.size()) {
- putOrphanEffectChain_l(chain);
+ if (chain != 0) {
+ // first check if a record thread is already opened with a client on same session.
+ // This should only happen in case of overlap between one thread tear down and the
+ // creation of its replacement
+ size_t i;
+ for (i = 0; i < mRecordThreads.size(); i++) {
+ sp<RecordThread> t = mRecordThreads.valueAt(i);
+ if (t == recordThread) {
+ continue;
+ }
+ if (t->hasAudioSession(chain->sessionId()) != 0) {
+ Mutex::Autolock _l(t->mLock);
+ ALOGV("closeInput() found thread %d for effect session %d",
+ t->id(), chain->sessionId());
+ t->addEffectChain_l(chain);
+ break;
+ }
+ }
+ // put the chain aside if we could not find a record thread with the same session id
+ if (i == mRecordThreads.size()) {
+ putOrphanEffectChain_l(chain);
+ }
}
+ mRecordThreads.removeItem(input);
+ } else {
+ mmapThread = (MmapCaptureThread *)checkMmapThread_l(input);
+ if (mmapThread == 0) {
+ return BAD_VALUE;
+ }
+ mMmapThreads.removeItem(input);
}
const sp<AudioIoDescriptor> ioDesc = new AudioIoDescriptor();
ioDesc->mIoHandle = input;
ioConfigChanged(AUDIO_INPUT_CLOSED, ioDesc);
- mRecordThreads.removeItem(input);
}
// FIXME: calling thread->exit() without mLock held should not be needed anymore now that
// we have a different lock for notification client
- closeInputFinish(thread);
+ if (recordThread != 0) {
+ closeInputFinish(recordThread);
+ } else if (mmapThread != 0) {
+ mmapThread->exit();
+ AudioStreamIn *in = mmapThread->clearInput();
+ ALOG_ASSERT(in != NULL, "in shouldn't be NULL");
+ // from now on thread->mInput is NULL
+ delete in;
+ }
return NO_ERROR;
}
@@ -2303,7 +2460,6 @@
AudioStreamIn *in = thread->clearInput();
ALOG_ASSERT(in != NULL, "in shouldn't be NULL");
// from now on thread->mInput is NULL
- in->hwDev()->close_input_stream(in->hwDev(), in->stream);
delete in;
}
@@ -2322,7 +2478,9 @@
PlaybackThread *thread = mPlaybackThreads.valueAt(i).get();
thread->invalidateTracks(stream);
}
-
+ for (size_t i = 0; i < mMmapThreads.size(); i++) {
+ mMmapThreads[i]->invalidateTracks(stream);
+ }
return NO_ERROR;
}
@@ -2472,16 +2630,18 @@
// checkThread_l() must be called with AudioFlinger::mLock held
AudioFlinger::ThreadBase *AudioFlinger::checkThread_l(audio_io_handle_t ioHandle) const
{
- ThreadBase *thread = NULL;
- switch (audio_unique_id_get_use(ioHandle)) {
- case AUDIO_UNIQUE_ID_USE_OUTPUT:
- thread = checkPlaybackThread_l(ioHandle);
- break;
- case AUDIO_UNIQUE_ID_USE_INPUT:
- thread = checkRecordThread_l(ioHandle);
- break;
- default:
- break;
+ ThreadBase *thread = checkMmapThread_l(ioHandle);
+ if (thread == 0) {
+ switch (audio_unique_id_get_use(ioHandle)) {
+ case AUDIO_UNIQUE_ID_USE_OUTPUT:
+ thread = checkPlaybackThread_l(ioHandle);
+ break;
+ case AUDIO_UNIQUE_ID_USE_INPUT:
+ thread = checkRecordThread_l(ioHandle);
+ break;
+ default:
+ break;
+ }
}
return thread;
}
@@ -2505,6 +2665,46 @@
return mRecordThreads.valueFor(input).get();
}
+// checkMmapThread_l() must be called with AudioFlinger::mLock held
+AudioFlinger::MmapThread *AudioFlinger::checkMmapThread_l(audio_io_handle_t io) const
+{
+ return mMmapThreads.valueFor(io).get();
+}
+
+
+// checkPlaybackThread_l() must be called with AudioFlinger::mLock held
+AudioFlinger::VolumeInterface *AudioFlinger::getVolumeInterface_l(audio_io_handle_t output) const
+{
+ VolumeInterface *volumeInterface = mPlaybackThreads.valueFor(output).get();
+ if (volumeInterface == nullptr) {
+ MmapThread *mmapThread = mMmapThreads.valueFor(output).get();
+ if (mmapThread != nullptr) {
+ if (mmapThread->isOutput()) {
+ MmapPlaybackThread *mmapPlaybackThread =
+ static_cast<MmapPlaybackThread *>(mmapThread);
+ volumeInterface = mmapPlaybackThread;
+ }
+ }
+ }
+ return volumeInterface;
+}
+
+Vector <AudioFlinger::VolumeInterface *> AudioFlinger::getAllVolumeInterfaces_l() const
+{
+ Vector <VolumeInterface *> volumeInterfaces;
+ for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+ volumeInterfaces.add(mPlaybackThreads.valueAt(i).get());
+ }
+ for (size_t i = 0; i < mMmapThreads.size(); i++) {
+ if (mMmapThreads.valueAt(i)->isOutput()) {
+ MmapPlaybackThread *mmapPlaybackThread =
+ static_cast<MmapPlaybackThread *>(mMmapThreads.valueAt(i).get());
+ volumeInterfaces.add(mmapPlaybackThread);
+ }
+ }
+ return volumeInterfaces;
+}
+
audio_unique_id_t AudioFlinger::nextUniqueId(audio_unique_id_use_t use)
{
// This is the internal API, so it is OK to assert on bad parameter.
@@ -2607,24 +2807,39 @@
// Effect management
// ----------------------------------------------------------------------------
+sp<EffectsFactoryHalInterface> AudioFlinger::getEffectsFactory() {
+ return mEffectsFactoryHal;
+}
status_t AudioFlinger::queryNumberEffects(uint32_t *numEffects) const
{
Mutex::Autolock _l(mLock);
- return EffectQueryNumberEffects(numEffects);
+ if (mEffectsFactoryHal.get()) {
+ return mEffectsFactoryHal->queryNumberEffects(numEffects);
+ } else {
+ return -ENODEV;
+ }
}
status_t AudioFlinger::queryEffect(uint32_t index, effect_descriptor_t *descriptor) const
{
Mutex::Autolock _l(mLock);
- return EffectQueryEffect(index, descriptor);
+ if (mEffectsFactoryHal.get()) {
+ return mEffectsFactoryHal->getDescriptor(index, descriptor);
+ } else {
+ return -ENODEV;
+ }
}
status_t AudioFlinger::getEffectDescriptor(const effect_uuid_t *pUuid,
effect_descriptor_t *descriptor) const
{
Mutex::Autolock _l(mLock);
- return EffectGetDescriptor(pUuid, descriptor);
+ if (mEffectsFactoryHal.get()) {
+ return mEffectsFactoryHal->getDescriptor(pUuid, descriptor);
+ } else {
+ return -ENODEV;
+ }
}
@@ -2635,6 +2850,7 @@
audio_io_handle_t io,
audio_session_t sessionId,
const String16& opPackageName,
+ pid_t pid,
status_t *status,
int *id,
int *enabled)
@@ -2643,9 +2859,17 @@
sp<EffectHandle> handle;
effect_descriptor_t desc;
- pid_t pid = IPCThreadState::self()->getCallingPid();
- ALOGV("createEffect pid %d, effectClient %p, priority %d, sessionId %d, io %d",
- pid, effectClient.get(), priority, sessionId, io);
+ const uid_t callingUid = IPCThreadState::self()->getCallingUid();
+ if (pid == -1 || !isTrustedCallingUid(callingUid)) {
+ const pid_t callingPid = IPCThreadState::self()->getCallingPid();
+ ALOGW_IF(pid != -1 && pid != callingPid,
+ "%s uid %d pid %d tried to pass itself off as pid %d",
+ __func__, callingUid, callingPid, pid);
+ pid = callingPid;
+ }
+
+ ALOGV("createEffect pid %d, effectClient %p, priority %d, sessionId %d, io %d, factory %p",
+ pid, effectClient.get(), priority, sessionId, io, mEffectsFactoryHal.get());
if (pDesc == NULL) {
lStatus = BAD_VALUE;
@@ -2665,10 +2889,15 @@
goto Exit;
}
+ if (mEffectsFactoryHal == 0) {
+ lStatus = NO_INIT;
+ goto Exit;
+ }
+
{
- if (!EffectIsNullUuid(&pDesc->uuid)) {
+ if (!EffectsFactoryHalInterface::isNullUuid(&pDesc->uuid)) {
// if uuid is specified, request effect descriptor
- lStatus = EffectGetDescriptor(&pDesc->uuid, &desc);
+ lStatus = mEffectsFactoryHal->getDescriptor(&pDesc->uuid, &desc);
if (lStatus < 0) {
ALOGW("createEffect() error %d from EffectGetDescriptor", lStatus);
goto Exit;
@@ -2676,7 +2905,7 @@
} else {
// if uuid is not specified, look for an available implementation
// of the required type in effect factory
- if (EffectIsNullUuid(&pDesc->type)) {
+ if (EffectsFactoryHalInterface::isNullUuid(&pDesc->type)) {
ALOGW("createEffect() no effect type");
lStatus = BAD_VALUE;
goto Exit;
@@ -2686,13 +2915,13 @@
d.flags = 0; // prevent compiler warning
bool found = false;
- lStatus = EffectQueryNumberEffects(&numEffects);
+ lStatus = mEffectsFactoryHal->queryNumberEffects(&numEffects);
if (lStatus < 0) {
ALOGW("createEffect() error %d from EffectQueryNumberEffects", lStatus);
goto Exit;
}
for (uint32_t i = 0; i < numEffects; i++) {
- lStatus = EffectQueryEffect(i, &desc);
+ lStatus = mEffectsFactoryHal->getDescriptor(i, &desc);
if (lStatus < 0) {
ALOGW("createEffect() error %d from EffectQueryEffect", lStatus);
continue;
@@ -2767,7 +2996,7 @@
break;
}
}
- if (io == 0) {
+ if (io == AUDIO_IO_HANDLE_NONE) {
for (size_t i = 0; i < mRecordThreads.size(); i++) {
if (mRecordThreads.valueAt(i)->hasAudioSession(sessionId) != 0) {
io = mRecordThreads.keyAt(i);
@@ -2775,6 +3004,14 @@
}
}
}
+ if (io == AUDIO_IO_HANDLE_NONE) {
+ for (size_t i = 0; i < mMmapThreads.size(); i++) {
+ if (mMmapThreads.valueAt(i)->hasAudioSession(sessionId) != 0) {
+ io = mMmapThreads.keyAt(i);
+ break;
+ }
+ }
+ }
// If no output thread contains the requested session ID, default to
// first output. The effect chain will be moved to the correct output
// thread when a track with the same session ID is created
@@ -2787,9 +3024,12 @@
if (thread == NULL) {
thread = checkPlaybackThread_l(io);
if (thread == NULL) {
- ALOGE("createEffect() unknown output thread");
- lStatus = BAD_VALUE;
- goto Exit;
+ thread = checkMmapThread_l(io);
+ if (thread == NULL) {
+ ALOGE("createEffect() unknown output thread");
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
}
} else {
// Check if one effect chain was awaiting for an effect to be created on this
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 8f5a7cd..75b4e4c 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -19,11 +19,14 @@
#define ANDROID_AUDIO_FLINGER_H
#include "Configuration.h"
+#include <deque>
+#include <map>
#include <stdint.h>
#include <sys/types.h>
#include <limits.h>
#include <cutils/compiler.h>
+#include <cutils/properties.h>
#include <media/IAudioFlinger.h>
#include <media/IAudioFlingerClient.h>
@@ -31,6 +34,8 @@
#include <media/IAudioRecord.h>
#include <media/AudioSystem.h>
#include <media/AudioTrack.h>
+#include <media/MmapStreamInterface.h>
+#include <media/MmapStreamCallback.h>
#include <utils/Atomic.h>
#include <utils/Errors.h>
@@ -43,21 +48,25 @@
#include <binder/MemoryDealer.h>
#include <system/audio.h>
-#include <hardware/audio.h>
-#include <hardware/audio_policy.h>
+#include <system/audio_policy.h>
+#include <media/audiohal/EffectBufferHalInterface.h>
+#include <media/audiohal/StreamHalInterface.h>
#include <media/AudioBufferProvider.h>
+#include <media/AudioMixer.h>
#include <media/ExtendedAudioBufferProvider.h>
+#include <media/LinearMap.h>
+#include <media/VolumeShaper.h>
+
+#include <audio_utils/SimpleLog.h>
#include "FastCapture.h"
#include "FastMixer.h"
#include <media/nbaio/NBAIO.h>
#include "AudioWatchdog.h"
-#include "AudioMixer.h"
#include "AudioStreamOut.h"
#include "SpdifStreamOut.h"
#include "AudioHwDevice.h"
-#include "LinearMap.h"
#include <powermanager/IPowerManager.h>
@@ -71,8 +80,12 @@
class AudioMixer;
class AudioBuffer;
class AudioResampler;
+class DeviceHalInterface;
+class DevicesFactoryHalInterface;
+class EffectsFactoryHalInterface;
class FastMixer;
class PassthruBufferProvider;
+class RecordBufferConverter;
class ServerProxy;
// ----------------------------------------------------------------------------
@@ -92,6 +105,7 @@
public BnAudioFlinger
{
friend class BinderService<AudioFlinger>; // for AudioFlinger()
+
public:
static const char* getServiceName() ANDROID_API { return "media.audio_flinger"; }
@@ -111,7 +125,8 @@
pid_t tid,
audio_session_t *sessionId,
int clientUid,
- status_t *status /*non-NULL*/);
+ status_t *status /*non-NULL*/,
+ audio_port_handle_t portId);
virtual sp<IAudioRecord> openRecord(
audio_io_handle_t input,
@@ -128,7 +143,8 @@
size_t *notificationFrames,
sp<IMemory>& cblk,
sp<IMemory>& buffers,
- status_t *status /*non-NULL*/);
+ status_t *status /*non-NULL*/,
+ audio_port_handle_t portId);
virtual uint32_t sampleRate(audio_io_handle_t ioHandle) const;
virtual audio_format_t format(audio_io_handle_t output) const;
@@ -220,6 +236,7 @@
audio_io_handle_t io,
audio_session_t sessionId,
const String16& opPackageName,
+ pid_t pid,
status_t *status /*non-NULL*/,
int *id,
int *enabled);
@@ -271,6 +288,15 @@
sp<NBLog::Writer> newWriter_l(size_t size, const char *name);
void unregisterWriter(const sp<NBLog::Writer>& writer);
+ sp<EffectsFactoryHalInterface> getEffectsFactory();
+
+ status_t openMmapStream(MmapStreamInterface::stream_direction_t direction,
+ const audio_attributes_t *attr,
+ audio_config_base_t *config,
+ const MmapStreamInterface::Client& client,
+ audio_port_handle_t *deviceId,
+ const sp<MmapStreamCallback>& callback,
+ sp<MmapStreamInterface>& interface);
private:
static const size_t kLogMemorySize = 40 * 1024;
sp<MemoryDealer> mLogMemoryDealer; // == 0 when NBLog is disabled
@@ -278,6 +304,7 @@
// for as long as possible. The memory is only freed when it is needed for another log writer.
Vector< sp<NBLog::Writer> > mUnregisteredWriters;
Mutex mUnregisteredWritersLock;
+
public:
class SyncEvent;
@@ -447,6 +474,38 @@
const sp<IAudioFlingerClient> mAudioFlingerClient;
};
+ // --- MediaLogNotifier ---
+ // Thread in charge of notifying MediaLogService to start merging.
+ // Receives requests from AudioFlinger's binder activity. It is used to reduce the amount of
+ // binder calls to MediaLogService in case of bursts of AudioFlinger binder calls.
+ class MediaLogNotifier : public Thread {
+ public:
+ MediaLogNotifier();
+
+ // Requests a MediaLogService notification. It's ignored if there has recently been another
+ void requestMerge();
+ private:
+ // Every iteration blocks waiting for a request, then interacts with MediaLogService to
+ // start merging.
+ // As every MediaLogService binder call is expensive, once it gets a request it ignores the
+ // following ones for a period of time.
+ virtual bool threadLoop() override;
+
+ bool mPendingRequests;
+
+ // Mutex and condition variable around mPendingRequests' value
+ Mutex mMutex;
+ Condition mCond;
+
+ // Duration of the sleep period after a processed request
+ static const int kPostTriggerSleepPeriod = 1000000;
+ };
+
+ const sp<MediaLogNotifier> mMediaLogNotifier;
+
+ // This is a helper that is called during incoming binder calls.
+ void requestLogMerge();
+
class TrackHandle;
class RecordHandle;
class RecordThread;
@@ -494,6 +553,10 @@
virtual void pause();
virtual status_t attachAuxEffect(int effectId);
virtual status_t setParameters(const String8& keyValuePairs);
+ virtual VolumeShaper::Status applyVolumeShaper(
+ const sp<VolumeShaper::Configuration>& configuration,
+ const sp<VolumeShaper::Operation>& operation) override;
+ virtual sp<VolumeShaper::State> getVolumeShaperState(int id) override;
virtual status_t getTimestamp(AudioTimestamp& timestamp);
virtual void signal(); // signal playback thread for a change in control block
@@ -521,19 +584,42 @@
void stop_nonvirtual();
};
+ // Mmap stream control interface implementation. Each MmapThreadHandle controls one
+ // MmapPlaybackThread or MmapCaptureThread instance.
+ class MmapThreadHandle : public MmapStreamInterface {
+ public:
+ explicit MmapThreadHandle(const sp<MmapThread>& thread);
+ virtual ~MmapThreadHandle();
+
+ // MmapStreamInterface virtuals
+ virtual status_t createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info);
+ virtual status_t getMmapPosition(struct audio_mmap_position *position);
+ virtual status_t start(const MmapStreamInterface::Client& client,
+ audio_port_handle_t *handle);
+ virtual status_t stop(audio_port_handle_t handle);
+ virtual status_t standby();
+
+ private:
+ sp<MmapThread> mThread;
+ };
ThreadBase *checkThread_l(audio_io_handle_t ioHandle) const;
PlaybackThread *checkPlaybackThread_l(audio_io_handle_t output) const;
MixerThread *checkMixerThread_l(audio_io_handle_t output) const;
RecordThread *checkRecordThread_l(audio_io_handle_t input) const;
- sp<RecordThread> openInput_l(audio_module_handle_t module,
+ MmapThread *checkMmapThread_l(audio_io_handle_t io) const;
+ VolumeInterface *getVolumeInterface_l(audio_io_handle_t output) const;
+ Vector <VolumeInterface *> getAllVolumeInterfaces_l() const;
+
+ sp<ThreadBase> openInput_l(audio_module_handle_t module,
audio_io_handle_t *input,
audio_config_t *config,
audio_devices_t device,
const String8& address,
audio_source_t source,
audio_input_flags_t flags);
- sp<PlaybackThread> openOutput_l(audio_module_handle_t module,
+ sp<ThreadBase> openOutput_l(audio_module_handle_t module,
audio_io_handle_t *output,
audio_config_t *config,
audio_devices_t devices,
@@ -612,12 +698,12 @@
struct AudioStreamIn {
AudioHwDevice* const audioHwDev;
- audio_stream_in_t* const stream;
+ sp<StreamInHalInterface> stream;
audio_input_flags_t flags;
- audio_hw_device_t* hwDev() const { return audioHwDev->hwDevice(); }
+ sp<DeviceHalInterface> hwDev() const { return audioHwDev->hwDevice(); }
- AudioStreamIn(AudioHwDevice *dev, audio_stream_in_t *in, audio_input_flags_t flags) :
+ AudioStreamIn(AudioHwDevice *dev, sp<StreamInHalInterface> in, audio_input_flags_t flags) :
audioHwDev(dev), stream(in), flags(flags) {}
};
@@ -646,6 +732,8 @@
AudioHwDevice* mPrimaryHardwareDev; // mAudioHwDevs[0] or NULL
DefaultKeyedVector<audio_module_handle_t, AudioHwDevice*> mAudioHwDevs;
+ sp<DevicesFactoryHalInterface> mDevicesFactoryHal;
+
// for dump, indicates which hardware operation is currently in progress (but not stream ops)
enum hardware_call_state {
AUDIO_HW_IDLE = 0, // no operation in progress
@@ -708,6 +796,12 @@
// list of sessions for which a valid HW A/V sync ID was retrieved from the HAL
DefaultKeyedVector< audio_session_t , audio_hw_sync_t >mHwAvSyncIds;
+
+ // list of MMAP stream control threads. Those threads allow for wake lock, routing
+ // and volume control for activity on the associated MMAP stream at the HAL.
+ // Audio data transfer is directly handled by the client creating the MMAP stream
+ DefaultKeyedVector< audio_io_handle_t, sp<MmapThread> > mMmapThreads;
+
private:
sp<Client> registerPid(pid_t pid); // always returns non-0
@@ -761,16 +855,17 @@
nsecs_t mGlobalEffectEnableTime; // when a global effect was last enabled
sp<PatchPanel> mPatchPanel;
+ sp<EffectsFactoryHalInterface> mEffectsFactoryHal;
bool mSystemReady;
};
#undef INCLUDING_FROM_AUDIOFLINGER_H
-const char *formatToString(audio_format_t format);
-String8 inputFlagsToString(audio_input_flags_t flags);
-String8 outputFlagsToString(audio_output_flags_t flags);
-String8 devicesToString(audio_devices_t devices);
+std::string formatToString(audio_format_t format);
+std::string inputFlagsToString(audio_input_flags_t flags);
+std::string outputFlagsToString(audio_output_flags_t flags);
+std::string devicesToString(audio_devices_t devices);
const char *sourceToString(audio_source_t source);
// ----------------------------------------------------------------------------
diff --git a/services/audioflinger/AudioHwDevice.cpp b/services/audioflinger/AudioHwDevice.cpp
index 7494930..b109d06 100644
--- a/services/audioflinger/AudioHwDevice.cpp
+++ b/services/audioflinger/AudioHwDevice.cpp
@@ -18,7 +18,7 @@
#define LOG_TAG "AudioHwDevice"
//#define LOG_NDEBUG 0
-#include <hardware/audio.h>
+#include <system/audio.h>
#include <utils/Log.h>
#include <audio_utils/spdif/SPDIFEncoder.h>
@@ -93,5 +93,10 @@
return status;
}
+bool AudioHwDevice::supportsAudioPatches() const {
+ bool result;
+ return mHwDevice->supportsAudioPatches(&result) == OK ? result : false;
+}
+
}; // namespace android
diff --git a/services/audioflinger/AudioHwDevice.h b/services/audioflinger/AudioHwDevice.h
index b9f65c1..eb826c6 100644
--- a/services/audioflinger/AudioHwDevice.h
+++ b/services/audioflinger/AudioHwDevice.h
@@ -22,11 +22,10 @@
#include <stdlib.h>
#include <sys/types.h>
-#include <hardware/audio.h>
+#include <media/audiohal/DeviceHalInterface.h>
#include <utils/Errors.h>
#include <system/audio.h>
-
namespace android {
class AudioStreamOut;
@@ -40,7 +39,7 @@
AudioHwDevice(audio_module_handle_t handle,
const char *moduleName,
- audio_hw_device_t *hwDevice,
+ sp<DeviceHalInterface> hwDevice,
Flags flags)
: mHandle(handle)
, mModuleName(strdup(moduleName))
@@ -58,8 +57,7 @@
audio_module_handle_t handle() const { return mHandle; }
const char *moduleName() const { return mModuleName; }
- audio_hw_device_t *hwDevice() const { return mHwDevice; }
- uint32_t version() const { return mHwDevice->common.version; }
+ sp<DeviceHalInterface> hwDevice() const { return mHwDevice; }
/** This method creates and opens the audio hardware output stream.
* The "address" parameter qualifies the "devices" audio device type if needed.
@@ -76,10 +74,12 @@
struct audio_config *config,
const char *address);
+ bool supportsAudioPatches() const;
+
private:
const audio_module_handle_t mHandle;
const char * const mModuleName;
- audio_hw_device_t * const mHwDevice;
+ sp<DeviceHalInterface> mHwDevice;
const Flags mFlags;
};
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
deleted file mode 100644
index 41065ae..0000000
--- a/services/audioflinger/AudioMixer.cpp
+++ /dev/null
@@ -1,2085 +0,0 @@
-/*
-**
-** Copyright 2007, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-#define LOG_TAG "AudioMixer"
-//#define LOG_NDEBUG 0
-
-#include "Configuration.h"
-#include <stdint.h>
-#include <string.h>
-#include <stdlib.h>
-#include <math.h>
-#include <sys/types.h>
-
-#include <utils/Errors.h>
-#include <utils/Log.h>
-
-#include <cutils/compiler.h>
-#include <utils/Debug.h>
-
-#include <system/audio.h>
-
-#include <audio_utils/primitives.h>
-#include <audio_utils/format.h>
-
-#include "AudioMixerOps.h"
-#include "AudioMixer.h"
-
-// The FCC_2 macro refers to the Fixed Channel Count of 2 for the legacy integer mixer.
-#ifndef FCC_2
-#define FCC_2 2
-#endif
-
-// Look for MONO_HACK for any Mono hack involving legacy mono channel to
-// stereo channel conversion.
-
-/* VERY_VERY_VERBOSE_LOGGING will show exactly which process hook and track hook is
- * being used. This is a considerable amount of log spam, so don't enable unless you
- * are verifying the hook based code.
- */
-//#define VERY_VERY_VERBOSE_LOGGING
-#ifdef VERY_VERY_VERBOSE_LOGGING
-#define ALOGVV ALOGV
-//define ALOGVV printf // for test-mixer.cpp
-#else
-#define ALOGVV(a...) do { } while (0)
-#endif
-
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
-#endif
-
-// TODO: Move these macro/inlines to a header file.
-template <typename T>
-static inline
-T max(const T& x, const T& y) {
- return x > y ? x : y;
-}
-
-// Set kUseNewMixer to true to use the new mixer engine always. Otherwise the
-// original code will be used for stereo sinks, the new mixer for multichannel.
-static const bool kUseNewMixer = true;
-
-// Set kUseFloat to true to allow floating input into the mixer engine.
-// If kUseNewMixer is false, this is ignored or may be overridden internally
-// because of downmix/upmix support.
-static const bool kUseFloat = true;
-
-// Set to default copy buffer size in frames for input processing.
-static const size_t kCopyBufferFrameCount = 256;
-
-namespace android {
-
-// ----------------------------------------------------------------------------
-
-template <typename T>
-T min(const T& a, const T& b)
-{
- return a < b ? a : b;
-}
-
-// ----------------------------------------------------------------------------
-
-// Ensure mConfiguredNames bitmask is initialized properly on all architectures.
-// The value of 1 << x is undefined in C when x >= 32.
-
-AudioMixer::AudioMixer(size_t frameCount, uint32_t sampleRate, uint32_t maxNumTracks)
- : mTrackNames(0), mConfiguredNames((maxNumTracks >= 32 ? 0 : 1 << maxNumTracks) - 1),
- mSampleRate(sampleRate)
-{
- ALOG_ASSERT(maxNumTracks <= MAX_NUM_TRACKS, "maxNumTracks %u > MAX_NUM_TRACKS %u",
- maxNumTracks, MAX_NUM_TRACKS);
-
- // AudioMixer is not yet capable of more than 32 active track inputs
- ALOG_ASSERT(32 >= MAX_NUM_TRACKS, "bad MAX_NUM_TRACKS %d", MAX_NUM_TRACKS);
-
- pthread_once(&sOnceControl, &sInitRoutine);
-
- mState.enabledTracks= 0;
- mState.needsChanged = 0;
- mState.frameCount = frameCount;
- mState.hook = process__nop;
- mState.outputTemp = NULL;
- mState.resampleTemp = NULL;
- mState.mLog = &mDummyLog;
- // mState.reserved
-
- // FIXME Most of the following initialization is probably redundant since
- // tracks[i] should only be referenced if (mTrackNames & (1 << i)) != 0
- // and mTrackNames is initially 0. However, leave it here until that's verified.
- track_t* t = mState.tracks;
- for (unsigned i=0 ; i < MAX_NUM_TRACKS ; i++) {
- t->resampler = NULL;
- t->downmixerBufferProvider = NULL;
- t->mReformatBufferProvider = NULL;
- t->mTimestretchBufferProvider = NULL;
- t++;
- }
-
-}
-
-AudioMixer::~AudioMixer()
-{
- track_t* t = mState.tracks;
- for (unsigned i=0 ; i < MAX_NUM_TRACKS ; i++) {
- delete t->resampler;
- delete t->downmixerBufferProvider;
- delete t->mReformatBufferProvider;
- delete t->mTimestretchBufferProvider;
- t++;
- }
- delete [] mState.outputTemp;
- delete [] mState.resampleTemp;
-}
-
-void AudioMixer::setLog(NBLog::Writer *log)
-{
- mState.mLog = log;
-}
-
-static inline audio_format_t selectMixerInFormat(audio_format_t inputFormat __unused) {
- return kUseFloat && kUseNewMixer ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
-}
-
-int AudioMixer::getTrackName(audio_channel_mask_t channelMask,
- audio_format_t format, int sessionId)
-{
- if (!isValidPcmTrackFormat(format)) {
- ALOGE("AudioMixer::getTrackName invalid format (%#x)", format);
- return -1;
- }
- uint32_t names = (~mTrackNames) & mConfiguredNames;
- if (names != 0) {
- int n = __builtin_ctz(names);
- ALOGV("add track (%d)", n);
- // assume default parameters for the track, except where noted below
- track_t* t = &mState.tracks[n];
- t->needs = 0;
-
- // Integer volume.
- // Currently integer volume is kept for the legacy integer mixer.
- // Will be removed when the legacy mixer path is removed.
- t->volume[0] = UNITY_GAIN_INT;
- t->volume[1] = UNITY_GAIN_INT;
- t->prevVolume[0] = UNITY_GAIN_INT << 16;
- t->prevVolume[1] = UNITY_GAIN_INT << 16;
- t->volumeInc[0] = 0;
- t->volumeInc[1] = 0;
- t->auxLevel = 0;
- t->auxInc = 0;
- t->prevAuxLevel = 0;
-
- // Floating point volume.
- t->mVolume[0] = UNITY_GAIN_FLOAT;
- t->mVolume[1] = UNITY_GAIN_FLOAT;
- t->mPrevVolume[0] = UNITY_GAIN_FLOAT;
- t->mPrevVolume[1] = UNITY_GAIN_FLOAT;
- t->mVolumeInc[0] = 0.;
- t->mVolumeInc[1] = 0.;
- t->mAuxLevel = 0.;
- t->mAuxInc = 0.;
- t->mPrevAuxLevel = 0.;
-
- // no initialization needed
- // t->frameCount
- t->channelCount = audio_channel_count_from_out_mask(channelMask);
- t->enabled = false;
- ALOGV_IF(audio_channel_mask_get_bits(channelMask) != AUDIO_CHANNEL_OUT_STEREO,
- "Non-stereo channel mask: %d\n", channelMask);
- t->channelMask = channelMask;
- t->sessionId = sessionId;
- // setBufferProvider(name, AudioBufferProvider *) is required before enable(name)
- t->bufferProvider = NULL;
- t->buffer.raw = NULL;
- // no initialization needed
- // t->buffer.frameCount
- t->hook = NULL;
- t->in = NULL;
- t->resampler = NULL;
- t->sampleRate = mSampleRate;
- // setParameter(name, TRACK, MAIN_BUFFER, mixBuffer) is required before enable(name)
- t->mainBuffer = NULL;
- t->auxBuffer = NULL;
- t->mInputBufferProvider = NULL;
- t->mReformatBufferProvider = NULL;
- t->downmixerBufferProvider = NULL;
- t->mPostDownmixReformatBufferProvider = NULL;
- t->mTimestretchBufferProvider = NULL;
- t->mMixerFormat = AUDIO_FORMAT_PCM_16_BIT;
- t->mFormat = format;
- t->mMixerInFormat = selectMixerInFormat(format);
- t->mDownmixRequiresFormat = AUDIO_FORMAT_INVALID; // no format required
- t->mMixerChannelMask = audio_channel_mask_from_representation_and_bits(
- AUDIO_CHANNEL_REPRESENTATION_POSITION, AUDIO_CHANNEL_OUT_STEREO);
- t->mMixerChannelCount = audio_channel_count_from_out_mask(t->mMixerChannelMask);
- t->mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
- // Check the downmixing (or upmixing) requirements.
- status_t status = t->prepareForDownmix();
- if (status != OK) {
- ALOGE("AudioMixer::getTrackName invalid channelMask (%#x)", channelMask);
- return -1;
- }
- // prepareForDownmix() may change mDownmixRequiresFormat
- ALOGVV("mMixerFormat:%#x mMixerInFormat:%#x\n", t->mMixerFormat, t->mMixerInFormat);
- t->prepareForReformat();
- mTrackNames |= 1 << n;
- return TRACK0 + n;
- }
- ALOGE("AudioMixer::getTrackName out of available tracks");
- return -1;
-}
-
-void AudioMixer::invalidateState(uint32_t mask)
-{
- if (mask != 0) {
- mState.needsChanged |= mask;
- mState.hook = process__validate;
- }
- }
-
-// Called when channel masks have changed for a track name
-// TODO: Fix DownmixerBufferProvider not to (possibly) change mixer input format,
-// which will simplify this logic.
-bool AudioMixer::setChannelMasks(int name,
- audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask) {
- track_t &track = mState.tracks[name];
-
- if (trackChannelMask == track.channelMask
- && mixerChannelMask == track.mMixerChannelMask) {
- return false; // no need to change
- }
- // always recompute for both channel masks even if only one has changed.
- const uint32_t trackChannelCount = audio_channel_count_from_out_mask(trackChannelMask);
- const uint32_t mixerChannelCount = audio_channel_count_from_out_mask(mixerChannelMask);
- const bool mixerChannelCountChanged = track.mMixerChannelCount != mixerChannelCount;
-
- ALOG_ASSERT((trackChannelCount <= MAX_NUM_CHANNELS_TO_DOWNMIX)
- && trackChannelCount
- && mixerChannelCount);
- track.channelMask = trackChannelMask;
- track.channelCount = trackChannelCount;
- track.mMixerChannelMask = mixerChannelMask;
- track.mMixerChannelCount = mixerChannelCount;
-
- // channel masks have changed, does this track need a downmixer?
- // update to try using our desired format (if we aren't already using it)
- const audio_format_t prevDownmixerFormat = track.mDownmixRequiresFormat;
- const status_t status = mState.tracks[name].prepareForDownmix();
- ALOGE_IF(status != OK,
- "prepareForDownmix error %d, track channel mask %#x, mixer channel mask %#x",
- status, track.channelMask, track.mMixerChannelMask);
-
- if (prevDownmixerFormat != track.mDownmixRequiresFormat) {
- track.prepareForReformat(); // because of downmixer, track format may change!
- }
-
- if (track.resampler && mixerChannelCountChanged) {
- // resampler channels may have changed.
- const uint32_t resetToSampleRate = track.sampleRate;
- delete track.resampler;
- track.resampler = NULL;
- track.sampleRate = mSampleRate; // without resampler, track rate is device sample rate.
- // recreate the resampler with updated format, channels, saved sampleRate.
- track.setResampler(resetToSampleRate /*trackSampleRate*/, mSampleRate /*devSampleRate*/);
- }
- return true;
-}
-
-void AudioMixer::track_t::unprepareForDownmix() {
- ALOGV("AudioMixer::unprepareForDownmix(%p)", this);
-
- mDownmixRequiresFormat = AUDIO_FORMAT_INVALID;
- if (downmixerBufferProvider != NULL) {
- // this track had previously been configured with a downmixer, delete it
- ALOGV(" deleting old downmixer");
- delete downmixerBufferProvider;
- downmixerBufferProvider = NULL;
- reconfigureBufferProviders();
- } else {
- ALOGV(" nothing to do, no downmixer to delete");
- }
-}
-
-status_t AudioMixer::track_t::prepareForDownmix()
-{
- ALOGV("AudioMixer::prepareForDownmix(%p) with mask 0x%x",
- this, channelMask);
-
- // discard the previous downmixer if there was one
- unprepareForDownmix();
- // MONO_HACK Only remix (upmix or downmix) if the track and mixer/device channel masks
- // are not the same and not handled internally, as mono -> stereo currently is.
- if (channelMask == mMixerChannelMask
- || (channelMask == AUDIO_CHANNEL_OUT_MONO
- && mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO)) {
- return NO_ERROR;
- }
- // DownmixerBufferProvider is only used for position masks.
- if (audio_channel_mask_get_representation(channelMask)
- == AUDIO_CHANNEL_REPRESENTATION_POSITION
- && DownmixerBufferProvider::isMultichannelCapable()) {
- DownmixerBufferProvider* pDbp = new DownmixerBufferProvider(channelMask,
- mMixerChannelMask,
- AUDIO_FORMAT_PCM_16_BIT /* TODO: use mMixerInFormat, now only PCM 16 */,
- sampleRate, sessionId, kCopyBufferFrameCount);
-
- if (pDbp->isValid()) { // if constructor completed properly
- mDownmixRequiresFormat = AUDIO_FORMAT_PCM_16_BIT; // PCM 16 bit required for downmix
- downmixerBufferProvider = pDbp;
- reconfigureBufferProviders();
- return NO_ERROR;
- }
- delete pDbp;
- }
-
- // Effect downmixer does not accept the channel conversion. Let's use our remixer.
- RemixBufferProvider* pRbp = new RemixBufferProvider(channelMask,
- mMixerChannelMask, mMixerInFormat, kCopyBufferFrameCount);
- // Remix always finds a conversion whereas Downmixer effect above may fail.
- downmixerBufferProvider = pRbp;
- reconfigureBufferProviders();
- return NO_ERROR;
-}
-
-void AudioMixer::track_t::unprepareForReformat() {
- ALOGV("AudioMixer::unprepareForReformat(%p)", this);
- bool requiresReconfigure = false;
- if (mReformatBufferProvider != NULL) {
- delete mReformatBufferProvider;
- mReformatBufferProvider = NULL;
- requiresReconfigure = true;
- }
- if (mPostDownmixReformatBufferProvider != NULL) {
- delete mPostDownmixReformatBufferProvider;
- mPostDownmixReformatBufferProvider = NULL;
- requiresReconfigure = true;
- }
- if (requiresReconfigure) {
- reconfigureBufferProviders();
- }
-}
-
-status_t AudioMixer::track_t::prepareForReformat()
-{
- ALOGV("AudioMixer::prepareForReformat(%p) with format %#x", this, mFormat);
- // discard previous reformatters
- unprepareForReformat();
- // only configure reformatters as needed
- const audio_format_t targetFormat = mDownmixRequiresFormat != AUDIO_FORMAT_INVALID
- ? mDownmixRequiresFormat : mMixerInFormat;
- bool requiresReconfigure = false;
- if (mFormat != targetFormat) {
- mReformatBufferProvider = new ReformatBufferProvider(
- audio_channel_count_from_out_mask(channelMask),
- mFormat,
- targetFormat,
- kCopyBufferFrameCount);
- requiresReconfigure = true;
- }
- if (targetFormat != mMixerInFormat) {
- mPostDownmixReformatBufferProvider = new ReformatBufferProvider(
- audio_channel_count_from_out_mask(mMixerChannelMask),
- targetFormat,
- mMixerInFormat,
- kCopyBufferFrameCount);
- requiresReconfigure = true;
- }
- if (requiresReconfigure) {
- reconfigureBufferProviders();
- }
- return NO_ERROR;
-}
-
-void AudioMixer::track_t::reconfigureBufferProviders()
-{
- bufferProvider = mInputBufferProvider;
- if (mReformatBufferProvider) {
- mReformatBufferProvider->setBufferProvider(bufferProvider);
- bufferProvider = mReformatBufferProvider;
- }
- if (downmixerBufferProvider) {
- downmixerBufferProvider->setBufferProvider(bufferProvider);
- bufferProvider = downmixerBufferProvider;
- }
- if (mPostDownmixReformatBufferProvider) {
- mPostDownmixReformatBufferProvider->setBufferProvider(bufferProvider);
- bufferProvider = mPostDownmixReformatBufferProvider;
- }
- if (mTimestretchBufferProvider) {
- mTimestretchBufferProvider->setBufferProvider(bufferProvider);
- bufferProvider = mTimestretchBufferProvider;
- }
-}
-
-void AudioMixer::deleteTrackName(int name)
-{
- ALOGV("AudioMixer::deleteTrackName(%d)", name);
- name -= TRACK0;
- LOG_ALWAYS_FATAL_IF(name < 0 || name >= (int)MAX_NUM_TRACKS, "bad track name %d", name);
- ALOGV("deleteTrackName(%d)", name);
- track_t& track(mState.tracks[ name ]);
- if (track.enabled) {
- track.enabled = false;
- invalidateState(1<<name);
- }
- // delete the resampler
- delete track.resampler;
- track.resampler = NULL;
- // delete the downmixer
- mState.tracks[name].unprepareForDownmix();
- // delete the reformatter
- mState.tracks[name].unprepareForReformat();
- // delete the timestretch provider
- delete track.mTimestretchBufferProvider;
- track.mTimestretchBufferProvider = NULL;
- mTrackNames &= ~(1<<name);
-}
-
-void AudioMixer::enable(int name)
-{
- name -= TRACK0;
- ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name);
- track_t& track = mState.tracks[name];
-
- if (!track.enabled) {
- track.enabled = true;
- ALOGV("enable(%d)", name);
- invalidateState(1 << name);
- }
-}
-
-void AudioMixer::disable(int name)
-{
- name -= TRACK0;
- ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name);
- track_t& track = mState.tracks[name];
-
- if (track.enabled) {
- track.enabled = false;
- ALOGV("disable(%d)", name);
- invalidateState(1 << name);
- }
-}
-
-/* Sets the volume ramp variables for the AudioMixer.
- *
- * The volume ramp variables are used to transition from the previous
- * volume to the set volume. ramp controls the duration of the transition.
- * Its value is typically one state framecount period, but may also be 0,
- * meaning "immediate."
- *
- * FIXME: 1) Volume ramp is enabled only if there is a nonzero integer increment
- * even if there is a nonzero floating point increment (in that case, the volume
- * change is immediate). This restriction should be changed when the legacy mixer
- * is removed (see #2).
- * FIXME: 2) Integer volume variables are used for Legacy mixing and should be removed
- * when no longer needed.
- *
- * @param newVolume set volume target in floating point [0.0, 1.0].
- * @param ramp number of frames to increment over. if ramp is 0, the volume
- * should be set immediately. Currently ramp should not exceed 65535 (frames).
- * @param pIntSetVolume pointer to the U4.12 integer target volume, set on return.
- * @param pIntPrevVolume pointer to the U4.28 integer previous volume, set on return.
- * @param pIntVolumeInc pointer to the U4.28 increment per output audio frame, set on return.
- * @param pSetVolume pointer to the float target volume, set on return.
- * @param pPrevVolume pointer to the float previous volume, set on return.
- * @param pVolumeInc pointer to the float increment per output audio frame, set on return.
- * @return true if the volume has changed, false if volume is same.
- */
-static inline bool setVolumeRampVariables(float newVolume, int32_t ramp,
- int16_t *pIntSetVolume, int32_t *pIntPrevVolume, int32_t *pIntVolumeInc,
- float *pSetVolume, float *pPrevVolume, float *pVolumeInc) {
- // check floating point volume to see if it is identical to the previously
- // set volume.
- // We do not use a tolerance here (and reject changes too small)
- // as it may be confusing to use a different value than the one set.
- // If the resulting volume is too small to ramp, it is a direct set of the volume.
- if (newVolume == *pSetVolume) {
- return false;
- }
- if (newVolume < 0) {
- newVolume = 0; // should not have negative volumes
- } else {
- switch (fpclassify(newVolume)) {
- case FP_SUBNORMAL:
- case FP_NAN:
- newVolume = 0;
- break;
- case FP_ZERO:
- break; // zero volume is fine
- case FP_INFINITE:
- // Infinite volume could be handled consistently since
- // floating point math saturates at infinities,
- // but we limit volume to unity gain float.
- // ramp = 0; break;
- //
- newVolume = AudioMixer::UNITY_GAIN_FLOAT;
- break;
- case FP_NORMAL:
- default:
- // Floating point does not have problems with overflow wrap
- // that integer has. However, we limit the volume to
- // unity gain here.
- // TODO: Revisit the volume limitation and perhaps parameterize.
- if (newVolume > AudioMixer::UNITY_GAIN_FLOAT) {
- newVolume = AudioMixer::UNITY_GAIN_FLOAT;
- }
- break;
- }
- }
-
- // set floating point volume ramp
- if (ramp != 0) {
- // when the ramp completes, *pPrevVolume is set to *pSetVolume, so there
- // is no computational mismatch; hence equality is checked here.
- ALOGD_IF(*pPrevVolume != *pSetVolume, "previous float ramp hasn't finished,"
- " prev:%f set_to:%f", *pPrevVolume, *pSetVolume);
- const float inc = (newVolume - *pPrevVolume) / ramp; // could be inf, nan, subnormal
- const float maxv = max(newVolume, *pPrevVolume); // could be inf, cannot be nan, subnormal
-
- if (isnormal(inc) // inc must be a normal number (no subnormals, infinite, nan)
- && maxv + inc != maxv) { // inc must make forward progress
- *pVolumeInc = inc;
- // ramp is set now.
- // Note: if newVolume is 0, then near the end of the ramp,
- // it may be possible that the ramped volume may be subnormal or
- // temporarily negative by a small amount or subnormal due to floating
- // point inaccuracies.
- } else {
- ramp = 0; // ramp not allowed
- }
- }
-
- // compute and check integer volume, no need to check negative values
- // The integer volume is limited to "unity_gain" to avoid wrapping and other
- // audio artifacts, so it never reaches the range limit of U4.28.
- // We safely use signed 16 and 32 bit integers here.
- const float scaledVolume = newVolume * AudioMixer::UNITY_GAIN_INT; // not neg, subnormal, nan
- const int32_t intVolume = (scaledVolume >= (float)AudioMixer::UNITY_GAIN_INT) ?
- AudioMixer::UNITY_GAIN_INT : (int32_t)scaledVolume;
-
- // set integer volume ramp
- if (ramp != 0) {
- // integer volume is U4.12 (to use 16 bit multiplies), but ramping uses U4.28.
- // when the ramp completes, *pIntPrevVolume is set to *pIntSetVolume << 16, so there
- // is no computational mismatch; hence equality is checked here.
- ALOGD_IF(*pIntPrevVolume != *pIntSetVolume << 16, "previous int ramp hasn't finished,"
- " prev:%d set_to:%d", *pIntPrevVolume, *pIntSetVolume << 16);
- const int32_t inc = ((intVolume << 16) - *pIntPrevVolume) / ramp;
-
- if (inc != 0) { // inc must make forward progress
- *pIntVolumeInc = inc;
- } else {
- ramp = 0; // ramp not allowed
- }
- }
-
- // if no ramp, or ramp not allowed, then clear float and integer increments
- if (ramp == 0) {
- *pVolumeInc = 0;
- *pPrevVolume = newVolume;
- *pIntVolumeInc = 0;
- *pIntPrevVolume = intVolume << 16;
- }
- *pSetVolume = newVolume;
- *pIntSetVolume = intVolume;
- return true;
-}
-
-void AudioMixer::setParameter(int name, int target, int param, void *value)
-{
- name -= TRACK0;
- ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name);
- track_t& track = mState.tracks[name];
-
- int valueInt = static_cast<int>(reinterpret_cast<uintptr_t>(value));
- int32_t *valueBuf = reinterpret_cast<int32_t*>(value);
-
- switch (target) {
-
- case TRACK:
- switch (param) {
- case CHANNEL_MASK: {
- const audio_channel_mask_t trackChannelMask =
- static_cast<audio_channel_mask_t>(valueInt);
- if (setChannelMasks(name, trackChannelMask, track.mMixerChannelMask)) {
- ALOGV("setParameter(TRACK, CHANNEL_MASK, %x)", trackChannelMask);
- invalidateState(1 << name);
- }
- } break;
- case MAIN_BUFFER:
- if (track.mainBuffer != valueBuf) {
- track.mainBuffer = valueBuf;
- ALOGV("setParameter(TRACK, MAIN_BUFFER, %p)", valueBuf);
- invalidateState(1 << name);
- }
- break;
- case AUX_BUFFER:
- if (track.auxBuffer != valueBuf) {
- track.auxBuffer = valueBuf;
- ALOGV("setParameter(TRACK, AUX_BUFFER, %p)", valueBuf);
- invalidateState(1 << name);
- }
- break;
- case FORMAT: {
- audio_format_t format = static_cast<audio_format_t>(valueInt);
- if (track.mFormat != format) {
- ALOG_ASSERT(audio_is_linear_pcm(format), "Invalid format %#x", format);
- track.mFormat = format;
- ALOGV("setParameter(TRACK, FORMAT, %#x)", format);
- track.prepareForReformat();
- invalidateState(1 << name);
- }
- } break;
- // FIXME do we want to support setting the downmix type from AudioFlinger?
- // for a specific track? or per mixer?
- /* case DOWNMIX_TYPE:
- break */
- case MIXER_FORMAT: {
- audio_format_t format = static_cast<audio_format_t>(valueInt);
- if (track.mMixerFormat != format) {
- track.mMixerFormat = format;
- ALOGV("setParameter(TRACK, MIXER_FORMAT, %#x)", format);
- }
- } break;
- case MIXER_CHANNEL_MASK: {
- const audio_channel_mask_t mixerChannelMask =
- static_cast<audio_channel_mask_t>(valueInt);
- if (setChannelMasks(name, track.channelMask, mixerChannelMask)) {
- ALOGV("setParameter(TRACK, MIXER_CHANNEL_MASK, %#x)", mixerChannelMask);
- invalidateState(1 << name);
- }
- } break;
- default:
- LOG_ALWAYS_FATAL("setParameter track: bad param %d", param);
- }
- break;
-
- case RESAMPLE:
- switch (param) {
- case SAMPLE_RATE:
- ALOG_ASSERT(valueInt > 0, "bad sample rate %d", valueInt);
- if (track.setResampler(uint32_t(valueInt), mSampleRate)) {
- ALOGV("setParameter(RESAMPLE, SAMPLE_RATE, %u)",
- uint32_t(valueInt));
- invalidateState(1 << name);
- }
- break;
- case RESET:
- track.resetResampler();
- invalidateState(1 << name);
- break;
- case REMOVE:
- delete track.resampler;
- track.resampler = NULL;
- track.sampleRate = mSampleRate;
- invalidateState(1 << name);
- break;
- default:
- LOG_ALWAYS_FATAL("setParameter resample: bad param %d", param);
- }
- break;
-
- case RAMP_VOLUME:
- case VOLUME:
- switch (param) {
- case AUXLEVEL:
- if (setVolumeRampVariables(*reinterpret_cast<float*>(value),
- target == RAMP_VOLUME ? mState.frameCount : 0,
- &track.auxLevel, &track.prevAuxLevel, &track.auxInc,
- &track.mAuxLevel, &track.mPrevAuxLevel, &track.mAuxInc)) {
- ALOGV("setParameter(%s, AUXLEVEL: %04x)",
- target == VOLUME ? "VOLUME" : "RAMP_VOLUME", track.auxLevel);
- invalidateState(1 << name);
- }
- break;
- default:
- if ((unsigned)param >= VOLUME0 && (unsigned)param < VOLUME0 + MAX_NUM_VOLUMES) {
- if (setVolumeRampVariables(*reinterpret_cast<float*>(value),
- target == RAMP_VOLUME ? mState.frameCount : 0,
- &track.volume[param - VOLUME0], &track.prevVolume[param - VOLUME0],
- &track.volumeInc[param - VOLUME0],
- &track.mVolume[param - VOLUME0], &track.mPrevVolume[param - VOLUME0],
- &track.mVolumeInc[param - VOLUME0])) {
- ALOGV("setParameter(%s, VOLUME%d: %04x)",
- target == VOLUME ? "VOLUME" : "RAMP_VOLUME", param - VOLUME0,
- track.volume[param - VOLUME0]);
- invalidateState(1 << name);
- }
- } else {
- LOG_ALWAYS_FATAL("setParameter volume: bad param %d", param);
- }
- }
- break;
- case TIMESTRETCH:
- switch (param) {
- case PLAYBACK_RATE: {
- const AudioPlaybackRate *playbackRate =
- reinterpret_cast<AudioPlaybackRate*>(value);
- ALOGW_IF(!isAudioPlaybackRateValid(*playbackRate),
- "bad parameters speed %f, pitch %f",playbackRate->mSpeed,
- playbackRate->mPitch);
- if (track.setPlaybackRate(*playbackRate)) {
- ALOGV("setParameter(TIMESTRETCH, PLAYBACK_RATE, STRETCH_MODE, FALLBACK_MODE "
- "%f %f %d %d",
- playbackRate->mSpeed,
- playbackRate->mPitch,
- playbackRate->mStretchMode,
- playbackRate->mFallbackMode);
- // invalidateState(1 << name);
- }
- } break;
- default:
- LOG_ALWAYS_FATAL("setParameter timestretch: bad param %d", param);
- }
- break;
-
- default:
- LOG_ALWAYS_FATAL("setParameter: bad target %d", target);
- }
-}
-
-bool AudioMixer::track_t::setResampler(uint32_t trackSampleRate, uint32_t devSampleRate)
-{
- if (trackSampleRate != devSampleRate || resampler != NULL) {
- if (sampleRate != trackSampleRate) {
- sampleRate = trackSampleRate;
- if (resampler == NULL) {
- ALOGV("Creating resampler from track %d Hz to device %d Hz",
- trackSampleRate, devSampleRate);
- AudioResampler::src_quality quality;
- // force lowest quality level resampler if use case isn't music or video
- // FIXME this is flawed for dynamic sample rates, as we choose the resampler
- // quality level based on the initial ratio, but that could change later.
- // Should have a way to distinguish tracks with static ratios vs. dynamic ratios.
- if (isMusicRate(trackSampleRate)) {
- quality = AudioResampler::DEFAULT_QUALITY;
- } else {
- quality = AudioResampler::DYN_LOW_QUALITY;
- }
-
- // TODO: Remove MONO_HACK. Resampler sees #channels after the downmixer
- // but if none exists, it is the channel count (1 for mono).
- const int resamplerChannelCount = downmixerBufferProvider != NULL
- ? mMixerChannelCount : channelCount;
- ALOGVV("Creating resampler:"
- " format(%#x) channels(%d) devSampleRate(%u) quality(%d)\n",
- mMixerInFormat, resamplerChannelCount, devSampleRate, quality);
- resampler = AudioResampler::create(
- mMixerInFormat,
- resamplerChannelCount,
- devSampleRate, quality);
- }
- return true;
- }
- }
- return false;
-}
-
-bool AudioMixer::track_t::setPlaybackRate(const AudioPlaybackRate &playbackRate)
-{
- if ((mTimestretchBufferProvider == NULL &&
- fabs(playbackRate.mSpeed - mPlaybackRate.mSpeed) < AUDIO_TIMESTRETCH_SPEED_MIN_DELTA &&
- fabs(playbackRate.mPitch - mPlaybackRate.mPitch) < AUDIO_TIMESTRETCH_PITCH_MIN_DELTA) ||
- isAudioPlaybackRateEqual(playbackRate, mPlaybackRate)) {
- return false;
- }
- mPlaybackRate = playbackRate;
- if (mTimestretchBufferProvider == NULL) {
- // TODO: Remove MONO_HACK. Resampler sees #channels after the downmixer
- // but if none exists, it is the channel count (1 for mono).
- const int timestretchChannelCount = downmixerBufferProvider != NULL
- ? mMixerChannelCount : channelCount;
- mTimestretchBufferProvider = new TimestretchBufferProvider(timestretchChannelCount,
- mMixerInFormat, sampleRate, playbackRate);
- reconfigureBufferProviders();
- } else {
- reinterpret_cast<TimestretchBufferProvider*>(mTimestretchBufferProvider)
- ->setPlaybackRate(playbackRate);
- }
- return true;
-}
-
-/* Checks to see if the volume ramp has completed and clears the increment
- * variables appropriately.
- *
- * FIXME: There is code to handle int/float ramp variable switchover should it not
- * complete within a mixer buffer processing call, but it is preferred to avoid switchover
- * due to precision issues. The switchover code is included for legacy code purposes
- * and can be removed once the integer volume is removed.
- *
- * It is not sufficient to clear only the volumeInc integer variable because
- * if one channel requires ramping, all channels are ramped.
- *
- * There is a bit of duplicated code here, but it keeps backward compatibility.
- */
-inline void AudioMixer::track_t::adjustVolumeRamp(bool aux, bool useFloat)
-{
- if (useFloat) {
- for (uint32_t i = 0; i < MAX_NUM_VOLUMES; i++) {
- if ((mVolumeInc[i] > 0 && mPrevVolume[i] + mVolumeInc[i] >= mVolume[i]) ||
- (mVolumeInc[i] < 0 && mPrevVolume[i] + mVolumeInc[i] <= mVolume[i])) {
- volumeInc[i] = 0;
- prevVolume[i] = volume[i] << 16;
- mVolumeInc[i] = 0.;
- mPrevVolume[i] = mVolume[i];
- } else {
- //ALOGV("ramp: %f %f %f", mVolume[i], mPrevVolume[i], mVolumeInc[i]);
- prevVolume[i] = u4_28_from_float(mPrevVolume[i]);
- }
- }
- } else {
- for (uint32_t i = 0; i < MAX_NUM_VOLUMES; i++) {
- if (((volumeInc[i]>0) && (((prevVolume[i]+volumeInc[i])>>16) >= volume[i])) ||
- ((volumeInc[i]<0) && (((prevVolume[i]+volumeInc[i])>>16) <= volume[i]))) {
- volumeInc[i] = 0;
- prevVolume[i] = volume[i] << 16;
- mVolumeInc[i] = 0.;
- mPrevVolume[i] = mVolume[i];
- } else {
- //ALOGV("ramp: %d %d %d", volume[i] << 16, prevVolume[i], volumeInc[i]);
- mPrevVolume[i] = float_from_u4_28(prevVolume[i]);
- }
- }
- }
- /* TODO: aux is always integer regardless of output buffer type */
- if (aux) {
- if (((auxInc>0) && (((prevAuxLevel+auxInc)>>16) >= auxLevel)) ||
- ((auxInc<0) && (((prevAuxLevel+auxInc)>>16) <= auxLevel))) {
- auxInc = 0;
- prevAuxLevel = auxLevel << 16;
- mAuxInc = 0.;
- mPrevAuxLevel = mAuxLevel;
- } else {
- //ALOGV("aux ramp: %d %d %d", auxLevel << 16, prevAuxLevel, auxInc);
- }
- }
-}
-
-size_t AudioMixer::getUnreleasedFrames(int name) const
-{
- name -= TRACK0;
- if (uint32_t(name) < MAX_NUM_TRACKS) {
- return mState.tracks[name].getUnreleasedFrames();
- }
- return 0;
-}
-
-void AudioMixer::setBufferProvider(int name, AudioBufferProvider* bufferProvider)
-{
- name -= TRACK0;
- ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name);
-
- if (mState.tracks[name].mInputBufferProvider == bufferProvider) {
- return; // don't reset any buffer providers if identical.
- }
- if (mState.tracks[name].mReformatBufferProvider != NULL) {
- mState.tracks[name].mReformatBufferProvider->reset();
- } else if (mState.tracks[name].downmixerBufferProvider != NULL) {
- mState.tracks[name].downmixerBufferProvider->reset();
- } else if (mState.tracks[name].mPostDownmixReformatBufferProvider != NULL) {
- mState.tracks[name].mPostDownmixReformatBufferProvider->reset();
- } else if (mState.tracks[name].mTimestretchBufferProvider != NULL) {
- mState.tracks[name].mTimestretchBufferProvider->reset();
- }
-
- mState.tracks[name].mInputBufferProvider = bufferProvider;
- mState.tracks[name].reconfigureBufferProviders();
-}
-
-
-void AudioMixer::process()
-{
- mState.hook(&mState);
-}
-
-
-void AudioMixer::process__validate(state_t* state)
-{
- ALOGW_IF(!state->needsChanged,
- "in process__validate() but nothing's invalid");
-
- uint32_t changed = state->needsChanged;
- state->needsChanged = 0; // clear the validation flag
-
- // recompute which tracks are enabled / disabled
- uint32_t enabled = 0;
- uint32_t disabled = 0;
- while (changed) {
- const int i = 31 - __builtin_clz(changed);
- const uint32_t mask = 1<<i;
- changed &= ~mask;
- track_t& t = state->tracks[i];
- (t.enabled ? enabled : disabled) |= mask;
- }
- state->enabledTracks &= ~disabled;
- state->enabledTracks |= enabled;
-
- // compute everything we need...
- int countActiveTracks = 0;
- // TODO: fix all16BitsStereNoResample logic to
- // either properly handle muted tracks (it should ignore them)
- // or remove altogether as an obsolete optimization.
- bool all16BitsStereoNoResample = true;
- bool resampling = false;
- bool volumeRamp = false;
- uint32_t en = state->enabledTracks;
- while (en) {
- const int i = 31 - __builtin_clz(en);
- en &= ~(1<<i);
-
- countActiveTracks++;
- track_t& t = state->tracks[i];
- uint32_t n = 0;
- // FIXME can overflow (mask is only 3 bits)
- n |= NEEDS_CHANNEL_1 + t.channelCount - 1;
- if (t.doesResample()) {
- n |= NEEDS_RESAMPLE;
- }
- if (t.auxLevel != 0 && t.auxBuffer != NULL) {
- n |= NEEDS_AUX;
- }
-
- if (t.volumeInc[0]|t.volumeInc[1]) {
- volumeRamp = true;
- } else if (!t.doesResample() && t.volumeRL == 0) {
- n |= NEEDS_MUTE;
- }
- t.needs = n;
-
- if (n & NEEDS_MUTE) {
- t.hook = track__nop;
- } else {
- if (n & NEEDS_AUX) {
- all16BitsStereoNoResample = false;
- }
- if (n & NEEDS_RESAMPLE) {
- all16BitsStereoNoResample = false;
- resampling = true;
- t.hook = getTrackHook(TRACKTYPE_RESAMPLE, t.mMixerChannelCount,
- t.mMixerInFormat, t.mMixerFormat);
- ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
- "Track %d needs downmix + resample", i);
- } else {
- if ((n & NEEDS_CHANNEL_COUNT__MASK) == NEEDS_CHANNEL_1){
- t.hook = getTrackHook(
- (t.mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO // TODO: MONO_HACK
- && t.channelMask == AUDIO_CHANNEL_OUT_MONO)
- ? TRACKTYPE_NORESAMPLEMONO : TRACKTYPE_NORESAMPLE,
- t.mMixerChannelCount,
- t.mMixerInFormat, t.mMixerFormat);
- all16BitsStereoNoResample = false;
- }
- if ((n & NEEDS_CHANNEL_COUNT__MASK) >= NEEDS_CHANNEL_2){
- t.hook = getTrackHook(TRACKTYPE_NORESAMPLE, t.mMixerChannelCount,
- t.mMixerInFormat, t.mMixerFormat);
- ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
- "Track %d needs downmix", i);
- }
- }
- }
- }
-
- // select the processing hooks
- state->hook = process__nop;
- if (countActiveTracks > 0) {
- if (resampling) {
- if (!state->outputTemp) {
- state->outputTemp = new int32_t[MAX_NUM_CHANNELS * state->frameCount];
- }
- if (!state->resampleTemp) {
- state->resampleTemp = new int32_t[MAX_NUM_CHANNELS * state->frameCount];
- }
- state->hook = process__genericResampling;
- } else {
- if (state->outputTemp) {
- delete [] state->outputTemp;
- state->outputTemp = NULL;
- }
- if (state->resampleTemp) {
- delete [] state->resampleTemp;
- state->resampleTemp = NULL;
- }
- state->hook = process__genericNoResampling;
- if (all16BitsStereoNoResample && !volumeRamp) {
- if (countActiveTracks == 1) {
- const int i = 31 - __builtin_clz(state->enabledTracks);
- track_t& t = state->tracks[i];
- if ((t.needs & NEEDS_MUTE) == 0) {
- // The check prevents a muted track from acquiring a process hook.
- //
- // This is dangerous if the track is MONO as that requires
- // special case handling due to implicit channel duplication.
- // Stereo or Multichannel should actually be fine here.
- state->hook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
- t.mMixerChannelCount, t.mMixerInFormat, t.mMixerFormat);
- }
- }
- }
- }
- }
-
- ALOGV("mixer configuration change: %d activeTracks (%08x) "
- "all16BitsStereoNoResample=%d, resampling=%d, volumeRamp=%d",
- countActiveTracks, state->enabledTracks,
- all16BitsStereoNoResample, resampling, volumeRamp);
-
- state->hook(state);
-
- // Now that the volume ramp has been done, set optimal state and
- // track hooks for subsequent mixer process
- if (countActiveTracks > 0) {
- bool allMuted = true;
- uint32_t en = state->enabledTracks;
- while (en) {
- const int i = 31 - __builtin_clz(en);
- en &= ~(1<<i);
- track_t& t = state->tracks[i];
- if (!t.doesResample() && t.volumeRL == 0) {
- t.needs |= NEEDS_MUTE;
- t.hook = track__nop;
- } else {
- allMuted = false;
- }
- }
- if (allMuted) {
- state->hook = process__nop;
- } else if (all16BitsStereoNoResample) {
- if (countActiveTracks == 1) {
- const int i = 31 - __builtin_clz(state->enabledTracks);
- track_t& t = state->tracks[i];
- // Muted single tracks handled by allMuted above.
- state->hook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
- t.mMixerChannelCount, t.mMixerInFormat, t.mMixerFormat);
- }
- }
- }
-}
-
-
-void AudioMixer::track__genericResample(track_t* t, int32_t* out, size_t outFrameCount,
- int32_t* temp, int32_t* aux)
-{
- ALOGVV("track__genericResample\n");
- t->resampler->setSampleRate(t->sampleRate);
-
- // ramp gain - resample to temp buffer and scale/mix in 2nd step
- if (aux != NULL) {
- // always resample with unity gain when sending to auxiliary buffer to be able
- // to apply send level after resampling
- t->resampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
- memset(temp, 0, outFrameCount * t->mMixerChannelCount * sizeof(int32_t));
- t->resampler->resample(temp, outFrameCount, t->bufferProvider);
- if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1]|t->auxInc)) {
- volumeRampStereo(t, out, outFrameCount, temp, aux);
- } else {
- volumeStereo(t, out, outFrameCount, temp, aux);
- }
- } else {
- if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1])) {
- t->resampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
- memset(temp, 0, outFrameCount * MAX_NUM_CHANNELS * sizeof(int32_t));
- t->resampler->resample(temp, outFrameCount, t->bufferProvider);
- volumeRampStereo(t, out, outFrameCount, temp, aux);
- }
-
- // constant gain
- else {
- t->resampler->setVolume(t->mVolume[0], t->mVolume[1]);
- t->resampler->resample(out, outFrameCount, t->bufferProvider);
- }
- }
-}
-
-void AudioMixer::track__nop(track_t* t __unused, int32_t* out __unused,
- size_t outFrameCount __unused, int32_t* temp __unused, int32_t* aux __unused)
-{
-}
-
-void AudioMixer::volumeRampStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
- int32_t* aux)
-{
- int32_t vl = t->prevVolume[0];
- int32_t vr = t->prevVolume[1];
- const int32_t vlInc = t->volumeInc[0];
- const int32_t vrInc = t->volumeInc[1];
-
- //ALOGD("[0] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
- // t, vlInc/65536.0f, vl/65536.0f, t->volume[0],
- // (vl + vlInc*frameCount)/65536.0f, frameCount);
-
- // ramp volume
- if (CC_UNLIKELY(aux != NULL)) {
- int32_t va = t->prevAuxLevel;
- const int32_t vaInc = t->auxInc;
- int32_t l;
- int32_t r;
-
- do {
- l = (*temp++ >> 12);
- r = (*temp++ >> 12);
- *out++ += (vl >> 16) * l;
- *out++ += (vr >> 16) * r;
- *aux++ += (va >> 17) * (l + r);
- vl += vlInc;
- vr += vrInc;
- va += vaInc;
- } while (--frameCount);
- t->prevAuxLevel = va;
- } else {
- do {
- *out++ += (vl >> 16) * (*temp++ >> 12);
- *out++ += (vr >> 16) * (*temp++ >> 12);
- vl += vlInc;
- vr += vrInc;
- } while (--frameCount);
- }
- t->prevVolume[0] = vl;
- t->prevVolume[1] = vr;
- t->adjustVolumeRamp(aux != NULL);
-}
-
-void AudioMixer::volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
- int32_t* aux)
-{
- const int16_t vl = t->volume[0];
- const int16_t vr = t->volume[1];
-
- if (CC_UNLIKELY(aux != NULL)) {
- const int16_t va = t->auxLevel;
- do {
- int16_t l = (int16_t)(*temp++ >> 12);
- int16_t r = (int16_t)(*temp++ >> 12);
- out[0] = mulAdd(l, vl, out[0]);
- int16_t a = (int16_t)(((int32_t)l + r) >> 1);
- out[1] = mulAdd(r, vr, out[1]);
- out += 2;
- aux[0] = mulAdd(a, va, aux[0]);
- aux++;
- } while (--frameCount);
- } else {
- do {
- int16_t l = (int16_t)(*temp++ >> 12);
- int16_t r = (int16_t)(*temp++ >> 12);
- out[0] = mulAdd(l, vl, out[0]);
- out[1] = mulAdd(r, vr, out[1]);
- out += 2;
- } while (--frameCount);
- }
-}
-
-void AudioMixer::track__16BitsStereo(track_t* t, int32_t* out, size_t frameCount,
- int32_t* temp __unused, int32_t* aux)
-{
- ALOGVV("track__16BitsStereo\n");
- const int16_t *in = static_cast<const int16_t *>(t->in);
-
- if (CC_UNLIKELY(aux != NULL)) {
- int32_t l;
- int32_t r;
- // ramp gain
- if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1]|t->auxInc)) {
- int32_t vl = t->prevVolume[0];
- int32_t vr = t->prevVolume[1];
- int32_t va = t->prevAuxLevel;
- const int32_t vlInc = t->volumeInc[0];
- const int32_t vrInc = t->volumeInc[1];
- const int32_t vaInc = t->auxInc;
- // ALOGD("[1] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
- // t, vlInc/65536.0f, vl/65536.0f, t->volume[0],
- // (vl + vlInc*frameCount)/65536.0f, frameCount);
-
- do {
- l = (int32_t)*in++;
- r = (int32_t)*in++;
- *out++ += (vl >> 16) * l;
- *out++ += (vr >> 16) * r;
- *aux++ += (va >> 17) * (l + r);
- vl += vlInc;
- vr += vrInc;
- va += vaInc;
- } while (--frameCount);
-
- t->prevVolume[0] = vl;
- t->prevVolume[1] = vr;
- t->prevAuxLevel = va;
- t->adjustVolumeRamp(true);
- }
-
- // constant gain
- else {
- const uint32_t vrl = t->volumeRL;
- const int16_t va = (int16_t)t->auxLevel;
- do {
- uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
- int16_t a = (int16_t)(((int32_t)in[0] + in[1]) >> 1);
- in += 2;
- out[0] = mulAddRL(1, rl, vrl, out[0]);
- out[1] = mulAddRL(0, rl, vrl, out[1]);
- out += 2;
- aux[0] = mulAdd(a, va, aux[0]);
- aux++;
- } while (--frameCount);
- }
- } else {
- // ramp gain
- if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1])) {
- int32_t vl = t->prevVolume[0];
- int32_t vr = t->prevVolume[1];
- const int32_t vlInc = t->volumeInc[0];
- const int32_t vrInc = t->volumeInc[1];
-
- // ALOGD("[1] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
- // t, vlInc/65536.0f, vl/65536.0f, t->volume[0],
- // (vl + vlInc*frameCount)/65536.0f, frameCount);
-
- do {
- *out++ += (vl >> 16) * (int32_t) *in++;
- *out++ += (vr >> 16) * (int32_t) *in++;
- vl += vlInc;
- vr += vrInc;
- } while (--frameCount);
-
- t->prevVolume[0] = vl;
- t->prevVolume[1] = vr;
- t->adjustVolumeRamp(false);
- }
-
- // constant gain
- else {
- const uint32_t vrl = t->volumeRL;
- do {
- uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
- in += 2;
- out[0] = mulAddRL(1, rl, vrl, out[0]);
- out[1] = mulAddRL(0, rl, vrl, out[1]);
- out += 2;
- } while (--frameCount);
- }
- }
- t->in = in;
-}
-
-void AudioMixer::track__16BitsMono(track_t* t, int32_t* out, size_t frameCount,
- int32_t* temp __unused, int32_t* aux)
-{
- ALOGVV("track__16BitsMono\n");
- const int16_t *in = static_cast<int16_t const *>(t->in);
-
- if (CC_UNLIKELY(aux != NULL)) {
- // ramp gain
- if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1]|t->auxInc)) {
- int32_t vl = t->prevVolume[0];
- int32_t vr = t->prevVolume[1];
- int32_t va = t->prevAuxLevel;
- const int32_t vlInc = t->volumeInc[0];
- const int32_t vrInc = t->volumeInc[1];
- const int32_t vaInc = t->auxInc;
-
- // ALOGD("[2] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
- // t, vlInc/65536.0f, vl/65536.0f, t->volume[0],
- // (vl + vlInc*frameCount)/65536.0f, frameCount);
-
- do {
- int32_t l = *in++;
- *out++ += (vl >> 16) * l;
- *out++ += (vr >> 16) * l;
- *aux++ += (va >> 16) * l;
- vl += vlInc;
- vr += vrInc;
- va += vaInc;
- } while (--frameCount);
-
- t->prevVolume[0] = vl;
- t->prevVolume[1] = vr;
- t->prevAuxLevel = va;
- t->adjustVolumeRamp(true);
- }
- // constant gain
- else {
- const int16_t vl = t->volume[0];
- const int16_t vr = t->volume[1];
- const int16_t va = (int16_t)t->auxLevel;
- do {
- int16_t l = *in++;
- out[0] = mulAdd(l, vl, out[0]);
- out[1] = mulAdd(l, vr, out[1]);
- out += 2;
- aux[0] = mulAdd(l, va, aux[0]);
- aux++;
- } while (--frameCount);
- }
- } else {
- // ramp gain
- if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1])) {
- int32_t vl = t->prevVolume[0];
- int32_t vr = t->prevVolume[1];
- const int32_t vlInc = t->volumeInc[0];
- const int32_t vrInc = t->volumeInc[1];
-
- // ALOGD("[2] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
- // t, vlInc/65536.0f, vl/65536.0f, t->volume[0],
- // (vl + vlInc*frameCount)/65536.0f, frameCount);
-
- do {
- int32_t l = *in++;
- *out++ += (vl >> 16) * l;
- *out++ += (vr >> 16) * l;
- vl += vlInc;
- vr += vrInc;
- } while (--frameCount);
-
- t->prevVolume[0] = vl;
- t->prevVolume[1] = vr;
- t->adjustVolumeRamp(false);
- }
- // constant gain
- else {
- const int16_t vl = t->volume[0];
- const int16_t vr = t->volume[1];
- do {
- int16_t l = *in++;
- out[0] = mulAdd(l, vl, out[0]);
- out[1] = mulAdd(l, vr, out[1]);
- out += 2;
- } while (--frameCount);
- }
- }
- t->in = in;
-}
-
-// no-op case
-void AudioMixer::process__nop(state_t* state)
-{
- ALOGVV("process__nop\n");
- uint32_t e0 = state->enabledTracks;
- while (e0) {
- // process by group of tracks with same output buffer to
- // avoid multiple memset() on same buffer
- uint32_t e1 = e0, e2 = e0;
- int i = 31 - __builtin_clz(e1);
- {
- track_t& t1 = state->tracks[i];
- e2 &= ~(1<<i);
- while (e2) {
- i = 31 - __builtin_clz(e2);
- e2 &= ~(1<<i);
- track_t& t2 = state->tracks[i];
- if (CC_UNLIKELY(t2.mainBuffer != t1.mainBuffer)) {
- e1 &= ~(1<<i);
- }
- }
- e0 &= ~(e1);
-
- memset(t1.mainBuffer, 0, state->frameCount * t1.mMixerChannelCount
- * audio_bytes_per_sample(t1.mMixerFormat));
- }
-
- while (e1) {
- i = 31 - __builtin_clz(e1);
- e1 &= ~(1<<i);
- {
- track_t& t3 = state->tracks[i];
- size_t outFrames = state->frameCount;
- while (outFrames) {
- t3.buffer.frameCount = outFrames;
- t3.bufferProvider->getNextBuffer(&t3.buffer);
- if (t3.buffer.raw == NULL) break;
- outFrames -= t3.buffer.frameCount;
- t3.bufferProvider->releaseBuffer(&t3.buffer);
- }
- }
- }
- }
-}
-
-// generic code without resampling
-void AudioMixer::process__genericNoResampling(state_t* state)
-{
- ALOGVV("process__genericNoResampling\n");
- int32_t outTemp[BLOCKSIZE * MAX_NUM_CHANNELS] __attribute__((aligned(32)));
-
- // acquire each track's buffer
- uint32_t enabledTracks = state->enabledTracks;
- uint32_t e0 = enabledTracks;
- while (e0) {
- const int i = 31 - __builtin_clz(e0);
- e0 &= ~(1<<i);
- track_t& t = state->tracks[i];
- t.buffer.frameCount = state->frameCount;
- t.bufferProvider->getNextBuffer(&t.buffer);
- t.frameCount = t.buffer.frameCount;
- t.in = t.buffer.raw;
- }
-
- e0 = enabledTracks;
- while (e0) {
- // process by group of tracks with same output buffer to
- // optimize cache use
- uint32_t e1 = e0, e2 = e0;
- int j = 31 - __builtin_clz(e1);
- track_t& t1 = state->tracks[j];
- e2 &= ~(1<<j);
- while (e2) {
- j = 31 - __builtin_clz(e2);
- e2 &= ~(1<<j);
- track_t& t2 = state->tracks[j];
- if (CC_UNLIKELY(t2.mainBuffer != t1.mainBuffer)) {
- e1 &= ~(1<<j);
- }
- }
- e0 &= ~(e1);
- // this assumes output 16 bits stereo, no resampling
- int32_t *out = t1.mainBuffer;
- size_t numFrames = 0;
- do {
- memset(outTemp, 0, sizeof(outTemp));
- e2 = e1;
- while (e2) {
- const int i = 31 - __builtin_clz(e2);
- e2 &= ~(1<<i);
- track_t& t = state->tracks[i];
- size_t outFrames = BLOCKSIZE;
- int32_t *aux = NULL;
- if (CC_UNLIKELY(t.needs & NEEDS_AUX)) {
- aux = t.auxBuffer + numFrames;
- }
- while (outFrames) {
- // t.in == NULL can happen if the track was flushed just after having
- // been enabled for mixing.
- if (t.in == NULL) {
- enabledTracks &= ~(1<<i);
- e1 &= ~(1<<i);
- break;
- }
- size_t inFrames = (t.frameCount > outFrames)?outFrames:t.frameCount;
- if (inFrames > 0) {
- t.hook(&t, outTemp + (BLOCKSIZE - outFrames) * t.mMixerChannelCount,
- inFrames, state->resampleTemp, aux);
- t.frameCount -= inFrames;
- outFrames -= inFrames;
- if (CC_UNLIKELY(aux != NULL)) {
- aux += inFrames;
- }
- }
- if (t.frameCount == 0 && outFrames) {
- t.bufferProvider->releaseBuffer(&t.buffer);
- t.buffer.frameCount = (state->frameCount - numFrames) -
- (BLOCKSIZE - outFrames);
- t.bufferProvider->getNextBuffer(&t.buffer);
- t.in = t.buffer.raw;
- if (t.in == NULL) {
- enabledTracks &= ~(1<<i);
- e1 &= ~(1<<i);
- break;
- }
- t.frameCount = t.buffer.frameCount;
- }
- }
- }
-
- convertMixerFormat(out, t1.mMixerFormat, outTemp, t1.mMixerInFormat,
- BLOCKSIZE * t1.mMixerChannelCount);
- // TODO: fix ugly casting due to choice of out pointer type
- out = reinterpret_cast<int32_t*>((uint8_t*)out
- + BLOCKSIZE * t1.mMixerChannelCount
- * audio_bytes_per_sample(t1.mMixerFormat));
- numFrames += BLOCKSIZE;
- } while (numFrames < state->frameCount);
- }
-
- // release each track's buffer
- e0 = enabledTracks;
- while (e0) {
- const int i = 31 - __builtin_clz(e0);
- e0 &= ~(1<<i);
- track_t& t = state->tracks[i];
- t.bufferProvider->releaseBuffer(&t.buffer);
- }
-}
-
-
-// generic code with resampling
-void AudioMixer::process__genericResampling(state_t* state)
-{
- ALOGVV("process__genericResampling\n");
- // this const just means that local variable outTemp doesn't change
- int32_t* const outTemp = state->outputTemp;
- size_t numFrames = state->frameCount;
-
- uint32_t e0 = state->enabledTracks;
- while (e0) {
- // process by group of tracks with same output buffer
- // to optimize cache use
- uint32_t e1 = e0, e2 = e0;
- int j = 31 - __builtin_clz(e1);
- track_t& t1 = state->tracks[j];
- e2 &= ~(1<<j);
- while (e2) {
- j = 31 - __builtin_clz(e2);
- e2 &= ~(1<<j);
- track_t& t2 = state->tracks[j];
- if (CC_UNLIKELY(t2.mainBuffer != t1.mainBuffer)) {
- e1 &= ~(1<<j);
- }
- }
- e0 &= ~(e1);
- int32_t *out = t1.mainBuffer;
- memset(outTemp, 0, sizeof(*outTemp) * t1.mMixerChannelCount * state->frameCount);
- while (e1) {
- const int i = 31 - __builtin_clz(e1);
- e1 &= ~(1<<i);
- track_t& t = state->tracks[i];
- int32_t *aux = NULL;
- if (CC_UNLIKELY(t.needs & NEEDS_AUX)) {
- aux = t.auxBuffer;
- }
-
- // this is a little goofy, on the resampling case we don't
- // acquire/release the buffers because it's done by
- // the resampler.
- if (t.needs & NEEDS_RESAMPLE) {
- t.hook(&t, outTemp, numFrames, state->resampleTemp, aux);
- } else {
-
- size_t outFrames = 0;
-
- while (outFrames < numFrames) {
- t.buffer.frameCount = numFrames - outFrames;
- t.bufferProvider->getNextBuffer(&t.buffer);
- t.in = t.buffer.raw;
- // t.in == NULL can happen if the track was flushed just after having
- // been enabled for mixing.
- if (t.in == NULL) break;
-
- if (CC_UNLIKELY(aux != NULL)) {
- aux += outFrames;
- }
- t.hook(&t, outTemp + outFrames * t.mMixerChannelCount, t.buffer.frameCount,
- state->resampleTemp, aux);
- outFrames += t.buffer.frameCount;
- t.bufferProvider->releaseBuffer(&t.buffer);
- }
- }
- }
- convertMixerFormat(out, t1.mMixerFormat,
- outTemp, t1.mMixerInFormat, numFrames * t1.mMixerChannelCount);
- }
-}
-
-// one track, 16 bits stereo without resampling is the most common case
-void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state)
-{
- ALOGVV("process__OneTrack16BitsStereoNoResampling\n");
- // This method is only called when state->enabledTracks has exactly
- // one bit set. The asserts below would verify this, but are commented out
- // since the whole point of this method is to optimize performance.
- //ALOG_ASSERT(0 != state->enabledTracks, "no tracks enabled");
- const int i = 31 - __builtin_clz(state->enabledTracks);
- //ALOG_ASSERT((1 << i) == state->enabledTracks, "more than 1 track enabled");
- const track_t& t = state->tracks[i];
-
- AudioBufferProvider::Buffer& b(t.buffer);
-
- int32_t* out = t.mainBuffer;
- float *fout = reinterpret_cast<float*>(out);
- size_t numFrames = state->frameCount;
-
- const int16_t vl = t.volume[0];
- const int16_t vr = t.volume[1];
- const uint32_t vrl = t.volumeRL;
- while (numFrames) {
- b.frameCount = numFrames;
- t.bufferProvider->getNextBuffer(&b);
- const int16_t *in = b.i16;
-
- // in == NULL can happen if the track was flushed just after having
- // been enabled for mixing.
- if (in == NULL || (((uintptr_t)in) & 3)) {
- if ( AUDIO_FORMAT_PCM_FLOAT == t.mMixerFormat ) {
- memset((char*)fout, 0, numFrames
- * t.mMixerChannelCount * audio_bytes_per_sample(t.mMixerFormat));
- } else {
- memset((char*)out, 0, numFrames
- * t.mMixerChannelCount * audio_bytes_per_sample(t.mMixerFormat));
- }
- ALOGE_IF((((uintptr_t)in) & 3),
- "process__OneTrack16BitsStereoNoResampling: misaligned buffer"
- " %p track %d, channels %d, needs %08x, volume %08x vfl %f vfr %f",
- in, i, t.channelCount, t.needs, vrl, t.mVolume[0], t.mVolume[1]);
- return;
- }
- size_t outFrames = b.frameCount;
-
- switch (t.mMixerFormat) {
- case AUDIO_FORMAT_PCM_FLOAT:
- do {
- uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
- in += 2;
- int32_t l = mulRL(1, rl, vrl);
- int32_t r = mulRL(0, rl, vrl);
- *fout++ = float_from_q4_27(l);
- *fout++ = float_from_q4_27(r);
- // Note: In case of later int16_t sink output,
- // conversion and clamping is done by memcpy_to_i16_from_float().
- } while (--outFrames);
- break;
- case AUDIO_FORMAT_PCM_16_BIT:
- if (CC_UNLIKELY(uint32_t(vl) > UNITY_GAIN_INT || uint32_t(vr) > UNITY_GAIN_INT)) {
- // volume is boosted, so we might need to clamp even though
- // we process only one track.
- do {
- uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
- in += 2;
- int32_t l = mulRL(1, rl, vrl) >> 12;
- int32_t r = mulRL(0, rl, vrl) >> 12;
- // clamping...
- l = clamp16(l);
- r = clamp16(r);
- *out++ = (r<<16) | (l & 0xFFFF);
- } while (--outFrames);
- } else {
- do {
- uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
- in += 2;
- int32_t l = mulRL(1, rl, vrl) >> 12;
- int32_t r = mulRL(0, rl, vrl) >> 12;
- *out++ = (r<<16) | (l & 0xFFFF);
- } while (--outFrames);
- }
- break;
- default:
- LOG_ALWAYS_FATAL("bad mixer format: %d", t.mMixerFormat);
- }
- numFrames -= b.frameCount;
- t.bufferProvider->releaseBuffer(&b);
- }
-}
-
-/*static*/ pthread_once_t AudioMixer::sOnceControl = PTHREAD_ONCE_INIT;
-
-/*static*/ void AudioMixer::sInitRoutine()
-{
- DownmixerBufferProvider::init(); // for the downmixer
-}
-
-/* TODO: consider whether this level of optimization is necessary.
- * Perhaps just stick with a single for loop.
- */
-
-// Needs to derive a compile time constant (constexpr). Could be targeted to go
-// to a MONOVOL mixtype based on MAX_NUM_VOLUMES, but that's an unnecessary complication.
-#define MIXTYPE_MONOVOL(mixtype) ((mixtype) == MIXTYPE_MULTI ? MIXTYPE_MULTI_MONOVOL : \
- (mixtype) == MIXTYPE_MULTI_SAVEONLY ? MIXTYPE_MULTI_SAVEONLY_MONOVOL : (mixtype))
-
-/* MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
- * TO: int32_t (Q4.27) or float
- * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27)
- */
-template <int MIXTYPE,
- typename TO, typename TI, typename TV, typename TA, typename TAV>
-static void volumeRampMulti(uint32_t channels, TO* out, size_t frameCount,
- const TI* in, TA* aux, TV *vol, const TV *volinc, TAV *vola, TAV volainc)
-{
- switch (channels) {
- case 1:
- volumeRampMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 2:
- volumeRampMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 3:
- volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out,
- frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 4:
- volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out,
- frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 5:
- volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out,
- frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 6:
- volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out,
- frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 7:
- volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out,
- frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- case 8:
- volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out,
- frameCount, in, aux, vol, volinc, vola, volainc);
- break;
- }
-}
-
-/* MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
- * TO: int32_t (Q4.27) or float
- * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27)
- */
-template <int MIXTYPE,
- typename TO, typename TI, typename TV, typename TA, typename TAV>
-static void volumeMulti(uint32_t channels, TO* out, size_t frameCount,
- const TI* in, TA* aux, const TV *vol, TAV vola)
-{
- switch (channels) {
- case 1:
- volumeMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, vola);
- break;
- case 2:
- volumeMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, vola);
- break;
- case 3:
- volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out, frameCount, in, aux, vol, vola);
- break;
- case 4:
- volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out, frameCount, in, aux, vol, vola);
- break;
- case 5:
- volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out, frameCount, in, aux, vol, vola);
- break;
- case 6:
- volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out, frameCount, in, aux, vol, vola);
- break;
- case 7:
- volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out, frameCount, in, aux, vol, vola);
- break;
- case 8:
- volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out, frameCount, in, aux, vol, vola);
- break;
- }
-}
-
-/* MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
- * USEFLOATVOL (set to true if float volume is used)
- * ADJUSTVOL (set to true if volume ramp parameters needs adjustment afterwards)
- * TO: int32_t (Q4.27) or float
- * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27)
- */
-template <int MIXTYPE, bool USEFLOATVOL, bool ADJUSTVOL,
- typename TO, typename TI, typename TA>
-void AudioMixer::volumeMix(TO *out, size_t outFrames,
- const TI *in, TA *aux, bool ramp, AudioMixer::track_t *t)
-{
- if (USEFLOATVOL) {
- if (ramp) {
- volumeRampMulti<MIXTYPE>(t->mMixerChannelCount, out, outFrames, in, aux,
- t->mPrevVolume, t->mVolumeInc, &t->prevAuxLevel, t->auxInc);
- if (ADJUSTVOL) {
- t->adjustVolumeRamp(aux != NULL, true);
- }
- } else {
- volumeMulti<MIXTYPE>(t->mMixerChannelCount, out, outFrames, in, aux,
- t->mVolume, t->auxLevel);
- }
- } else {
- if (ramp) {
- volumeRampMulti<MIXTYPE>(t->mMixerChannelCount, out, outFrames, in, aux,
- t->prevVolume, t->volumeInc, &t->prevAuxLevel, t->auxInc);
- if (ADJUSTVOL) {
- t->adjustVolumeRamp(aux != NULL);
- }
- } else {
- volumeMulti<MIXTYPE>(t->mMixerChannelCount, out, outFrames, in, aux,
- t->volume, t->auxLevel);
- }
- }
-}
-
-/* This process hook is called when there is a single track without
- * aux buffer, volume ramp, or resampling.
- * TODO: Update the hook selection: this can properly handle aux and ramp.
- *
- * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
- * TO: int32_t (Q4.27) or float
- * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27)
- */
-template <int MIXTYPE, typename TO, typename TI, typename TA>
-void AudioMixer::process_NoResampleOneTrack(state_t* state)
-{
- ALOGVV("process_NoResampleOneTrack\n");
- // CLZ is faster than CTZ on ARM, though really not sure if true after 31 - clz.
- const int i = 31 - __builtin_clz(state->enabledTracks);
- ALOG_ASSERT((1 << i) == state->enabledTracks, "more than 1 track enabled");
- track_t *t = &state->tracks[i];
- const uint32_t channels = t->mMixerChannelCount;
- TO* out = reinterpret_cast<TO*>(t->mainBuffer);
- TA* aux = reinterpret_cast<TA*>(t->auxBuffer);
- const bool ramp = t->needsRamp();
-
- for (size_t numFrames = state->frameCount; numFrames; ) {
- AudioBufferProvider::Buffer& b(t->buffer);
- // get input buffer
- b.frameCount = numFrames;
- t->bufferProvider->getNextBuffer(&b);
- const TI *in = reinterpret_cast<TI*>(b.raw);
-
- // in == NULL can happen if the track was flushed just after having
- // been enabled for mixing.
- if (in == NULL || (((uintptr_t)in) & 3)) {
- memset(out, 0, numFrames
- * channels * audio_bytes_per_sample(t->mMixerFormat));
- ALOGE_IF((((uintptr_t)in) & 3), "process_NoResampleOneTrack: bus error: "
- "buffer %p track %p, channels %d, needs %#x",
- in, t, t->channelCount, t->needs);
- return;
- }
-
- const size_t outFrames = b.frameCount;
- volumeMix<MIXTYPE, is_same<TI, float>::value, false> (
- out, outFrames, in, aux, ramp, t);
-
- out += outFrames * channels;
- if (aux != NULL) {
- aux += channels;
- }
- numFrames -= b.frameCount;
-
- // release buffer
- t->bufferProvider->releaseBuffer(&b);
- }
- if (ramp) {
- t->adjustVolumeRamp(aux != NULL, is_same<TI, float>::value);
- }
-}
-
-/* This track hook is called to do resampling then mixing,
- * pulling from the track's upstream AudioBufferProvider.
- *
- * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
- * TO: int32_t (Q4.27) or float
- * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27)
- */
-template <int MIXTYPE, typename TO, typename TI, typename TA>
-void AudioMixer::track__Resample(track_t* t, TO* out, size_t outFrameCount, TO* temp, TA* aux)
-{
- ALOGVV("track__Resample\n");
- t->resampler->setSampleRate(t->sampleRate);
- const bool ramp = t->needsRamp();
- if (ramp || aux != NULL) {
- // if ramp: resample with unity gain to temp buffer and scale/mix in 2nd step.
- // if aux != NULL: resample with unity gain to temp buffer then apply send level.
-
- t->resampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
- memset(temp, 0, outFrameCount * t->mMixerChannelCount * sizeof(TO));
- t->resampler->resample((int32_t*)temp, outFrameCount, t->bufferProvider);
-
- volumeMix<MIXTYPE, is_same<TI, float>::value, true>(
- out, outFrameCount, temp, aux, ramp, t);
-
- } else { // constant volume gain
- t->resampler->setVolume(t->mVolume[0], t->mVolume[1]);
- t->resampler->resample((int32_t*)out, outFrameCount, t->bufferProvider);
- }
-}
-
-/* This track hook is called to mix a track, when no resampling is required.
- * The input buffer should be present in t->in.
- *
- * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
- * TO: int32_t (Q4.27) or float
- * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27)
- */
-template <int MIXTYPE, typename TO, typename TI, typename TA>
-void AudioMixer::track__NoResample(track_t* t, TO* out, size_t frameCount,
- TO* temp __unused, TA* aux)
-{
- ALOGVV("track__NoResample\n");
- const TI *in = static_cast<const TI *>(t->in);
-
- volumeMix<MIXTYPE, is_same<TI, float>::value, true>(
- out, frameCount, in, aux, t->needsRamp(), t);
-
- // MIXTYPE_MONOEXPAND reads a single input channel and expands to NCHAN output channels.
- // MIXTYPE_MULTI reads NCHAN input channels and places to NCHAN output channels.
- in += (MIXTYPE == MIXTYPE_MONOEXPAND) ? frameCount : frameCount * t->mMixerChannelCount;
- t->in = in;
-}
-
-/* The Mixer engine generates either int32_t (Q4_27) or float data.
- * We use this function to convert the engine buffers
- * to the desired mixer output format, either int16_t (Q.15) or float.
- */
-void AudioMixer::convertMixerFormat(void *out, audio_format_t mixerOutFormat,
- void *in, audio_format_t mixerInFormat, size_t sampleCount)
-{
- switch (mixerInFormat) {
- case AUDIO_FORMAT_PCM_FLOAT:
- switch (mixerOutFormat) {
- case AUDIO_FORMAT_PCM_FLOAT:
- memcpy(out, in, sampleCount * sizeof(float)); // MEMCPY. TODO optimize out
- break;
- case AUDIO_FORMAT_PCM_16_BIT:
- memcpy_to_i16_from_float((int16_t*)out, (float*)in, sampleCount);
- break;
- default:
- LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
- break;
- }
- break;
- case AUDIO_FORMAT_PCM_16_BIT:
- switch (mixerOutFormat) {
- case AUDIO_FORMAT_PCM_FLOAT:
- memcpy_to_float_from_q4_27((float*)out, (int32_t*)in, sampleCount);
- break;
- case AUDIO_FORMAT_PCM_16_BIT:
- // two int16_t are produced per iteration
- ditherAndClamp((int32_t*)out, (int32_t*)in, sampleCount >> 1);
- break;
- default:
- LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
- break;
- }
- break;
- default:
- LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
- break;
- }
-}
-
-/* Returns the proper track hook to use for mixing the track into the output buffer.
- */
-AudioMixer::hook_t AudioMixer::getTrackHook(int trackType, uint32_t channelCount,
- audio_format_t mixerInFormat, audio_format_t mixerOutFormat __unused)
-{
- if (!kUseNewMixer && channelCount == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) {
- switch (trackType) {
- case TRACKTYPE_NOP:
- return track__nop;
- case TRACKTYPE_RESAMPLE:
- return track__genericResample;
- case TRACKTYPE_NORESAMPLEMONO:
- return track__16BitsMono;
- case TRACKTYPE_NORESAMPLE:
- return track__16BitsStereo;
- default:
- LOG_ALWAYS_FATAL("bad trackType: %d", trackType);
- break;
- }
- }
- LOG_ALWAYS_FATAL_IF(channelCount > MAX_NUM_CHANNELS);
- switch (trackType) {
- case TRACKTYPE_NOP:
- return track__nop;
- case TRACKTYPE_RESAMPLE:
- switch (mixerInFormat) {
- case AUDIO_FORMAT_PCM_FLOAT:
- return (AudioMixer::hook_t)
- track__Resample<MIXTYPE_MULTI, float /*TO*/, float /*TI*/, int32_t /*TA*/>;
- case AUDIO_FORMAT_PCM_16_BIT:
- return (AudioMixer::hook_t)\
- track__Resample<MIXTYPE_MULTI, int32_t, int16_t, int32_t>;
- default:
- LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
- break;
- }
- break;
- case TRACKTYPE_NORESAMPLEMONO:
- switch (mixerInFormat) {
- case AUDIO_FORMAT_PCM_FLOAT:
- return (AudioMixer::hook_t)
- track__NoResample<MIXTYPE_MONOEXPAND, float, float, int32_t>;
- case AUDIO_FORMAT_PCM_16_BIT:
- return (AudioMixer::hook_t)
- track__NoResample<MIXTYPE_MONOEXPAND, int32_t, int16_t, int32_t>;
- default:
- LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
- break;
- }
- break;
- case TRACKTYPE_NORESAMPLE:
- switch (mixerInFormat) {
- case AUDIO_FORMAT_PCM_FLOAT:
- return (AudioMixer::hook_t)
- track__NoResample<MIXTYPE_MULTI, float, float, int32_t>;
- case AUDIO_FORMAT_PCM_16_BIT:
- return (AudioMixer::hook_t)
- track__NoResample<MIXTYPE_MULTI, int32_t, int16_t, int32_t>;
- default:
- LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
- break;
- }
- break;
- default:
- LOG_ALWAYS_FATAL("bad trackType: %d", trackType);
- break;
- }
- return NULL;
-}
-
-/* Returns the proper process hook for mixing tracks. Currently works only for
- * PROCESSTYPE_NORESAMPLEONETRACK, a mix involving one track, no resampling.
- *
- * TODO: Due to the special mixing considerations of duplicating to
- * a stereo output track, the input track cannot be MONO. This should be
- * prevented by the caller.
- */
-AudioMixer::process_hook_t AudioMixer::getProcessHook(int processType, uint32_t channelCount,
- audio_format_t mixerInFormat, audio_format_t mixerOutFormat)
-{
- if (processType != PROCESSTYPE_NORESAMPLEONETRACK) { // Only NORESAMPLEONETRACK
- LOG_ALWAYS_FATAL("bad processType: %d", processType);
- return NULL;
- }
- if (!kUseNewMixer && channelCount == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) {
- return process__OneTrack16BitsStereoNoResampling;
- }
- LOG_ALWAYS_FATAL_IF(channelCount > MAX_NUM_CHANNELS);
- switch (mixerInFormat) {
- case AUDIO_FORMAT_PCM_FLOAT:
- switch (mixerOutFormat) {
- case AUDIO_FORMAT_PCM_FLOAT:
- return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY,
- float /*TO*/, float /*TI*/, int32_t /*TA*/>;
- case AUDIO_FORMAT_PCM_16_BIT:
- return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY,
- int16_t, float, int32_t>;
- default:
- LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
- break;
- }
- break;
- case AUDIO_FORMAT_PCM_16_BIT:
- switch (mixerOutFormat) {
- case AUDIO_FORMAT_PCM_FLOAT:
- return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY,
- float, int16_t, int32_t>;
- case AUDIO_FORMAT_PCM_16_BIT:
- return process_NoResampleOneTrack<MIXTYPE_MULTI_SAVEONLY,
- int16_t, int16_t, int32_t>;
- default:
- LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
- break;
- }
- break;
- default:
- LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
- break;
- }
- return NULL;
-}
-
-// ----------------------------------------------------------------------------
-} // namespace android
diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
deleted file mode 100644
index e788ac3..0000000
--- a/services/audioflinger/AudioMixer.h
+++ /dev/null
@@ -1,391 +0,0 @@
-/*
-**
-** Copyright 2007, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-#ifndef ANDROID_AUDIO_MIXER_H
-#define ANDROID_AUDIO_MIXER_H
-
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <hardware/audio_effect.h>
-#include <media/AudioBufferProvider.h>
-#include <media/AudioResamplerPublic.h>
-#include <media/nbaio/NBLog.h>
-#include <system/audio.h>
-#include <utils/Compat.h>
-#include <utils/threads.h>
-
-#include "AudioResampler.h"
-#include "BufferProviders.h"
-
-// FIXME This is actually unity gain, which might not be max in future, expressed in U.12
-#define MAX_GAIN_INT AudioMixer::UNITY_GAIN_INT
-
-namespace android {
-
-// ----------------------------------------------------------------------------
-
-class AudioMixer
-{
-public:
- AudioMixer(size_t frameCount, uint32_t sampleRate,
- uint32_t maxNumTracks = MAX_NUM_TRACKS);
-
- /*virtual*/ ~AudioMixer(); // non-virtual saves a v-table, restore if sub-classed
-
-
- // This mixer has a hard-coded upper limit of 32 active track inputs.
- // Adding support for > 32 tracks would require more than simply changing this value.
- static const uint32_t MAX_NUM_TRACKS = 32;
- // maximum number of channels supported by the mixer
-
- // This mixer has a hard-coded upper limit of 8 channels for output.
- static const uint32_t MAX_NUM_CHANNELS = 8;
- static const uint32_t MAX_NUM_VOLUMES = 2; // stereo volume only
- // maximum number of channels supported for the content
- static const uint32_t MAX_NUM_CHANNELS_TO_DOWNMIX = AUDIO_CHANNEL_COUNT_MAX;
-
- static const uint16_t UNITY_GAIN_INT = 0x1000;
- static const CONSTEXPR float UNITY_GAIN_FLOAT = 1.0f;
-
- enum { // names
-
- // track names (MAX_NUM_TRACKS units)
- TRACK0 = 0x1000,
-
- // 0x2000 is unused
-
- // setParameter targets
- TRACK = 0x3000,
- RESAMPLE = 0x3001,
- RAMP_VOLUME = 0x3002, // ramp to new volume
- VOLUME = 0x3003, // don't ramp
- TIMESTRETCH = 0x3004,
-
- // set Parameter names
- // for target TRACK
- CHANNEL_MASK = 0x4000,
- FORMAT = 0x4001,
- MAIN_BUFFER = 0x4002,
- AUX_BUFFER = 0x4003,
- DOWNMIX_TYPE = 0X4004,
- MIXER_FORMAT = 0x4005, // AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
- MIXER_CHANNEL_MASK = 0x4006, // Channel mask for mixer output
- // for target RESAMPLE
- SAMPLE_RATE = 0x4100, // Configure sample rate conversion on this track name;
- // parameter 'value' is the new sample rate in Hz.
- // Only creates a sample rate converter the first time that
- // the track sample rate is different from the mix sample rate.
- // If the new sample rate is the same as the mix sample rate,
- // and a sample rate converter already exists,
- // then the sample rate converter remains present but is a no-op.
- RESET = 0x4101, // Reset sample rate converter without changing sample rate.
- // This clears out the resampler's input buffer.
- REMOVE = 0x4102, // Remove the sample rate converter on this track name;
- // the track is restored to the mix sample rate.
- // for target RAMP_VOLUME and VOLUME (8 channels max)
- // FIXME use float for these 3 to improve the dynamic range
- VOLUME0 = 0x4200,
- VOLUME1 = 0x4201,
- AUXLEVEL = 0x4210,
- // for target TIMESTRETCH
- PLAYBACK_RATE = 0x4300, // Configure timestretch on this track name;
- // parameter 'value' is a pointer to the new playback rate.
- };
-
-
- // For all APIs with "name": TRACK0 <= name < TRACK0 + MAX_NUM_TRACKS
-
- // Allocate a track name. Returns new track name if successful, -1 on failure.
- // The failure could be because of an invalid channelMask or format, or that
- // the track capacity of the mixer is exceeded.
- int getTrackName(audio_channel_mask_t channelMask,
- audio_format_t format, int sessionId);
-
- // Free an allocated track by name
- void deleteTrackName(int name);
-
- // Enable or disable an allocated track by name
- void enable(int name);
- void disable(int name);
-
- void setParameter(int name, int target, int param, void *value);
-
- void setBufferProvider(int name, AudioBufferProvider* bufferProvider);
- void process();
-
- uint32_t trackNames() const { return mTrackNames; }
-
- size_t getUnreleasedFrames(int name) const;
-
- static inline bool isValidPcmTrackFormat(audio_format_t format) {
- switch (format) {
- case AUDIO_FORMAT_PCM_8_BIT:
- case AUDIO_FORMAT_PCM_16_BIT:
- case AUDIO_FORMAT_PCM_24_BIT_PACKED:
- case AUDIO_FORMAT_PCM_32_BIT:
- case AUDIO_FORMAT_PCM_FLOAT:
- return true;
- default:
- return false;
- }
- }
-
-private:
-
- enum {
- // FIXME this representation permits up to 8 channels
- NEEDS_CHANNEL_COUNT__MASK = 0x00000007,
- };
-
- enum {
- NEEDS_CHANNEL_1 = 0x00000000, // mono
- NEEDS_CHANNEL_2 = 0x00000001, // stereo
-
- // sample format is not explicitly specified, and is assumed to be AUDIO_FORMAT_PCM_16_BIT
-
- NEEDS_MUTE = 0x00000100,
- NEEDS_RESAMPLE = 0x00001000,
- NEEDS_AUX = 0x00010000,
- };
-
- struct state_t;
- struct track_t;
-
- typedef void (*hook_t)(track_t* t, int32_t* output, size_t numOutFrames, int32_t* temp,
- int32_t* aux);
- static const int BLOCKSIZE = 16; // 4 cache lines
-
- struct track_t {
- uint32_t needs;
-
- // TODO: Eventually remove legacy integer volume settings
- union {
- int16_t volume[MAX_NUM_VOLUMES]; // U4.12 fixed point (top bit should be zero)
- int32_t volumeRL;
- };
-
- int32_t prevVolume[MAX_NUM_VOLUMES];
-
- // 16-byte boundary
-
- int32_t volumeInc[MAX_NUM_VOLUMES];
- int32_t auxInc;
- int32_t prevAuxLevel;
-
- // 16-byte boundary
-
- int16_t auxLevel; // 0 <= auxLevel <= MAX_GAIN_INT, but signed for mul performance
- uint16_t frameCount;
-
- uint8_t channelCount; // 1 or 2, redundant with (needs & NEEDS_CHANNEL_COUNT__MASK)
- uint8_t unused_padding; // formerly format, was always 16
- uint16_t enabled; // actually bool
- audio_channel_mask_t channelMask;
-
- // actual buffer provider used by the track hooks, see DownmixerBufferProvider below
- // for how the Track buffer provider is wrapped by another one when dowmixing is required
- AudioBufferProvider* bufferProvider;
-
- // 16-byte boundary
-
- mutable AudioBufferProvider::Buffer buffer; // 8 bytes
-
- hook_t hook;
- const void* in; // current location in buffer
-
- // 16-byte boundary
-
- AudioResampler* resampler;
- uint32_t sampleRate;
- int32_t* mainBuffer;
- int32_t* auxBuffer;
-
- // 16-byte boundary
-
- /* Buffer providers are constructed to translate the track input data as needed.
- *
- * TODO: perhaps make a single PlaybackConverterProvider class to move
- * all pre-mixer track buffer conversions outside the AudioMixer class.
- *
- * 1) mInputBufferProvider: The AudioTrack buffer provider.
- * 2) mReformatBufferProvider: If not NULL, performs the audio reformat to
- * match either mMixerInFormat or mDownmixRequiresFormat, if the downmixer
- * requires reformat. For example, it may convert floating point input to
- * PCM_16_bit if that's required by the downmixer.
- * 3) downmixerBufferProvider: If not NULL, performs the channel remixing to match
- * the number of channels required by the mixer sink.
- * 4) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from
- * the downmixer requirements to the mixer engine input requirements.
- * 5) mTimestretchBufferProvider: Adds timestretching for playback rate
- */
- AudioBufferProvider* mInputBufferProvider; // externally provided buffer provider.
- PassthruBufferProvider* mReformatBufferProvider; // provider wrapper for reformatting.
- PassthruBufferProvider* downmixerBufferProvider; // wrapper for channel conversion.
- PassthruBufferProvider* mPostDownmixReformatBufferProvider;
- PassthruBufferProvider* mTimestretchBufferProvider;
-
- int32_t sessionId;
-
- audio_format_t mMixerFormat; // output mix format: AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
- audio_format_t mFormat; // input track format
- audio_format_t mMixerInFormat; // mix internal format AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
- // each track must be converted to this format.
- audio_format_t mDownmixRequiresFormat; // required downmixer format
- // AUDIO_FORMAT_PCM_16_BIT if 16 bit necessary
- // AUDIO_FORMAT_INVALID if no required format
-
- float mVolume[MAX_NUM_VOLUMES]; // floating point set volume
- float mPrevVolume[MAX_NUM_VOLUMES]; // floating point previous volume
- float mVolumeInc[MAX_NUM_VOLUMES]; // floating point volume increment
-
- float mAuxLevel; // floating point set aux level
- float mPrevAuxLevel; // floating point prev aux level
- float mAuxInc; // floating point aux increment
-
- audio_channel_mask_t mMixerChannelMask;
- uint32_t mMixerChannelCount;
-
- AudioPlaybackRate mPlaybackRate;
-
- bool needsRamp() { return (volumeInc[0] | volumeInc[1] | auxInc) != 0; }
- bool setResampler(uint32_t trackSampleRate, uint32_t devSampleRate);
- bool doesResample() const { return resampler != NULL; }
- void resetResampler() { if (resampler != NULL) resampler->reset(); }
- void adjustVolumeRamp(bool aux, bool useFloat = false);
- size_t getUnreleasedFrames() const { return resampler != NULL ?
- resampler->getUnreleasedFrames() : 0; };
-
- status_t prepareForDownmix();
- void unprepareForDownmix();
- status_t prepareForReformat();
- void unprepareForReformat();
- bool setPlaybackRate(const AudioPlaybackRate &playbackRate);
- void reconfigureBufferProviders();
- };
-
- typedef void (*process_hook_t)(state_t* state);
-
- // pad to 32-bytes to fill cache line
- struct state_t {
- uint32_t enabledTracks;
- uint32_t needsChanged;
- size_t frameCount;
- process_hook_t hook; // one of process__*, never NULL
- int32_t *outputTemp;
- int32_t *resampleTemp;
- NBLog::Writer* mLog;
- int32_t reserved[1];
- // FIXME allocate dynamically to save some memory when maxNumTracks < MAX_NUM_TRACKS
- track_t tracks[MAX_NUM_TRACKS] __attribute__((aligned(32)));
- };
-
- // bitmask of allocated track names, where bit 0 corresponds to TRACK0 etc.
- uint32_t mTrackNames;
-
- // bitmask of configured track names; ~0 if maxNumTracks == MAX_NUM_TRACKS,
- // but will have fewer bits set if maxNumTracks < MAX_NUM_TRACKS
- const uint32_t mConfiguredNames;
-
- const uint32_t mSampleRate;
-
- NBLog::Writer mDummyLog;
-public:
- void setLog(NBLog::Writer* log);
-private:
- state_t mState __attribute__((aligned(32)));
-
- // Call after changing either the enabled status of a track, or parameters of an enabled track.
- // OK to call more often than that, but unnecessary.
- void invalidateState(uint32_t mask);
-
- bool setChannelMasks(int name,
- audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask);
-
- static void track__genericResample(track_t* t, int32_t* out, size_t numFrames, int32_t* temp,
- int32_t* aux);
- static void track__nop(track_t* t, int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
- static void track__16BitsStereo(track_t* t, int32_t* out, size_t numFrames, int32_t* temp,
- int32_t* aux);
- static void track__16BitsMono(track_t* t, int32_t* out, size_t numFrames, int32_t* temp,
- int32_t* aux);
- static void volumeRampStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
- int32_t* aux);
- static void volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
- int32_t* aux);
-
- static void process__validate(state_t* state);
- static void process__nop(state_t* state);
- static void process__genericNoResampling(state_t* state);
- static void process__genericResampling(state_t* state);
- static void process__OneTrack16BitsStereoNoResampling(state_t* state);
-
- static pthread_once_t sOnceControl;
- static void sInitRoutine();
-
- /* multi-format volume mixing function (calls template functions
- * in AudioMixerOps.h). The template parameters are as follows:
- *
- * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
- * USEFLOATVOL (set to true if float volume is used)
- * ADJUSTVOL (set to true if volume ramp parameters needs adjustment afterwards)
- * TO: int32_t (Q4.27) or float
- * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27)
- */
- template <int MIXTYPE, bool USEFLOATVOL, bool ADJUSTVOL,
- typename TO, typename TI, typename TA>
- static void volumeMix(TO *out, size_t outFrames,
- const TI *in, TA *aux, bool ramp, AudioMixer::track_t *t);
-
- // multi-format process hooks
- template <int MIXTYPE, typename TO, typename TI, typename TA>
- static void process_NoResampleOneTrack(state_t* state);
-
- // multi-format track hooks
- template <int MIXTYPE, typename TO, typename TI, typename TA>
- static void track__Resample(track_t* t, TO* out, size_t frameCount,
- TO* temp __unused, TA* aux);
- template <int MIXTYPE, typename TO, typename TI, typename TA>
- static void track__NoResample(track_t* t, TO* out, size_t frameCount,
- TO* temp __unused, TA* aux);
-
- static void convertMixerFormat(void *out, audio_format_t mixerOutFormat,
- void *in, audio_format_t mixerInFormat, size_t sampleCount);
-
- // hook types
- enum {
- PROCESSTYPE_NORESAMPLEONETRACK,
- };
- enum {
- TRACKTYPE_NOP,
- TRACKTYPE_RESAMPLE,
- TRACKTYPE_NORESAMPLE,
- TRACKTYPE_NORESAMPLEMONO,
- };
-
- // functions for determining the proper process and track hooks.
- static process_hook_t getProcessHook(int processType, uint32_t channelCount,
- audio_format_t mixerInFormat, audio_format_t mixerOutFormat);
- static hook_t getTrackHook(int trackType, uint32_t channelCount,
- audio_format_t mixerInFormat, audio_format_t mixerOutFormat);
-};
-
-// ----------------------------------------------------------------------------
-} // namespace android
-
-#endif // ANDROID_AUDIO_MIXER_H
diff --git a/services/audioflinger/AudioResampler.cpp b/services/audioflinger/AudioResampler.cpp
deleted file mode 100644
index 8b7259d..0000000
--- a/services/audioflinger/AudioResampler.cpp
+++ /dev/null
@@ -1,787 +0,0 @@
-/*
- * Copyright (C) 2007 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "AudioResampler"
-//#define LOG_NDEBUG 0
-
-#include <pthread.h>
-#include <stdint.h>
-#include <stdlib.h>
-#include <sys/types.h>
-
-#include <cutils/properties.h>
-#include <log/log.h>
-
-#include <audio_utils/primitives.h>
-#include "AudioResampler.h"
-#include "AudioResamplerSinc.h"
-#include "AudioResamplerCubic.h"
-#include "AudioResamplerDyn.h"
-
-#ifdef __arm__
- // bug 13102576
- //#define ASM_ARM_RESAMP1 // enable asm optimisation for ResamplerOrder1
-#endif
-
-namespace android {
-
-// ----------------------------------------------------------------------------
-
-class AudioResamplerOrder1 : public AudioResampler {
-public:
- AudioResamplerOrder1(int inChannelCount, int32_t sampleRate) :
- AudioResampler(inChannelCount, sampleRate, LOW_QUALITY), mX0L(0), mX0R(0) {
- }
- virtual size_t resample(int32_t* out, size_t outFrameCount,
- AudioBufferProvider* provider);
-private:
- // number of bits used in interpolation multiply - 15 bits avoids overflow
- static const int kNumInterpBits = 15;
-
- // bits to shift the phase fraction down to avoid overflow
- static const int kPreInterpShift = kNumPhaseBits - kNumInterpBits;
-
- void init() {}
- size_t resampleMono16(int32_t* out, size_t outFrameCount,
- AudioBufferProvider* provider);
- size_t resampleStereo16(int32_t* out, size_t outFrameCount,
- AudioBufferProvider* provider);
-#ifdef ASM_ARM_RESAMP1 // asm optimisation for ResamplerOrder1
- void AsmMono16Loop(int16_t *in, int32_t* maxOutPt, int32_t maxInIdx,
- size_t &outputIndex, int32_t* out, size_t &inputIndex, int32_t vl, int32_t vr,
- uint32_t &phaseFraction, uint32_t phaseIncrement);
- void AsmStereo16Loop(int16_t *in, int32_t* maxOutPt, int32_t maxInIdx,
- size_t &outputIndex, int32_t* out, size_t &inputIndex, int32_t vl, int32_t vr,
- uint32_t &phaseFraction, uint32_t phaseIncrement);
-#endif // ASM_ARM_RESAMP1
-
- static inline int32_t Interp(int32_t x0, int32_t x1, uint32_t f) {
- return x0 + (((x1 - x0) * (int32_t)(f >> kPreInterpShift)) >> kNumInterpBits);
- }
- static inline void Advance(size_t* index, uint32_t* frac, uint32_t inc) {
- *frac += inc;
- *index += (size_t)(*frac >> kNumPhaseBits);
- *frac &= kPhaseMask;
- }
- int mX0L;
- int mX0R;
-};
-
-/*static*/
-const double AudioResampler::kPhaseMultiplier = 1L << AudioResampler::kNumPhaseBits;
-
-bool AudioResampler::qualityIsSupported(src_quality quality)
-{
- switch (quality) {
- case DEFAULT_QUALITY:
- case LOW_QUALITY:
- case MED_QUALITY:
- case HIGH_QUALITY:
- case VERY_HIGH_QUALITY:
- case DYN_LOW_QUALITY:
- case DYN_MED_QUALITY:
- case DYN_HIGH_QUALITY:
- return true;
- default:
- return false;
- }
-}
-
-// ----------------------------------------------------------------------------
-
-static pthread_once_t once_control = PTHREAD_ONCE_INIT;
-static AudioResampler::src_quality defaultQuality = AudioResampler::DEFAULT_QUALITY;
-
-void AudioResampler::init_routine()
-{
- char value[PROPERTY_VALUE_MAX];
- if (property_get("af.resampler.quality", value, NULL) > 0) {
- char *endptr;
- unsigned long l = strtoul(value, &endptr, 0);
- if (*endptr == '\0') {
- defaultQuality = (src_quality) l;
- ALOGD("forcing AudioResampler quality to %d", defaultQuality);
- if (defaultQuality < DEFAULT_QUALITY || defaultQuality > DYN_HIGH_QUALITY) {
- defaultQuality = DEFAULT_QUALITY;
- }
- }
- }
-}
-
-uint32_t AudioResampler::qualityMHz(src_quality quality)
-{
- switch (quality) {
- default:
- case DEFAULT_QUALITY:
- case LOW_QUALITY:
- return 3;
- case MED_QUALITY:
- return 6;
- case HIGH_QUALITY:
- return 20;
- case VERY_HIGH_QUALITY:
- return 34;
- case DYN_LOW_QUALITY:
- return 4;
- case DYN_MED_QUALITY:
- return 6;
- case DYN_HIGH_QUALITY:
- return 12;
- }
-}
-
-static const uint32_t maxMHz = 130; // an arbitrary number that permits 3 VHQ, should be tunable
-static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
-static uint32_t currentMHz = 0;
-
-AudioResampler* AudioResampler::create(audio_format_t format, int inChannelCount,
- int32_t sampleRate, src_quality quality) {
-
- bool atFinalQuality;
- if (quality == DEFAULT_QUALITY) {
- // read the resampler default quality property the first time it is needed
- int ok = pthread_once(&once_control, init_routine);
- if (ok != 0) {
- ALOGE("%s pthread_once failed: %d", __func__, ok);
- }
- quality = defaultQuality;
- atFinalQuality = false;
- } else {
- atFinalQuality = true;
- }
-
- /* if the caller requests DEFAULT_QUALITY and af.resampler.property
- * has not been set, the target resampler quality is set to DYN_MED_QUALITY,
- * and allowed to "throttle" down to DYN_LOW_QUALITY if necessary
- * due to estimated CPU load of having too many active resamplers
- * (the code below the if).
- */
- if (quality == DEFAULT_QUALITY) {
- quality = DYN_MED_QUALITY;
- }
-
- // naive implementation of CPU load throttling doesn't account for whether resampler is active
- pthread_mutex_lock(&mutex);
- for (;;) {
- uint32_t deltaMHz = qualityMHz(quality);
- uint32_t newMHz = currentMHz + deltaMHz;
- if ((qualityIsSupported(quality) && newMHz <= maxMHz) || atFinalQuality) {
- ALOGV("resampler load %u -> %u MHz due to delta +%u MHz from quality %d",
- currentMHz, newMHz, deltaMHz, quality);
- currentMHz = newMHz;
- break;
- }
- // not enough CPU available for proposed quality level, so try next lowest level
- switch (quality) {
- default:
- case LOW_QUALITY:
- atFinalQuality = true;
- break;
- case MED_QUALITY:
- quality = LOW_QUALITY;
- break;
- case HIGH_QUALITY:
- quality = MED_QUALITY;
- break;
- case VERY_HIGH_QUALITY:
- quality = HIGH_QUALITY;
- break;
- case DYN_LOW_QUALITY:
- atFinalQuality = true;
- break;
- case DYN_MED_QUALITY:
- quality = DYN_LOW_QUALITY;
- break;
- case DYN_HIGH_QUALITY:
- quality = DYN_MED_QUALITY;
- break;
- }
- }
- pthread_mutex_unlock(&mutex);
-
- AudioResampler* resampler;
-
- switch (quality) {
- default:
- case LOW_QUALITY:
- ALOGV("Create linear Resampler");
- LOG_ALWAYS_FATAL_IF(format != AUDIO_FORMAT_PCM_16_BIT);
- resampler = new AudioResamplerOrder1(inChannelCount, sampleRate);
- break;
- case MED_QUALITY:
- ALOGV("Create cubic Resampler");
- LOG_ALWAYS_FATAL_IF(format != AUDIO_FORMAT_PCM_16_BIT);
- resampler = new AudioResamplerCubic(inChannelCount, sampleRate);
- break;
- case HIGH_QUALITY:
- ALOGV("Create HIGH_QUALITY sinc Resampler");
- LOG_ALWAYS_FATAL_IF(format != AUDIO_FORMAT_PCM_16_BIT);
- resampler = new AudioResamplerSinc(inChannelCount, sampleRate);
- break;
- case VERY_HIGH_QUALITY:
- ALOGV("Create VERY_HIGH_QUALITY sinc Resampler = %d", quality);
- LOG_ALWAYS_FATAL_IF(format != AUDIO_FORMAT_PCM_16_BIT);
- resampler = new AudioResamplerSinc(inChannelCount, sampleRate, quality);
- break;
- case DYN_LOW_QUALITY:
- case DYN_MED_QUALITY:
- case DYN_HIGH_QUALITY:
- ALOGV("Create dynamic Resampler = %d", quality);
- if (format == AUDIO_FORMAT_PCM_FLOAT) {
- resampler = new AudioResamplerDyn<float, float, float>(inChannelCount,
- sampleRate, quality);
- } else {
- LOG_ALWAYS_FATAL_IF(format != AUDIO_FORMAT_PCM_16_BIT);
- if (quality == DYN_HIGH_QUALITY) {
- resampler = new AudioResamplerDyn<int32_t, int16_t, int32_t>(inChannelCount,
- sampleRate, quality);
- } else {
- resampler = new AudioResamplerDyn<int16_t, int16_t, int32_t>(inChannelCount,
- sampleRate, quality);
- }
- }
- break;
- }
-
- // initialize resampler
- resampler->init();
- return resampler;
-}
-
-AudioResampler::AudioResampler(int inChannelCount,
- int32_t sampleRate, src_quality quality) :
- mChannelCount(inChannelCount),
- mSampleRate(sampleRate), mInSampleRate(sampleRate), mInputIndex(0),
- mPhaseFraction(0),
- mQuality(quality) {
-
- const int maxChannels = quality < DYN_LOW_QUALITY ? 2 : 8;
- if (inChannelCount < 1
- || inChannelCount > maxChannels) {
- LOG_ALWAYS_FATAL("Unsupported sample format %d quality %d channels",
- quality, inChannelCount);
- }
- if (sampleRate <= 0) {
- LOG_ALWAYS_FATAL("Unsupported sample rate %d Hz", sampleRate);
- }
-
- // initialize common members
- mVolume[0] = mVolume[1] = 0;
- mBuffer.frameCount = 0;
-}
-
-AudioResampler::~AudioResampler() {
- pthread_mutex_lock(&mutex);
- src_quality quality = getQuality();
- uint32_t deltaMHz = qualityMHz(quality);
- int32_t newMHz = currentMHz - deltaMHz;
- ALOGV("resampler load %u -> %d MHz due to delta -%u MHz from quality %d",
- currentMHz, newMHz, deltaMHz, quality);
- LOG_ALWAYS_FATAL_IF(newMHz < 0, "negative resampler load %d MHz", newMHz);
- currentMHz = newMHz;
- pthread_mutex_unlock(&mutex);
-}
-
-void AudioResampler::setSampleRate(int32_t inSampleRate) {
- mInSampleRate = inSampleRate;
- mPhaseIncrement = (uint32_t)((kPhaseMultiplier * inSampleRate) / mSampleRate);
-}
-
-void AudioResampler::setVolume(float left, float right) {
- // TODO: Implement anti-zipper filter
- // convert to U4.12 for internal integer use (round down)
- // integer volume values are clamped to 0 to UNITY_GAIN.
- mVolume[0] = u4_12_from_float(clampFloatVol(left));
- mVolume[1] = u4_12_from_float(clampFloatVol(right));
-}
-
-void AudioResampler::reset() {
- mInputIndex = 0;
- mPhaseFraction = 0;
- mBuffer.frameCount = 0;
-}
-
-// ----------------------------------------------------------------------------
-
-size_t AudioResamplerOrder1::resample(int32_t* out, size_t outFrameCount,
- AudioBufferProvider* provider) {
-
- // should never happen, but we overflow if it does
- // ALOG_ASSERT(outFrameCount < 32767);
-
- // select the appropriate resampler
- switch (mChannelCount) {
- case 1:
- return resampleMono16(out, outFrameCount, provider);
- case 2:
- return resampleStereo16(out, outFrameCount, provider);
- default:
- LOG_ALWAYS_FATAL("invalid channel count: %d", mChannelCount);
- return 0;
- }
-}
-
-size_t AudioResamplerOrder1::resampleStereo16(int32_t* out, size_t outFrameCount,
- AudioBufferProvider* provider) {
-
- int32_t vl = mVolume[0];
- int32_t vr = mVolume[1];
-
- size_t inputIndex = mInputIndex;
- uint32_t phaseFraction = mPhaseFraction;
- uint32_t phaseIncrement = mPhaseIncrement;
- size_t outputIndex = 0;
- size_t outputSampleCount = outFrameCount * 2;
- size_t inFrameCount = getInFrameCountRequired(outFrameCount);
-
- // ALOGE("starting resample %d frames, inputIndex=%d, phaseFraction=%d, phaseIncrement=%d",
- // outFrameCount, inputIndex, phaseFraction, phaseIncrement);
-
- while (outputIndex < outputSampleCount) {
-
- // buffer is empty, fetch a new one
- while (mBuffer.frameCount == 0) {
- mBuffer.frameCount = inFrameCount;
- provider->getNextBuffer(&mBuffer);
- if (mBuffer.raw == NULL) {
- goto resampleStereo16_exit;
- }
-
- // ALOGE("New buffer fetched: %d frames", mBuffer.frameCount);
- if (mBuffer.frameCount > inputIndex) break;
-
- inputIndex -= mBuffer.frameCount;
- mX0L = mBuffer.i16[mBuffer.frameCount*2-2];
- mX0R = mBuffer.i16[mBuffer.frameCount*2-1];
- provider->releaseBuffer(&mBuffer);
- // mBuffer.frameCount == 0 now so we reload a new buffer
- }
-
- int16_t *in = mBuffer.i16;
-
- // handle boundary case
- while (inputIndex == 0) {
- // ALOGE("boundary case");
- out[outputIndex++] += vl * Interp(mX0L, in[0], phaseFraction);
- out[outputIndex++] += vr * Interp(mX0R, in[1], phaseFraction);
- Advance(&inputIndex, &phaseFraction, phaseIncrement);
- if (outputIndex == outputSampleCount) {
- break;
- }
- }
-
- // process input samples
- // ALOGE("general case");
-
-#ifdef ASM_ARM_RESAMP1 // asm optimisation for ResamplerOrder1
- if (inputIndex + 2 < mBuffer.frameCount) {
- int32_t* maxOutPt;
- int32_t maxInIdx;
-
- maxOutPt = out + (outputSampleCount - 2); // 2 because 2 frames per loop
- maxInIdx = mBuffer.frameCount - 2;
- AsmStereo16Loop(in, maxOutPt, maxInIdx, outputIndex, out, inputIndex, vl, vr,
- phaseFraction, phaseIncrement);
- }
-#endif // ASM_ARM_RESAMP1
-
- while (outputIndex < outputSampleCount && inputIndex < mBuffer.frameCount) {
- out[outputIndex++] += vl * Interp(in[inputIndex*2-2],
- in[inputIndex*2], phaseFraction);
- out[outputIndex++] += vr * Interp(in[inputIndex*2-1],
- in[inputIndex*2+1], phaseFraction);
- Advance(&inputIndex, &phaseFraction, phaseIncrement);
- }
-
- // ALOGE("loop done - outputIndex=%d, inputIndex=%d", outputIndex, inputIndex);
-
- // if done with buffer, save samples
- if (inputIndex >= mBuffer.frameCount) {
- inputIndex -= mBuffer.frameCount;
-
- // ALOGE("buffer done, new input index %d", inputIndex);
-
- mX0L = mBuffer.i16[mBuffer.frameCount*2-2];
- mX0R = mBuffer.i16[mBuffer.frameCount*2-1];
- provider->releaseBuffer(&mBuffer);
-
- // verify that the releaseBuffer resets the buffer frameCount
- // ALOG_ASSERT(mBuffer.frameCount == 0);
- }
- }
-
- // ALOGE("output buffer full - outputIndex=%d, inputIndex=%d", outputIndex, inputIndex);
-
-resampleStereo16_exit:
- // save state
- mInputIndex = inputIndex;
- mPhaseFraction = phaseFraction;
- return outputIndex / 2 /* channels for stereo */;
-}
-
-size_t AudioResamplerOrder1::resampleMono16(int32_t* out, size_t outFrameCount,
- AudioBufferProvider* provider) {
-
- int32_t vl = mVolume[0];
- int32_t vr = mVolume[1];
-
- size_t inputIndex = mInputIndex;
- uint32_t phaseFraction = mPhaseFraction;
- uint32_t phaseIncrement = mPhaseIncrement;
- size_t outputIndex = 0;
- size_t outputSampleCount = outFrameCount * 2;
- size_t inFrameCount = getInFrameCountRequired(outFrameCount);
-
- // ALOGE("starting resample %d frames, inputIndex=%d, phaseFraction=%d, phaseIncrement=%d",
- // outFrameCount, inputIndex, phaseFraction, phaseIncrement);
- while (outputIndex < outputSampleCount) {
- // buffer is empty, fetch a new one
- while (mBuffer.frameCount == 0) {
- mBuffer.frameCount = inFrameCount;
- provider->getNextBuffer(&mBuffer);
- if (mBuffer.raw == NULL) {
- mInputIndex = inputIndex;
- mPhaseFraction = phaseFraction;
- goto resampleMono16_exit;
- }
- // ALOGE("New buffer fetched: %d frames", mBuffer.frameCount);
- if (mBuffer.frameCount > inputIndex) break;
-
- inputIndex -= mBuffer.frameCount;
- mX0L = mBuffer.i16[mBuffer.frameCount-1];
- provider->releaseBuffer(&mBuffer);
- // mBuffer.frameCount == 0 now so we reload a new buffer
- }
- int16_t *in = mBuffer.i16;
-
- // handle boundary case
- while (inputIndex == 0) {
- // ALOGE("boundary case");
- int32_t sample = Interp(mX0L, in[0], phaseFraction);
- out[outputIndex++] += vl * sample;
- out[outputIndex++] += vr * sample;
- Advance(&inputIndex, &phaseFraction, phaseIncrement);
- if (outputIndex == outputSampleCount) {
- break;
- }
- }
-
- // process input samples
- // ALOGE("general case");
-
-#ifdef ASM_ARM_RESAMP1 // asm optimisation for ResamplerOrder1
- if (inputIndex + 2 < mBuffer.frameCount) {
- int32_t* maxOutPt;
- int32_t maxInIdx;
-
- maxOutPt = out + (outputSampleCount - 2);
- maxInIdx = (int32_t)mBuffer.frameCount - 2;
- AsmMono16Loop(in, maxOutPt, maxInIdx, outputIndex, out, inputIndex, vl, vr,
- phaseFraction, phaseIncrement);
- }
-#endif // ASM_ARM_RESAMP1
-
- while (outputIndex < outputSampleCount && inputIndex < mBuffer.frameCount) {
- int32_t sample = Interp(in[inputIndex-1], in[inputIndex],
- phaseFraction);
- out[outputIndex++] += vl * sample;
- out[outputIndex++] += vr * sample;
- Advance(&inputIndex, &phaseFraction, phaseIncrement);
- }
-
-
- // ALOGE("loop done - outputIndex=%d, inputIndex=%d", outputIndex, inputIndex);
-
- // if done with buffer, save samples
- if (inputIndex >= mBuffer.frameCount) {
- inputIndex -= mBuffer.frameCount;
-
- // ALOGE("buffer done, new input index %d", inputIndex);
-
- mX0L = mBuffer.i16[mBuffer.frameCount-1];
- provider->releaseBuffer(&mBuffer);
-
- // verify that the releaseBuffer resets the buffer frameCount
- // ALOG_ASSERT(mBuffer.frameCount == 0);
- }
- }
-
- // ALOGE("output buffer full - outputIndex=%d, inputIndex=%d", outputIndex, inputIndex);
-
-resampleMono16_exit:
- // save state
- mInputIndex = inputIndex;
- mPhaseFraction = phaseFraction;
- return outputIndex;
-}
-
-#ifdef ASM_ARM_RESAMP1 // asm optimisation for ResamplerOrder1
-
-/*******************************************************************
-*
-* AsmMono16Loop
-* asm optimized monotonic loop version; one loop is 2 frames
-* Input:
-* in : pointer on input samples
-* maxOutPt : pointer on first not filled
-* maxInIdx : index on first not used
-* outputIndex : pointer on current output index
-* out : pointer on output buffer
-* inputIndex : pointer on current input index
-* vl, vr : left and right gain
-* phaseFraction : pointer on current phase fraction
-* phaseIncrement
-* Ouput:
-* outputIndex :
-* out : updated buffer
-* inputIndex : index of next to use
-* phaseFraction : phase fraction for next interpolation
-*
-*******************************************************************/
-__attribute__((noinline))
-void AudioResamplerOrder1::AsmMono16Loop(int16_t *in, int32_t* maxOutPt, int32_t maxInIdx,
- size_t &outputIndex, int32_t* out, size_t &inputIndex, int32_t vl, int32_t vr,
- uint32_t &phaseFraction, uint32_t phaseIncrement)
-{
- (void)maxOutPt; // remove unused parameter warnings
- (void)maxInIdx;
- (void)outputIndex;
- (void)out;
- (void)inputIndex;
- (void)vl;
- (void)vr;
- (void)phaseFraction;
- (void)phaseIncrement;
- (void)in;
-#define MO_PARAM5 "36" // offset of parameter 5 (outputIndex)
-
- asm(
- "stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, lr}\n"
- // get parameters
- " ldr r6, [sp, #" MO_PARAM5 " + 20]\n" // &phaseFraction
- " ldr r6, [r6]\n" // phaseFraction
- " ldr r7, [sp, #" MO_PARAM5 " + 8]\n" // &inputIndex
- " ldr r7, [r7]\n" // inputIndex
- " ldr r8, [sp, #" MO_PARAM5 " + 4]\n" // out
- " ldr r0, [sp, #" MO_PARAM5 " + 0]\n" // &outputIndex
- " ldr r0, [r0]\n" // outputIndex
- " add r8, r8, r0, asl #2\n" // curOut
- " ldr r9, [sp, #" MO_PARAM5 " + 24]\n" // phaseIncrement
- " ldr r10, [sp, #" MO_PARAM5 " + 12]\n" // vl
- " ldr r11, [sp, #" MO_PARAM5 " + 16]\n" // vr
-
- // r0 pin, x0, Samp
-
- // r1 in
- // r2 maxOutPt
- // r3 maxInIdx
-
- // r4 x1, i1, i3, Out1
- // r5 out0
-
- // r6 frac
- // r7 inputIndex
- // r8 curOut
-
- // r9 inc
- // r10 vl
- // r11 vr
-
- // r12
- // r13 sp
- // r14
-
- // the following loop works on 2 frames
-
- "1:\n"
- " cmp r8, r2\n" // curOut - maxCurOut
- " bcs 2f\n"
-
-#define MO_ONE_FRAME \
- " add r0, r1, r7, asl #1\n" /* in + inputIndex */\
- " ldrsh r4, [r0]\n" /* in[inputIndex] */\
- " ldr r5, [r8]\n" /* out[outputIndex] */\
- " ldrsh r0, [r0, #-2]\n" /* in[inputIndex-1] */\
- " bic r6, r6, #0xC0000000\n" /* phaseFraction & ... */\
- " sub r4, r4, r0\n" /* in[inputIndex] - in[inputIndex-1] */\
- " mov r4, r4, lsl #2\n" /* <<2 */\
- " smulwt r4, r4, r6\n" /* (x1-x0)*.. */\
- " add r6, r6, r9\n" /* phaseFraction + phaseIncrement */\
- " add r0, r0, r4\n" /* x0 - (..) */\
- " mla r5, r0, r10, r5\n" /* vl*interp + out[] */\
- " ldr r4, [r8, #4]\n" /* out[outputIndex+1] */\
- " str r5, [r8], #4\n" /* out[outputIndex++] = ... */\
- " mla r4, r0, r11, r4\n" /* vr*interp + out[] */\
- " add r7, r7, r6, lsr #30\n" /* inputIndex + phaseFraction>>30 */\
- " str r4, [r8], #4\n" /* out[outputIndex++] = ... */
-
- MO_ONE_FRAME // frame 1
- MO_ONE_FRAME // frame 2
-
- " cmp r7, r3\n" // inputIndex - maxInIdx
- " bcc 1b\n"
- "2:\n"
-
- " bic r6, r6, #0xC0000000\n" // phaseFraction & ...
- // save modified values
- " ldr r0, [sp, #" MO_PARAM5 " + 20]\n" // &phaseFraction
- " str r6, [r0]\n" // phaseFraction
- " ldr r0, [sp, #" MO_PARAM5 " + 8]\n" // &inputIndex
- " str r7, [r0]\n" // inputIndex
- " ldr r0, [sp, #" MO_PARAM5 " + 4]\n" // out
- " sub r8, r0\n" // curOut - out
- " asr r8, #2\n" // new outputIndex
- " ldr r0, [sp, #" MO_PARAM5 " + 0]\n" // &outputIndex
- " str r8, [r0]\n" // save outputIndex
-
- " ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, pc}\n"
- );
-}
-
-/*******************************************************************
-*
-* AsmStereo16Loop
-* asm optimized stereo loop version; one loop is 2 frames
-* Input:
-* in : pointer on input samples
-* maxOutPt : pointer on first not filled
-* maxInIdx : index on first not used
-* outputIndex : pointer on current output index
-* out : pointer on output buffer
-* inputIndex : pointer on current input index
-* vl, vr : left and right gain
-* phaseFraction : pointer on current phase fraction
-* phaseIncrement
-* Ouput:
-* outputIndex :
-* out : updated buffer
-* inputIndex : index of next to use
-* phaseFraction : phase fraction for next interpolation
-*
-*******************************************************************/
-__attribute__((noinline))
-void AudioResamplerOrder1::AsmStereo16Loop(int16_t *in, int32_t* maxOutPt, int32_t maxInIdx,
- size_t &outputIndex, int32_t* out, size_t &inputIndex, int32_t vl, int32_t vr,
- uint32_t &phaseFraction, uint32_t phaseIncrement)
-{
- (void)maxOutPt; // remove unused parameter warnings
- (void)maxInIdx;
- (void)outputIndex;
- (void)out;
- (void)inputIndex;
- (void)vl;
- (void)vr;
- (void)phaseFraction;
- (void)phaseIncrement;
- (void)in;
-#define ST_PARAM5 "40" // offset of parameter 5 (outputIndex)
- asm(
- "stmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r12, lr}\n"
- // get parameters
- " ldr r6, [sp, #" ST_PARAM5 " + 20]\n" // &phaseFraction
- " ldr r6, [r6]\n" // phaseFraction
- " ldr r7, [sp, #" ST_PARAM5 " + 8]\n" // &inputIndex
- " ldr r7, [r7]\n" // inputIndex
- " ldr r8, [sp, #" ST_PARAM5 " + 4]\n" // out
- " ldr r0, [sp, #" ST_PARAM5 " + 0]\n" // &outputIndex
- " ldr r0, [r0]\n" // outputIndex
- " add r8, r8, r0, asl #2\n" // curOut
- " ldr r9, [sp, #" ST_PARAM5 " + 24]\n" // phaseIncrement
- " ldr r10, [sp, #" ST_PARAM5 " + 12]\n" // vl
- " ldr r11, [sp, #" ST_PARAM5 " + 16]\n" // vr
-
- // r0 pin, x0, Samp
-
- // r1 in
- // r2 maxOutPt
- // r3 maxInIdx
-
- // r4 x1, i1, i3, out1
- // r5 out0
-
- // r6 frac
- // r7 inputIndex
- // r8 curOut
-
- // r9 inc
- // r10 vl
- // r11 vr
-
- // r12 temporary
- // r13 sp
- // r14
-
- "3:\n"
- " cmp r8, r2\n" // curOut - maxCurOut
- " bcs 4f\n"
-
-#define ST_ONE_FRAME \
- " bic r6, r6, #0xC0000000\n" /* phaseFraction & ... */\
-\
- " add r0, r1, r7, asl #2\n" /* in + 2*inputIndex */\
-\
- " ldrsh r4, [r0]\n" /* in[2*inputIndex] */\
- " ldr r5, [r8]\n" /* out[outputIndex] */\
- " ldrsh r12, [r0, #-4]\n" /* in[2*inputIndex-2] */\
- " sub r4, r4, r12\n" /* in[2*InputIndex] - in[2*InputIndex-2] */\
- " mov r4, r4, lsl #2\n" /* <<2 */\
- " smulwt r4, r4, r6\n" /* (x1-x0)*.. */\
- " add r12, r12, r4\n" /* x0 - (..) */\
- " mla r5, r12, r10, r5\n" /* vl*interp + out[] */\
- " ldr r4, [r8, #4]\n" /* out[outputIndex+1] */\
- " str r5, [r8], #4\n" /* out[outputIndex++] = ... */\
-\
- " ldrsh r12, [r0, #+2]\n" /* in[2*inputIndex+1] */\
- " ldrsh r0, [r0, #-2]\n" /* in[2*inputIndex-1] */\
- " sub r12, r12, r0\n" /* in[2*InputIndex] - in[2*InputIndex-2] */\
- " mov r12, r12, lsl #2\n" /* <<2 */\
- " smulwt r12, r12, r6\n" /* (x1-x0)*.. */\
- " add r12, r0, r12\n" /* x0 - (..) */\
- " mla r4, r12, r11, r4\n" /* vr*interp + out[] */\
- " str r4, [r8], #4\n" /* out[outputIndex++] = ... */\
-\
- " add r6, r6, r9\n" /* phaseFraction + phaseIncrement */\
- " add r7, r7, r6, lsr #30\n" /* inputIndex + phaseFraction>>30 */
-
- ST_ONE_FRAME // frame 1
- ST_ONE_FRAME // frame 1
-
- " cmp r7, r3\n" // inputIndex - maxInIdx
- " bcc 3b\n"
- "4:\n"
-
- " bic r6, r6, #0xC0000000\n" // phaseFraction & ...
- // save modified values
- " ldr r0, [sp, #" ST_PARAM5 " + 20]\n" // &phaseFraction
- " str r6, [r0]\n" // phaseFraction
- " ldr r0, [sp, #" ST_PARAM5 " + 8]\n" // &inputIndex
- " str r7, [r0]\n" // inputIndex
- " ldr r0, [sp, #" ST_PARAM5 " + 4]\n" // out
- " sub r8, r0\n" // curOut - out
- " asr r8, #2\n" // new outputIndex
- " ldr r0, [sp, #" ST_PARAM5 " + 0]\n" // &outputIndex
- " str r8, [r0]\n" // save outputIndex
-
- " ldmfd sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r12, pc}\n"
- );
-}
-
-#endif // ASM_ARM_RESAMP1
-
-
-// ----------------------------------------------------------------------------
-
-} // namespace android
diff --git a/services/audioflinger/AudioResamplerCubic.cpp b/services/audioflinger/AudioResamplerCubic.cpp
deleted file mode 100644
index 9fb6699..0000000
--- a/services/audioflinger/AudioResamplerCubic.cpp
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * Copyright (C) 2007 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "AudioResamplerCubic"
-
-#include <stdint.h>
-#include <string.h>
-#include <sys/types.h>
-
-#include <log/log.h>
-
-#include "AudioResampler.h"
-#include "AudioResamplerCubic.h"
-
-namespace android {
-// ----------------------------------------------------------------------------
-
-void AudioResamplerCubic::init() {
- memset(&left, 0, sizeof(state));
- memset(&right, 0, sizeof(state));
-}
-
-size_t AudioResamplerCubic::resample(int32_t* out, size_t outFrameCount,
- AudioBufferProvider* provider) {
-
- // should never happen, but we overflow if it does
- // ALOG_ASSERT(outFrameCount < 32767);
-
- // select the appropriate resampler
- switch (mChannelCount) {
- case 1:
- return resampleMono16(out, outFrameCount, provider);
- case 2:
- return resampleStereo16(out, outFrameCount, provider);
- default:
- LOG_ALWAYS_FATAL("invalid channel count: %d", mChannelCount);
- return 0;
- }
-}
-
-size_t AudioResamplerCubic::resampleStereo16(int32_t* out, size_t outFrameCount,
- AudioBufferProvider* provider) {
-
- int32_t vl = mVolume[0];
- int32_t vr = mVolume[1];
-
- size_t inputIndex = mInputIndex;
- uint32_t phaseFraction = mPhaseFraction;
- uint32_t phaseIncrement = mPhaseIncrement;
- size_t outputIndex = 0;
- size_t outputSampleCount = outFrameCount * 2;
- size_t inFrameCount = getInFrameCountRequired(outFrameCount);
-
- // fetch first buffer
- if (mBuffer.frameCount == 0) {
- mBuffer.frameCount = inFrameCount;
- provider->getNextBuffer(&mBuffer);
- if (mBuffer.raw == NULL) {
- return 0;
- }
- // ALOGW("New buffer: offset=%p, frames=%dn", mBuffer.raw, mBuffer.frameCount);
- }
- int16_t *in = mBuffer.i16;
-
- while (outputIndex < outputSampleCount) {
- int32_t x;
-
- // calculate output sample
- x = phaseFraction >> kPreInterpShift;
- out[outputIndex++] += vl * interp(&left, x);
- out[outputIndex++] += vr * interp(&right, x);
- // out[outputIndex++] += vr * in[inputIndex*2];
-
- // increment phase
- phaseFraction += phaseIncrement;
- uint32_t indexIncrement = (phaseFraction >> kNumPhaseBits);
- phaseFraction &= kPhaseMask;
-
- // time to fetch another sample
- while (indexIncrement--) {
-
- inputIndex++;
- if (inputIndex == mBuffer.frameCount) {
- inputIndex = 0;
- provider->releaseBuffer(&mBuffer);
- mBuffer.frameCount = inFrameCount;
- provider->getNextBuffer(&mBuffer);
- if (mBuffer.raw == NULL) {
- goto save_state; // ugly, but efficient
- }
- in = mBuffer.i16;
- // ALOGW("New buffer: offset=%p, frames=%d", mBuffer.raw, mBuffer.frameCount);
- }
-
- // advance sample state
- advance(&left, in[inputIndex*2]);
- advance(&right, in[inputIndex*2+1]);
- }
- }
-
-save_state:
- // ALOGW("Done: index=%d, fraction=%u", inputIndex, phaseFraction);
- mInputIndex = inputIndex;
- mPhaseFraction = phaseFraction;
- return outputIndex / 2 /* channels for stereo */;
-}
-
-size_t AudioResamplerCubic::resampleMono16(int32_t* out, size_t outFrameCount,
- AudioBufferProvider* provider) {
-
- int32_t vl = mVolume[0];
- int32_t vr = mVolume[1];
-
- size_t inputIndex = mInputIndex;
- uint32_t phaseFraction = mPhaseFraction;
- uint32_t phaseIncrement = mPhaseIncrement;
- size_t outputIndex = 0;
- size_t outputSampleCount = outFrameCount * 2;
- size_t inFrameCount = getInFrameCountRequired(outFrameCount);
-
- // fetch first buffer
- if (mBuffer.frameCount == 0) {
- mBuffer.frameCount = inFrameCount;
- provider->getNextBuffer(&mBuffer);
- if (mBuffer.raw == NULL) {
- return 0;
- }
- // ALOGW("New buffer: offset=%p, frames=%d", mBuffer.raw, mBuffer.frameCount);
- }
- int16_t *in = mBuffer.i16;
-
- while (outputIndex < outputSampleCount) {
- int32_t sample;
- int32_t x;
-
- // calculate output sample
- x = phaseFraction >> kPreInterpShift;
- sample = interp(&left, x);
- out[outputIndex++] += vl * sample;
- out[outputIndex++] += vr * sample;
-
- // increment phase
- phaseFraction += phaseIncrement;
- uint32_t indexIncrement = (phaseFraction >> kNumPhaseBits);
- phaseFraction &= kPhaseMask;
-
- // time to fetch another sample
- while (indexIncrement--) {
-
- inputIndex++;
- if (inputIndex == mBuffer.frameCount) {
- inputIndex = 0;
- provider->releaseBuffer(&mBuffer);
- mBuffer.frameCount = inFrameCount;
- provider->getNextBuffer(&mBuffer);
- if (mBuffer.raw == NULL) {
- goto save_state; // ugly, but efficient
- }
- // ALOGW("New buffer: offset=%p, frames=%dn", mBuffer.raw, mBuffer.frameCount);
- in = mBuffer.i16;
- }
-
- // advance sample state
- advance(&left, in[inputIndex]);
- }
- }
-
-save_state:
- // ALOGW("Done: index=%d, fraction=%u", inputIndex, phaseFraction);
- mInputIndex = inputIndex;
- mPhaseFraction = phaseFraction;
- return outputIndex;
-}
-
-// ----------------------------------------------------------------------------
-} // namespace android
diff --git a/services/audioflinger/AudioResamplerCubic.h b/services/audioflinger/AudioResamplerCubic.h
deleted file mode 100644
index f218fd9..0000000
--- a/services/audioflinger/AudioResamplerCubic.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2007 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_AUDIO_RESAMPLER_CUBIC_H
-#define ANDROID_AUDIO_RESAMPLER_CUBIC_H
-
-#include <stdint.h>
-#include <sys/types.h>
-#include <android/log.h>
-
-#include "AudioResampler.h"
-
-namespace android {
-// ----------------------------------------------------------------------------
-
-class AudioResamplerCubic : public AudioResampler {
-public:
- AudioResamplerCubic(int inChannelCount, int32_t sampleRate) :
- AudioResampler(inChannelCount, sampleRate, MED_QUALITY) {
- }
- virtual size_t resample(int32_t* out, size_t outFrameCount,
- AudioBufferProvider* provider);
-private:
- // number of bits used in interpolation multiply - 14 bits avoids overflow
- static const int kNumInterpBits = 14;
-
- // bits to shift the phase fraction down to avoid overflow
- static const int kPreInterpShift = kNumPhaseBits - kNumInterpBits;
- typedef struct {
- int32_t a, b, c, y0, y1, y2, y3;
- } state;
- void init();
- size_t resampleMono16(int32_t* out, size_t outFrameCount,
- AudioBufferProvider* provider);
- size_t resampleStereo16(int32_t* out, size_t outFrameCount,
- AudioBufferProvider* provider);
- static inline int32_t interp(state* p, int32_t x) {
- return (((((p->a * x >> 14) + p->b) * x >> 14) + p->c) * x >> 14) + p->y1;
- }
- static inline void advance(state* p, int16_t in) {
- p->y0 = p->y1;
- p->y1 = p->y2;
- p->y2 = p->y3;
- p->y3 = in;
- p->a = (3 * (p->y1 - p->y2) - p->y0 + p->y3) >> 1;
- p->b = (p->y2 << 1) + p->y0 - (((5 * p->y1 + p->y3)) >> 1);
- p->c = (p->y2 - p->y0) >> 1;
- }
- state left, right;
-};
-
-// ----------------------------------------------------------------------------
-} // namespace android
-
-#endif /*ANDROID_AUDIO_RESAMPLER_CUBIC_H*/
diff --git a/services/audioflinger/AudioResamplerDyn.cpp b/services/audioflinger/AudioResamplerDyn.cpp
deleted file mode 100644
index 213cd1a..0000000
--- a/services/audioflinger/AudioResamplerDyn.cpp
+++ /dev/null
@@ -1,621 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "AudioResamplerDyn"
-//#define LOG_NDEBUG 0
-
-#include <malloc.h>
-#include <string.h>
-#include <stdlib.h>
-#include <dlfcn.h>
-#include <math.h>
-
-#include <cutils/compiler.h>
-#include <cutils/properties.h>
-#include <utils/Debug.h>
-#include <utils/Log.h>
-#include <audio_utils/primitives.h>
-
-#include "AudioResamplerFirOps.h" // USE_NEON, USE_SSE and USE_INLINE_ASSEMBLY defined here
-#include "AudioResamplerFirProcess.h"
-#include "AudioResamplerFirProcessNeon.h"
-#include "AudioResamplerFirProcessSSE.h"
-#include "AudioResamplerFirGen.h" // requires math.h
-#include "AudioResamplerDyn.h"
-
-//#define DEBUG_RESAMPLER
-
-namespace android {
-
-/*
- * InBuffer is a type agnostic input buffer.
- *
- * Layout of the state buffer for halfNumCoefs=8.
- *
- * [rrrrrrppppppppnnnnnnnnrrrrrrrrrrrrrrrrrrr.... rrrrrrr]
- * S I R
- *
- * S = mState
- * I = mImpulse
- * R = mRingFull
- * p = past samples, convoluted with the (p)ositive side of sinc()
- * n = future samples, convoluted with the (n)egative side of sinc()
- * r = extra space for implementing the ring buffer
- */
-
-template<typename TC, typename TI, typename TO>
-AudioResamplerDyn<TC, TI, TO>::InBuffer::InBuffer()
- : mState(NULL), mImpulse(NULL), mRingFull(NULL), mStateCount(0)
-{
-}
-
-template<typename TC, typename TI, typename TO>
-AudioResamplerDyn<TC, TI, TO>::InBuffer::~InBuffer()
-{
- init();
-}
-
-template<typename TC, typename TI, typename TO>
-void AudioResamplerDyn<TC, TI, TO>::InBuffer::init()
-{
- free(mState);
- mState = NULL;
- mImpulse = NULL;
- mRingFull = NULL;
- mStateCount = 0;
-}
-
-// resizes the state buffer to accommodate the appropriate filter length
-template<typename TC, typename TI, typename TO>
-void AudioResamplerDyn<TC, TI, TO>::InBuffer::resize(int CHANNELS, int halfNumCoefs)
-{
- // calculate desired state size
- size_t stateCount = halfNumCoefs * CHANNELS * 2 * kStateSizeMultipleOfFilterLength;
-
- // check if buffer needs resizing
- if (mState
- && stateCount == mStateCount
- && mRingFull-mState == (ssize_t) (mStateCount-halfNumCoefs*CHANNELS)) {
- return;
- }
-
- // create new buffer
- TI* state = NULL;
- (void)posix_memalign(reinterpret_cast<void**>(&state), 32, stateCount*sizeof(*state));
- memset(state, 0, stateCount*sizeof(*state));
-
- // attempt to preserve state
- if (mState) {
- TI* srcLo = mImpulse - halfNumCoefs*CHANNELS;
- TI* srcHi = mImpulse + halfNumCoefs*CHANNELS;
- TI* dst = state;
-
- if (srcLo < mState) {
- dst += mState-srcLo;
- srcLo = mState;
- }
- if (srcHi > mState + mStateCount) {
- srcHi = mState + mStateCount;
- }
- memcpy(dst, srcLo, (srcHi - srcLo) * sizeof(*srcLo));
- free(mState);
- }
-
- // set class member vars
- mState = state;
- mStateCount = stateCount;
- mImpulse = state + halfNumCoefs*CHANNELS; // actually one sample greater than needed
- mRingFull = state + mStateCount - halfNumCoefs*CHANNELS;
-}
-
-// copy in the input data into the head (impulse+halfNumCoefs) of the buffer.
-template<typename TC, typename TI, typename TO>
-template<int CHANNELS>
-void AudioResamplerDyn<TC, TI, TO>::InBuffer::readAgain(TI*& impulse, const int halfNumCoefs,
- const TI* const in, const size_t inputIndex)
-{
- TI* head = impulse + halfNumCoefs*CHANNELS;
- for (size_t i=0 ; i<CHANNELS ; i++) {
- head[i] = in[inputIndex*CHANNELS + i];
- }
-}
-
-// advance the impulse pointer, and load in data into the head (impulse+halfNumCoefs)
-template<typename TC, typename TI, typename TO>
-template<int CHANNELS>
-void AudioResamplerDyn<TC, TI, TO>::InBuffer::readAdvance(TI*& impulse, const int halfNumCoefs,
- const TI* const in, const size_t inputIndex)
-{
- impulse += CHANNELS;
-
- if (CC_UNLIKELY(impulse >= mRingFull)) {
- const size_t shiftDown = mRingFull - mState - halfNumCoefs*CHANNELS;
- memcpy(mState, mState+shiftDown, halfNumCoefs*CHANNELS*2*sizeof(TI));
- impulse -= shiftDown;
- }
- readAgain<CHANNELS>(impulse, halfNumCoefs, in, inputIndex);
-}
-
-template<typename TC, typename TI, typename TO>
-void AudioResamplerDyn<TC, TI, TO>::Constants::set(
- int L, int halfNumCoefs, int inSampleRate, int outSampleRate)
-{
- int bits = 0;
- int lscale = inSampleRate/outSampleRate < 2 ? L - 1 :
- static_cast<int>(static_cast<uint64_t>(L)*inSampleRate/outSampleRate);
- for (int i=lscale; i; ++bits, i>>=1)
- ;
- mL = L;
- mShift = kNumPhaseBits - bits;
- mHalfNumCoefs = halfNumCoefs;
-}
-
-template<typename TC, typename TI, typename TO>
-AudioResamplerDyn<TC, TI, TO>::AudioResamplerDyn(
- int inChannelCount, int32_t sampleRate, src_quality quality)
- : AudioResampler(inChannelCount, sampleRate, quality),
- mResampleFunc(0), mFilterSampleRate(0), mFilterQuality(DEFAULT_QUALITY),
- mCoefBuffer(NULL)
-{
- mVolumeSimd[0] = mVolumeSimd[1] = 0;
- // The AudioResampler base class assumes we are always ready for 1:1 resampling.
- // We reset mInSampleRate to 0, so setSampleRate() will calculate filters for
- // setSampleRate() for 1:1. (May be removed if precalculated filters are used.)
- mInSampleRate = 0;
- mConstants.set(128, 8, mSampleRate, mSampleRate); // TODO: set better
-}
-
-template<typename TC, typename TI, typename TO>
-AudioResamplerDyn<TC, TI, TO>::~AudioResamplerDyn()
-{
- free(mCoefBuffer);
-}
-
-template<typename TC, typename TI, typename TO>
-void AudioResamplerDyn<TC, TI, TO>::init()
-{
- mFilterSampleRate = 0; // always trigger new filter generation
- mInBuffer.init();
-}
-
-template<typename TC, typename TI, typename TO>
-void AudioResamplerDyn<TC, TI, TO>::setVolume(float left, float right)
-{
- AudioResampler::setVolume(left, right);
- if (is_same<TO, float>::value || is_same<TO, double>::value) {
- mVolumeSimd[0] = static_cast<TO>(left);
- mVolumeSimd[1] = static_cast<TO>(right);
- } else { // integer requires scaling to U4_28 (rounding down)
- // integer volumes are clamped to 0 to UNITY_GAIN so there
- // are no issues with signed overflow.
- mVolumeSimd[0] = u4_28_from_float(clampFloatVol(left));
- mVolumeSimd[1] = u4_28_from_float(clampFloatVol(right));
- }
-}
-
-template<typename T> T max(T a, T b) {return a > b ? a : b;}
-
-template<typename T> T absdiff(T a, T b) {return a > b ? a - b : b - a;}
-
-template<typename TC, typename TI, typename TO>
-void AudioResamplerDyn<TC, TI, TO>::createKaiserFir(Constants &c,
- double stopBandAtten, int inSampleRate, int outSampleRate, double tbwCheat)
-{
- TC* buf = NULL;
- static const double atten = 0.9998; // to avoid ripple overflow
- double fcr;
- double tbw = firKaiserTbw(c.mHalfNumCoefs, stopBandAtten);
-
- (void)posix_memalign(reinterpret_cast<void**>(&buf), 32, (c.mL+1)*c.mHalfNumCoefs*sizeof(TC));
- if (inSampleRate < outSampleRate) { // upsample
- fcr = max(0.5*tbwCheat - tbw/2, tbw/2);
- } else { // downsample
- fcr = max(0.5*tbwCheat*outSampleRate/inSampleRate - tbw/2, tbw/2);
- }
- // create and set filter
- firKaiserGen(buf, c.mL, c.mHalfNumCoefs, stopBandAtten, fcr, atten);
- c.mFirCoefs = buf;
- if (mCoefBuffer) {
- free(mCoefBuffer);
- }
- mCoefBuffer = buf;
-#ifdef DEBUG_RESAMPLER
- // print basic filter stats
- printf("L:%d hnc:%d stopBandAtten:%lf fcr:%lf atten:%lf tbw:%lf\n",
- c.mL, c.mHalfNumCoefs, stopBandAtten, fcr, atten, tbw);
- // test the filter and report results
- double fp = (fcr - tbw/2)/c.mL;
- double fs = (fcr + tbw/2)/c.mL;
- double passMin, passMax, passRipple;
- double stopMax, stopRipple;
- testFir(buf, c.mL, c.mHalfNumCoefs, fp, fs, /*passSteps*/ 1000, /*stopSteps*/ 100000,
- passMin, passMax, passRipple, stopMax, stopRipple);
- printf("passband(%lf, %lf): %.8lf %.8lf %.8lf\n", 0., fp, passMin, passMax, passRipple);
- printf("stopband(%lf, %lf): %.8lf %.3lf\n", fs, 0.5, stopMax, stopRipple);
-#endif
-}
-
-// recursive gcd. Using objdump, it appears the tail recursion is converted to a while loop.
-static int gcd(int n, int m)
-{
- if (m == 0) {
- return n;
- }
- return gcd(m, n % m);
-}
-
-static bool isClose(int32_t newSampleRate, int32_t prevSampleRate,
- int32_t filterSampleRate, int32_t outSampleRate)
-{
-
- // different upsampling ratios do not need a filter change.
- if (filterSampleRate != 0
- && filterSampleRate < outSampleRate
- && newSampleRate < outSampleRate)
- return true;
-
- // check design criteria again if downsampling is detected.
- int pdiff = absdiff(newSampleRate, prevSampleRate);
- int adiff = absdiff(newSampleRate, filterSampleRate);
-
- // allow up to 6% relative change increments.
- // allow up to 12% absolute change increments (from filter design)
- return pdiff < prevSampleRate>>4 && adiff < filterSampleRate>>3;
-}
-
-template<typename TC, typename TI, typename TO>
-void AudioResamplerDyn<TC, TI, TO>::setSampleRate(int32_t inSampleRate)
-{
- if (mInSampleRate == inSampleRate) {
- return;
- }
- int32_t oldSampleRate = mInSampleRate;
- uint32_t oldPhaseWrapLimit = mConstants.mL << mConstants.mShift;
- bool useS32 = false;
-
- mInSampleRate = inSampleRate;
-
- // TODO: Add precalculated Equiripple filters
-
- if (mFilterQuality != getQuality() ||
- !isClose(inSampleRate, oldSampleRate, mFilterSampleRate, mSampleRate)) {
- mFilterSampleRate = inSampleRate;
- mFilterQuality = getQuality();
-
- // Begin Kaiser Filter computation
- //
- // The quantization floor for S16 is about 96db - 10*log_10(#length) + 3dB.
- // Keep the stop band attenuation no greater than 84-85dB for 32 length S16 filters
- //
- // For s32 we keep the stop band attenuation at the same as 16b resolution, about
- // 96-98dB
- //
-
- double stopBandAtten;
- double tbwCheat = 1.; // how much we "cheat" into aliasing
- int halfLength;
- if (mFilterQuality == DYN_HIGH_QUALITY) {
- // 32b coefficients, 64 length
- useS32 = true;
- stopBandAtten = 98.;
- if (inSampleRate >= mSampleRate * 4) {
- halfLength = 48;
- } else if (inSampleRate >= mSampleRate * 2) {
- halfLength = 40;
- } else {
- halfLength = 32;
- }
- } else if (mFilterQuality == DYN_LOW_QUALITY) {
- // 16b coefficients, 16-32 length
- useS32 = false;
- stopBandAtten = 80.;
- if (inSampleRate >= mSampleRate * 4) {
- halfLength = 24;
- } else if (inSampleRate >= mSampleRate * 2) {
- halfLength = 16;
- } else {
- halfLength = 8;
- }
- if (inSampleRate <= mSampleRate) {
- tbwCheat = 1.05;
- } else {
- tbwCheat = 1.03;
- }
- } else { // DYN_MED_QUALITY
- // 16b coefficients, 32-64 length
- // note: > 64 length filters with 16b coefs can have quantization noise problems
- useS32 = false;
- stopBandAtten = 84.;
- if (inSampleRate >= mSampleRate * 4) {
- halfLength = 32;
- } else if (inSampleRate >= mSampleRate * 2) {
- halfLength = 24;
- } else {
- halfLength = 16;
- }
- if (inSampleRate <= mSampleRate) {
- tbwCheat = 1.03;
- } else {
- tbwCheat = 1.01;
- }
- }
-
- // determine the number of polyphases in the filterbank.
- // for 16b, it is desirable to have 2^(16/2) = 256 phases.
- // https://ccrma.stanford.edu/~jos/resample/Relation_Interpolation_Error_Quantization.html
- //
- // We are a bit more lax on this.
-
- int phases = mSampleRate / gcd(mSampleRate, inSampleRate);
-
- // TODO: Once dynamic sample rate change is an option, the code below
- // should be modified to execute only when dynamic sample rate change is enabled.
- //
- // as above, #phases less than 63 is too few phases for accurate linear interpolation.
- // we increase the phases to compensate, but more phases means more memory per
- // filter and more time to compute the filter.
- //
- // if we know that the filter will be used for dynamic sample rate changes,
- // that would allow us skip this part for fixed sample rate resamplers.
- //
- while (phases<63) {
- phases *= 2; // this code only needed to support dynamic rate changes
- }
-
- if (phases>=256) { // too many phases, always interpolate
- phases = 127;
- }
-
- // create the filter
- mConstants.set(phases, halfLength, inSampleRate, mSampleRate);
- createKaiserFir(mConstants, stopBandAtten,
- inSampleRate, mSampleRate, tbwCheat);
- } // End Kaiser filter
-
- // update phase and state based on the new filter.
- const Constants& c(mConstants);
- mInBuffer.resize(mChannelCount, c.mHalfNumCoefs);
- const uint32_t phaseWrapLimit = c.mL << c.mShift;
- // try to preserve as much of the phase fraction as possible for on-the-fly changes
- mPhaseFraction = static_cast<unsigned long long>(mPhaseFraction)
- * phaseWrapLimit / oldPhaseWrapLimit;
- mPhaseFraction %= phaseWrapLimit; // should not do anything, but just in case.
- mPhaseIncrement = static_cast<uint32_t>(static_cast<uint64_t>(phaseWrapLimit)
- * inSampleRate / mSampleRate);
-
- // determine which resampler to use
- // check if locked phase (works only if mPhaseIncrement has no "fractional phase bits")
- int locked = (mPhaseIncrement << (sizeof(mPhaseIncrement)*8 - c.mShift)) == 0;
- if (locked) {
- mPhaseFraction = mPhaseFraction >> c.mShift << c.mShift; // remove fractional phase
- }
-
- // stride is the minimum number of filter coefficients processed per loop iteration.
- // We currently only allow a stride of 16 to match with SIMD processing.
- // This means that the filter length must be a multiple of 16,
- // or half the filter length (mHalfNumCoefs) must be a multiple of 8.
- //
- // Note: A stride of 2 is achieved with non-SIMD processing.
- int stride = ((c.mHalfNumCoefs & 7) == 0) ? 16 : 2;
- LOG_ALWAYS_FATAL_IF(stride < 16, "Resampler stride must be 16 or more");
- LOG_ALWAYS_FATAL_IF(mChannelCount < 1 || mChannelCount > 8,
- "Resampler channels(%d) must be between 1 to 8", mChannelCount);
- // stride 16 (falls back to stride 2 for machines that do not support NEON)
- if (locked) {
- switch (mChannelCount) {
- case 1:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<1, true, 16>;
- break;
- case 2:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<2, true, 16>;
- break;
- case 3:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<3, true, 16>;
- break;
- case 4:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<4, true, 16>;
- break;
- case 5:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<5, true, 16>;
- break;
- case 6:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<6, true, 16>;
- break;
- case 7:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<7, true, 16>;
- break;
- case 8:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<8, true, 16>;
- break;
- }
- } else {
- switch (mChannelCount) {
- case 1:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<1, false, 16>;
- break;
- case 2:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<2, false, 16>;
- break;
- case 3:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<3, false, 16>;
- break;
- case 4:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<4, false, 16>;
- break;
- case 5:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<5, false, 16>;
- break;
- case 6:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<6, false, 16>;
- break;
- case 7:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<7, false, 16>;
- break;
- case 8:
- mResampleFunc = &AudioResamplerDyn<TC, TI, TO>::resample<8, false, 16>;
- break;
- }
- }
-#ifdef DEBUG_RESAMPLER
- printf("channels:%d %s stride:%d %s coef:%d shift:%d\n",
- mChannelCount, locked ? "locked" : "interpolated",
- stride, useS32 ? "S32" : "S16", 2*c.mHalfNumCoefs, c.mShift);
-#endif
-}
-
-template<typename TC, typename TI, typename TO>
-size_t AudioResamplerDyn<TC, TI, TO>::resample(int32_t* out, size_t outFrameCount,
- AudioBufferProvider* provider)
-{
- return (this->*mResampleFunc)(reinterpret_cast<TO*>(out), outFrameCount, provider);
-}
-
-template<typename TC, typename TI, typename TO>
-template<int CHANNELS, bool LOCKED, int STRIDE>
-size_t AudioResamplerDyn<TC, TI, TO>::resample(TO* out, size_t outFrameCount,
- AudioBufferProvider* provider)
-{
- // TODO Mono -> Mono is not supported. OUTPUT_CHANNELS reflects minimum of stereo out.
- const int OUTPUT_CHANNELS = (CHANNELS < 2) ? 2 : CHANNELS;
- const Constants& c(mConstants);
- const TC* const coefs = mConstants.mFirCoefs;
- TI* impulse = mInBuffer.getImpulse();
- size_t inputIndex = 0;
- uint32_t phaseFraction = mPhaseFraction;
- const uint32_t phaseIncrement = mPhaseIncrement;
- size_t outputIndex = 0;
- size_t outputSampleCount = outFrameCount * OUTPUT_CHANNELS;
- const uint32_t phaseWrapLimit = c.mL << c.mShift;
- size_t inFrameCount = (phaseIncrement * (uint64_t)outFrameCount + phaseFraction)
- / phaseWrapLimit;
- // sanity check that inFrameCount is in signed 32 bit integer range.
- ALOG_ASSERT(0 <= inFrameCount && inFrameCount < (1U << 31));
-
- //ALOGV("inFrameCount:%d outFrameCount:%d"
- // " phaseIncrement:%u phaseFraction:%u phaseWrapLimit:%u",
- // inFrameCount, outFrameCount, phaseIncrement, phaseFraction, phaseWrapLimit);
-
- // NOTE: be very careful when modifying the code here. register
- // pressure is very high and a small change might cause the compiler
- // to generate far less efficient code.
- // Always sanity check the result with objdump or test-resample.
-
- // the following logic is a bit convoluted to keep the main processing loop
- // as tight as possible with register allocation.
- while (outputIndex < outputSampleCount) {
- //ALOGV("LOOP: inFrameCount:%d outputIndex:%d outFrameCount:%d"
- // " phaseFraction:%u phaseWrapLimit:%u",
- // inFrameCount, outputIndex, outFrameCount, phaseFraction, phaseWrapLimit);
-
- // check inputIndex overflow
- ALOG_ASSERT(inputIndex <= mBuffer.frameCount, "inputIndex%zu > frameCount%zu",
- inputIndex, mBuffer.frameCount);
- // Buffer is empty, fetch a new one if necessary (inFrameCount > 0).
- // We may not fetch a new buffer if the existing data is sufficient.
- while (mBuffer.frameCount == 0 && inFrameCount > 0) {
- mBuffer.frameCount = inFrameCount;
- provider->getNextBuffer(&mBuffer);
- if (mBuffer.raw == NULL) {
- goto resample_exit;
- }
- inFrameCount -= mBuffer.frameCount;
- if (phaseFraction >= phaseWrapLimit) { // read in data
- mInBuffer.template readAdvance<CHANNELS>(
- impulse, c.mHalfNumCoefs,
- reinterpret_cast<TI*>(mBuffer.raw), inputIndex);
- inputIndex++;
- phaseFraction -= phaseWrapLimit;
- while (phaseFraction >= phaseWrapLimit) {
- if (inputIndex >= mBuffer.frameCount) {
- inputIndex = 0;
- provider->releaseBuffer(&mBuffer);
- break;
- }
- mInBuffer.template readAdvance<CHANNELS>(
- impulse, c.mHalfNumCoefs,
- reinterpret_cast<TI*>(mBuffer.raw), inputIndex);
- inputIndex++;
- phaseFraction -= phaseWrapLimit;
- }
- }
- }
- const TI* const in = reinterpret_cast<const TI*>(mBuffer.raw);
- const size_t frameCount = mBuffer.frameCount;
- const int coefShift = c.mShift;
- const int halfNumCoefs = c.mHalfNumCoefs;
- const TO* const volumeSimd = mVolumeSimd;
-
- // main processing loop
- while (CC_LIKELY(outputIndex < outputSampleCount)) {
- // caution: fir() is inlined and may be large.
- // output will be loaded with the appropriate values
- //
- // from the input samples in impulse[-halfNumCoefs+1]... impulse[halfNumCoefs]
- // from the polyphase filter of (phaseFraction / phaseWrapLimit) in coefs.
- //
- //ALOGV("LOOP2: inFrameCount:%d outputIndex:%d outFrameCount:%d"
- // " phaseFraction:%u phaseWrapLimit:%u",
- // inFrameCount, outputIndex, outFrameCount, phaseFraction, phaseWrapLimit);
- ALOG_ASSERT(phaseFraction < phaseWrapLimit);
- fir<CHANNELS, LOCKED, STRIDE>(
- &out[outputIndex],
- phaseFraction, phaseWrapLimit,
- coefShift, halfNumCoefs, coefs,
- impulse, volumeSimd);
-
- outputIndex += OUTPUT_CHANNELS;
-
- phaseFraction += phaseIncrement;
- while (phaseFraction >= phaseWrapLimit) {
- if (inputIndex >= frameCount) {
- goto done; // need a new buffer
- }
- mInBuffer.template readAdvance<CHANNELS>(impulse, halfNumCoefs, in, inputIndex);
- inputIndex++;
- phaseFraction -= phaseWrapLimit;
- }
- }
-done:
- // We arrive here when we're finished or when the input buffer runs out.
- // Regardless we need to release the input buffer if we've acquired it.
- if (inputIndex > 0) { // we've acquired a buffer (alternatively could check frameCount)
- ALOG_ASSERT(inputIndex == frameCount, "inputIndex(%zu) != frameCount(%zu)",
- inputIndex, frameCount); // must have been fully read.
- inputIndex = 0;
- provider->releaseBuffer(&mBuffer);
- ALOG_ASSERT(mBuffer.frameCount == 0);
- }
- }
-
-resample_exit:
- // inputIndex must be zero in all three cases:
- // (1) the buffer never was been acquired; (2) the buffer was
- // released at "done:"; or (3) getNextBuffer() failed.
- ALOG_ASSERT(inputIndex == 0, "Releasing: inputindex:%zu frameCount:%zu phaseFraction:%u",
- inputIndex, mBuffer.frameCount, phaseFraction);
- ALOG_ASSERT(mBuffer.frameCount == 0); // there must be no frames in the buffer
- mInBuffer.setImpulse(impulse);
- mPhaseFraction = phaseFraction;
- return outputIndex / OUTPUT_CHANNELS;
-}
-
-/* instantiate templates used by AudioResampler::create */
-template class AudioResamplerDyn<float, float, float>;
-template class AudioResamplerDyn<int16_t, int16_t, int32_t>;
-template class AudioResamplerDyn<int32_t, int16_t, int32_t>;
-
-// ----------------------------------------------------------------------------
-} // namespace android
diff --git a/services/audioflinger/AudioResamplerDyn.h b/services/audioflinger/AudioResamplerDyn.h
deleted file mode 100644
index f8b8fa1..0000000
--- a/services/audioflinger/AudioResamplerDyn.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_AUDIO_RESAMPLER_DYN_H
-#define ANDROID_AUDIO_RESAMPLER_DYN_H
-
-#include <stdint.h>
-#include <sys/types.h>
-#include <android/log.h>
-
-#include "AudioResampler.h"
-
-namespace android {
-
-/* AudioResamplerDyn
- *
- * This class template is used for floating point and integer resamplers.
- *
- * Type variables:
- * TC = filter coefficient type (one of int16_t, int32_t, or float)
- * TI = input data type (one of int16_t or float)
- * TO = output data type (one of int32_t or float)
- *
- * For integer input data types TI, the coefficient type TC is either int16_t or int32_t.
- * For float input data types TI, the coefficient type TC is float.
- */
-
-template<typename TC, typename TI, typename TO>
-class AudioResamplerDyn: public AudioResampler {
-public:
- AudioResamplerDyn(int inChannelCount,
- int32_t sampleRate, src_quality quality);
-
- virtual ~AudioResamplerDyn();
-
- virtual void init();
-
- virtual void setSampleRate(int32_t inSampleRate);
-
- virtual void setVolume(float left, float right);
-
- virtual size_t resample(int32_t* out, size_t outFrameCount,
- AudioBufferProvider* provider);
-
-private:
-
- class Constants { // stores the filter constants.
- public:
- Constants() :
- mL(0), mShift(0), mHalfNumCoefs(0), mFirCoefs(NULL)
- {}
- void set(int L, int halfNumCoefs,
- int inSampleRate, int outSampleRate);
-
- int mL; // interpolation phases in the filter.
- int mShift; // right shift to get polyphase index
- unsigned int mHalfNumCoefs; // filter half #coefs
- const TC* mFirCoefs; // polyphase filter bank
- };
-
- class InBuffer { // buffer management for input type TI
- public:
- InBuffer();
- ~InBuffer();
- void init();
-
- void resize(int CHANNELS, int halfNumCoefs);
-
- // used for direct management of the mImpulse pointer
- inline TI* getImpulse() {
- return mImpulse;
- }
-
- inline void setImpulse(TI *impulse) {
- mImpulse = impulse;
- }
-
- template<int CHANNELS>
- inline void readAgain(TI*& impulse, const int halfNumCoefs,
- const TI* const in, const size_t inputIndex);
-
- template<int CHANNELS>
- inline void readAdvance(TI*& impulse, const int halfNumCoefs,
- const TI* const in, const size_t inputIndex);
-
- private:
- // tuning parameter guidelines: 2 <= multiple <= 8
- static const int kStateSizeMultipleOfFilterLength = 4;
-
- // in general, mRingFull = mState + mStateSize - halfNumCoefs*CHANNELS.
- TI* mState; // base pointer for the input buffer storage
- TI* mImpulse; // current location of the impulse response (centered)
- TI* mRingFull; // mState <= mImpulse < mRingFull
- size_t mStateCount; // size of state in units of TI.
- };
-
- void createKaiserFir(Constants &c, double stopBandAtten,
- int inSampleRate, int outSampleRate, double tbwCheat);
-
- template<int CHANNELS, bool LOCKED, int STRIDE>
- size_t resample(TO* out, size_t outFrameCount, AudioBufferProvider* provider);
-
- // define a pointer to member function type for resample
- typedef size_t (AudioResamplerDyn<TC, TI, TO>::*resample_ABP_t)(TO* out,
- size_t outFrameCount, AudioBufferProvider* provider);
-
- // data - the contiguous storage and layout of these is important.
- InBuffer mInBuffer;
- Constants mConstants; // current set of coefficient parameters
- TO __attribute__ ((aligned (8))) mVolumeSimd[2]; // must be aligned or NEON may crash
- resample_ABP_t mResampleFunc; // called function for resampling
- int32_t mFilterSampleRate; // designed filter sample rate.
- src_quality mFilterQuality; // designed filter quality.
- void* mCoefBuffer; // if a filter is created, this is not null
-};
-
-} // namespace android
-
-#endif /*ANDROID_AUDIO_RESAMPLER_DYN_H*/
diff --git a/services/audioflinger/AudioResamplerFirOps.h b/services/audioflinger/AudioResamplerFirOps.h
deleted file mode 100644
index 776903c..0000000
--- a/services/audioflinger/AudioResamplerFirOps.h
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_AUDIO_RESAMPLER_FIR_OPS_H
-#define ANDROID_AUDIO_RESAMPLER_FIR_OPS_H
-
-namespace android {
-
-#if defined(__arm__) && !defined(__thumb__)
-#define USE_INLINE_ASSEMBLY (true)
-#else
-#define USE_INLINE_ASSEMBLY (false)
-#endif
-
-#if defined(__aarch64__) || defined(__ARM_NEON__)
-#ifndef USE_NEON
-#define USE_NEON (true)
-#endif
-#else
-#define USE_NEON (false)
-#endif
-#if USE_NEON
-#include <arm_neon.h>
-#endif
-
-#if defined(__SSSE3__) // Should be supported in x86 ABI for both 32 & 64-bit.
-#define USE_SSE (true)
-#include <tmmintrin.h>
-#else
-#define USE_SSE (false)
-#endif
-
-template<typename T, typename U>
-struct is_same
-{
- static const bool value = false;
-};
-
-template<typename T>
-struct is_same<T, T> // partial specialization
-{
- static const bool value = true;
-};
-
-static inline
-int32_t mulRL(int left, int32_t in, uint32_t vRL)
-{
-#if USE_INLINE_ASSEMBLY
- int32_t out;
- if (left) {
- asm( "smultb %[out], %[in], %[vRL] \n"
- : [out]"=r"(out)
- : [in]"%r"(in), [vRL]"r"(vRL)
- : );
- } else {
- asm( "smultt %[out], %[in], %[vRL] \n"
- : [out]"=r"(out)
- : [in]"%r"(in), [vRL]"r"(vRL)
- : );
- }
- return out;
-#else
- int16_t v = left ? static_cast<int16_t>(vRL) : static_cast<int16_t>(vRL>>16);
- return static_cast<int32_t>((static_cast<int64_t>(in) * v) >> 16);
-#endif
-}
-
-static inline
-int32_t mulAdd(int16_t in, int16_t v, int32_t a)
-{
-#if USE_INLINE_ASSEMBLY
- int32_t out;
- asm( "smlabb %[out], %[v], %[in], %[a] \n"
- : [out]"=r"(out)
- : [in]"%r"(in), [v]"r"(v), [a]"r"(a)
- : );
- return out;
-#else
- return a + v * in;
-#endif
-}
-
-static inline
-int32_t mulAdd(int16_t in, int32_t v, int32_t a)
-{
-#if USE_INLINE_ASSEMBLY
- int32_t out;
- asm( "smlawb %[out], %[v], %[in], %[a] \n"
- : [out]"=r"(out)
- : [in]"%r"(in), [v]"r"(v), [a]"r"(a)
- : );
- return out;
-#else
- return a + static_cast<int32_t>((static_cast<int64_t>(v) * in) >> 16);
-#endif
-}
-
-static inline
-int32_t mulAdd(int32_t in, int32_t v, int32_t a)
-{
-#if USE_INLINE_ASSEMBLY
- int32_t out;
- asm( "smmla %[out], %[v], %[in], %[a] \n"
- : [out]"=r"(out)
- : [in]"%r"(in), [v]"r"(v), [a]"r"(a)
- : );
- return out;
-#else
- return a + static_cast<int32_t>((static_cast<int64_t>(v) * in) >> 32);
-#endif
-}
-
-static inline
-int32_t mulAddRL(int left, uint32_t inRL, int16_t v, int32_t a)
-{
-#if USE_INLINE_ASSEMBLY
- int32_t out;
- if (left) {
- asm( "smlabb %[out], %[v], %[inRL], %[a] \n"
- : [out]"=r"(out)
- : [inRL]"%r"(inRL), [v]"r"(v), [a]"r"(a)
- : );
- } else {
- asm( "smlabt %[out], %[v], %[inRL], %[a] \n"
- : [out]"=r"(out)
- : [inRL]"%r"(inRL), [v]"r"(v), [a]"r"(a)
- : );
- }
- return out;
-#else
- int16_t s = left ? static_cast<int16_t>(inRL) : static_cast<int16_t>(inRL>>16);
- return a + v * s;
-#endif
-}
-
-static inline
-int32_t mulAddRL(int left, uint32_t inRL, int32_t v, int32_t a)
-{
-#if USE_INLINE_ASSEMBLY
- int32_t out;
- if (left) {
- asm( "smlawb %[out], %[v], %[inRL], %[a] \n"
- : [out]"=r"(out)
- : [inRL]"%r"(inRL), [v]"r"(v), [a]"r"(a)
- : );
- } else {
- asm( "smlawt %[out], %[v], %[inRL], %[a] \n"
- : [out]"=r"(out)
- : [inRL]"%r"(inRL), [v]"r"(v), [a]"r"(a)
- : );
- }
- return out;
-#else
- int16_t s = left ? static_cast<int16_t>(inRL) : static_cast<int16_t>(inRL>>16);
- return a + static_cast<int32_t>((static_cast<int64_t>(v) * s) >> 16);
-#endif
-}
-
-} // namespace android
-
-#endif /*ANDROID_AUDIO_RESAMPLER_FIR_OPS_H*/
diff --git a/services/audioflinger/AudioResamplerFirProcess.h b/services/audioflinger/AudioResamplerFirProcess.h
deleted file mode 100644
index 176202e..0000000
--- a/services/audioflinger/AudioResamplerFirProcess.h
+++ /dev/null
@@ -1,439 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_H
-#define ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_H
-
-namespace android {
-
-// depends on AudioResamplerFirOps.h
-
-/* variant for input type TI = int16_t input samples */
-template<typename TC>
-static inline
-void mac(int32_t& l, int32_t& r, TC coef, const int16_t* samples)
-{
- uint32_t rl = *reinterpret_cast<const uint32_t*>(samples);
- l = mulAddRL(1, rl, coef, l);
- r = mulAddRL(0, rl, coef, r);
-}
-
-template<typename TC>
-static inline
-void mac(int32_t& l, TC coef, const int16_t* samples)
-{
- l = mulAdd(samples[0], coef, l);
-}
-
-/* variant for input type TI = float input samples */
-template<typename TC>
-static inline
-void mac(float& l, float& r, TC coef, const float* samples)
-{
- l += *samples++ * coef;
- r += *samples * coef;
-}
-
-template<typename TC>
-static inline
-void mac(float& l, TC coef, const float* samples)
-{
- l += *samples * coef;
-}
-
-/* variant for output type TO = int32_t output samples */
-static inline
-int32_t volumeAdjust(int32_t value, int32_t volume)
-{
- return 2 * mulRL(0, value, volume); // Note: only use top 16b
-}
-
-/* variant for output type TO = float output samples */
-static inline
-float volumeAdjust(float value, float volume)
-{
- return value * volume;
-}
-
-/*
- * Helper template functions for loop unrolling accumulator operations.
- *
- * Unrolling the loops achieves about 2x gain.
- * Using a recursive template rather than an array of TO[] for the accumulator
- * values is an additional 10-20% gain.
- */
-
-template<int CHANNELS, typename TO>
-class Accumulator : public Accumulator<CHANNELS-1, TO> // recursive
-{
-public:
- inline void clear() {
- value = 0;
- Accumulator<CHANNELS-1, TO>::clear();
- }
- template<typename TC, typename TI>
- inline void acc(TC coef, const TI*& data) {
- mac(value, coef, data++);
- Accumulator<CHANNELS-1, TO>::acc(coef, data);
- }
- inline void volume(TO*& out, TO gain) {
- *out++ = volumeAdjust(value, gain);
- Accumulator<CHANNELS-1, TO>::volume(out, gain);
- }
-
- TO value; // one per recursive inherited base class
-};
-
-template<typename TO>
-class Accumulator<0, TO> {
-public:
- inline void clear() {
- }
- template<typename TC, typename TI>
- inline void acc(TC coef __unused, const TI*& data __unused) {
- }
- inline void volume(TO*& out __unused, TO gain __unused) {
- }
-};
-
-template<typename TC, typename TINTERP>
-inline
-TC interpolate(TC coef_0, TC coef_1, TINTERP lerp)
-{
- return lerp * (coef_1 - coef_0) + coef_0;
-}
-
-template<>
-inline
-int16_t interpolate<int16_t, uint32_t>(int16_t coef_0, int16_t coef_1, uint32_t lerp)
-{ // in some CPU architectures 16b x 16b multiplies are faster.
- return (static_cast<int16_t>(lerp) * static_cast<int16_t>(coef_1 - coef_0) >> 15) + coef_0;
-}
-
-template<>
-inline
-int32_t interpolate<int32_t, uint32_t>(int32_t coef_0, int32_t coef_1, uint32_t lerp)
-{
- return (lerp * static_cast<int64_t>(coef_1 - coef_0) >> 31) + coef_0;
-}
-
-/* class scope for passing in functions into templates */
-struct InterpCompute {
- template<typename TC, typename TINTERP>
- static inline
- TC interpolatep(TC coef_0, TC coef_1, TINTERP lerp) {
- return interpolate(coef_0, coef_1, lerp);
- }
-
- template<typename TC, typename TINTERP>
- static inline
- TC interpolaten(TC coef_0, TC coef_1, TINTERP lerp) {
- return interpolate(coef_0, coef_1, lerp);
- }
-};
-
-struct InterpNull {
- template<typename TC, typename TINTERP>
- static inline
- TC interpolatep(TC coef_0, TC coef_1 __unused, TINTERP lerp __unused) {
- return coef_0;
- }
-
- template<typename TC, typename TINTERP>
- static inline
- TC interpolaten(TC coef_0 __unused, TC coef_1, TINTERP lerp __unused) {
- return coef_1;
- }
-};
-
-/*
- * Calculates a single output frame (two samples).
- *
- * The Process*() functions compute both the positive half FIR dot product and
- * the negative half FIR dot product, accumulates, and then applies the volume.
- *
- * Use fir() to compute the proper coefficient pointers for a polyphase
- * filter bank.
- *
- * ProcessBase() is the fundamental processing template function.
- *
- * ProcessL() calls ProcessBase() with TFUNC = InterpNull, for fixed/locked phase.
- * Process() calls ProcessBase() with TFUNC = InterpCompute, for interpolated phase.
- */
-
-template <int CHANNELS, int STRIDE, typename TFUNC, typename TC, typename TI, typename TO,
- typename TINTERP>
-static inline
-void ProcessBase(TO* const out,
- size_t count,
- const TC* coefsP,
- const TC* coefsN,
- const TI* sP,
- const TI* sN,
- TINTERP lerpP,
- const TO* const volumeLR)
-{
- COMPILE_TIME_ASSERT_FUNCTION_SCOPE(CHANNELS > 0)
-
- if (CHANNELS > 2) {
- // TO accum[CHANNELS];
- Accumulator<CHANNELS, TO> accum;
-
- // for (int j = 0; j < CHANNELS; ++j) accum[j] = 0;
- accum.clear();
- for (size_t i = 0; i < count; ++i) {
- TC c = TFUNC::interpolatep(coefsP[0], coefsP[count], lerpP);
-
- // for (int j = 0; j < CHANNELS; ++j) mac(accum[j], c, sP + j);
- const TI *tmp_data = sP; // tmp_ptr seems to work better
- accum.acc(c, tmp_data);
-
- coefsP++;
- sP -= CHANNELS;
- c = TFUNC::interpolaten(coefsN[count], coefsN[0], lerpP);
-
- // for (int j = 0; j < CHANNELS; ++j) mac(accum[j], c, sN + j);
- tmp_data = sN; // tmp_ptr seems faster than directly using sN
- accum.acc(c, tmp_data);
-
- coefsN++;
- sN += CHANNELS;
- }
- // for (int j = 0; j < CHANNELS; ++j) out[j] += volumeAdjust(accum[j], volumeLR[0]);
- TO *tmp_out = out; // may remove if const out definition changes.
- accum.volume(tmp_out, volumeLR[0]);
- } else if (CHANNELS == 2) {
- TO l = 0;
- TO r = 0;
- for (size_t i = 0; i < count; ++i) {
- mac(l, r, TFUNC::interpolatep(coefsP[0], coefsP[count], lerpP), sP);
- coefsP++;
- sP -= CHANNELS;
- mac(l, r, TFUNC::interpolaten(coefsN[count], coefsN[0], lerpP), sN);
- coefsN++;
- sN += CHANNELS;
- }
- out[0] += volumeAdjust(l, volumeLR[0]);
- out[1] += volumeAdjust(r, volumeLR[1]);
- } else { /* CHANNELS == 1 */
- TO l = 0;
- for (size_t i = 0; i < count; ++i) {
- mac(l, TFUNC::interpolatep(coefsP[0], coefsP[count], lerpP), sP);
- coefsP++;
- sP -= CHANNELS;
- mac(l, TFUNC::interpolaten(coefsN[count], coefsN[0], lerpP), sN);
- coefsN++;
- sN += CHANNELS;
- }
- out[0] += volumeAdjust(l, volumeLR[0]);
- out[1] += volumeAdjust(l, volumeLR[1]);
- }
-}
-
-/* Calculates a single output frame from a polyphase resampling filter.
- * See Process() for parameter details.
- */
-template <int CHANNELS, int STRIDE, typename TC, typename TI, typename TO>
-static inline
-void ProcessL(TO* const out,
- int count,
- const TC* coefsP,
- const TC* coefsN,
- const TI* sP,
- const TI* sN,
- const TO* const volumeLR)
-{
- ProcessBase<CHANNELS, STRIDE, InterpNull>(out, count, coefsP, coefsN, sP, sN, 0, volumeLR);
-}
-
-/*
- * Calculates a single output frame from a polyphase resampling filter,
- * with filter phase interpolation.
- *
- * @param out should point to the output buffer with space for at least one output frame.
- *
- * @param count should be half the size of the total filter length (halfNumCoefs), as we
- * use symmetry in filter coefficients to evaluate two dot products.
- *
- * @param coefsP is one phase of the polyphase filter bank of size halfNumCoefs, corresponding
- * to the positive sP.
- *
- * @param coefsN is one phase of the polyphase filter bank of size halfNumCoefs, corresponding
- * to the negative sN.
- *
- * @param coefsP1 is the next phase of coefsP (used for interpolation).
- *
- * @param coefsN1 is the next phase of coefsN (used for interpolation).
- *
- * @param sP is the positive half of the coefficients (as viewed by a convolution),
- * starting at the original samples pointer and decrementing (by CHANNELS).
- *
- * @param sN is the negative half of the samples (as viewed by a convolution),
- * starting at the original samples pointer + CHANNELS and incrementing (by CHANNELS).
- *
- * @param lerpP The fractional siting between the polyphase indices is given by the bits
- * below coefShift. See fir() for details.
- *
- * @param volumeLR is a pointer to an array of two 32 bit volume values, one per stereo channel,
- * expressed as a S32 integer or float. A negative value inverts the channel 180 degrees.
- * The pointer volumeLR should be aligned to a minimum of 8 bytes.
- * A typical value for volume is 0x1000 to align to a unity gain output of 20.12.
- */
-template <int CHANNELS, int STRIDE, typename TC, typename TI, typename TO, typename TINTERP>
-static inline
-void Process(TO* const out,
- int count,
- const TC* coefsP,
- const TC* coefsN,
- const TC* coefsP1 __unused,
- const TC* coefsN1 __unused,
- const TI* sP,
- const TI* sN,
- TINTERP lerpP,
- const TO* const volumeLR)
-{
- ProcessBase<CHANNELS, STRIDE, InterpCompute>(out, count, coefsP, coefsN, sP, sN, lerpP,
- volumeLR);
-}
-
-/*
- * Calculates a single output frame from input sample pointer.
- *
- * This sets up the params for the accelerated Process() and ProcessL()
- * functions to do the appropriate dot products.
- *
- * @param out should point to the output buffer with space for at least one output frame.
- *
- * @param phase is the fractional distance between input frames for interpolation:
- * phase >= 0 && phase < phaseWrapLimit. It can be thought of as a rational fraction
- * of phase/phaseWrapLimit.
- *
- * @param phaseWrapLimit is #polyphases<<coefShift, where #polyphases is the number of polyphases
- * in the polyphase filter. Likewise, #polyphases can be obtained as (phaseWrapLimit>>coefShift).
- *
- * @param coefShift gives the bit alignment of the polyphase index in the phase parameter.
- *
- * @param halfNumCoefs is the half the number of coefficients per polyphase filter. Since the
- * overall filterbank is odd-length symmetric, only halfNumCoefs need be stored.
- *
- * @param coefs is the polyphase filter bank, starting at from polyphase index 0, and ranging to
- * and including the #polyphases. Each polyphase of the filter has half-length halfNumCoefs
- * (due to symmetry). The total size of the filter bank in coefficients is
- * (#polyphases+1)*halfNumCoefs.
- *
- * The filter bank coefs should be aligned to a minimum of 16 bytes (preferrably to cache line).
- *
- * The coefs should be attenuated (to compensate for passband ripple)
- * if storing back into the native format.
- *
- * @param samples are unaligned input samples. The position is in the "middle" of the
- * sample array with respect to the FIR filter:
- * the negative half of the filter is dot product from samples+1 to samples+halfNumCoefs;
- * the positive half of the filter is dot product from samples to samples-halfNumCoefs+1.
- *
- * @param volumeLR is a pointer to an array of two 32 bit volume values, one per stereo channel,
- * expressed as a S32 integer or float. A negative value inverts the channel 180 degrees.
- * The pointer volumeLR should be aligned to a minimum of 8 bytes.
- * A typical value for volume is 0x1000 to align to a unity gain output of 20.12.
- *
- * In between calls to filterCoefficient, the phase is incremented by phaseIncrement, where
- * phaseIncrement is calculated as inputSampling * phaseWrapLimit / outputSampling.
- *
- * The filter polyphase index is given by indexP = phase >> coefShift. Due to
- * odd length symmetric filter, the polyphase index of the negative half depends on
- * whether interpolation is used.
- *
- * The fractional siting between the polyphase indices is given by the bits below coefShift:
- *
- * lerpP = phase << 32 - coefShift >> 1; // for 32 bit unsigned phase multiply
- * lerpP = phase << 32 - coefShift >> 17; // for 16 bit unsigned phase multiply
- *
- * For integer types, this is expressed as:
- *
- * lerpP = phase << sizeof(phase)*8 - coefShift
- * >> (sizeof(phase)-sizeof(*coefs))*8 + 1;
- *
- * For floating point, lerpP is the fractional phase scaled to [0.0, 1.0):
- *
- * lerpP = (phase << 32 - coefShift) / (1 << 32); // floating point equivalent
- */
-
-template<int CHANNELS, bool LOCKED, int STRIDE, typename TC, typename TI, typename TO>
-static inline
-void fir(TO* const out,
- const uint32_t phase, const uint32_t phaseWrapLimit,
- const int coefShift, const int halfNumCoefs, const TC* const coefs,
- const TI* const samples, const TO* const volumeLR)
-{
- // NOTE: be very careful when modifying the code here. register
- // pressure is very high and a small change might cause the compiler
- // to generate far less efficient code.
- // Always sanity check the result with objdump or test-resample.
-
- if (LOCKED) {
- // locked polyphase (no interpolation)
- // Compute the polyphase filter index on the positive and negative side.
- uint32_t indexP = phase >> coefShift;
- uint32_t indexN = (phaseWrapLimit - phase) >> coefShift;
- const TC* coefsP = coefs + indexP*halfNumCoefs;
- const TC* coefsN = coefs + indexN*halfNumCoefs;
- const TI* sP = samples;
- const TI* sN = samples + CHANNELS;
-
- // dot product filter.
- ProcessL<CHANNELS, STRIDE>(out,
- halfNumCoefs, coefsP, coefsN, sP, sN, volumeLR);
- } else {
- // interpolated polyphase
- // Compute the polyphase filter index on the positive and negative side.
- uint32_t indexP = phase >> coefShift;
- uint32_t indexN = (phaseWrapLimit - phase - 1) >> coefShift; // one's complement.
- const TC* coefsP = coefs + indexP*halfNumCoefs;
- const TC* coefsN = coefs + indexN*halfNumCoefs;
- const TC* coefsP1 = coefsP + halfNumCoefs;
- const TC* coefsN1 = coefsN + halfNumCoefs;
- const TI* sP = samples;
- const TI* sN = samples + CHANNELS;
-
- // Interpolation fraction lerpP derived by shifting all the way up and down
- // to clear the appropriate bits and align to the appropriate level
- // for the integer multiply. The constants should resolve in compile time.
- //
- // The interpolated filter coefficient is derived as follows for the pos/neg half:
- //
- // interpolated[P] = index[P]*lerpP + index[P+1]*(1-lerpP)
- // interpolated[N] = index[N+1]*lerpP + index[N]*(1-lerpP)
-
- // on-the-fly interpolated dot product filter
- if (is_same<TC, float>::value || is_same<TC, double>::value) {
- static const TC scale = 1. / (65536. * 65536.); // scale phase bits to [0.0, 1.0)
- TC lerpP = TC(phase << (sizeof(phase)*8 - coefShift)) * scale;
-
- Process<CHANNELS, STRIDE>(out,
- halfNumCoefs, coefsP, coefsN, coefsP1, coefsN1, sP, sN, lerpP, volumeLR);
- } else {
- uint32_t lerpP = phase << (sizeof(phase)*8 - coefShift)
- >> ((sizeof(phase)-sizeof(*coefs))*8 + 1);
-
- Process<CHANNELS, STRIDE>(out,
- halfNumCoefs, coefsP, coefsN, coefsP1, coefsN1, sP, sN, lerpP, volumeLR);
- }
- }
-}
-
-} // namespace android
-
-#endif /*ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_H*/
diff --git a/services/audioflinger/AudioResamplerFirProcessNeon.h b/services/audioflinger/AudioResamplerFirProcessNeon.h
deleted file mode 100644
index 3de9edd..0000000
--- a/services/audioflinger/AudioResamplerFirProcessNeon.h
+++ /dev/null
@@ -1,1214 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_NEON_H
-#define ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_NEON_H
-
-namespace android {
-
-// depends on AudioResamplerFirOps.h, AudioResamplerFirProcess.h
-
-#if USE_NEON
-
-// use intrinsics if inline arm32 assembly is not possible
-#if !USE_INLINE_ASSEMBLY
-#define USE_INTRINSIC
-#endif
-
-// following intrinsics available only on ARM 64 bit ACLE
-#ifndef __aarch64__
-#undef vld1q_f32_x2
-#undef vld1q_s32_x2
-#endif
-
-#define TO_STRING2(x) #x
-#define TO_STRING(x) TO_STRING2(x)
-// uncomment to print GCC version, may be relevant for intrinsic optimizations
-/* #pragma message ("GCC version: " TO_STRING(__GNUC__) \
- "." TO_STRING(__GNUC_MINOR__) \
- "." TO_STRING(__GNUC_PATCHLEVEL__)) */
-
-//
-// NEON specializations are enabled for Process() and ProcessL() in AudioResamplerFirProcess.h
-//
-// Two variants are presented here:
-// ARM NEON inline assembly which appears up to 10-15% faster than intrinsics (gcc 4.9) for arm32.
-// ARM NEON intrinsics which can also be used by arm64 and x86/64 with NEON header.
-//
-
-// Macros to save a mono/stereo accumulator sample in q0 (and q4) as stereo out.
-// These are only used for inline assembly.
-#define ASSEMBLY_ACCUMULATE_MONO \
- "vld1.s32 {d2}, [%[vLR]:64] \n"/* (1) load volumes */\
- "vld1.s32 {d3}, %[out] \n"/* (2) unaligned load the output */\
- "vpadd.s32 d0, d0, d1 \n"/* (1) add all 4 partial sums */\
- "vpadd.s32 d0, d0, d0 \n"/* (1+4d) and replicate L/R */\
- "vqrdmulh.s32 d0, d0, d2 \n"/* (2+3d) apply volume */\
- "vqadd.s32 d3, d3, d0 \n"/* (1+4d) accumulate result (saturating) */\
- "vst1.s32 {d3}, %[out] \n"/* (2+2d) store result */
-
-#define ASSEMBLY_ACCUMULATE_STEREO \
- "vld1.s32 {d2}, [%[vLR]:64] \n"/* (1) load volumes*/\
- "vld1.s32 {d3}, %[out] \n"/* (2) unaligned load the output*/\
- "vpadd.s32 d0, d0, d1 \n"/* (1) add all 4 partial sums from q0*/\
- "vpadd.s32 d8, d8, d9 \n"/* (1) add all 4 partial sums from q4*/\
- "vpadd.s32 d0, d0, d8 \n"/* (1+4d) combine into L/R*/\
- "vqrdmulh.s32 d0, d0, d2 \n"/* (2+3d) apply volume*/\
- "vqadd.s32 d3, d3, d0 \n"/* (1+4d) accumulate result (saturating)*/\
- "vst1.s32 {d3}, %[out] \n"/* (2+2d)store result*/
-
-template <int CHANNELS, int STRIDE, bool FIXED>
-static inline void ProcessNeonIntrinsic(int32_t* out,
- int count,
- const int16_t* coefsP,
- const int16_t* coefsN,
- const int16_t* sP,
- const int16_t* sN,
- const int32_t* volumeLR,
- uint32_t lerpP,
- const int16_t* coefsP1,
- const int16_t* coefsN1)
-{
- ALOG_ASSERT(count > 0 && (count & 7) == 0); // multiple of 8
- COMPILE_TIME_ASSERT_FUNCTION_SCOPE(CHANNELS == 1 || CHANNELS == 2);
-
- sP -= CHANNELS*((STRIDE>>1)-1);
- coefsP = (const int16_t*)__builtin_assume_aligned(coefsP, 16);
- coefsN = (const int16_t*)__builtin_assume_aligned(coefsN, 16);
-
- int16x4_t interp;
- if (!FIXED) {
- interp = vdup_n_s16(lerpP);
- //interp = (int16x4_t)vset_lane_s32 ((int32x2_t)lerpP, interp, 0);
- coefsP1 = (const int16_t*)__builtin_assume_aligned(coefsP1, 16);
- coefsN1 = (const int16_t*)__builtin_assume_aligned(coefsN1, 16);
- }
- int32x4_t accum, accum2;
- // warning uninitialized if we use veorq_s32
- // (alternative to below) accum = veorq_s32(accum, accum);
- accum = vdupq_n_s32(0);
- if (CHANNELS == 2) {
- // (alternative to below) accum2 = veorq_s32(accum2, accum2);
- accum2 = vdupq_n_s32(0);
- }
- do {
- int16x8_t posCoef = vld1q_s16(coefsP);
- coefsP += 8;
- int16x8_t negCoef = vld1q_s16(coefsN);
- coefsN += 8;
- if (!FIXED) { // interpolate
- int16x8_t posCoef1 = vld1q_s16(coefsP1);
- coefsP1 += 8;
- int16x8_t negCoef1 = vld1q_s16(coefsN1);
- coefsN1 += 8;
-
- posCoef1 = vsubq_s16(posCoef1, posCoef);
- negCoef = vsubq_s16(negCoef, negCoef1);
-
- posCoef1 = vqrdmulhq_lane_s16(posCoef1, interp, 0);
- negCoef = vqrdmulhq_lane_s16(negCoef, interp, 0);
-
- posCoef = vaddq_s16(posCoef, posCoef1);
- negCoef = vaddq_s16(negCoef, negCoef1);
- }
- switch (CHANNELS) {
- case 1: {
- int16x8_t posSamp = vld1q_s16(sP);
- int16x8_t negSamp = vld1q_s16(sN);
- sN += 8;
- posSamp = vrev64q_s16(posSamp);
-
- // dot product
- accum = vmlal_s16(accum, vget_low_s16(posSamp), vget_high_s16(posCoef)); // reversed
- accum = vmlal_s16(accum, vget_high_s16(posSamp), vget_low_s16(posCoef)); // reversed
- accum = vmlal_s16(accum, vget_low_s16(negSamp), vget_low_s16(negCoef));
- accum = vmlal_s16(accum, vget_high_s16(negSamp), vget_high_s16(negCoef));
- sP -= 8;
- } break;
- case 2: {
- int16x8x2_t posSamp = vld2q_s16(sP);
- int16x8x2_t negSamp = vld2q_s16(sN);
- sN += 16;
- posSamp.val[0] = vrev64q_s16(posSamp.val[0]);
- posSamp.val[1] = vrev64q_s16(posSamp.val[1]);
-
- // dot product
- accum = vmlal_s16(accum, vget_low_s16(posSamp.val[0]), vget_high_s16(posCoef)); // r
- accum = vmlal_s16(accum, vget_high_s16(posSamp.val[0]), vget_low_s16(posCoef)); // r
- accum2 = vmlal_s16(accum2, vget_low_s16(posSamp.val[1]), vget_high_s16(posCoef)); // r
- accum2 = vmlal_s16(accum2, vget_high_s16(posSamp.val[1]), vget_low_s16(posCoef)); // r
- accum = vmlal_s16(accum, vget_low_s16(negSamp.val[0]), vget_low_s16(negCoef));
- accum = vmlal_s16(accum, vget_high_s16(negSamp.val[0]), vget_high_s16(negCoef));
- accum2 = vmlal_s16(accum2, vget_low_s16(negSamp.val[1]), vget_low_s16(negCoef));
- accum2 = vmlal_s16(accum2, vget_high_s16(negSamp.val[1]), vget_high_s16(negCoef));
- sP -= 16;
- }
- } break;
- } while (count -= 8);
-
- // multiply by volume and save
- volumeLR = (const int32_t*)__builtin_assume_aligned(volumeLR, 8);
- int32x2_t vLR = vld1_s32(volumeLR);
- int32x2_t outSamp = vld1_s32(out);
- // combine and funnel down accumulator
- int32x2_t outAccum = vpadd_s32(vget_low_s32(accum), vget_high_s32(accum));
- if (CHANNELS == 1) {
- // duplicate accum to both L and R
- outAccum = vpadd_s32(outAccum, outAccum);
- } else if (CHANNELS == 2) {
- // accum2 contains R, fold in
- int32x2_t outAccum2 = vpadd_s32(vget_low_s32(accum2), vget_high_s32(accum2));
- outAccum = vpadd_s32(outAccum, outAccum2);
- }
- outAccum = vqrdmulh_s32(outAccum, vLR);
- outSamp = vqadd_s32(outSamp, outAccum);
- vst1_s32(out, outSamp);
-}
-
-template <int CHANNELS, int STRIDE, bool FIXED>
-static inline void ProcessNeonIntrinsic(int32_t* out,
- int count,
- const int32_t* coefsP,
- const int32_t* coefsN,
- const int16_t* sP,
- const int16_t* sN,
- const int32_t* volumeLR,
- uint32_t lerpP,
- const int32_t* coefsP1,
- const int32_t* coefsN1)
-{
- ALOG_ASSERT(count > 0 && (count & 7) == 0); // multiple of 8
- COMPILE_TIME_ASSERT_FUNCTION_SCOPE(CHANNELS == 1 || CHANNELS == 2);
-
- sP -= CHANNELS*((STRIDE>>1)-1);
- coefsP = (const int32_t*)__builtin_assume_aligned(coefsP, 16);
- coefsN = (const int32_t*)__builtin_assume_aligned(coefsN, 16);
-
- int32x2_t interp;
- if (!FIXED) {
- interp = vdup_n_s32(lerpP);
- coefsP1 = (const int32_t*)__builtin_assume_aligned(coefsP1, 16);
- coefsN1 = (const int32_t*)__builtin_assume_aligned(coefsN1, 16);
- }
- int32x4_t accum, accum2;
- // warning uninitialized if we use veorq_s32
- // (alternative to below) accum = veorq_s32(accum, accum);
- accum = vdupq_n_s32(0);
- if (CHANNELS == 2) {
- // (alternative to below) accum2 = veorq_s32(accum2, accum2);
- accum2 = vdupq_n_s32(0);
- }
- do {
-#ifdef vld1q_s32_x2
- int32x4x2_t posCoef = vld1q_s32_x2(coefsP);
- coefsP += 8;
- int32x4x2_t negCoef = vld1q_s32_x2(coefsN);
- coefsN += 8;
-#else
- int32x4x2_t posCoef;
- posCoef.val[0] = vld1q_s32(coefsP);
- coefsP += 4;
- posCoef.val[1] = vld1q_s32(coefsP);
- coefsP += 4;
- int32x4x2_t negCoef;
- negCoef.val[0] = vld1q_s32(coefsN);
- coefsN += 4;
- negCoef.val[1] = vld1q_s32(coefsN);
- coefsN += 4;
-#endif
- if (!FIXED) { // interpolate
-#ifdef vld1q_s32_x2
- int32x4x2_t posCoef1 = vld1q_s32_x2(coefsP1);
- coefsP1 += 8;
- int32x4x2_t negCoef1 = vld1q_s32_x2(coefsN1);
- coefsN1 += 8;
-#else
- int32x4x2_t posCoef1;
- posCoef1.val[0] = vld1q_s32(coefsP1);
- coefsP1 += 4;
- posCoef1.val[1] = vld1q_s32(coefsP1);
- coefsP1 += 4;
- int32x4x2_t negCoef1;
- negCoef1.val[0] = vld1q_s32(coefsN1);
- coefsN1 += 4;
- negCoef1.val[1] = vld1q_s32(coefsN1);
- coefsN1 += 4;
-#endif
-
- posCoef1.val[0] = vsubq_s32(posCoef1.val[0], posCoef.val[0]);
- posCoef1.val[1] = vsubq_s32(posCoef1.val[1], posCoef.val[1]);
- negCoef.val[0] = vsubq_s32(negCoef.val[0], negCoef1.val[0]);
- negCoef.val[1] = vsubq_s32(negCoef.val[1], negCoef1.val[1]);
-
- posCoef1.val[0] = vqrdmulhq_lane_s32(posCoef1.val[0], interp, 0);
- posCoef1.val[1] = vqrdmulhq_lane_s32(posCoef1.val[1], interp, 0);
- negCoef.val[0] = vqrdmulhq_lane_s32(negCoef.val[0], interp, 0);
- negCoef.val[1] = vqrdmulhq_lane_s32(negCoef.val[1], interp, 0);
-
- posCoef.val[0] = vaddq_s32(posCoef.val[0], posCoef1.val[0]);
- posCoef.val[1] = vaddq_s32(posCoef.val[1], posCoef1.val[1]);
- negCoef.val[0] = vaddq_s32(negCoef.val[0], negCoef1.val[0]);
- negCoef.val[1] = vaddq_s32(negCoef.val[1], negCoef1.val[1]);
- }
- switch (CHANNELS) {
- case 1: {
- int16x8_t posSamp = vld1q_s16(sP);
- int16x8_t negSamp = vld1q_s16(sN);
- sN += 8;
- posSamp = vrev64q_s16(posSamp);
-
- int32x4_t posSamp0 = vshll_n_s16(vget_low_s16(posSamp), 15);
- int32x4_t posSamp1 = vshll_n_s16(vget_high_s16(posSamp), 15);
- int32x4_t negSamp0 = vshll_n_s16(vget_low_s16(negSamp), 15);
- int32x4_t negSamp1 = vshll_n_s16(vget_high_s16(negSamp), 15);
-
- // dot product
- posSamp0 = vqrdmulhq_s32(posSamp0, posCoef.val[1]); // reversed
- posSamp1 = vqrdmulhq_s32(posSamp1, posCoef.val[0]); // reversed
- negSamp0 = vqrdmulhq_s32(negSamp0, negCoef.val[0]);
- negSamp1 = vqrdmulhq_s32(negSamp1, negCoef.val[1]);
-
- accum = vaddq_s32(accum, posSamp0);
- negSamp0 = vaddq_s32(negSamp0, negSamp1);
- accum = vaddq_s32(accum, posSamp1);
- accum = vaddq_s32(accum, negSamp0);
-
- sP -= 8;
- } break;
- case 2: {
- int16x8x2_t posSamp = vld2q_s16(sP);
- int16x8x2_t negSamp = vld2q_s16(sN);
- sN += 16;
- posSamp.val[0] = vrev64q_s16(posSamp.val[0]);
- posSamp.val[1] = vrev64q_s16(posSamp.val[1]);
-
- // left
- int32x4_t posSamp0 = vshll_n_s16(vget_low_s16(posSamp.val[0]), 15);
- int32x4_t posSamp1 = vshll_n_s16(vget_high_s16(posSamp.val[0]), 15);
- int32x4_t negSamp0 = vshll_n_s16(vget_low_s16(negSamp.val[0]), 15);
- int32x4_t negSamp1 = vshll_n_s16(vget_high_s16(negSamp.val[0]), 15);
-
- // dot product
- posSamp0 = vqrdmulhq_s32(posSamp0, posCoef.val[1]); // reversed
- posSamp1 = vqrdmulhq_s32(posSamp1, posCoef.val[0]); // reversed
- negSamp0 = vqrdmulhq_s32(negSamp0, negCoef.val[0]);
- negSamp1 = vqrdmulhq_s32(negSamp1, negCoef.val[1]);
-
- accum = vaddq_s32(accum, posSamp0);
- negSamp0 = vaddq_s32(negSamp0, negSamp1);
- accum = vaddq_s32(accum, posSamp1);
- accum = vaddq_s32(accum, negSamp0);
-
- // right
- posSamp0 = vshll_n_s16(vget_low_s16(posSamp.val[1]), 15);
- posSamp1 = vshll_n_s16(vget_high_s16(posSamp.val[1]), 15);
- negSamp0 = vshll_n_s16(vget_low_s16(negSamp.val[1]), 15);
- negSamp1 = vshll_n_s16(vget_high_s16(negSamp.val[1]), 15);
-
- // dot product
- posSamp0 = vqrdmulhq_s32(posSamp0, posCoef.val[1]); // reversed
- posSamp1 = vqrdmulhq_s32(posSamp1, posCoef.val[0]); // reversed
- negSamp0 = vqrdmulhq_s32(negSamp0, negCoef.val[0]);
- negSamp1 = vqrdmulhq_s32(negSamp1, negCoef.val[1]);
-
- accum2 = vaddq_s32(accum2, posSamp0);
- negSamp0 = vaddq_s32(negSamp0, negSamp1);
- accum2 = vaddq_s32(accum2, posSamp1);
- accum2 = vaddq_s32(accum2, negSamp0);
-
- sP -= 16;
- } break;
- }
- } while (count -= 8);
-
- // multiply by volume and save
- volumeLR = (const int32_t*)__builtin_assume_aligned(volumeLR, 8);
- int32x2_t vLR = vld1_s32(volumeLR);
- int32x2_t outSamp = vld1_s32(out);
- // combine and funnel down accumulator
- int32x2_t outAccum = vpadd_s32(vget_low_s32(accum), vget_high_s32(accum));
- if (CHANNELS == 1) {
- // duplicate accum to both L and R
- outAccum = vpadd_s32(outAccum, outAccum);
- } else if (CHANNELS == 2) {
- // accum2 contains R, fold in
- int32x2_t outAccum2 = vpadd_s32(vget_low_s32(accum2), vget_high_s32(accum2));
- outAccum = vpadd_s32(outAccum, outAccum2);
- }
- outAccum = vqrdmulh_s32(outAccum, vLR);
- outSamp = vqadd_s32(outSamp, outAccum);
- vst1_s32(out, outSamp);
-}
-
-template <int CHANNELS, int STRIDE, bool FIXED>
-static inline void ProcessNeonIntrinsic(float* out,
- int count,
- const float* coefsP,
- const float* coefsN,
- const float* sP,
- const float* sN,
- const float* volumeLR,
- float lerpP,
- const float* coefsP1,
- const float* coefsN1)
-{
- ALOG_ASSERT(count > 0 && (count & 7) == 0); // multiple of 8
- COMPILE_TIME_ASSERT_FUNCTION_SCOPE(CHANNELS == 1 || CHANNELS == 2);
-
- sP -= CHANNELS*((STRIDE>>1)-1);
- coefsP = (const float*)__builtin_assume_aligned(coefsP, 16);
- coefsN = (const float*)__builtin_assume_aligned(coefsN, 16);
-
- float32x2_t interp;
- if (!FIXED) {
- interp = vdup_n_f32(lerpP);
- coefsP1 = (const float*)__builtin_assume_aligned(coefsP1, 16);
- coefsN1 = (const float*)__builtin_assume_aligned(coefsN1, 16);
- }
- float32x4_t accum, accum2;
- // warning uninitialized if we use veorq_s32
- // (alternative to below) accum = veorq_s32(accum, accum);
- accum = vdupq_n_f32(0);
- if (CHANNELS == 2) {
- // (alternative to below) accum2 = veorq_s32(accum2, accum2);
- accum2 = vdupq_n_f32(0);
- }
- do {
-#ifdef vld1q_f32_x2
- float32x4x2_t posCoef = vld1q_f32_x2(coefsP);
- coefsP += 8;
- float32x4x2_t negCoef = vld1q_f32_x2(coefsN);
- coefsN += 8;
-#else
- float32x4x2_t posCoef;
- posCoef.val[0] = vld1q_f32(coefsP);
- coefsP += 4;
- posCoef.val[1] = vld1q_f32(coefsP);
- coefsP += 4;
- float32x4x2_t negCoef;
- negCoef.val[0] = vld1q_f32(coefsN);
- coefsN += 4;
- negCoef.val[1] = vld1q_f32(coefsN);
- coefsN += 4;
-#endif
- if (!FIXED) { // interpolate
-#ifdef vld1q_f32_x2
- float32x4x2_t posCoef1 = vld1q_f32_x2(coefsP1);
- coefsP1 += 8;
- float32x4x2_t negCoef1 = vld1q_f32_x2(coefsN1);
- coefsN1 += 8;
-#else
- float32x4x2_t posCoef1;
- posCoef1.val[0] = vld1q_f32(coefsP1);
- coefsP1 += 4;
- posCoef1.val[1] = vld1q_f32(coefsP1);
- coefsP1 += 4;
- float32x4x2_t negCoef1;
- negCoef1.val[0] = vld1q_f32(coefsN1);
- coefsN1 += 4;
- negCoef1.val[1] = vld1q_f32(coefsN1);
- coefsN1 += 4;
-#endif
- posCoef1.val[0] = vsubq_f32(posCoef1.val[0], posCoef.val[0]);
- posCoef1.val[1] = vsubq_f32(posCoef1.val[1], posCoef.val[1]);
- negCoef.val[0] = vsubq_f32(negCoef.val[0], negCoef1.val[0]);
- negCoef.val[1] = vsubq_f32(negCoef.val[1], negCoef1.val[1]);
-
- posCoef.val[0] = vmlaq_lane_f32(posCoef.val[0], posCoef1.val[0], interp, 0);
- posCoef.val[1] = vmlaq_lane_f32(posCoef.val[1], posCoef1.val[1], interp, 0);
- negCoef.val[0] = vmlaq_lane_f32(negCoef1.val[0], negCoef.val[0], interp, 0); // rev
- negCoef.val[1] = vmlaq_lane_f32(negCoef1.val[1], negCoef.val[1], interp, 0); // rev
- }
- switch (CHANNELS) {
- case 1: {
-#ifdef vld1q_f32_x2
- float32x4x2_t posSamp = vld1q_f32_x2(sP);
- float32x4x2_t negSamp = vld1q_f32_x2(sN);
- sN += 8;
- sP -= 8;
-#else
- float32x4x2_t posSamp;
- posSamp.val[0] = vld1q_f32(sP);
- sP += 4;
- posSamp.val[1] = vld1q_f32(sP);
- sP -= 12;
- float32x4x2_t negSamp;
- negSamp.val[0] = vld1q_f32(sN);
- sN += 4;
- negSamp.val[1] = vld1q_f32(sN);
- sN += 4;
-#endif
- // effectively we want a vrev128q_f32()
- posSamp.val[0] = vrev64q_f32(posSamp.val[0]);
- posSamp.val[1] = vrev64q_f32(posSamp.val[1]);
- posSamp.val[0] = vcombine_f32(
- vget_high_f32(posSamp.val[0]), vget_low_f32(posSamp.val[0]));
- posSamp.val[1] = vcombine_f32(
- vget_high_f32(posSamp.val[1]), vget_low_f32(posSamp.val[1]));
-
- accum = vmlaq_f32(accum, posSamp.val[0], posCoef.val[1]);
- accum = vmlaq_f32(accum, posSamp.val[1], posCoef.val[0]);
- accum = vmlaq_f32(accum, negSamp.val[0], negCoef.val[0]);
- accum = vmlaq_f32(accum, negSamp.val[1], negCoef.val[1]);
- } break;
- case 2: {
- float32x4x2_t posSamp0 = vld2q_f32(sP);
- sP += 8;
- float32x4x2_t negSamp0 = vld2q_f32(sN);
- sN += 8;
- posSamp0.val[0] = vrev64q_f32(posSamp0.val[0]);
- posSamp0.val[1] = vrev64q_f32(posSamp0.val[1]);
- posSamp0.val[0] = vcombine_f32(
- vget_high_f32(posSamp0.val[0]), vget_low_f32(posSamp0.val[0]));
- posSamp0.val[1] = vcombine_f32(
- vget_high_f32(posSamp0.val[1]), vget_low_f32(posSamp0.val[1]));
-
- float32x4x2_t posSamp1 = vld2q_f32(sP);
- sP -= 24;
- float32x4x2_t negSamp1 = vld2q_f32(sN);
- sN += 8;
- posSamp1.val[0] = vrev64q_f32(posSamp1.val[0]);
- posSamp1.val[1] = vrev64q_f32(posSamp1.val[1]);
- posSamp1.val[0] = vcombine_f32(
- vget_high_f32(posSamp1.val[0]), vget_low_f32(posSamp1.val[0]));
- posSamp1.val[1] = vcombine_f32(
- vget_high_f32(posSamp1.val[1]), vget_low_f32(posSamp1.val[1]));
-
- // Note: speed is affected by accumulation order.
- // Also, speed appears slower using vmul/vadd instead of vmla for
- // stereo case, comparable for mono.
-
- accum = vmlaq_f32(accum, negSamp0.val[0], negCoef.val[0]);
- accum = vmlaq_f32(accum, negSamp1.val[0], negCoef.val[1]);
- accum2 = vmlaq_f32(accum2, negSamp0.val[1], negCoef.val[0]);
- accum2 = vmlaq_f32(accum2, negSamp1.val[1], negCoef.val[1]);
-
- accum = vmlaq_f32(accum, posSamp0.val[0], posCoef.val[1]); // reversed
- accum = vmlaq_f32(accum, posSamp1.val[0], posCoef.val[0]); // reversed
- accum2 = vmlaq_f32(accum2, posSamp0.val[1], posCoef.val[1]); // reversed
- accum2 = vmlaq_f32(accum2, posSamp1.val[1], posCoef.val[0]); // reversed
- } break;
- }
- } while (count -= 8);
-
- // multiply by volume and save
- volumeLR = (const float*)__builtin_assume_aligned(volumeLR, 8);
- float32x2_t vLR = vld1_f32(volumeLR);
- float32x2_t outSamp = vld1_f32(out);
- // combine and funnel down accumulator
- float32x2_t outAccum = vpadd_f32(vget_low_f32(accum), vget_high_f32(accum));
- if (CHANNELS == 1) {
- // duplicate accum to both L and R
- outAccum = vpadd_f32(outAccum, outAccum);
- } else if (CHANNELS == 2) {
- // accum2 contains R, fold in
- float32x2_t outAccum2 = vpadd_f32(vget_low_f32(accum2), vget_high_f32(accum2));
- outAccum = vpadd_f32(outAccum, outAccum2);
- }
- outSamp = vmla_f32(outSamp, outAccum, vLR);
- vst1_f32(out, outSamp);
-}
-
-template <>
-inline void ProcessL<1, 16>(int32_t* const out,
- int count,
- const int16_t* coefsP,
- const int16_t* coefsN,
- const int16_t* sP,
- const int16_t* sN,
- const int32_t* const volumeLR)
-{
-#ifdef USE_INTRINSIC
- ProcessNeonIntrinsic<1, 16, true>(out, count, coefsP, coefsN, sP, sN, volumeLR,
- 0 /*lerpP*/, NULL /*coefsP1*/, NULL /*coefsN1*/);
-#else
- const int CHANNELS = 1; // template specialization does not preserve params
- const int STRIDE = 16;
- sP -= CHANNELS*((STRIDE>>1)-1);
- asm (
- "veor q0, q0, q0 \n"// (0 - combines+) accumulator = 0
-
- "1: \n"
-
- "vld1.16 {q2}, [%[sP]] \n"// (2+0d) load 8 16-bits mono samples
- "vld1.16 {q3}, [%[sN]]! \n"// (2) load 8 16-bits mono samples
- "vld1.16 {q8}, [%[coefsP0]:128]! \n"// (1) load 8 16-bits coefs
- "vld1.16 {q10}, [%[coefsN0]:128]! \n"// (1) load 8 16-bits coefs
-
- "vrev64.16 q2, q2 \n"// (1) reverse s3, s2, s1, s0, s7, s6, s5, s4
-
- // reordering the vmal to do d6, d7 before d4, d5 is slower(?)
- "vmlal.s16 q0, d4, d17 \n"// (1+0d) multiply (reversed)samples by coef
- "vmlal.s16 q0, d5, d16 \n"// (1) multiply (reversed)samples by coef
- "vmlal.s16 q0, d6, d20 \n"// (1) multiply neg samples
- "vmlal.s16 q0, d7, d21 \n"// (1) multiply neg samples
-
- // moving these ARM instructions before neon above seems to be slower
- "subs %[count], %[count], #8 \n"// (1) update loop counter
- "sub %[sP], %[sP], #16 \n"// (0) move pointer to next set of samples
-
- // sP used after branch (warning)
- "bne 1b \n"// loop
-
- ASSEMBLY_ACCUMULATE_MONO
-
- : [out] "=Uv" (out[0]),
- [count] "+r" (count),
- [coefsP0] "+r" (coefsP),
- [coefsN0] "+r" (coefsN),
- [sP] "+r" (sP),
- [sN] "+r" (sN)
- : [vLR] "r" (volumeLR)
- : "cc", "memory",
- "q0", "q1", "q2", "q3",
- "q8", "q10"
- );
-#endif
-}
-
-template <>
-inline void ProcessL<2, 16>(int32_t* const out,
- int count,
- const int16_t* coefsP,
- const int16_t* coefsN,
- const int16_t* sP,
- const int16_t* sN,
- const int32_t* const volumeLR)
-{
-#ifdef USE_INTRINSIC
- ProcessNeonIntrinsic<2, 16, true>(out, count, coefsP, coefsN, sP, sN, volumeLR,
- 0 /*lerpP*/, NULL /*coefsP1*/, NULL /*coefsN1*/);
-#else
- const int CHANNELS = 2; // template specialization does not preserve params
- const int STRIDE = 16;
- sP -= CHANNELS*((STRIDE>>1)-1);
- asm (
- "veor q0, q0, q0 \n"// (1) acc_L = 0
- "veor q4, q4, q4 \n"// (0 combines+) acc_R = 0
-
- "1: \n"
-
- "vld2.16 {q2, q3}, [%[sP]] \n"// (3+0d) load 8 16-bits stereo frames
- "vld2.16 {q5, q6}, [%[sN]]! \n"// (3) load 8 16-bits stereo frames
- "vld1.16 {q8}, [%[coefsP0]:128]! \n"// (1) load 8 16-bits coefs
- "vld1.16 {q10}, [%[coefsN0]:128]! \n"// (1) load 8 16-bits coefs
-
- "vrev64.16 q2, q2 \n"// (1) reverse 8 samples of positive left
- "vrev64.16 q3, q3 \n"// (0 combines+) reverse positive right
-
- "vmlal.s16 q0, d4, d17 \n"// (1) multiply (reversed) samples left
- "vmlal.s16 q0, d5, d16 \n"// (1) multiply (reversed) samples left
- "vmlal.s16 q4, d6, d17 \n"// (1) multiply (reversed) samples right
- "vmlal.s16 q4, d7, d16 \n"// (1) multiply (reversed) samples right
- "vmlal.s16 q0, d10, d20 \n"// (1) multiply samples left
- "vmlal.s16 q0, d11, d21 \n"// (1) multiply samples left
- "vmlal.s16 q4, d12, d20 \n"// (1) multiply samples right
- "vmlal.s16 q4, d13, d21 \n"// (1) multiply samples right
-
- // moving these ARM before neon seems to be slower
- "subs %[count], %[count], #8 \n"// (1) update loop counter
- "sub %[sP], %[sP], #32 \n"// (0) move pointer to next set of samples
-
- // sP used after branch (warning)
- "bne 1b \n"// loop
-
- ASSEMBLY_ACCUMULATE_STEREO
-
- : [out] "=Uv" (out[0]),
- [count] "+r" (count),
- [coefsP0] "+r" (coefsP),
- [coefsN0] "+r" (coefsN),
- [sP] "+r" (sP),
- [sN] "+r" (sN)
- : [vLR] "r" (volumeLR)
- : "cc", "memory",
- "q0", "q1", "q2", "q3",
- "q4", "q5", "q6",
- "q8", "q10"
- );
-#endif
-}
-
-template <>
-inline void Process<1, 16>(int32_t* const out,
- int count,
- const int16_t* coefsP,
- const int16_t* coefsN,
- const int16_t* coefsP1,
- const int16_t* coefsN1,
- const int16_t* sP,
- const int16_t* sN,
- uint32_t lerpP,
- const int32_t* const volumeLR)
-{
-#ifdef USE_INTRINSIC
- ProcessNeonIntrinsic<1, 16, false>(out, count, coefsP, coefsN, sP, sN, volumeLR,
- lerpP, coefsP1, coefsN1);
-#else
-
- const int CHANNELS = 1; // template specialization does not preserve params
- const int STRIDE = 16;
- sP -= CHANNELS*((STRIDE>>1)-1);
- asm (
- "vmov.32 d2[0], %[lerpP] \n"// load the positive phase S32 Q15
- "veor q0, q0, q0 \n"// (0 - combines+) accumulator = 0
-
- "1: \n"
-
- "vld1.16 {q2}, [%[sP]] \n"// (2+0d) load 8 16-bits mono samples
- "vld1.16 {q3}, [%[sN]]! \n"// (2) load 8 16-bits mono samples
- "vld1.16 {q8}, [%[coefsP0]:128]! \n"// (1) load 8 16-bits coefs
- "vld1.16 {q9}, [%[coefsP1]:128]! \n"// (1) load 8 16-bits coefs for interpolation
- "vld1.16 {q10}, [%[coefsN1]:128]! \n"// (1) load 8 16-bits coefs
- "vld1.16 {q11}, [%[coefsN0]:128]! \n"// (1) load 8 16-bits coefs for interpolation
-
- "vsub.s16 q9, q9, q8 \n"// (1) interpolate (step1) 1st set of coefs
- "vsub.s16 q11, q11, q10 \n"// (1) interpolate (step1) 2nd set of coets
-
- "vqrdmulh.s16 q9, q9, d2[0] \n"// (2) interpolate (step2) 1st set of coefs
- "vqrdmulh.s16 q11, q11, d2[0] \n"// (2) interpolate (step2) 2nd set of coefs
-
- "vrev64.16 q2, q2 \n"// (1) reverse s3, s2, s1, s0, s7, s6, s5, s4
-
- "vadd.s16 q8, q8, q9 \n"// (1+2d) interpolate (step3) 1st set
- "vadd.s16 q10, q10, q11 \n"// (1+1d) interpolate (step3) 2nd set
-
- // reordering the vmal to do d6, d7 before d4, d5 is slower(?)
- "vmlal.s16 q0, d4, d17 \n"// (1+0d) multiply reversed samples by coef
- "vmlal.s16 q0, d5, d16 \n"// (1) multiply reversed samples by coef
- "vmlal.s16 q0, d6, d20 \n"// (1) multiply neg samples
- "vmlal.s16 q0, d7, d21 \n"// (1) multiply neg samples
-
- // moving these ARM instructions before neon above seems to be slower
- "subs %[count], %[count], #8 \n"// (1) update loop counter
- "sub %[sP], %[sP], #16 \n"// (0) move pointer to next set of samples
-
- // sP used after branch (warning)
- "bne 1b \n"// loop
-
- ASSEMBLY_ACCUMULATE_MONO
-
- : [out] "=Uv" (out[0]),
- [count] "+r" (count),
- [coefsP0] "+r" (coefsP),
- [coefsN0] "+r" (coefsN),
- [coefsP1] "+r" (coefsP1),
- [coefsN1] "+r" (coefsN1),
- [sP] "+r" (sP),
- [sN] "+r" (sN)
- : [lerpP] "r" (lerpP),
- [vLR] "r" (volumeLR)
- : "cc", "memory",
- "q0", "q1", "q2", "q3",
- "q8", "q9", "q10", "q11"
- );
-#endif
-}
-
-template <>
-inline void Process<2, 16>(int32_t* const out,
- int count,
- const int16_t* coefsP,
- const int16_t* coefsN,
- const int16_t* coefsP1,
- const int16_t* coefsN1,
- const int16_t* sP,
- const int16_t* sN,
- uint32_t lerpP,
- const int32_t* const volumeLR)
-{
-#ifdef USE_INTRINSIC
- ProcessNeonIntrinsic<2, 16, false>(out, count, coefsP, coefsN, sP, sN, volumeLR,
- lerpP, coefsP1, coefsN1);
-#else
- const int CHANNELS = 2; // template specialization does not preserve params
- const int STRIDE = 16;
- sP -= CHANNELS*((STRIDE>>1)-1);
- asm (
- "vmov.32 d2[0], %[lerpP] \n"// load the positive phase
- "veor q0, q0, q0 \n"// (1) acc_L = 0
- "veor q4, q4, q4 \n"// (0 combines+) acc_R = 0
-
- "1: \n"
-
- "vld2.16 {q2, q3}, [%[sP]] \n"// (3+0d) load 8 16-bits stereo frames
- "vld2.16 {q5, q6}, [%[sN]]! \n"// (3) load 8 16-bits stereo frames
- "vld1.16 {q8}, [%[coefsP0]:128]! \n"// (1) load 8 16-bits coefs
- "vld1.16 {q9}, [%[coefsP1]:128]! \n"// (1) load 8 16-bits coefs for interpolation
- "vld1.16 {q10}, [%[coefsN1]:128]! \n"// (1) load 8 16-bits coefs
- "vld1.16 {q11}, [%[coefsN0]:128]! \n"// (1) load 8 16-bits coefs for interpolation
-
- "vsub.s16 q9, q9, q8 \n"// (1) interpolate (step1) 1st set of coefs
- "vsub.s16 q11, q11, q10 \n"// (1) interpolate (step1) 2nd set of coets
-
- "vqrdmulh.s16 q9, q9, d2[0] \n"// (2) interpolate (step2) 1st set of coefs
- "vqrdmulh.s16 q11, q11, d2[0] \n"// (2) interpolate (step2) 2nd set of coefs
-
- "vrev64.16 q2, q2 \n"// (1) reverse 8 samples of positive left
- "vrev64.16 q3, q3 \n"// (1) reverse 8 samples of positive right
-
- "vadd.s16 q8, q8, q9 \n"// (1+1d) interpolate (step3) 1st set
- "vadd.s16 q10, q10, q11 \n"// (1+1d) interpolate (step3) 2nd set
-
- "vmlal.s16 q0, d4, d17 \n"// (1) multiply reversed samples left
- "vmlal.s16 q0, d5, d16 \n"// (1) multiply reversed samples left
- "vmlal.s16 q4, d6, d17 \n"// (1) multiply reversed samples right
- "vmlal.s16 q4, d7, d16 \n"// (1) multiply reversed samples right
- "vmlal.s16 q0, d10, d20 \n"// (1) multiply samples left
- "vmlal.s16 q0, d11, d21 \n"// (1) multiply samples left
- "vmlal.s16 q4, d12, d20 \n"// (1) multiply samples right
- "vmlal.s16 q4, d13, d21 \n"// (1) multiply samples right
-
- // moving these ARM before neon seems to be slower
- "subs %[count], %[count], #8 \n"// (1) update loop counter
- "sub %[sP], %[sP], #32 \n"// (0) move pointer to next set of samples
-
- // sP used after branch (warning)
- "bne 1b \n"// loop
-
- ASSEMBLY_ACCUMULATE_STEREO
-
- : [out] "=Uv" (out[0]),
- [count] "+r" (count),
- [coefsP0] "+r" (coefsP),
- [coefsN0] "+r" (coefsN),
- [coefsP1] "+r" (coefsP1),
- [coefsN1] "+r" (coefsN1),
- [sP] "+r" (sP),
- [sN] "+r" (sN)
- : [lerpP] "r" (lerpP),
- [vLR] "r" (volumeLR)
- : "cc", "memory",
- "q0", "q1", "q2", "q3",
- "q4", "q5", "q6",
- "q8", "q9", "q10", "q11"
- );
-#endif
-}
-
-template <>
-inline void ProcessL<1, 16>(int32_t* const out,
- int count,
- const int32_t* coefsP,
- const int32_t* coefsN,
- const int16_t* sP,
- const int16_t* sN,
- const int32_t* const volumeLR)
-{
-#ifdef USE_INTRINSIC
- ProcessNeonIntrinsic<1, 16, true>(out, count, coefsP, coefsN, sP, sN, volumeLR,
- 0 /*lerpP*/, NULL /*coefsP1*/, NULL /*coefsN1*/);
-#else
- const int CHANNELS = 1; // template specialization does not preserve params
- const int STRIDE = 16;
- sP -= CHANNELS*((STRIDE>>1)-1);
- asm (
- "veor q0, q0, q0 \n"// result, initialize to 0
-
- "1: \n"
-
- "vld1.16 {q2}, [%[sP]] \n"// load 8 16-bits mono samples
- "vld1.16 {q3}, [%[sN]]! \n"// load 8 16-bits mono samples
- "vld1.32 {q8, q9}, [%[coefsP0]:128]! \n"// load 8 32-bits coefs
- "vld1.32 {q10, q11}, [%[coefsN0]:128]! \n"// load 8 32-bits coefs
-
- "vrev64.16 q2, q2 \n"// reverse 8 samples of the positive side
-
- "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits
- "vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits
-
- "vshll.s16 q14, d6, #15 \n"// extend samples to 31 bits
- "vshll.s16 q15, d7, #15 \n"// extend samples to 31 bits
-
- "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples
- "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples
- "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples
- "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples
-
- "vadd.s32 q0, q0, q12 \n"// accumulate result
- "vadd.s32 q13, q13, q14 \n"// accumulate result
- "vadd.s32 q0, q0, q15 \n"// accumulate result
- "vadd.s32 q0, q0, q13 \n"// accumulate result
-
- "sub %[sP], %[sP], #16 \n"// move pointer to next set of samples
- "subs %[count], %[count], #8 \n"// update loop counter
-
- "bne 1b \n"// loop
-
- ASSEMBLY_ACCUMULATE_MONO
-
- : [out] "=Uv" (out[0]),
- [count] "+r" (count),
- [coefsP0] "+r" (coefsP),
- [coefsN0] "+r" (coefsN),
- [sP] "+r" (sP),
- [sN] "+r" (sN)
- : [vLR] "r" (volumeLR)
- : "cc", "memory",
- "q0", "q1", "q2", "q3",
- "q8", "q9", "q10", "q11",
- "q12", "q13", "q14", "q15"
- );
-#endif
-}
-
-template <>
-inline void ProcessL<2, 16>(int32_t* const out,
- int count,
- const int32_t* coefsP,
- const int32_t* coefsN,
- const int16_t* sP,
- const int16_t* sN,
- const int32_t* const volumeLR)
-{
-#ifdef USE_INTRINSIC
- ProcessNeonIntrinsic<2, 16, true>(out, count, coefsP, coefsN, sP, sN, volumeLR,
- 0 /*lerpP*/, NULL /*coefsP1*/, NULL /*coefsN1*/);
-#else
- const int CHANNELS = 2; // template specialization does not preserve params
- const int STRIDE = 16;
- sP -= CHANNELS*((STRIDE>>1)-1);
- asm (
- "veor q0, q0, q0 \n"// result, initialize to 0
- "veor q4, q4, q4 \n"// result, initialize to 0
-
- "1: \n"
-
- "vld2.16 {q2, q3}, [%[sP]] \n"// load 8 16-bits stereo frames
- "vld2.16 {q5, q6}, [%[sN]]! \n"// load 8 16-bits stereo frames
- "vld1.32 {q8, q9}, [%[coefsP0]:128]! \n"// load 8 32-bits coefs
- "vld1.32 {q10, q11}, [%[coefsN0]:128]! \n"// load 8 32-bits coefs
-
- "vrev64.16 q2, q2 \n"// reverse 8 samples of positive left
- "vrev64.16 q3, q3 \n"// reverse 8 samples of positive right
-
- "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits
- "vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits
-
- "vshll.s16 q14, d10, #15 \n"// extend samples to 31 bits
- "vshll.s16 q15, d11, #15 \n"// extend samples to 31 bits
-
- "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by coef
- "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by coef
- "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by coef
- "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by coef
-
- "vadd.s32 q0, q0, q12 \n"// accumulate result
- "vadd.s32 q13, q13, q14 \n"// accumulate result
- "vadd.s32 q0, q0, q15 \n"// accumulate result
- "vadd.s32 q0, q0, q13 \n"// accumulate result
-
- "vshll.s16 q12, d6, #15 \n"// extend samples to 31 bits
- "vshll.s16 q13, d7, #15 \n"// extend samples to 31 bits
-
- "vshll.s16 q14, d12, #15 \n"// extend samples to 31 bits
- "vshll.s16 q15, d13, #15 \n"// extend samples to 31 bits
-
- "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by coef
- "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by coef
- "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by coef
- "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by coef
-
- "vadd.s32 q4, q4, q12 \n"// accumulate result
- "vadd.s32 q13, q13, q14 \n"// accumulate result
- "vadd.s32 q4, q4, q15 \n"// accumulate result
- "vadd.s32 q4, q4, q13 \n"// accumulate result
-
- "subs %[count], %[count], #8 \n"// update loop counter
- "sub %[sP], %[sP], #32 \n"// move pointer to next set of samples
-
- "bne 1b \n"// loop
-
- ASSEMBLY_ACCUMULATE_STEREO
-
- : [out] "=Uv" (out[0]),
- [count] "+r" (count),
- [coefsP0] "+r" (coefsP),
- [coefsN0] "+r" (coefsN),
- [sP] "+r" (sP),
- [sN] "+r" (sN)
- : [vLR] "r" (volumeLR)
- : "cc", "memory",
- "q0", "q1", "q2", "q3",
- "q4", "q5", "q6",
- "q8", "q9", "q10", "q11",
- "q12", "q13", "q14", "q15"
- );
-#endif
-}
-
-template <>
-inline void Process<1, 16>(int32_t* const out,
- int count,
- const int32_t* coefsP,
- const int32_t* coefsN,
- const int32_t* coefsP1,
- const int32_t* coefsN1,
- const int16_t* sP,
- const int16_t* sN,
- uint32_t lerpP,
- const int32_t* const volumeLR)
-{
-#ifdef USE_INTRINSIC
- ProcessNeonIntrinsic<1, 16, false>(out, count, coefsP, coefsN, sP, sN, volumeLR,
- lerpP, coefsP1, coefsN1);
-#else
- const int CHANNELS = 1; // template specialization does not preserve params
- const int STRIDE = 16;
- sP -= CHANNELS*((STRIDE>>1)-1);
- asm (
- "vmov.32 d2[0], %[lerpP] \n"// load the positive phase
- "veor q0, q0, q0 \n"// result, initialize to 0
-
- "1: \n"
-
- "vld1.16 {q2}, [%[sP]] \n"// load 8 16-bits mono samples
- "vld1.16 {q3}, [%[sN]]! \n"// load 8 16-bits mono samples
- "vld1.32 {q8, q9}, [%[coefsP0]:128]! \n"// load 8 32-bits coefs
- "vld1.32 {q12, q13}, [%[coefsP1]:128]! \n"// load 8 32-bits coefs
- "vld1.32 {q10, q11}, [%[coefsN1]:128]! \n"// load 8 32-bits coefs
- "vld1.32 {q14, q15}, [%[coefsN0]:128]! \n"// load 8 32-bits coefs
-
- "vsub.s32 q12, q12, q8 \n"// interpolate (step1)
- "vsub.s32 q13, q13, q9 \n"// interpolate (step1)
- "vsub.s32 q14, q14, q10 \n"// interpolate (step1)
- "vsub.s32 q15, q15, q11 \n"// interpolate (step1)
-
- "vqrdmulh.s32 q12, q12, d2[0] \n"// interpolate (step2)
- "vqrdmulh.s32 q13, q13, d2[0] \n"// interpolate (step2)
- "vqrdmulh.s32 q14, q14, d2[0] \n"// interpolate (step2)
- "vqrdmulh.s32 q15, q15, d2[0] \n"// interpolate (step2)
-
- "vadd.s32 q8, q8, q12 \n"// interpolate (step3)
- "vadd.s32 q9, q9, q13 \n"// interpolate (step3)
- "vadd.s32 q10, q10, q14 \n"// interpolate (step3)
- "vadd.s32 q11, q11, q15 \n"// interpolate (step3)
-
- "vrev64.16 q2, q2 \n"// reverse 8 samples of the positive side
-
- "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits
- "vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits
-
- "vshll.s16 q14, d6, #15 \n"// extend samples to 31 bits
- "vshll.s16 q15, d7, #15 \n"// extend samples to 31 bits
-
- "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by interpolated coef
- "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef
- "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef
- "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by interpolated coef
-
- "vadd.s32 q0, q0, q12 \n"// accumulate result
- "vadd.s32 q13, q13, q14 \n"// accumulate result
- "vadd.s32 q0, q0, q15 \n"// accumulate result
- "vadd.s32 q0, q0, q13 \n"// accumulate result
-
- "sub %[sP], %[sP], #16 \n"// move pointer to next set of samples
- "subs %[count], %[count], #8 \n"// update loop counter
-
- "bne 1b \n"// loop
-
- ASSEMBLY_ACCUMULATE_MONO
-
- : [out] "=Uv" (out[0]),
- [count] "+r" (count),
- [coefsP0] "+r" (coefsP),
- [coefsN0] "+r" (coefsN),
- [coefsP1] "+r" (coefsP1),
- [coefsN1] "+r" (coefsN1),
- [sP] "+r" (sP),
- [sN] "+r" (sN)
- : [lerpP] "r" (lerpP),
- [vLR] "r" (volumeLR)
- : "cc", "memory",
- "q0", "q1", "q2", "q3",
- "q8", "q9", "q10", "q11",
- "q12", "q13", "q14", "q15"
- );
-#endif
-}
-
-template <>
-inline void Process<2, 16>(int32_t* const out,
- int count,
- const int32_t* coefsP,
- const int32_t* coefsN,
- const int32_t* coefsP1,
- const int32_t* coefsN1,
- const int16_t* sP,
- const int16_t* sN,
- uint32_t lerpP,
- const int32_t* const volumeLR)
-{
-#ifdef USE_INTRINSIC
- ProcessNeonIntrinsic<2, 16, false>(out, count, coefsP, coefsN, sP, sN, volumeLR,
- lerpP, coefsP1, coefsN1);
-#else
- const int CHANNELS = 2; // template specialization does not preserve params
- const int STRIDE = 16;
- sP -= CHANNELS*((STRIDE>>1)-1);
- asm (
- "vmov.32 d2[0], %[lerpP] \n"// load the positive phase
- "veor q0, q0, q0 \n"// result, initialize to 0
- "veor q4, q4, q4 \n"// result, initialize to 0
-
- "1: \n"
-
- "vld2.16 {q2, q3}, [%[sP]] \n"// load 8 16-bits stereo frames
- "vld2.16 {q5, q6}, [%[sN]]! \n"// load 8 16-bits stereo frames
- "vld1.32 {q8, q9}, [%[coefsP0]:128]! \n"// load 8 32-bits coefs
- "vld1.32 {q12, q13}, [%[coefsP1]:128]! \n"// load 8 32-bits coefs
- "vld1.32 {q10, q11}, [%[coefsN1]:128]! \n"// load 8 32-bits coefs
- "vld1.32 {q14, q15}, [%[coefsN0]:128]! \n"// load 8 32-bits coefs
-
- "vsub.s32 q12, q12, q8 \n"// interpolate (step1)
- "vsub.s32 q13, q13, q9 \n"// interpolate (step1)
- "vsub.s32 q14, q14, q10 \n"// interpolate (step1)
- "vsub.s32 q15, q15, q11 \n"// interpolate (step1)
-
- "vqrdmulh.s32 q12, q12, d2[0] \n"// interpolate (step2)
- "vqrdmulh.s32 q13, q13, d2[0] \n"// interpolate (step2)
- "vqrdmulh.s32 q14, q14, d2[0] \n"// interpolate (step2)
- "vqrdmulh.s32 q15, q15, d2[0] \n"// interpolate (step2)
-
- "vadd.s32 q8, q8, q12 \n"// interpolate (step3)
- "vadd.s32 q9, q9, q13 \n"// interpolate (step3)
- "vadd.s32 q10, q10, q14 \n"// interpolate (step3)
- "vadd.s32 q11, q11, q15 \n"// interpolate (step3)
-
- "vrev64.16 q2, q2 \n"// reverse 8 samples of positive left
- "vrev64.16 q3, q3 \n"// reverse 8 samples of positive right
-
- "vshll.s16 q12, d4, #15 \n"// extend samples to 31 bits
- "vshll.s16 q13, d5, #15 \n"// extend samples to 31 bits
-
- "vshll.s16 q14, d10, #15 \n"// extend samples to 31 bits
- "vshll.s16 q15, d11, #15 \n"// extend samples to 31 bits
-
- "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by interpolated coef
- "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef
- "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef
- "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by interpolated coef
-
- "vadd.s32 q0, q0, q12 \n"// accumulate result
- "vadd.s32 q13, q13, q14 \n"// accumulate result
- "vadd.s32 q0, q0, q15 \n"// accumulate result
- "vadd.s32 q0, q0, q13 \n"// accumulate result
-
- "vshll.s16 q12, d6, #15 \n"// extend samples to 31 bits
- "vshll.s16 q13, d7, #15 \n"// extend samples to 31 bits
-
- "vshll.s16 q14, d12, #15 \n"// extend samples to 31 bits
- "vshll.s16 q15, d13, #15 \n"// extend samples to 31 bits
-
- "vqrdmulh.s32 q12, q12, q9 \n"// multiply samples by interpolated coef
- "vqrdmulh.s32 q13, q13, q8 \n"// multiply samples by interpolated coef
- "vqrdmulh.s32 q14, q14, q10 \n"// multiply samples by interpolated coef
- "vqrdmulh.s32 q15, q15, q11 \n"// multiply samples by interpolated coef
-
- "vadd.s32 q4, q4, q12 \n"// accumulate result
- "vadd.s32 q13, q13, q14 \n"// accumulate result
- "vadd.s32 q4, q4, q15 \n"// accumulate result
- "vadd.s32 q4, q4, q13 \n"// accumulate result
-
- "subs %[count], %[count], #8 \n"// update loop counter
- "sub %[sP], %[sP], #32 \n"// move pointer to next set of samples
-
- "bne 1b \n"// loop
-
- ASSEMBLY_ACCUMULATE_STEREO
-
- : [out] "=Uv" (out[0]),
- [count] "+r" (count),
- [coefsP0] "+r" (coefsP),
- [coefsN0] "+r" (coefsN),
- [coefsP1] "+r" (coefsP1),
- [coefsN1] "+r" (coefsN1),
- [sP] "+r" (sP),
- [sN] "+r" (sN)
- : [lerpP] "r" (lerpP),
- [vLR] "r" (volumeLR)
- : "cc", "memory",
- "q0", "q1", "q2", "q3",
- "q4", "q5", "q6",
- "q8", "q9", "q10", "q11",
- "q12", "q13", "q14", "q15"
- );
-#endif
-}
-
-template<>
-inline void ProcessL<1, 16>(float* const out,
- int count,
- const float* coefsP,
- const float* coefsN,
- const float* sP,
- const float* sN,
- const float* const volumeLR)
-{
- ProcessNeonIntrinsic<1, 16, true>(out, count, coefsP, coefsN, sP, sN, volumeLR,
- 0 /*lerpP*/, NULL /*coefsP1*/, NULL /*coefsN1*/);
-}
-
-template<>
-inline void ProcessL<2, 16>(float* const out,
- int count,
- const float* coefsP,
- const float* coefsN,
- const float* sP,
- const float* sN,
- const float* const volumeLR)
-{
- ProcessNeonIntrinsic<2, 16, true>(out, count, coefsP, coefsN, sP, sN, volumeLR,
- 0 /*lerpP*/, NULL /*coefsP1*/, NULL /*coefsN1*/);
-}
-
-template<>
-inline void Process<1, 16>(float* const out,
- int count,
- const float* coefsP,
- const float* coefsN,
- const float* coefsP1,
- const float* coefsN1,
- const float* sP,
- const float* sN,
- float lerpP,
- const float* const volumeLR)
-{
- ProcessNeonIntrinsic<1, 16, false>(out, count, coefsP, coefsN, sP, sN, volumeLR,
- lerpP, coefsP1, coefsN1);
-}
-
-template<>
-inline void Process<2, 16>(float* const out,
- int count,
- const float* coefsP,
- const float* coefsN,
- const float* coefsP1,
- const float* coefsN1,
- const float* sP,
- const float* sN,
- float lerpP,
- const float* const volumeLR)
-{
- ProcessNeonIntrinsic<2, 16, false>(out, count, coefsP, coefsN, sP, sN, volumeLR,
- lerpP, coefsP1, coefsN1);
-}
-
-#endif //USE_NEON
-
-} // namespace android
-
-#endif /*ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_NEON_H*/
diff --git a/services/audioflinger/AudioResamplerFirProcessSSE.h b/services/audioflinger/AudioResamplerFirProcessSSE.h
deleted file mode 100644
index 63ed052..0000000
--- a/services/audioflinger/AudioResamplerFirProcessSSE.h
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_SSE_H
-#define ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_SSE_H
-
-namespace android {
-
-// depends on AudioResamplerFirOps.h, AudioResamplerFirProcess.h
-
-#if USE_SSE
-
-#define TO_STRING2(x) #x
-#define TO_STRING(x) TO_STRING2(x)
-// uncomment to print GCC version, may be relevant for intrinsic optimizations
-/* #pragma message ("GCC version: " TO_STRING(__GNUC__) \
- "." TO_STRING(__GNUC_MINOR__) \
- "." TO_STRING(__GNUC_PATCHLEVEL__)) */
-
-//
-// SSEx specializations are enabled for Process() and ProcessL() in AudioResamplerFirProcess.h
-//
-
-template <int CHANNELS, int STRIDE, bool FIXED>
-static inline void ProcessSSEIntrinsic(float* out,
- int count,
- const float* coefsP,
- const float* coefsN,
- const float* sP,
- const float* sN,
- const float* volumeLR,
- float lerpP,
- const float* coefsP1,
- const float* coefsN1)
-{
- ALOG_ASSERT(count > 0 && (count & 7) == 0); // multiple of 8
- COMPILE_TIME_ASSERT_FUNCTION_SCOPE(CHANNELS == 1 || CHANNELS == 2);
-
- sP -= CHANNELS*(4-1); // adjust sP for a loop iteration of four
-
- __m128 interp;
- if (!FIXED) {
- interp = _mm_set1_ps(lerpP);
- }
-
- __m128 accL, accR;
- accL = _mm_setzero_ps();
- if (CHANNELS == 2) {
- accR = _mm_setzero_ps();
- }
-
- do {
- __m128 posCoef = _mm_load_ps(coefsP);
- __m128 negCoef = _mm_load_ps(coefsN);
- coefsP += 4;
- coefsN += 4;
-
- if (!FIXED) { // interpolate
- __m128 posCoef1 = _mm_load_ps(coefsP1);
- __m128 negCoef1 = _mm_load_ps(coefsN1);
- coefsP1 += 4;
- coefsN1 += 4;
-
- // Calculate the final coefficient for interpolation
- // posCoef = interp * (posCoef1 - posCoef) + posCoef
- // negCoef = interp * (negCoef - negCoef1) + negCoef1
- posCoef1 = _mm_sub_ps(posCoef1, posCoef);
- negCoef = _mm_sub_ps(negCoef, negCoef1);
-
- posCoef1 = _mm_mul_ps(posCoef1, interp);
- negCoef = _mm_mul_ps(negCoef, interp);
-
- posCoef = _mm_add_ps(posCoef1, posCoef);
- negCoef = _mm_add_ps(negCoef, negCoef1);
- }
- switch (CHANNELS) {
- case 1: {
- __m128 posSamp = _mm_loadu_ps(sP);
- __m128 negSamp = _mm_loadu_ps(sN);
- sP -= 4;
- sN += 4;
-
- posSamp = _mm_shuffle_ps(posSamp, posSamp, 0x1B);
- posSamp = _mm_mul_ps(posSamp, posCoef);
- negSamp = _mm_mul_ps(negSamp, negCoef);
-
- accL = _mm_add_ps(accL, posSamp);
- accL = _mm_add_ps(accL, negSamp);
- } break;
- case 2: {
- __m128 posSamp0 = _mm_loadu_ps(sP);
- __m128 posSamp1 = _mm_loadu_ps(sP+4);
- __m128 negSamp0 = _mm_loadu_ps(sN);
- __m128 negSamp1 = _mm_loadu_ps(sN+4);
- sP -= 8;
- sN += 8;
-
- // deinterleave everything and reverse the positives
- __m128 posSampL = _mm_shuffle_ps(posSamp1, posSamp0, 0x22);
- __m128 posSampR = _mm_shuffle_ps(posSamp1, posSamp0, 0x77);
- __m128 negSampL = _mm_shuffle_ps(negSamp0, negSamp1, 0x88);
- __m128 negSampR = _mm_shuffle_ps(negSamp0, negSamp1, 0xDD);
-
- posSampL = _mm_mul_ps(posSampL, posCoef);
- posSampR = _mm_mul_ps(posSampR, posCoef);
- negSampL = _mm_mul_ps(negSampL, negCoef);
- negSampR = _mm_mul_ps(negSampR, negCoef);
-
- accL = _mm_add_ps(accL, posSampL);
- accR = _mm_add_ps(accR, posSampR);
- accL = _mm_add_ps(accL, negSampL);
- accR = _mm_add_ps(accR, negSampR);
- } break;
- }
- } while (count -= 4);
-
- // multiply by volume and save
- __m128 vLR = _mm_setzero_ps();
- __m128 outSamp;
- vLR = _mm_loadl_pi(vLR, reinterpret_cast<const __m64*>(volumeLR));
- outSamp = _mm_loadl_pi(vLR, reinterpret_cast<__m64*>(out));
-
- // combine and funnel down accumulator
- __m128 outAccum = _mm_setzero_ps();
- if (CHANNELS == 1) {
- // duplicate accL to both L and R
- outAccum = _mm_add_ps(accL, _mm_movehl_ps(accL, accL));
- outAccum = _mm_add_ps(outAccum, _mm_shuffle_ps(outAccum, outAccum, 0x11));
- } else if (CHANNELS == 2) {
- // accR contains R, fold in
- outAccum = _mm_hadd_ps(accL, accR);
- outAccum = _mm_hadd_ps(outAccum, outAccum);
- }
-
- outAccum = _mm_mul_ps(outAccum, vLR);
- outSamp = _mm_add_ps(outSamp, outAccum);
- _mm_storel_pi(reinterpret_cast<__m64*>(out), outSamp);
-}
-
-template<>
-inline void ProcessL<1, 16>(float* const out,
- int count,
- const float* coefsP,
- const float* coefsN,
- const float* sP,
- const float* sN,
- const float* const volumeLR)
-{
- ProcessSSEIntrinsic<1, 16, true>(out, count, coefsP, coefsN, sP, sN, volumeLR,
- 0 /*lerpP*/, NULL /*coefsP1*/, NULL /*coefsN1*/);
-}
-
-template<>
-inline void ProcessL<2, 16>(float* const out,
- int count,
- const float* coefsP,
- const float* coefsN,
- const float* sP,
- const float* sN,
- const float* const volumeLR)
-{
- ProcessSSEIntrinsic<2, 16, true>(out, count, coefsP, coefsN, sP, sN, volumeLR,
- 0 /*lerpP*/, NULL /*coefsP1*/, NULL /*coefsN1*/);
-}
-
-template<>
-inline void Process<1, 16>(float* const out,
- int count,
- const float* coefsP,
- const float* coefsN,
- const float* coefsP1,
- const float* coefsN1,
- const float* sP,
- const float* sN,
- float lerpP,
- const float* const volumeLR)
-{
- ProcessSSEIntrinsic<1, 16, false>(out, count, coefsP, coefsN, sP, sN, volumeLR,
- lerpP, coefsP1, coefsN1);
-}
-
-template<>
-inline void Process<2, 16>(float* const out,
- int count,
- const float* coefsP,
- const float* coefsN,
- const float* coefsP1,
- const float* coefsN1,
- const float* sP,
- const float* sN,
- float lerpP,
- const float* const volumeLR)
-{
- ProcessSSEIntrinsic<2, 16, false>(out, count, coefsP, coefsN, sP, sN, volumeLR,
- lerpP, coefsP1, coefsN1);
-}
-
-#endif //USE_SSE
-
-} // namespace android
-
-#endif /*ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_SSE_H*/
diff --git a/services/audioflinger/AudioResamplerSinc.h b/services/audioflinger/AudioResamplerSinc.h
deleted file mode 100644
index df8b45a..0000000
--- a/services/audioflinger/AudioResamplerSinc.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Copyright (C) 2007 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_AUDIO_RESAMPLER_SINC_H
-#define ANDROID_AUDIO_RESAMPLER_SINC_H
-
-#include <stdint.h>
-#include <sys/types.h>
-#include <android/log.h>
-
-#include "AudioResampler.h"
-
-namespace android {
-
-
-typedef const int32_t * (*readCoefficientsFn)(bool upDownSample);
-typedef int32_t (*readResampleFirNumCoeffFn)();
-typedef int32_t (*readResampleFirLerpIntBitsFn)();
-
-// ----------------------------------------------------------------------------
-
-class AudioResamplerSinc : public AudioResampler {
-public:
- AudioResamplerSinc(int inChannelCount, int32_t sampleRate,
- src_quality quality = HIGH_QUALITY);
-
- virtual ~AudioResamplerSinc();
-
- virtual size_t resample(int32_t* out, size_t outFrameCount,
- AudioBufferProvider* provider);
-private:
- void init();
-
- virtual void setVolume(float left, float right);
-
- template<int CHANNELS>
- size_t resample(int32_t* out, size_t outFrameCount,
- AudioBufferProvider* provider);
-
- template<int CHANNELS>
- inline void filterCoefficient(
- int32_t* out, uint32_t phase, const int16_t *samples, uint32_t vRL);
-
- template<int CHANNELS>
- inline void interpolate(
- int32_t& l, int32_t& r,
- const int32_t* coefs, size_t offset,
- int32_t lerp, const int16_t* samples);
-
- template<int CHANNELS>
- inline void read(int16_t*& impulse, uint32_t& phaseFraction,
- const int16_t* in, size_t inputIndex);
-
- int16_t *mState;
- int16_t *mImpulse;
- int16_t *mRingFull;
- int32_t mVolumeSIMD[2];
-
- const int32_t * mFirCoefs;
- static const uint32_t mFirCoefsDown[];
- static const uint32_t mFirCoefsUp[];
-
- // ----------------------------------------------------------------------------
- static const int32_t RESAMPLE_FIR_NUM_COEF = 8;
- static const int32_t RESAMPLE_FIR_LERP_INT_BITS = 7;
-
- struct Constants {
- int coefsBits;
- int cShift;
- uint32_t cMask;
- int pShift;
- uint32_t pMask;
- // number of zero-crossing on each side
- unsigned int halfNumCoefs;
- };
-
- static Constants highQualityConstants;
- static Constants veryHighQualityConstants;
- const Constants *mConstants; // points to appropriate set of coefficient parameters
-
- static void init_routine();
-};
-
-// ----------------------------------------------------------------------------
-} // namespace android
-
-#endif /*ANDROID_AUDIO_RESAMPLER_SINC_H*/
diff --git a/services/audioflinger/AudioStreamOut.cpp b/services/audioflinger/AudioStreamOut.cpp
index 6026bbb..1d4b3fe 100644
--- a/services/audioflinger/AudioStreamOut.cpp
+++ b/services/audioflinger/AudioStreamOut.cpp
@@ -18,7 +18,9 @@
#define LOG_TAG "AudioFlinger"
//#define LOG_NDEBUG 0
-#include <hardware/audio.h>
+#include <media/audiohal/DeviceHalInterface.h>
+#include <media/audiohal/StreamHalInterface.h>
+#include <system/audio.h>
#include <utils/Log.h>
#include "AudioHwDevice.h"
@@ -40,19 +42,23 @@
{
}
-audio_hw_device_t *AudioStreamOut::hwDev() const
+AudioStreamOut::~AudioStreamOut()
+{
+}
+
+sp<DeviceHalInterface> AudioStreamOut::hwDev() const
{
return audioHwDev->hwDevice();
}
status_t AudioStreamOut::getRenderPosition(uint64_t *frames)
{
- if (stream == NULL) {
+ if (stream == 0) {
return NO_INIT;
}
uint32_t halPosition = 0;
- status_t status = stream->get_render_position(stream, &halPosition);
+ status_t status = stream->getRenderPosition(&halPosition);
if (status != NO_ERROR) {
return status;
}
@@ -84,12 +90,12 @@
status_t AudioStreamOut::getPresentationPosition(uint64_t *frames, struct timespec *timestamp)
{
- if (stream == NULL) {
+ if (stream == 0) {
return NO_INIT;
}
uint64_t halPosition = 0;
- status_t status = stream->get_presentation_position(stream, &halPosition, timestamp);
+ status_t status = stream->getPresentationPosition(&halPosition, timestamp);
if (status != NO_ERROR) {
return status;
}
@@ -115,24 +121,23 @@
struct audio_config *config,
const char *address)
{
- audio_stream_out_t *outStream;
+ sp<StreamOutHalInterface> outStream;
audio_output_flags_t customFlags = (config->format == AUDIO_FORMAT_IEC61937)
? (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO)
: flags;
- int status = hwDev()->open_output_stream(
- hwDev(),
+ int status = hwDev()->openOutputStream(
handle,
devices,
customFlags,
config,
- &outStream,
- address);
+ address,
+ &outStream);
ALOGV("AudioStreamOut::open(), HAL returned "
" stream %p, sampleRate %d, Format %#x, "
"channelMask %#x, status %d",
- outStream,
+ outStream.get(),
config->sample_rate,
config->format,
config->channel_mask,
@@ -144,21 +149,20 @@
struct audio_config customConfig = *config;
customConfig.format = AUDIO_FORMAT_PCM_16_BIT;
- status = hwDev()->open_output_stream(
- hwDev(),
+ status = hwDev()->openOutputStream(
handle,
devices,
customFlags,
&customConfig,
- &outStream,
- address);
+ address,
+ &outStream);
ALOGV("AudioStreamOut::open(), treat IEC61937 as PCM, status = %d", status);
}
if (status == NO_ERROR) {
stream = outStream;
mHalFormatHasProportionalFrames = audio_has_proportional_frames(config->format);
- mHalFrameSize = audio_stream_out_frame_size(stream);
+ status = stream->getFrameSize(&mHalFrameSize);
}
return status;
@@ -166,47 +170,46 @@
audio_format_t AudioStreamOut::getFormat() const
{
- return stream->common.get_format(&stream->common);
+ audio_format_t result;
+ return stream->getFormat(&result) == OK ? result : AUDIO_FORMAT_INVALID;
}
uint32_t AudioStreamOut::getSampleRate() const
{
- return stream->common.get_sample_rate(&stream->common);
+ uint32_t result;
+ return stream->getSampleRate(&result) == OK ? result : 0;
}
audio_channel_mask_t AudioStreamOut::getChannelMask() const
{
- return stream->common.get_channels(&stream->common);
+ audio_channel_mask_t result;
+ return stream->getChannelMask(&result) == OK ? result : AUDIO_CHANNEL_INVALID;
}
int AudioStreamOut::flush()
{
- ALOG_ASSERT(stream != NULL);
mRenderPosition = 0;
mFramesWritten = 0;
mFramesWrittenAtStandby = 0;
- if (stream->flush != NULL) {
- return stream->flush(stream);
- }
- return NO_ERROR;
+ status_t result = stream->flush();
+ return result != INVALID_OPERATION ? result : NO_ERROR;
}
int AudioStreamOut::standby()
{
- ALOG_ASSERT(stream != NULL);
mRenderPosition = 0;
mFramesWrittenAtStandby = mFramesWritten;
- return stream->common.standby(&stream->common);
+ return stream->standby();
}
ssize_t AudioStreamOut::write(const void *buffer, size_t numBytes)
{
- ALOG_ASSERT(stream != NULL);
- ssize_t bytesWritten = stream->write(stream, buffer, numBytes);
- if (bytesWritten > 0 && mHalFrameSize > 0) {
+ size_t bytesWritten;
+ status_t result = stream->write(buffer, numBytes, &bytesWritten);
+ if (result == OK && bytesWritten > 0 && mHalFrameSize > 0) {
mFramesWritten += bytesWritten / mHalFrameSize;
}
- return bytesWritten;
+ return result == OK ? bytesWritten : result;
}
} // namespace android
diff --git a/services/audioflinger/AudioStreamOut.h b/services/audioflinger/AudioStreamOut.h
index 768f537..b16b1af 100644
--- a/services/audioflinger/AudioStreamOut.h
+++ b/services/audioflinger/AudioStreamOut.h
@@ -23,11 +23,11 @@
#include <system/audio.h>
-#include "AudioStreamOut.h"
-
namespace android {
class AudioHwDevice;
+class DeviceHalInterface;
+class StreamOutHalInterface;
/**
* Managed access to a HAL output stream.
@@ -38,10 +38,10 @@
// For emphasis, we could also make all pointers to them be "const *",
// but that would clutter the code unnecessarily.
AudioHwDevice * const audioHwDev;
- audio_stream_out_t *stream;
+ sp<StreamOutHalInterface> stream;
const audio_output_flags_t flags;
- audio_hw_device_t *hwDev() const;
+ sp<DeviceHalInterface> hwDev() const;
AudioStreamOut(AudioHwDevice *dev, audio_output_flags_t flags);
@@ -51,7 +51,7 @@
struct audio_config *config,
const char *address);
- virtual ~AudioStreamOut() { }
+ virtual ~AudioStreamOut();
// Get the bottom 32-bits of the 64-bit render position.
status_t getRenderPosition(uint32_t *frames);
diff --git a/services/audioflinger/BufLog.cpp b/services/audioflinger/BufLog.cpp
new file mode 100644
index 0000000..9680eb5
--- /dev/null
+++ b/services/audioflinger/BufLog.cpp
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "BufLog.h"
+#define LOG_TAG "BufLog"
+//#define LOG_NDEBUG 0
+
+#include <errno.h>
+#include "log/log.h"
+#include <pthread.h>
+#include <stdio.h>
+#include <string.h>
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+
+// ------------------------------
+// BufLogSingleton
+// ------------------------------
+pthread_once_t onceControl = PTHREAD_ONCE_INIT;
+
+BufLog *BufLogSingleton::mInstance = NULL;
+
+void BufLogSingleton::initOnce() {
+ mInstance = new BufLog();
+ ALOGW("=====================================\n" \
+ "Warning: BUFLOG is defined in some part of your code.\n" \
+ "This will create large audio dumps in %s.\n" \
+ "=====================================\n", BUFLOG_BASE_PATH);
+}
+
+BufLog *BufLogSingleton::instance() {
+ pthread_once(&onceControl, initOnce);
+ return mInstance;
+}
+
+bool BufLogSingleton::instanceExists() {
+ return mInstance != NULL;
+}
+
+// ------------------------------
+// BufLog
+// ------------------------------
+
+BufLog::BufLog() {
+ memset(mStreams, 0, sizeof(mStreams));
+}
+
+BufLog::~BufLog() {
+ android::Mutex::Autolock autoLock(mLock);
+
+ for (unsigned int id = 0; id < BUFLOG_MAXSTREAMS; id++) {
+ BufLogStream *pBLStream = mStreams[id];
+ if (pBLStream != NULL) {
+ delete pBLStream ;
+ mStreams[id] = NULL;
+ }
+ }
+}
+
+size_t BufLog::write(int streamid, const char *tag, int format, int channels,
+ int samplingRate, size_t maxBytes, const void *buf, size_t size) {
+ unsigned int id = streamid % BUFLOG_MAXSTREAMS;
+ android::Mutex::Autolock autoLock(mLock);
+
+ BufLogStream *pBLStream = mStreams[id];
+
+ if (pBLStream == NULL) {
+ pBLStream = mStreams[id] = new BufLogStream(id, tag, format, channels,
+ samplingRate, maxBytes);
+ ALOG_ASSERT(pBLStream != NULL, "BufLogStream Failed to be created");
+ }
+
+ return pBLStream->write(buf, size);
+}
+
+void BufLog::reset() {
+ android::Mutex::Autolock autoLock(mLock);
+ ALOGV("Resetting all BufLogs");
+ int count = 0;
+
+ for (unsigned int id = 0; id < BUFLOG_MAXSTREAMS; id++) {
+ BufLogStream *pBLStream = mStreams[id];
+ if (pBLStream != NULL) {
+ delete pBLStream;
+ mStreams[id] = NULL;
+ count++;
+ }
+ }
+ ALOGV("Reset %d BufLogs", count);
+}
+
+// ------------------------------
+// BufLogStream
+// ------------------------------
+
+BufLogStream::BufLogStream(unsigned int id,
+ const char *tag,
+ unsigned int format,
+ unsigned int channels,
+ unsigned int samplingRate,
+ size_t maxBytes = 0) : mId(id), mFormat(format), mChannels(channels),
+ mSamplingRate(samplingRate), mMaxBytes(maxBytes) {
+ mByteCount = 0l;
+ mPaused = false;
+ if (tag != NULL) {
+ strncpy(mTag, tag, BUFLOGSTREAM_MAX_TAGSIZE);
+ } else {
+ mTag[0] = 0;
+ }
+ ALOGV("Creating BufLogStream id:%d tag:%s format:%d ch:%d sr:%d maxbytes:%zu", mId, mTag,
+ mFormat, mChannels, mSamplingRate, mMaxBytes);
+
+ //open file (s), info about tag, format, etc.
+ //timestamp
+ char timeStr[16]; //size 16: format %Y%m%d%H%M%S 14 chars + string null terminator
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ struct tm tm;
+ localtime_r(&tv.tv_sec, &tm);
+ strftime(timeStr, sizeof(timeStr), "%Y%m%d%H%M%S", &tm);
+ char logPath[BUFLOG_MAX_PATH_SIZE];
+ snprintf(logPath, BUFLOG_MAX_PATH_SIZE, "%s/%s_%d_%s_%d_%d_%d.raw", BUFLOG_BASE_PATH, timeStr,
+ mId, mTag, mFormat, mChannels, mSamplingRate);
+ ALOGV("data output: %s", logPath);
+
+ mFile = fopen(logPath, "wb");
+ if (mFile != NULL) {
+ ALOGV("Success creating file at: %p", mFile);
+ } else {
+ ALOGE("Error: could not create file BufLogStream %s", strerror(errno));
+ }
+}
+
+void BufLogStream::closeStream_l() {
+ ALOGV("Closing BufLogStream id:%d tag:%s", mId, mTag);
+ if (mFile != NULL) {
+ fclose(mFile);
+ mFile = NULL;
+ }
+}
+
+BufLogStream::~BufLogStream() {
+ ALOGV("Destroying BufLogStream id:%d tag:%s", mId, mTag);
+ android::Mutex::Autolock autoLock(mLock);
+ closeStream_l();
+}
+
+size_t BufLogStream::write(const void *buf, size_t size) {
+
+ size_t bytes = 0;
+ if (!mPaused && mFile != NULL) {
+ if (size > 0 && buf != NULL) {
+ android::Mutex::Autolock autoLock(mLock);
+ if (mMaxBytes > 0) {
+ size = MIN(size, mMaxBytes - mByteCount);
+ }
+ bytes = fwrite(buf, 1, size, mFile);
+ mByteCount += bytes;
+ if (mMaxBytes > 0 && mMaxBytes == mByteCount) {
+ closeStream_l();
+ }
+ }
+ ALOGV("wrote %zu/%zu bytes to BufLogStream %d tag:%s. Total Bytes: %zu", bytes, size, mId,
+ mTag, mByteCount);
+ } else {
+ ALOGV("Warning: trying to write to %s BufLogStream id:%d tag:%s",
+ mPaused ? "paused" : "closed", mId, mTag);
+ }
+ return bytes;
+}
+
+bool BufLogStream::setPause(bool pause) {
+ bool old = mPaused;
+ mPaused = pause;
+ return old;
+}
+
+void BufLogStream::finalize() {
+ android::Mutex::Autolock autoLock(mLock);
+ closeStream_l();
+}
diff --git a/services/audioflinger/BufLog.h b/services/audioflinger/BufLog.h
new file mode 100644
index 0000000..1b402f4
--- /dev/null
+++ b/services/audioflinger/BufLog.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_BUFLOG_H
+#define ANDROID_AUDIO_BUFLOG_H
+
+/*
+ * BUFLOG creates up to BUFLOG_MAXSTREAMS simultaneous streams [0:15] of audio buffer data
+ * and saves them to disk. The files are stored in the path specified in BUFLOG_BASE_PATH and
+ * are named following this format:
+ * YYYYMMDDHHMMSS_id_tag_format_channels_samplingrate.raw
+ *
+ * Normally we strip BUFLOG dumps from release builds.
+ * You can modify this (for example with "#define BUFLOG_NDEBUG 0"
+ * at the top of your source file) to change that behavior.
+ *
+ * usage:
+ * - Add this to the top of the source file you want to debug:
+ * #define BUFLOG_NDEBUG 0
+ * #include "BufLog.h"
+ *
+ * - dump an audio buffer
+ * BUFLOG(buff_id, buff_tag, format, channels, sampling_rate, max_bytes, buff_pointer, buff_size);
+ *
+ * buff_id: int [0:15] buffer id. If a buffer doesn't exist, it is created the first time.
+ * buff_tag: char* string tag used on stream filename and logs
+ * format: int Audio format (audio_format_t see audio.h)
+ * channels: int Channel Count
+ * sampling_rate: int Sampling rate in Hz. e.g. 8000, 16000, 44100, 48000, etc
+ * max_bytes: int [0 or positive number]
+ * Maximum size of the file (in bytes) to be output.
+ * If the value is 0, no limit.
+ * buff_pointer: void * Pointer to audio buffer.
+ * buff_size: int Size (in bytes) of the current audio buffer to be stored.
+ *
+ *
+ * Example usage:
+ * int format = mConfig.outputCfg.format;
+ * int channels = audio_channel_count_from_out_mask(mConfig.outputCfg.channels);
+ * int samplingRate = mConfig.outputCfg.samplingRate;
+ * int frameCount = mConfig.outputCfg.buffer.frameCount;
+ * int frameSize = audio_bytes_per_sample((audio_format_t)format) * channels;
+ * int buffSize = frameCount * frameSize;
+ * long maxBytes = 10 * samplingRate * frameSize; //10 seconds max
+ * BUFLOG(11, "loudness_enhancer_out", format, channels, samplingRate, maxBytes,
+ * mConfig.outputCfg.buffer.raw, buffSize);
+ *
+ * Other macros:
+ * BUFLOG_EXISTS returns true if there is an instance of BufLog
+ *
+ * BUFLOG_RESET If an instance of BufLog exists, it stops the capture and closes all
+ * streams.
+ * If a new call to BUFLOG(..) is done, new streams are created.
+ */
+
+#ifndef BUFLOG_NDEBUG
+#ifdef NDEBUG
+#define BUFLOG_NDEBUG 1
+#else
+#define BUFLOG_NDEBUG 0
+#endif
+#endif
+
+/*
+ * Simplified macro to send a buffer.
+ */
+#ifndef BUFLOG
+#define __BUFLOG(STREAMID, TAG, FORMAT, CHANNELS, SAMPLINGRATE, MAXBYTES, BUF, SIZE) \
+ BufLogSingleton::instance()->write(STREAMID, TAG, FORMAT, CHANNELS, SAMPLINGRATE, MAXBYTES, \
+ BUF, SIZE)
+#if BUFLOG_NDEBUG
+#define BUFLOG(STREAMID, TAG, FORMAT, CHANNELS, SAMPLINGRATE, MAXBYTES, BUF, SIZE) \
+ do { if (0) { } } while (0)
+#else
+#define BUFLOG(STREAMID, TAG, FORMAT, CHANNELS, SAMPLINGRATE, MAXBYTES, BUF, SIZE) \
+ __BUFLOG(STREAMID, TAG, FORMAT, CHANNELS, SAMPLINGRATE, MAXBYTES, BUF, SIZE)
+#endif
+#endif
+
+#ifndef BUFLOG_EXISTS
+#define BUFLOG_EXISTS BufLogSingleton::instanceExists()
+#endif
+
+#ifndef BUFLOG_RESET
+#define BUFLOG_RESET do { if (BufLogSingleton::instanceExists()) { \
+ BufLogSingleton::instance()->reset(); } } while (0)
+#endif
+
+
+#include <stdint.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <utils/Mutex.h>
+
+//BufLog configuration
+#define BUFLOGSTREAM_MAX_TAGSIZE 32
+#define BUFLOG_BASE_PATH "/data/misc/audioserver"
+#define BUFLOG_MAX_PATH_SIZE 300
+
+class BufLogStream {
+public:
+ BufLogStream(unsigned int id,
+ const char *tag,
+ unsigned int format,
+ unsigned int channels,
+ unsigned int samplingRate,
+ size_t maxBytes);
+ ~BufLogStream();
+
+ // write buffer to stream
+ // buf: pointer to buffer
+ // size: number of bytes to write
+ size_t write(const void *buf, size_t size);
+
+ // pause/resume stream
+ // pause: true = paused, false = not paused
+ // return value: previous state of stream (paused or not).
+ bool setPause(bool pause);
+
+ // will stop the stream and close any open file
+ // the stream can't be reopened. Instead, a new stream (and file) should be created.
+ void finalize();
+
+private:
+ bool mPaused;
+ const unsigned int mId;
+ char mTag[BUFLOGSTREAM_MAX_TAGSIZE + 1];
+ const unsigned int mFormat;
+ const unsigned int mChannels;
+ const unsigned int mSamplingRate;
+ const size_t mMaxBytes;
+ size_t mByteCount;
+ FILE *mFile;
+ mutable android::Mutex mLock;
+
+ void closeStream_l();
+};
+
+
+class BufLog {
+public:
+ BufLog();
+ ~BufLog();
+ BufLog(BufLog const&) {};
+
+ // streamid: int [0:BUFLOG_MAXSTREAMS-1] buffer id.
+ //           If a buffer doesn't exist, it is created the first time it is referenced
+ // tag: char* string tag used on stream filename and logs
+ // format: int Audio format (audio_format_t see audio.h)
+ // channels: int Channel Count
+ // samplingRate: int Sampling rate in Hz. e.g. 8000, 16000, 44100, 48000, etc
+ // maxBytes: int [0 or positive number]
+ // Maximum size of the file (in bytes) to be output.
+ // If the value is 0, no limit.
+ // size: int Size (in bytes) of the current audio buffer to be written.
+ // buf: void * Pointer to audio buffer.
+ size_t write(int streamid,
+ const char *tag,
+ int format,
+ int channels,
+ int samplingRate,
+ size_t maxBytes,
+ const void *buf,
+ size_t size);
+
+ // reset will stop and close all active streams, thus finalizing any open file.
+ // New streams will be created if write() is called again.
+ void reset();
+
+protected:
+ static const unsigned int BUFLOG_MAXSTREAMS = 16;
+ BufLogStream *mStreams[BUFLOG_MAXSTREAMS];
+ mutable android::Mutex mLock;
+};
+
+class BufLogSingleton {
+public:
+ static BufLog *instance();
+ static bool instanceExists();
+
+private:
+ static void initOnce();
+ static BufLog *mInstance;
+};
+
+#endif //ANDROID_AUDIO_BUFLOG_H
diff --git a/services/audioflinger/BufferProviders.cpp b/services/audioflinger/BufferProviders.cpp
deleted file mode 100644
index 7b6dfcb..0000000
--- a/services/audioflinger/BufferProviders.cpp
+++ /dev/null
@@ -1,541 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "BufferProvider"
-//#define LOG_NDEBUG 0
-
-#include <audio_effects/effect_downmix.h>
-#include <audio_utils/primitives.h>
-#include <audio_utils/format.h>
-#include <media/AudioResamplerPublic.h>
-#include <media/EffectsFactoryApi.h>
-
-#include <utils/Log.h>
-
-#include "Configuration.h"
-#include "BufferProviders.h"
-
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
-#endif
-
-namespace android {
-
-// ----------------------------------------------------------------------------
-
-template <typename T>
-static inline T min(const T& a, const T& b)
-{
- return a < b ? a : b;
-}
-
-CopyBufferProvider::CopyBufferProvider(size_t inputFrameSize,
- size_t outputFrameSize, size_t bufferFrameCount) :
- mInputFrameSize(inputFrameSize),
- mOutputFrameSize(outputFrameSize),
- mLocalBufferFrameCount(bufferFrameCount),
- mLocalBufferData(NULL),
- mConsumed(0)
-{
- ALOGV("CopyBufferProvider(%p)(%zu, %zu, %zu)", this,
- inputFrameSize, outputFrameSize, bufferFrameCount);
- LOG_ALWAYS_FATAL_IF(inputFrameSize < outputFrameSize && bufferFrameCount == 0,
- "Requires local buffer if inputFrameSize(%zu) < outputFrameSize(%zu)",
- inputFrameSize, outputFrameSize);
- if (mLocalBufferFrameCount) {
- (void)posix_memalign(&mLocalBufferData, 32, mLocalBufferFrameCount * mOutputFrameSize);
- }
- mBuffer.frameCount = 0;
-}
-
-CopyBufferProvider::~CopyBufferProvider()
-{
- ALOGV("~CopyBufferProvider(%p)", this);
- if (mBuffer.frameCount != 0) {
- mTrackBufferProvider->releaseBuffer(&mBuffer);
- }
- free(mLocalBufferData);
-}
-
-status_t CopyBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer)
-{
- //ALOGV("CopyBufferProvider(%p)::getNextBuffer(%p (%zu))",
- // this, pBuffer, pBuffer->frameCount);
- if (mLocalBufferFrameCount == 0) {
- status_t res = mTrackBufferProvider->getNextBuffer(pBuffer);
- if (res == OK) {
- copyFrames(pBuffer->raw, pBuffer->raw, pBuffer->frameCount);
- }
- return res;
- }
- if (mBuffer.frameCount == 0) {
- mBuffer.frameCount = pBuffer->frameCount;
- status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer);
- // At one time an upstream buffer provider had
- // res == OK and mBuffer.frameCount == 0, doesn't seem to happen now 7/18/2014.
- //
- // By API spec, if res != OK, then mBuffer.frameCount == 0.
- // but there may be improper implementations.
- ALOG_ASSERT(res == OK || mBuffer.frameCount == 0);
- if (res != OK || mBuffer.frameCount == 0) { // not needed by API spec, but to be safe.
- pBuffer->raw = NULL;
- pBuffer->frameCount = 0;
- return res;
- }
- mConsumed = 0;
- }
- ALOG_ASSERT(mConsumed < mBuffer.frameCount);
- size_t count = min(mLocalBufferFrameCount, mBuffer.frameCount - mConsumed);
- count = min(count, pBuffer->frameCount);
- pBuffer->raw = mLocalBufferData;
- pBuffer->frameCount = count;
- copyFrames(pBuffer->raw, (uint8_t*)mBuffer.raw + mConsumed * mInputFrameSize,
- pBuffer->frameCount);
- return OK;
-}
-
-void CopyBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer)
-{
- //ALOGV("CopyBufferProvider(%p)::releaseBuffer(%p(%zu))",
- // this, pBuffer, pBuffer->frameCount);
- if (mLocalBufferFrameCount == 0) {
- mTrackBufferProvider->releaseBuffer(pBuffer);
- return;
- }
- // LOG_ALWAYS_FATAL_IF(pBuffer->frameCount == 0, "Invalid framecount");
- mConsumed += pBuffer->frameCount; // TODO: update for efficiency to reuse existing content
- if (mConsumed != 0 && mConsumed >= mBuffer.frameCount) {
- mTrackBufferProvider->releaseBuffer(&mBuffer);
- ALOG_ASSERT(mBuffer.frameCount == 0);
- }
- pBuffer->raw = NULL;
- pBuffer->frameCount = 0;
-}
-
-void CopyBufferProvider::reset()
-{
- if (mBuffer.frameCount != 0) {
- mTrackBufferProvider->releaseBuffer(&mBuffer);
- }
- mConsumed = 0;
-}
-
-DownmixerBufferProvider::DownmixerBufferProvider(
- audio_channel_mask_t inputChannelMask,
- audio_channel_mask_t outputChannelMask, audio_format_t format,
- uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount) :
- CopyBufferProvider(
- audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(inputChannelMask),
- audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(outputChannelMask),
- bufferFrameCount) // set bufferFrameCount to 0 to do in-place
-{
- ALOGV("DownmixerBufferProvider(%p)(%#x, %#x, %#x %u %d)",
- this, inputChannelMask, outputChannelMask, format,
- sampleRate, sessionId);
- if (!sIsMultichannelCapable
- || EffectCreate(&sDwnmFxDesc.uuid,
- sessionId,
- SESSION_ID_INVALID_AND_IGNORED,
- &mDownmixHandle) != 0) {
- ALOGE("DownmixerBufferProvider() error creating downmixer effect");
- mDownmixHandle = NULL;
- return;
- }
- // channel input configuration will be overridden per-track
- mDownmixConfig.inputCfg.channels = inputChannelMask; // FIXME: Should be bits
- mDownmixConfig.outputCfg.channels = outputChannelMask; // FIXME: should be bits
- mDownmixConfig.inputCfg.format = format;
- mDownmixConfig.outputCfg.format = format;
- mDownmixConfig.inputCfg.samplingRate = sampleRate;
- mDownmixConfig.outputCfg.samplingRate = sampleRate;
- mDownmixConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
- mDownmixConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE;
- // input and output buffer provider, and frame count will not be used as the downmix effect
- // process() function is called directly (see DownmixerBufferProvider::getNextBuffer())
- mDownmixConfig.inputCfg.mask = EFFECT_CONFIG_SMP_RATE | EFFECT_CONFIG_CHANNELS |
- EFFECT_CONFIG_FORMAT | EFFECT_CONFIG_ACC_MODE;
- mDownmixConfig.outputCfg.mask = mDownmixConfig.inputCfg.mask;
-
- int cmdStatus;
- uint32_t replySize = sizeof(int);
-
- // Configure downmixer
- status_t status = (*mDownmixHandle)->command(mDownmixHandle,
- EFFECT_CMD_SET_CONFIG /*cmdCode*/, sizeof(effect_config_t) /*cmdSize*/,
- &mDownmixConfig /*pCmdData*/,
- &replySize, &cmdStatus /*pReplyData*/);
- if (status != 0 || cmdStatus != 0) {
- ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while configuring downmixer",
- status, cmdStatus);
- EffectRelease(mDownmixHandle);
- mDownmixHandle = NULL;
- return;
- }
-
- // Enable downmixer
- replySize = sizeof(int);
- status = (*mDownmixHandle)->command(mDownmixHandle,
- EFFECT_CMD_ENABLE /*cmdCode*/, 0 /*cmdSize*/, NULL /*pCmdData*/,
- &replySize, &cmdStatus /*pReplyData*/);
- if (status != 0 || cmdStatus != 0) {
- ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while enabling downmixer",
- status, cmdStatus);
- EffectRelease(mDownmixHandle);
- mDownmixHandle = NULL;
- return;
- }
-
- // Set downmix type
- // parameter size rounded for padding on 32bit boundary
- const int psizePadded = ((sizeof(downmix_params_t) - 1)/sizeof(int) + 1) * sizeof(int);
- const int downmixParamSize =
- sizeof(effect_param_t) + psizePadded + sizeof(downmix_type_t);
- effect_param_t * const param = (effect_param_t *) malloc(downmixParamSize);
- param->psize = sizeof(downmix_params_t);
- const downmix_params_t downmixParam = DOWNMIX_PARAM_TYPE;
- memcpy(param->data, &downmixParam, param->psize);
- const downmix_type_t downmixType = DOWNMIX_TYPE_FOLD;
- param->vsize = sizeof(downmix_type_t);
- memcpy(param->data + psizePadded, &downmixType, param->vsize);
- replySize = sizeof(int);
- status = (*mDownmixHandle)->command(mDownmixHandle,
- EFFECT_CMD_SET_PARAM /* cmdCode */, downmixParamSize /* cmdSize */,
- param /*pCmdData*/, &replySize, &cmdStatus /*pReplyData*/);
- free(param);
- if (status != 0 || cmdStatus != 0) {
- ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while setting downmix type",
- status, cmdStatus);
- EffectRelease(mDownmixHandle);
- mDownmixHandle = NULL;
- return;
- }
- ALOGV("DownmixerBufferProvider() downmix type set to %d", (int) downmixType);
-}
-
-DownmixerBufferProvider::~DownmixerBufferProvider()
-{
- ALOGV("~DownmixerBufferProvider (%p)", this);
- EffectRelease(mDownmixHandle);
- mDownmixHandle = NULL;
-}
-
-void DownmixerBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
-{
- mDownmixConfig.inputCfg.buffer.frameCount = frames;
- mDownmixConfig.inputCfg.buffer.raw = const_cast<void *>(src);
- mDownmixConfig.outputCfg.buffer.frameCount = frames;
- mDownmixConfig.outputCfg.buffer.raw = dst;
- // may be in-place if src == dst.
- status_t res = (*mDownmixHandle)->process(mDownmixHandle,
- &mDownmixConfig.inputCfg.buffer, &mDownmixConfig.outputCfg.buffer);
- ALOGE_IF(res != OK, "DownmixBufferProvider error %d", res);
-}
-
-/* call once in a pthread_once handler. */
-/*static*/ status_t DownmixerBufferProvider::init()
-{
- // find multichannel downmix effect if we have to play multichannel content
- uint32_t numEffects = 0;
- int ret = EffectQueryNumberEffects(&numEffects);
- if (ret != 0) {
- ALOGE("AudioMixer() error %d querying number of effects", ret);
- return NO_INIT;
- }
- ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects);
-
- for (uint32_t i = 0 ; i < numEffects ; i++) {
- if (EffectQueryEffect(i, &sDwnmFxDesc) == 0) {
- ALOGV("effect %d is called %s", i, sDwnmFxDesc.name);
- if (memcmp(&sDwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
- ALOGI("found effect \"%s\" from %s",
- sDwnmFxDesc.name, sDwnmFxDesc.implementor);
- sIsMultichannelCapable = true;
- break;
- }
- }
- }
- ALOGW_IF(!sIsMultichannelCapable, "unable to find downmix effect");
- return NO_INIT;
-}
-
-/*static*/ bool DownmixerBufferProvider::sIsMultichannelCapable = false;
-/*static*/ effect_descriptor_t DownmixerBufferProvider::sDwnmFxDesc;
-
-RemixBufferProvider::RemixBufferProvider(audio_channel_mask_t inputChannelMask,
- audio_channel_mask_t outputChannelMask, audio_format_t format,
- size_t bufferFrameCount) :
- CopyBufferProvider(
- audio_bytes_per_sample(format)
- * audio_channel_count_from_out_mask(inputChannelMask),
- audio_bytes_per_sample(format)
- * audio_channel_count_from_out_mask(outputChannelMask),
- bufferFrameCount),
- mFormat(format),
- mSampleSize(audio_bytes_per_sample(format)),
- mInputChannels(audio_channel_count_from_out_mask(inputChannelMask)),
- mOutputChannels(audio_channel_count_from_out_mask(outputChannelMask))
-{
- ALOGV("RemixBufferProvider(%p)(%#x, %#x, %#x) %zu %zu",
- this, format, inputChannelMask, outputChannelMask,
- mInputChannels, mOutputChannels);
- (void) memcpy_by_index_array_initialization_from_channel_mask(
- mIdxAry, ARRAY_SIZE(mIdxAry), outputChannelMask, inputChannelMask);
-}
-
-void RemixBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
-{
- memcpy_by_index_array(dst, mOutputChannels,
- src, mInputChannels, mIdxAry, mSampleSize, frames);
-}
-
-ReformatBufferProvider::ReformatBufferProvider(int32_t channelCount,
- audio_format_t inputFormat, audio_format_t outputFormat,
- size_t bufferFrameCount) :
- CopyBufferProvider(
- channelCount * audio_bytes_per_sample(inputFormat),
- channelCount * audio_bytes_per_sample(outputFormat),
- bufferFrameCount),
- mChannelCount(channelCount),
- mInputFormat(inputFormat),
- mOutputFormat(outputFormat)
-{
- ALOGV("ReformatBufferProvider(%p)(%u, %#x, %#x)",
- this, channelCount, inputFormat, outputFormat);
-}
-
-void ReformatBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
-{
- memcpy_by_audio_format(dst, mOutputFormat, src, mInputFormat, frames * mChannelCount);
-}
-
-TimestretchBufferProvider::TimestretchBufferProvider(int32_t channelCount,
- audio_format_t format, uint32_t sampleRate, const AudioPlaybackRate &playbackRate) :
- mChannelCount(channelCount),
- mFormat(format),
- mSampleRate(sampleRate),
- mFrameSize(channelCount * audio_bytes_per_sample(format)),
- mLocalBufferFrameCount(0),
- mLocalBufferData(NULL),
- mRemaining(0),
- mSonicStream(sonicCreateStream(sampleRate, mChannelCount)),
- mFallbackFailErrorShown(false),
- mAudioPlaybackRateValid(false)
-{
- LOG_ALWAYS_FATAL_IF(mSonicStream == NULL,
- "TimestretchBufferProvider can't allocate Sonic stream");
-
- setPlaybackRate(playbackRate);
- ALOGV("TimestretchBufferProvider(%p)(%u, %#x, %u %f %f %d %d)",
- this, channelCount, format, sampleRate, playbackRate.mSpeed,
- playbackRate.mPitch, playbackRate.mStretchMode, playbackRate.mFallbackMode);
- mBuffer.frameCount = 0;
-}
-
-TimestretchBufferProvider::~TimestretchBufferProvider()
-{
- ALOGV("~TimestretchBufferProvider(%p)", this);
- sonicDestroyStream(mSonicStream);
- if (mBuffer.frameCount != 0) {
- mTrackBufferProvider->releaseBuffer(&mBuffer);
- }
- free(mLocalBufferData);
-}
-
-status_t TimestretchBufferProvider::getNextBuffer(
- AudioBufferProvider::Buffer *pBuffer)
-{
- ALOGV("TimestretchBufferProvider(%p)::getNextBuffer(%p (%zu))",
- this, pBuffer, pBuffer->frameCount);
-
- // BYPASS
- //return mTrackBufferProvider->getNextBuffer(pBuffer);
-
- // check if previously processed data is sufficient.
- if (pBuffer->frameCount <= mRemaining) {
- ALOGV("previous sufficient");
- pBuffer->raw = mLocalBufferData;
- return OK;
- }
-
- // do we need to resize our buffer?
- if (pBuffer->frameCount > mLocalBufferFrameCount) {
- void *newmem;
- if (posix_memalign(&newmem, 32, pBuffer->frameCount * mFrameSize) == OK) {
- if (mRemaining != 0) {
- memcpy(newmem, mLocalBufferData, mRemaining * mFrameSize);
- }
- free(mLocalBufferData);
- mLocalBufferData = newmem;
- mLocalBufferFrameCount = pBuffer->frameCount;
- }
- }
-
- // need to fetch more data
- const size_t outputDesired = pBuffer->frameCount - mRemaining;
- size_t dstAvailable;
- do {
- mBuffer.frameCount = mPlaybackRate.mSpeed == AUDIO_TIMESTRETCH_SPEED_NORMAL
- ? outputDesired : outputDesired * mPlaybackRate.mSpeed + 1;
-
- status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer);
-
- ALOG_ASSERT(res == OK || mBuffer.frameCount == 0);
- if (res != OK || mBuffer.frameCount == 0) { // not needed by API spec, but to be safe.
- ALOGV("upstream provider cannot provide data");
- if (mRemaining == 0) {
- pBuffer->raw = NULL;
- pBuffer->frameCount = 0;
- return res;
- } else { // return partial count
- pBuffer->raw = mLocalBufferData;
- pBuffer->frameCount = mRemaining;
- return OK;
- }
- }
-
- // time-stretch the data
- dstAvailable = min(mLocalBufferFrameCount - mRemaining, outputDesired);
- size_t srcAvailable = mBuffer.frameCount;
- processFrames((uint8_t*)mLocalBufferData + mRemaining * mFrameSize, &dstAvailable,
- mBuffer.raw, &srcAvailable);
-
- // release all data consumed
- mBuffer.frameCount = srcAvailable;
- mTrackBufferProvider->releaseBuffer(&mBuffer);
- } while (dstAvailable == 0); // try until we get output data or upstream provider fails.
-
- // update buffer vars with the actual data processed and return with buffer
- mRemaining += dstAvailable;
-
- pBuffer->raw = mLocalBufferData;
- pBuffer->frameCount = mRemaining;
-
- return OK;
-}
-
-void TimestretchBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer)
-{
- ALOGV("TimestretchBufferProvider(%p)::releaseBuffer(%p (%zu))",
- this, pBuffer, pBuffer->frameCount);
-
- // BYPASS
- //return mTrackBufferProvider->releaseBuffer(pBuffer);
-
- // LOG_ALWAYS_FATAL_IF(pBuffer->frameCount == 0, "Invalid framecount");
- if (pBuffer->frameCount < mRemaining) {
- memcpy(mLocalBufferData,
- (uint8_t*)mLocalBufferData + pBuffer->frameCount * mFrameSize,
- (mRemaining - pBuffer->frameCount) * mFrameSize);
- mRemaining -= pBuffer->frameCount;
- } else if (pBuffer->frameCount == mRemaining) {
- mRemaining = 0;
- } else {
- LOG_ALWAYS_FATAL("Releasing more frames(%zu) than available(%zu)",
- pBuffer->frameCount, mRemaining);
- }
-
- pBuffer->raw = NULL;
- pBuffer->frameCount = 0;
-}
-
-void TimestretchBufferProvider::reset()
-{
- mRemaining = 0;
-}
-
-status_t TimestretchBufferProvider::setPlaybackRate(const AudioPlaybackRate &playbackRate)
-{
- mPlaybackRate = playbackRate;
- mFallbackFailErrorShown = false;
- sonicSetSpeed(mSonicStream, mPlaybackRate.mSpeed);
- //TODO: pitch is ignored for now
- //TODO: optimize: if parameters are the same, don't do any extra computation.
-
- mAudioPlaybackRateValid = isAudioPlaybackRateValid(mPlaybackRate);
- return OK;
-}
-
-void TimestretchBufferProvider::processFrames(void *dstBuffer, size_t *dstFrames,
- const void *srcBuffer, size_t *srcFrames)
-{
- ALOGV("processFrames(%zu %zu) remaining(%zu)", *dstFrames, *srcFrames, mRemaining);
- // Note dstFrames is the required number of frames.
-
- if (!mAudioPlaybackRateValid) {
- //fallback mode
- // Ensure consumption from src is as expected.
- // TODO: add logic to track "very accurate" consumption related to speed, original sampling
- // rate, actual frames processed.
-
- const size_t targetSrc = *dstFrames * mPlaybackRate.mSpeed;
- if (*srcFrames < targetSrc) { // limit dst frames to that possible
- *dstFrames = *srcFrames / mPlaybackRate.mSpeed;
- } else if (*srcFrames > targetSrc + 1) {
- *srcFrames = targetSrc + 1;
- }
- if (*dstFrames > 0) {
- switch(mPlaybackRate.mFallbackMode) {
- case AUDIO_TIMESTRETCH_FALLBACK_CUT_REPEAT:
- if (*dstFrames <= *srcFrames) {
- size_t copySize = mFrameSize * *dstFrames;
- memcpy(dstBuffer, srcBuffer, copySize);
- } else {
- // cyclically repeat the source.
- for (size_t count = 0; count < *dstFrames; count += *srcFrames) {
- size_t remaining = min(*srcFrames, *dstFrames - count);
- memcpy((uint8_t*)dstBuffer + mFrameSize * count,
- srcBuffer, mFrameSize * remaining);
- }
- }
- break;
- case AUDIO_TIMESTRETCH_FALLBACK_DEFAULT:
- case AUDIO_TIMESTRETCH_FALLBACK_MUTE:
- memset(dstBuffer,0, mFrameSize * *dstFrames);
- break;
- case AUDIO_TIMESTRETCH_FALLBACK_FAIL:
- default:
- if(!mFallbackFailErrorShown) {
- ALOGE("invalid parameters in TimestretchBufferProvider fallbackMode:%d",
- mPlaybackRate.mFallbackMode);
- mFallbackFailErrorShown = true;
- }
- break;
- }
- }
- } else {
- switch (mFormat) {
- case AUDIO_FORMAT_PCM_FLOAT:
- if (sonicWriteFloatToStream(mSonicStream, (float*)srcBuffer, *srcFrames) != 1) {
- ALOGE("sonicWriteFloatToStream cannot realloc");
- *srcFrames = 0; // cannot consume all of srcBuffer
- }
- *dstFrames = sonicReadFloatFromStream(mSonicStream, (float*)dstBuffer, *dstFrames);
- break;
- case AUDIO_FORMAT_PCM_16_BIT:
- if (sonicWriteShortToStream(mSonicStream, (short*)srcBuffer, *srcFrames) != 1) {
- ALOGE("sonicWriteShortToStream cannot realloc");
- *srcFrames = 0; // cannot consume all of srcBuffer
- }
- *dstFrames = sonicReadShortFromStream(mSonicStream, (short*)dstBuffer, *dstFrames);
- break;
- default:
- // could also be caught on construction
- LOG_ALWAYS_FATAL("invalid format %#x for TimestretchBufferProvider", mFormat);
- }
- }
-}
-// ----------------------------------------------------------------------------
-} // namespace android
diff --git a/services/audioflinger/BufferProviders.h b/services/audioflinger/BufferProviders.h
deleted file mode 100644
index abd43c6..0000000
--- a/services/audioflinger/BufferProviders.h
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_BUFFER_PROVIDERS_H
-#define ANDROID_BUFFER_PROVIDERS_H
-
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <hardware/audio_effect.h>
-#include <media/AudioBufferProvider.h>
-#include <system/audio.h>
-#include <sonic.h>
-
-namespace android {
-
-// ----------------------------------------------------------------------------
-
-class PassthruBufferProvider : public AudioBufferProvider {
-public:
- PassthruBufferProvider() : mTrackBufferProvider(NULL) { }
-
- virtual ~PassthruBufferProvider() { }
-
- // call this to release the buffer to the upstream provider.
- // treat it as an audio discontinuity for future samples.
- virtual void reset() { }
-
- // set the upstream buffer provider. Consider calling "reset" before this function.
- virtual void setBufferProvider(AudioBufferProvider *p) {
- mTrackBufferProvider = p;
- }
-
-protected:
- AudioBufferProvider *mTrackBufferProvider;
-};
-
-// Base AudioBufferProvider class used for DownMixerBufferProvider, RemixBufferProvider,
-// and ReformatBufferProvider.
-// It handles a private buffer for use in converting format or channel masks from the
-// input data to a form acceptable by the mixer.
-// TODO: Make a ResamplerBufferProvider when integers are entirely removed from the
-// processing pipeline.
-class CopyBufferProvider : public PassthruBufferProvider {
-public:
- // Use a private buffer of bufferFrameCount frames (each frame is outputFrameSize bytes).
- // If bufferFrameCount is 0, no private buffer is created and in-place modification of
- // the upstream buffer provider's buffers is performed by copyFrames().
- CopyBufferProvider(size_t inputFrameSize, size_t outputFrameSize,
- size_t bufferFrameCount);
- virtual ~CopyBufferProvider();
-
- // Overrides AudioBufferProvider methods
- virtual status_t getNextBuffer(Buffer *buffer);
- virtual void releaseBuffer(Buffer *buffer);
-
- // Overrides PassthruBufferProvider
- virtual void reset();
-
- // this function should be supplied by the derived class. It converts
- // #frames in the *src pointer to the *dst pointer. It is public because
- // some providers will allow this to work on arbitrary buffers outside
- // of the internal buffers.
- virtual void copyFrames(void *dst, const void *src, size_t frames) = 0;
-
-protected:
- const size_t mInputFrameSize;
- const size_t mOutputFrameSize;
-private:
- AudioBufferProvider::Buffer mBuffer;
- const size_t mLocalBufferFrameCount;
- void *mLocalBufferData;
- size_t mConsumed;
-};
-
-// DownmixerBufferProvider derives from CopyBufferProvider to provide
-// position dependent downmixing by an Audio Effect.
-class DownmixerBufferProvider : public CopyBufferProvider {
-public:
- DownmixerBufferProvider(audio_channel_mask_t inputChannelMask,
- audio_channel_mask_t outputChannelMask, audio_format_t format,
- uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount);
- virtual ~DownmixerBufferProvider();
- //Overrides
- virtual void copyFrames(void *dst, const void *src, size_t frames);
-
- bool isValid() const { return mDownmixHandle != NULL; }
- static status_t init();
- static bool isMultichannelCapable() { return sIsMultichannelCapable; }
-
-protected:
- effect_handle_t mDownmixHandle;
- effect_config_t mDownmixConfig;
-
- // effect descriptor for the downmixer used by the mixer
- static effect_descriptor_t sDwnmFxDesc;
- // indicates whether a downmix effect has been found and is usable by this mixer
- static bool sIsMultichannelCapable;
- // FIXME: should we allow effects outside of the framework?
- // We need to here. A special ioId that must be <= -2 so it does not map to a session.
- static const int32_t SESSION_ID_INVALID_AND_IGNORED = -2;
-};
-
-// RemixBufferProvider derives from CopyBufferProvider to perform an
-// upmix or downmix to the proper channel count and mask.
-class RemixBufferProvider : public CopyBufferProvider {
-public:
- RemixBufferProvider(audio_channel_mask_t inputChannelMask,
- audio_channel_mask_t outputChannelMask, audio_format_t format,
- size_t bufferFrameCount);
- //Overrides
- virtual void copyFrames(void *dst, const void *src, size_t frames);
-
-protected:
- const audio_format_t mFormat;
- const size_t mSampleSize;
- const size_t mInputChannels;
- const size_t mOutputChannels;
- int8_t mIdxAry[sizeof(uint32_t) * 8]; // 32 bits => channel indices
-};
-
-// ReformatBufferProvider derives from CopyBufferProvider to convert the input data
-// to an acceptable mixer input format type.
-class ReformatBufferProvider : public CopyBufferProvider {
-public:
- ReformatBufferProvider(int32_t channelCount,
- audio_format_t inputFormat, audio_format_t outputFormat,
- size_t bufferFrameCount);
- virtual void copyFrames(void *dst, const void *src, size_t frames);
-
-protected:
- const uint32_t mChannelCount;
- const audio_format_t mInputFormat;
- const audio_format_t mOutputFormat;
-};
-
-// TimestretchBufferProvider derives from PassthruBufferProvider for time stretching
-class TimestretchBufferProvider : public PassthruBufferProvider {
-public:
- TimestretchBufferProvider(int32_t channelCount,
- audio_format_t format, uint32_t sampleRate,
- const AudioPlaybackRate &playbackRate);
- virtual ~TimestretchBufferProvider();
-
- // Overrides AudioBufferProvider methods
- virtual status_t getNextBuffer(Buffer* buffer);
- virtual void releaseBuffer(Buffer* buffer);
-
- // Overrides PassthruBufferProvider
- virtual void reset();
-
- virtual status_t setPlaybackRate(const AudioPlaybackRate &playbackRate);
-
- // processes frames
- // dstBuffer is where to place the data
- // dstFrames [in/out] is the desired frames (return with actual placed in buffer)
- // srcBuffer is the source data
- // srcFrames [in/out] is the available source frames (return with consumed)
- virtual void processFrames(void *dstBuffer, size_t *dstFrames,
- const void *srcBuffer, size_t *srcFrames);
-
-protected:
- const uint32_t mChannelCount;
- const audio_format_t mFormat;
- const uint32_t mSampleRate; // const for now (TODO change this)
- const size_t mFrameSize;
- AudioPlaybackRate mPlaybackRate;
-
-private:
- AudioBufferProvider::Buffer mBuffer; // for upstream request
- size_t mLocalBufferFrameCount; // size of local buffer
- void *mLocalBufferData; // internally allocated buffer for data returned
- // to caller
- size_t mRemaining; // remaining data in local buffer
- sonicStream mSonicStream; // handle to sonic timestretch object
- //FIXME: this dependency should be abstracted out
- bool mFallbackFailErrorShown; // log fallback error only once
- bool mAudioPlaybackRateValid; // flag for current parameters validity
-};
-
-// ----------------------------------------------------------------------------
-} // namespace android
-
-#endif // ANDROID_BUFFER_PROVIDERS_H
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 09e7fd8..b4029c7 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -21,10 +21,11 @@
#include "Configuration.h"
#include <utils/Log.h>
-#include <audio_effects/effect_visualizer.h>
#include <audio_utils/primitives.h>
#include <private/media/AudioEffectShared.h>
-#include <media/EffectsFactoryApi.h>
+#include <media/audiohal/EffectHalInterface.h>
+#include <media/audiohal/EffectsFactoryHalInterface.h>
+#include <system/audio_effects/effect_visualizer.h>
#include "AudioFlinger.h"
#include "ServiceUtilities.h"
@@ -65,7 +66,6 @@
mThread(thread), mChain(chain), mId(id), mSessionId(sessionId),
mDescriptor(*desc),
// mConfig is set by configure() and not used before then
- mEffectInterface(NULL),
mStatus(NO_INIT), mState(IDLE),
// mMaxDisableWaitCnt is set by configure() and not used before then
// mDisableWaitCnt is set by process() and updateState() and not used before then
@@ -76,7 +76,15 @@
int lStatus;
// create effect engine from effect factory
- mStatus = EffectCreate(&desc->uuid, sessionId, thread->id(), &mEffectInterface);
+ mStatus = -ENODEV;
+ sp<AudioFlinger> audioFlinger = mAudioFlinger.promote();
+ if (audioFlinger != 0) {
+ sp<EffectsFactoryHalInterface> effectsFactory = audioFlinger->getEffectsFactory();
+ if (effectsFactory != 0) {
+ mStatus = effectsFactory->createEffect(
+ &desc->uuid, sessionId, thread->id(), &mEffectInterface);
+ }
+ }
if (mStatus != NO_ERROR) {
return;
@@ -88,22 +96,22 @@
}
setOffloaded(thread->type() == ThreadBase::OFFLOAD, thread->id());
+ ALOGV("Constructor success name %s, Interface %p", mDescriptor.name, mEffectInterface.get());
- ALOGV("Constructor success name %s, Interface %p", mDescriptor.name, mEffectInterface);
return;
Error:
- EffectRelease(mEffectInterface);
- mEffectInterface = NULL;
+ mEffectInterface.clear();
ALOGV("Constructor Error %d", mStatus);
}
AudioFlinger::EffectModule::~EffectModule()
{
ALOGV("Destructor %p", this);
- if (mEffectInterface != NULL) {
+ if (mEffectInterface != 0) {
ALOGW("EffectModule %p destructor called with unreleased interface", this);
release_l();
}
+
}
status_t AudioFlinger::EffectModule::addHandle(EffectHandle *handle)
@@ -180,6 +188,7 @@
// this object is released which can happen after next process is called.
if (mHandles.size() == 0 && !mPinned) {
mState = DESTROYED;
+ mEffectInterface->close();
}
return mHandles.size();
@@ -267,9 +276,7 @@
{
Mutex::Autolock _l(mLock);
- if (mState == DESTROYED || mEffectInterface == NULL ||
- mConfig.inputCfg.buffer.raw == NULL ||
- mConfig.outputCfg.buffer.raw == NULL) {
+ if (mState == DESTROYED || mEffectInterface == 0 || mInBuffer == 0 || mOutBuffer == 0) {
return;
}
@@ -283,9 +290,7 @@
int ret;
if (isProcessImplemented()) {
// do the actual processing in the effect engine
- ret = (*mEffectInterface)->process(mEffectInterface,
- &mConfig.inputCfg.buffer,
- &mConfig.outputCfg.buffer);
+ ret = mEffectInterface->process();
} else {
if (mConfig.inputCfg.buffer.raw != mConfig.outputCfg.buffer.raw) {
size_t frameCnt = mConfig.inputCfg.buffer.frameCount * FCC_2; //always stereo here
@@ -331,10 +336,10 @@
void AudioFlinger::EffectModule::reset_l()
{
- if (mStatus != NO_ERROR || mEffectInterface == NULL) {
+ if (mStatus != NO_ERROR || mEffectInterface == 0) {
return;
}
- (*mEffectInterface)->command(mEffectInterface, EFFECT_CMD_RESET, 0, NULL, 0, NULL);
+ mEffectInterface->command(EFFECT_CMD_RESET, 0, NULL, 0, NULL);
}
status_t AudioFlinger::EffectModule::configure()
@@ -344,7 +349,7 @@
uint32_t size;
audio_channel_mask_t channelMask;
- if (mEffectInterface == NULL) {
+ if (mEffectInterface == 0) {
status = NO_INIT;
goto exit;
}
@@ -403,18 +408,23 @@
mConfig.outputCfg.mask = EFFECT_CONFIG_ALL;
mConfig.inputCfg.buffer.frameCount = thread->frameCount();
mConfig.outputCfg.buffer.frameCount = mConfig.inputCfg.buffer.frameCount;
+ if (mInBuffer != 0) {
+ mInBuffer->setFrameCount(mConfig.inputCfg.buffer.frameCount);
+ }
+ if (mOutBuffer != 0) {
+ mOutBuffer->setFrameCount(mConfig.outputCfg.buffer.frameCount);
+ }
ALOGV("configure() %p thread %p buffer %p framecount %zu",
this, thread.get(), mConfig.inputCfg.buffer.raw, mConfig.inputCfg.buffer.frameCount);
status_t cmdStatus;
size = sizeof(int);
- status = (*mEffectInterface)->command(mEffectInterface,
- EFFECT_CMD_SET_CONFIG,
- sizeof(effect_config_t),
- &mConfig,
- &size,
- &cmdStatus);
+ status = mEffectInterface->command(EFFECT_CMD_SET_CONFIG,
+ sizeof(effect_config_t),
+ &mConfig,
+ &size,
+ &cmdStatus);
if (status == 0) {
status = cmdStatus;
}
@@ -436,12 +446,11 @@
}
*((int32_t *)p->data + 1)= latency;
- (*mEffectInterface)->command(mEffectInterface,
- EFFECT_CMD_SET_PARAM,
- sizeof(effect_param_t) + 8,
- &buf32,
- &size,
- &cmdStatus);
+ mEffectInterface->command(EFFECT_CMD_SET_PARAM,
+ sizeof(effect_param_t) + 8,
+ &buf32,
+ &size,
+ &cmdStatus);
}
mMaxDisableWaitCnt = (MAX_DISABLE_TIME_MS * mConfig.outputCfg.samplingRate) /
@@ -455,17 +464,16 @@
status_t AudioFlinger::EffectModule::init()
{
Mutex::Autolock _l(mLock);
- if (mEffectInterface == NULL) {
+ if (mEffectInterface == 0) {
return NO_INIT;
}
status_t cmdStatus;
uint32_t size = sizeof(status_t);
- status_t status = (*mEffectInterface)->command(mEffectInterface,
- EFFECT_CMD_INIT,
- 0,
- NULL,
- &size,
- &cmdStatus);
+ status_t status = mEffectInterface->command(EFFECT_CMD_INIT,
+ 0,
+ NULL,
+ &size,
+ &cmdStatus);
if (status == 0) {
status = cmdStatus;
}
@@ -478,9 +486,10 @@
(mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) {
sp<ThreadBase> thread = mThread.promote();
if (thread != 0) {
- audio_stream_t *stream = thread->stream();
- if (stream != NULL) {
- stream->add_audio_effect(stream, mEffectInterface);
+ sp<StreamHalInterface> stream = thread->stream();
+ if (stream != 0) {
+ status_t result = stream->addEffect(mEffectInterface);
+ ALOGE_IF(result != OK, "Error when adding effect: %d", result);
}
}
}
@@ -506,7 +515,7 @@
status_t AudioFlinger::EffectModule::start_l()
{
- if (mEffectInterface == NULL) {
+ if (mEffectInterface == 0) {
return NO_INIT;
}
if (mStatus != NO_ERROR) {
@@ -514,12 +523,11 @@
}
status_t cmdStatus;
uint32_t size = sizeof(status_t);
- status_t status = (*mEffectInterface)->command(mEffectInterface,
- EFFECT_CMD_ENABLE,
- 0,
- NULL,
- &size,
- &cmdStatus);
+ status_t status = mEffectInterface->command(EFFECT_CMD_ENABLE,
+ 0,
+ NULL,
+ &size,
+ &cmdStatus);
if (status == 0) {
status = cmdStatus;
}
@@ -537,7 +545,7 @@
status_t AudioFlinger::EffectModule::stop_l()
{
- if (mEffectInterface == NULL) {
+ if (mEffectInterface == 0) {
return NO_INIT;
}
if (mStatus != NO_ERROR) {
@@ -545,12 +553,11 @@
}
status_t cmdStatus = NO_ERROR;
uint32_t size = sizeof(status_t);
- status_t status = (*mEffectInterface)->command(mEffectInterface,
- EFFECT_CMD_DISABLE,
- 0,
- NULL,
- &size,
- &cmdStatus);
+ status_t status = mEffectInterface->command(EFFECT_CMD_DISABLE,
+ 0,
+ NULL,
+ &size,
+ &cmdStatus);
if (status == NO_ERROR) {
status = cmdStatus;
}
@@ -563,11 +570,11 @@
// must be called with EffectChain::mLock held
void AudioFlinger::EffectModule::release_l()
{
- if (mEffectInterface != NULL) {
+ if (mEffectInterface != 0) {
remove_effect_from_hal_l();
// release effect engine
- EffectRelease(mEffectInterface);
- mEffectInterface = NULL;
+ mEffectInterface->close();
+ mEffectInterface.clear();
}
}
@@ -577,9 +584,10 @@
(mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) {
sp<ThreadBase> thread = mThread.promote();
if (thread != 0) {
- audio_stream_t *stream = thread->stream();
- if (stream != NULL) {
- stream->remove_audio_effect(stream, mEffectInterface);
+ sp<StreamHalInterface> stream = thread->stream();
+ if (stream != 0) {
+ status_t result = stream->removeEffect(mEffectInterface);
+ ALOGE_IF(result != OK, "Error when removing effect: %d", result);
}
}
}
@@ -600,25 +608,26 @@
void *pReplyData)
{
Mutex::Autolock _l(mLock);
- ALOGVV("command(), cmdCode: %d, mEffectInterface: %p", cmdCode, mEffectInterface);
+ ALOGVV("command(), cmdCode: %d, mEffectInterface: %p", cmdCode, mEffectInterface.get());
- if (mState == DESTROYED || mEffectInterface == NULL) {
+ if (mState == DESTROYED || mEffectInterface == 0) {
return NO_INIT;
}
if (mStatus != NO_ERROR) {
return mStatus;
}
if (cmdCode == EFFECT_CMD_GET_PARAM &&
- (*replySize < sizeof(effect_param_t) ||
- ((effect_param_t *)pCmdData)->psize > *replySize - sizeof(effect_param_t))) {
- android_errorWriteLog(0x534e4554, "29251553");
- return -EINVAL;
- }
- if (cmdCode == EFFECT_CMD_GET_PARAM &&
(sizeof(effect_param_t) > cmdSize ||
((effect_param_t *)pCmdData)->psize > cmdSize
- sizeof(effect_param_t))) {
android_errorWriteLog(0x534e4554, "32438594");
+ android_errorWriteLog(0x534e4554, "33003822");
+ return -EINVAL;
+ }
+ if (cmdCode == EFFECT_CMD_GET_PARAM &&
+ (*replySize < sizeof(effect_param_t) ||
+ ((effect_param_t *)pCmdData)->psize > *replySize - sizeof(effect_param_t))) {
+ android_errorWriteLog(0x534e4554, "29251553");
return -EINVAL;
}
if (cmdCode == EFFECT_CMD_GET_PARAM &&
@@ -653,12 +662,11 @@
android_errorWriteLog(0x534e4554, "30204301");
return -EINVAL;
}
- status_t status = (*mEffectInterface)->command(mEffectInterface,
- cmdCode,
- cmdSize,
- pCmdData,
- replySize,
- pReplyData);
+ status_t status = mEffectInterface->command(cmdCode,
+ cmdSize,
+ pCmdData,
+ replySize,
+ pReplyData);
if (cmdCode != EFFECT_CMD_GET_PARAM && status == NO_ERROR) {
uint32_t size = (replySize == NULL) ? 0 : *replySize;
for (size_t i = 1; i < mHandles.size(); i++) {
@@ -760,6 +768,28 @@
}
}
+void AudioFlinger::EffectModule::setInBuffer(const sp<EffectBufferHalInterface>& buffer) {
+ if (buffer != 0) {
+ mConfig.inputCfg.buffer.raw = buffer->audioBuffer()->raw;
+ buffer->setFrameCount(mConfig.inputCfg.buffer.frameCount);
+ } else {
+ mConfig.inputCfg.buffer.raw = NULL;
+ }
+ mInBuffer = buffer;
+ mEffectInterface->setInBuffer(buffer);
+}
+
+void AudioFlinger::EffectModule::setOutBuffer(const sp<EffectBufferHalInterface>& buffer) {
+ if (buffer != 0) {
+ mConfig.outputCfg.buffer.raw = buffer->audioBuffer()->raw;
+ buffer->setFrameCount(mConfig.outputCfg.buffer.frameCount);
+ } else {
+ mConfig.outputCfg.buffer.raw = NULL;
+ }
+ mOutBuffer = buffer;
+ mEffectInterface->setOutBuffer(buffer);
+}
+
status_t AudioFlinger::EffectModule::setVolume(uint32_t *left, uint32_t *right, bool controller)
{
Mutex::Autolock _l(mLock);
@@ -780,12 +810,11 @@
if (controller) {
pVolume = volume;
}
- status = (*mEffectInterface)->command(mEffectInterface,
- EFFECT_CMD_SET_VOLUME,
- size,
- volume,
- &size,
- pVolume);
+ status = mEffectInterface->command(EFFECT_CMD_SET_VOLUME,
+ size,
+ volume,
+ &size,
+ pVolume);
if (controller && status == NO_ERROR && size == sizeof(volume)) {
*left = volume[0];
*right = volume[1];
@@ -810,12 +839,11 @@
uint32_t size = sizeof(status_t);
uint32_t cmd = audio_is_output_devices(device) ? EFFECT_CMD_SET_DEVICE :
EFFECT_CMD_SET_INPUT_DEVICE;
- status = (*mEffectInterface)->command(mEffectInterface,
- cmd,
- sizeof(uint32_t),
- &device,
- &size,
- &cmdStatus);
+ status = mEffectInterface->command(cmd,
+ sizeof(uint32_t),
+ &device,
+ &size,
+ &cmdStatus);
}
return status;
}
@@ -830,12 +858,11 @@
if ((mDescriptor.flags & EFFECT_FLAG_AUDIO_MODE_MASK) == EFFECT_FLAG_AUDIO_MODE_IND) {
status_t cmdStatus;
uint32_t size = sizeof(status_t);
- status = (*mEffectInterface)->command(mEffectInterface,
- EFFECT_CMD_SET_AUDIO_MODE,
- sizeof(audio_mode_t),
- &mode,
- &size,
- &cmdStatus);
+ status = mEffectInterface->command(EFFECT_CMD_SET_AUDIO_MODE,
+ sizeof(audio_mode_t),
+ &mode,
+ &size,
+ &cmdStatus);
if (status == NO_ERROR) {
status = cmdStatus;
}
@@ -852,12 +879,11 @@
status_t status = NO_ERROR;
if ((mDescriptor.flags & EFFECT_FLAG_AUDIO_SOURCE_MASK) == EFFECT_FLAG_AUDIO_SOURCE_IND) {
uint32_t size = 0;
- status = (*mEffectInterface)->command(mEffectInterface,
- EFFECT_CMD_SET_AUDIO_SOURCE,
- sizeof(audio_source_t),
- &source,
- &size,
- NULL);
+ status = mEffectInterface->command(EFFECT_CMD_SET_AUDIO_SOURCE,
+ sizeof(audio_source_t),
+ &source,
+ &size,
+ NULL);
}
return status;
}
@@ -903,12 +929,11 @@
cmd.isOffload = offloaded;
cmd.ioHandle = io;
- status = (*mEffectInterface)->command(mEffectInterface,
- EFFECT_CMD_OFFLOAD,
- sizeof(effect_offload_param_t),
- &cmd,
- &size,
- &cmdStatus);
+ status = mEffectInterface->command(EFFECT_CMD_OFFLOAD,
+ sizeof(effect_offload_param_t),
+ &cmd,
+ &size,
+ &cmdStatus);
if (status == NO_ERROR) {
status = cmdStatus;
}
@@ -1051,7 +1076,7 @@
result.append("\t\tSession Status State Engine:\n");
snprintf(buffer, SIZE, "\t\t%05d %03d %03d %p\n",
- mSessionId, mStatus, mState, mEffectInterface);
+ mSessionId, mStatus, mState, mEffectInterface.get());
result.append(buffer);
result.append("\t\tDescriptor:\n");
@@ -1087,7 +1112,7 @@
mConfig.inputCfg.samplingRate,
mConfig.inputCfg.channels,
mConfig.inputCfg.format,
- formatToString((audio_format_t)mConfig.inputCfg.format),
+ formatToString((audio_format_t)mConfig.inputCfg.format).c_str(),
mConfig.inputCfg.buffer.raw);
result.append(buffer);
@@ -1099,7 +1124,7 @@
mConfig.outputCfg.samplingRate,
mConfig.outputCfg.channels,
mConfig.outputCfg.format,
- formatToString((audio_format_t)mConfig.outputCfg.format));
+ formatToString((audio_format_t)mConfig.outputCfg.format).c_str());
result.append(buffer);
snprintf(buffer, SIZE, "\t\t%zu Clients:\n", mHandles.size());
@@ -1200,10 +1225,9 @@
mEnabled = false;
} else {
if (thread != 0) {
- if (thread->type() == ThreadBase::OFFLOAD) {
- PlaybackThread *t = (PlaybackThread *)thread.get();
- Mutex::Autolock _l(t->mLock);
- t->broadcast_l();
+ if (thread->type() == ThreadBase::OFFLOAD || thread->type() == ThreadBase::MMAP) {
+ Mutex::Autolock _l(thread->mLock);
+ thread->broadcast_l();
}
if (!effect->isOffloadable()) {
if (thread->type() == ThreadBase::OFFLOAD) {
@@ -1245,10 +1269,9 @@
sp<ThreadBase> thread = effect->thread().promote();
if (thread != 0) {
thread->checkSuspendOnEffectEnabled(effect, false, effect->sessionId());
- if (thread->type() == ThreadBase::OFFLOAD) {
- PlaybackThread *t = (PlaybackThread *)thread.get();
- Mutex::Autolock _l(t->mLock);
- t->broadcast_l();
+ if (thread->type() == ThreadBase::OFFLOAD || thread->type() == ThreadBase::MMAP) {
+ Mutex::Autolock _l(thread->mLock);
+ thread->broadcast_l();
}
}
@@ -1485,7 +1508,7 @@
AudioFlinger::EffectChain::EffectChain(ThreadBase *thread,
audio_session_t sessionId)
: mThread(thread), mSessionId(sessionId), mActiveTrackCnt(0), mTrackCnt(0), mTailBufferCount(0),
- mOwnInBuffer(false), mVolumeCtrlIdx(-1), mLeftVolume(UINT_MAX), mRightVolume(UINT_MAX),
+ mVolumeCtrlIdx(-1), mLeftVolume(UINT_MAX), mRightVolume(UINT_MAX),
mNewLeftVolume(UINT_MAX), mNewRightVolume(UINT_MAX)
{
mStrategy = AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
@@ -1498,9 +1521,6 @@
AudioFlinger::EffectChain::~EffectChain()
{
- if (mOwnInBuffer) {
- delete mInBuffer;
- }
}
// getEffectFromDesc_l() must be called with ThreadBase::mLock held
@@ -1559,13 +1579,17 @@
// Must be called with EffectChain::mLock locked
void AudioFlinger::EffectChain::clearInputBuffer_l(const sp<ThreadBase>& thread)
{
+ if (mInBuffer == NULL) {
+ return;
+ }
// TODO: This will change in the future, depending on multichannel
// and sample format changes for effects.
// Currently effects processing is only available for stereo, AUDIO_FORMAT_PCM_16_BIT
// (4 bytes frame size)
const size_t frameSize =
audio_bytes_per_sample(AUDIO_FORMAT_PCM_16_BIT) * min(FCC_2, thread->channelCount());
- memset(mInBuffer, 0, thread->frameCount() * frameSize);
+ memset(mInBuffer->audioBuffer()->raw, 0, thread->frameCount() * frameSize);
+ mInBuffer->commit();
}
// Must be called with EffectChain::mLock locked
@@ -1581,7 +1605,8 @@
// never process effects when:
// - on an OFFLOAD thread
// - no more tracks are on the session and the effect tail has been rendered
- bool doProcess = (thread->type() != ThreadBase::OFFLOAD);
+ bool doProcess = (thread->type() != ThreadBase::OFFLOAD)
+ && (thread->type() != ThreadBase::MMAP);
if (!isGlobalSession) {
bool tracksOnSession = (trackCnt() != 0);
@@ -1603,9 +1628,20 @@
size_t size = mEffects.size();
if (doProcess) {
+ // Only the input and output buffers of the chain can be external,
+ // and 'update' / 'commit' do nothing for allocated buffers, thus
+ // it's not needed to consider any other buffers here.
+ mInBuffer->update();
+ if (mInBuffer->audioBuffer()->raw != mOutBuffer->audioBuffer()->raw) {
+ mOutBuffer->update();
+ }
for (size_t i = 0; i < size; i++) {
mEffects[i]->process();
}
+ mInBuffer->commit();
+ if (mInBuffer->audioBuffer()->raw != mOutBuffer->audioBuffer()->raw) {
+ mOutBuffer->commit();
+ }
}
bool doResetVolume = false;
for (size_t i = 0; i < size; i++) {
@@ -1665,9 +1701,11 @@
// accumulation stage. Saturation is done in EffectModule::process() before
// calling the process in effect engine
size_t numSamples = thread->frameCount();
- int32_t *buffer = new int32_t[numSamples];
- memset(buffer, 0, numSamples * sizeof(int32_t));
- effect->setInBuffer((int16_t *)buffer);
+ sp<EffectBufferHalInterface> halBuffer;
+ status_t result = EffectBufferHalInterface::allocate(
+ numSamples * sizeof(int32_t), &halBuffer);
+ if (result != OK) return result;
+ effect->setInBuffer(halBuffer);
// auxiliary effects output samples to chain input buffer for further processing
// by insert effects
effect->setOutBuffer(mInBuffer);
@@ -1778,9 +1816,7 @@
mEffects[i]->release_l();
}
- if (type == EFFECT_FLAG_TYPE_AUXILIARY) {
- delete[] effect->inBuffer();
- } else {
+ if (type != EFFECT_FLAG_TYPE_AUXILIARY) {
if (i == size - 1 && i != 0) {
mEffects[i - 1]->setOutBuffer(mOutBuffer);
mEffects[i - 1]->configure();
@@ -1906,6 +1942,19 @@
}
}
+static void dumpInOutBuffer(
+ char *dump, size_t dumpSize, bool isInput, EffectBufferHalInterface *buffer) {
+ if (buffer == nullptr) {
+ snprintf(dump, dumpSize, "%p", buffer);
+ } else if (buffer->externalData() != nullptr) {
+ snprintf(dump, dumpSize, "%p -> %p",
+ isInput ? buffer->externalData() : buffer->audioBuffer()->raw,
+ isInput ? buffer->audioBuffer()->raw : buffer->externalData());
+ } else {
+ snprintf(dump, dumpSize, "%p", buffer->audioBuffer()->raw);
+ }
+}
+
void AudioFlinger::EffectChain::dump(int fd, const Vector<String16>& args)
{
const size_t SIZE = 256;
@@ -1923,11 +1972,14 @@
result.append("\tCould not lock mutex:\n");
}
- result.append("\tIn buffer Out buffer Active tracks:\n");
- snprintf(buffer, SIZE, "\t%p %p %d\n",
- mInBuffer,
- mOutBuffer,
- mActiveTrackCnt);
+ char inBufferStr[64], outBufferStr[64];
+ dumpInOutBuffer(inBufferStr, sizeof(inBufferStr), true, mInBuffer.get());
+ dumpInOutBuffer(outBufferStr, sizeof(outBufferStr), false, mOutBuffer.get());
+ snprintf(buffer, SIZE, "\t%-*s%-*s Active tracks:\n",
+ (int)strlen(inBufferStr), "In buffer ",
+ (int)strlen(outBufferStr), "Out buffer ");
+ result.append(buffer);
+ snprintf(buffer, SIZE, "\t%s %s %d\n", inBufferStr, outBufferStr, mActiveTrackCnt);
result.append(buffer);
write(fd, result.string(), result.size());
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 8fe0b96..0755c52 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -25,10 +25,11 @@
// state changes or resource modifications. Always respect the following order
// if multiple mutexes must be acquired to avoid cross deadlock:
// AudioFlinger -> ThreadBase -> EffectChain -> EffectModule
+// AudioHandle -> ThreadBase -> EffectChain -> EffectModule
// In addition, methods that lock the AudioPolicyService mutex (getOutputForEffect(),
-// startOutput()...) should never be called with AudioFlinger or Threadbase mutex locked
-// to avoid cross deadlock with other clients calling AudioPolicyService methods that in turn
-// call AudioFlinger thus locking the same mutexes in the reverse order.
+// startOutput(), getInputForAttr(), releaseInput()...) should never be called with AudioFlinger or
+// Threadbase mutex locked to avoid cross deadlock with other clients calling AudioPolicyService
+// methods that in turn call AudioFlinger thus locking the same mutexes in the reverse order.
// The EffectModule class is a wrapper object controlling the effect engine implementation
// in the effect library. It prevents concurrent calls to process() and command() functions
@@ -85,10 +86,14 @@
bool isEnabled() const;
bool isProcessEnabled() const;
- void setInBuffer(int16_t *buffer) { mConfig.inputCfg.buffer.s16 = buffer; }
- int16_t *inBuffer() { return mConfig.inputCfg.buffer.s16; }
- void setOutBuffer(int16_t *buffer) { mConfig.outputCfg.buffer.s16 = buffer; }
- int16_t *outBuffer() { return mConfig.outputCfg.buffer.s16; }
+ void setInBuffer(const sp<EffectBufferHalInterface>& buffer);
+ int16_t *inBuffer() const {
+ return mInBuffer != 0 ? reinterpret_cast<int16_t*>(mInBuffer->ptr()) : NULL;
+ }
+ void setOutBuffer(const sp<EffectBufferHalInterface>& buffer);
+ int16_t *outBuffer() const {
+ return mOutBuffer != 0 ? reinterpret_cast<int16_t*>(mOutBuffer->ptr()) : NULL;
+ }
void setChain(const wp<EffectChain>& chain) { mChain = chain; }
void setThread(const wp<ThreadBase>& thread) { mThread = thread; }
const wp<ThreadBase>& thread() { return mThread; }
@@ -151,7 +156,9 @@
const audio_session_t mSessionId; // audio session ID
const effect_descriptor_t mDescriptor;// effect descriptor received from effect engine
effect_config_t mConfig; // input and output audio configuration
- effect_handle_t mEffectInterface; // Effect module C API
+ sp<EffectHalInterface> mEffectInterface; // Effect module HAL
+ sp<EffectBufferHalInterface> mInBuffer; // Buffers for interacting with HAL
+ sp<EffectBufferHalInterface> mOutBuffer;
status_t mStatus; // initialization status
effect_state mState; // current activation state
Vector<EffectHandle *> mHandles; // list of client handles
@@ -300,18 +307,17 @@
void setMode_l(audio_mode_t mode);
void setAudioSource_l(audio_source_t source);
- void setInBuffer(int16_t *buffer, bool ownsBuffer = false) {
+ void setInBuffer(const sp<EffectBufferHalInterface>& buffer) {
mInBuffer = buffer;
- mOwnInBuffer = ownsBuffer;
}
int16_t *inBuffer() const {
- return mInBuffer;
+ return mInBuffer != 0 ? reinterpret_cast<int16_t*>(mInBuffer->ptr()) : NULL;
}
- void setOutBuffer(int16_t *buffer) {
+ void setOutBuffer(const sp<EffectBufferHalInterface>& buffer) {
mOutBuffer = buffer;
}
int16_t *outBuffer() const {
- return mOutBuffer;
+ return mOutBuffer != 0 ? reinterpret_cast<int16_t*>(mOutBuffer->ptr()) : NULL;
}
void incTrackCnt() { android_atomic_inc(&mTrackCnt); }
@@ -393,8 +399,8 @@
mutable Mutex mLock; // mutex protecting effect list
Vector< sp<EffectModule> > mEffects; // list of effect modules
audio_session_t mSessionId; // audio session ID
- int16_t *mInBuffer; // chain input buffer
- int16_t *mOutBuffer; // chain output buffer
+ sp<EffectBufferHalInterface> mInBuffer; // chain input buffer
+ sp<EffectBufferHalInterface> mOutBuffer; // chain output buffer
// 'volatile' here means these are accessed with atomic operations instead of mutex
volatile int32_t mActiveTrackCnt; // number of active tracks connected
@@ -402,7 +408,6 @@
int32_t mTailBufferCount; // current effect tail buffer count
int32_t mMaxTailBuffers; // maximum effect tail buffers
- bool mOwnInBuffer; // true if the chain owns its input buffer
int mVolumeCtrlIdx; // index of insert effect having control over volume
uint32_t mLeftVolume; // previous volume on left channel
uint32_t mRightVolume; // previous volume on right channel
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 4f83c6d..103e7f8 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -39,7 +39,7 @@
#endif
#include <audio_utils/mono_blend.h>
#include <audio_utils/format.h>
-#include "AudioMixer.h"
+#include <media/AudioMixer.h>
#include "FastMixer.h"
namespace android {
diff --git a/services/audioflinger/FastMixerDumpState.h b/services/audioflinger/FastMixerDumpState.h
index 301c5b1..8ef31d1 100644
--- a/services/audioflinger/FastMixerDumpState.h
+++ b/services/audioflinger/FastMixerDumpState.h
@@ -35,7 +35,7 @@
// This packed representation is used to keep the information atomic.
union FastTrackUnderruns {
FastTrackUnderruns() { mAtomic = 0;
- COMPILE_TIME_ASSERT_FUNCTION_SCOPE(sizeof(FastTrackUnderruns) == sizeof(uint32_t)); }
+ static_assert(sizeof(FastTrackUnderruns) == sizeof(uint32_t), "FastTrackUnderrun"); }
FastTrackUnderruns(const FastTrackUnderruns& copyFrom) : mAtomic(copyFrom.mAtomic) { }
FastTrackUnderruns& operator=(const FastTrackUnderruns& rhs)
{ if (this != &rhs) mAtomic = rhs.mAtomic; return *this; }
diff --git a/services/audioflinger/FastMixerState.cpp b/services/audioflinger/FastMixerState.cpp
index ad471fb..36d8eef 100644
--- a/services/audioflinger/FastMixerState.cpp
+++ b/services/audioflinger/FastMixerState.cpp
@@ -14,6 +14,9 @@
* limitations under the License.
*/
+#define LOG_TAG "FastMixerState"
+//#define LOG_NDEBUG 0
+
#include <cutils/properties.h>
#include "FastMixerState.h"
diff --git a/services/audioflinger/FastThread.cpp b/services/audioflinger/FastThread.cpp
index dca7bf9..cf9fce3 100644
--- a/services/audioflinger/FastThread.cpp
+++ b/services/audioflinger/FastThread.cpp
@@ -22,6 +22,7 @@
#include "Configuration.h"
#include <linux/futex.h>
#include <sys/syscall.h>
+#include <cutils/atomic.h>
#include <utils/Log.h>
#include <utils/Trace.h>
#include "FastThread.h"
@@ -169,7 +170,7 @@
}
int policy = sched_getscheduler(0) & ~SCHED_RESET_ON_FORK;
if (!(policy == SCHED_FIFO || policy == SCHED_RR)) {
- ALOGE("did not receive expected priority boost");
+ ALOGE("did not receive expected priority boost on time");
}
// This may be overly conservative; there could be times that the normal mixer
// requests such a brief cold idle that it doesn't require resetting this flag.
diff --git a/services/audioflinger/FastThreadDumpState.cpp b/services/audioflinger/FastThreadDumpState.cpp
index 9df5c4c..964a725 100644
--- a/services/audioflinger/FastThreadDumpState.cpp
+++ b/services/audioflinger/FastThreadDumpState.cpp
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include <audio_utils/roundup.h>
#include "FastThreadDumpState.h"
namespace android {
diff --git a/services/audioflinger/MmapTracks.h b/services/audioflinger/MmapTracks.h
new file mode 100644
index 0000000..e4fe8ac
--- /dev/null
+++ b/services/audioflinger/MmapTracks.h
@@ -0,0 +1,60 @@
+/*
+**
+** Copyright 2017, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef INCLUDING_FROM_AUDIOFLINGER_H
+ #error This header file should only be included from AudioFlinger.h
+#endif
+
+// playback track
+class MmapTrack : public TrackBase {
+public:
+ MmapTrack(ThreadBase *thread,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ audio_session_t sessionId,
+ uid_t uid,
+ audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
+ virtual ~MmapTrack();
+
+ // TrackBase virtual
+ virtual status_t initCheck() const;
+ virtual status_t start(AudioSystem::sync_event_t event,
+ audio_session_t triggerSession);
+ virtual void stop();
+ virtual bool isFastTrack() const { return false; }
+
+ static void appendDumpHeader(String8& result);
+ void dump(char* buffer, size_t size);
+
+protected:
+ friend class MmapThread;
+
+ MmapTrack(const MmapTrack&);
+ MmapTrack& operator = (const MmapTrack&);
+
+ // AudioBufferProvider interface
+ virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
+ // releaseBuffer() not overridden
+
+ // ExtendedAudioBufferProvider interface
+ virtual size_t framesReady() const;
+ virtual int64_t framesReleased() const;
+ virtual void onTimestamp(const ExtendedTimestamp ×tamp);
+
+}; // end of Track
+
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index bee17fd..d7c0728 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -202,9 +202,9 @@
if (hwModule != AUDIO_MODULE_HANDLE_NONE) {
ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(hwModule);
if (index >= 0) {
- audio_hw_device_t *hwDevice =
+ sp<DeviceHalInterface> hwDevice =
audioflinger->mAudioHwDevs.valueAt(index)->hwDevice();
- hwDevice->release_audio_patch(hwDevice, halHandle);
+ hwDevice->releaseAudioPatch(halHandle);
}
}
}
@@ -247,11 +247,11 @@
// - special patch request with 2 sources (reuse one existing output mix) OR
// - Device to device AND
// - source HW module != destination HW module OR
- // - audio HAL version < 3.0
+ // - audio HAL does not support audio patches creation
if ((patch->num_sources == 2) ||
((patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE) &&
((patch->sinks[0].ext.device.hw_module != srcModule) ||
- (audioHwDevice->version() < AUDIO_DEVICE_API_VERSION_3_0)))) {
+ !audioHwDevice->supportsAudioPatches()))) {
if (patch->num_sources == 2) {
if (patch->sources[1].type != AUDIO_PORT_TYPE_MIX ||
(patch->num_sinks != 0 && patch->sinks[0].ext.device.hw_module !=
@@ -274,13 +274,14 @@
audio_devices_t device = patch->sinks[0].ext.device.type;
String8 address = String8(patch->sinks[0].ext.device.address);
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
- newPatch->mPlaybackThread = audioflinger->openOutput_l(
- patch->sinks[0].ext.device.hw_module,
- &output,
- &config,
- device,
- address,
- AUDIO_OUTPUT_FLAG_NONE);
+ sp<ThreadBase> thread = audioflinger->openOutput_l(
+ patch->sinks[0].ext.device.hw_module,
+ &output,
+ &config,
+ device,
+ address,
+ AUDIO_OUTPUT_FLAG_NONE);
+ newPatch->mPlaybackThread = (PlaybackThread *)thread.get();
ALOGV("audioflinger->openOutput_l() returned %p",
newPatch->mPlaybackThread.get());
if (newPatch->mPlaybackThread == 0) {
@@ -310,13 +311,14 @@
config.format = newPatch->mPlaybackThread->format();
}
audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
- newPatch->mRecordThread = audioflinger->openInput_l(srcModule,
+ sp<ThreadBase> thread = audioflinger->openInput_l(srcModule,
&input,
&config,
device,
address,
AUDIO_SOURCE_MIC,
AUDIO_INPUT_FLAG_NONE);
+ newPatch->mRecordThread = (RecordThread *)thread.get();
ALOGV("audioflinger->openInput_l() returned %p inChannelMask %08x",
newPatch->mRecordThread.get(), config.channel_mask);
if (newPatch->mRecordThread == 0) {
@@ -332,25 +334,23 @@
sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
patch->sinks[0].ext.mix.handle);
if (thread == 0) {
- ALOGW("createAudioPatch() bad capture I/O handle %d",
- patch->sinks[0].ext.mix.handle);
- status = BAD_VALUE;
- goto exit;
+ thread = audioflinger->checkMmapThread_l(patch->sinks[0].ext.mix.handle);
+ if (thread == 0) {
+ ALOGW("createAudioPatch() bad capture I/O handle %d",
+ patch->sinks[0].ext.mix.handle);
+ status = BAD_VALUE;
+ goto exit;
+ }
}
status = thread->sendCreateAudioPatchConfigEvent(patch, &halHandle);
} else {
- if (audioHwDevice->version() < AUDIO_DEVICE_API_VERSION_3_0) {
- status = INVALID_OPERATION;
- goto exit;
- }
-
- audio_hw_device_t *hwDevice = audioHwDevice->hwDevice();
- status = hwDevice->create_audio_patch(hwDevice,
- patch->num_sources,
- patch->sources,
- patch->num_sinks,
- patch->sinks,
- &halHandle);
+ sp<DeviceHalInterface> hwDevice = audioHwDevice->hwDevice();
+ status = hwDevice->createAudioPatch(patch->num_sources,
+ patch->sources,
+ patch->num_sinks,
+ patch->sinks,
+ &halHandle);
+ if (status == INVALID_OPERATION) goto exit;
}
}
} break;
@@ -381,14 +381,17 @@
sp<ThreadBase> thread =
audioflinger->checkPlaybackThread_l(patch->sources[0].ext.mix.handle);
if (thread == 0) {
- ALOGW("createAudioPatch() bad playback I/O handle %d",
- patch->sources[0].ext.mix.handle);
- status = BAD_VALUE;
- goto exit;
+ thread = audioflinger->checkMmapThread_l(patch->sources[0].ext.mix.handle);
+ if (thread == 0) {
+ ALOGW("createAudioPatch() bad playback I/O handle %d",
+ patch->sources[0].ext.mix.handle);
+ status = BAD_VALUE;
+ goto exit;
+ }
}
if (thread == audioflinger->primaryPlaybackThread_l()) {
AudioParameter param = AudioParameter();
- param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), (int)type);
+ param.addInt(String8(AudioParameter::keyRouting), (int)type);
audioflinger->broacastParametersToRecordThreads_l(param.toString());
}
@@ -611,20 +614,19 @@
sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
patch->sinks[0].ext.mix.handle);
if (thread == 0) {
- ALOGW("releaseAudioPatch() bad capture I/O handle %d",
- patch->sinks[0].ext.mix.handle);
- status = BAD_VALUE;
- break;
+ thread = audioflinger->checkMmapThread_l(patch->sinks[0].ext.mix.handle);
+ if (thread == 0) {
+ ALOGW("releaseAudioPatch() bad capture I/O handle %d",
+ patch->sinks[0].ext.mix.handle);
+ status = BAD_VALUE;
+ break;
+ }
}
status = thread->sendReleaseAudioPatchConfigEvent(removedPatch->mHalHandle);
} else {
AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
- if (audioHwDevice->version() < AUDIO_DEVICE_API_VERSION_3_0) {
- status = INVALID_OPERATION;
- break;
- }
- audio_hw_device_t *hwDevice = audioHwDevice->hwDevice();
- status = hwDevice->release_audio_patch(hwDevice, removedPatch->mHalHandle);
+ sp<DeviceHalInterface> hwDevice = audioHwDevice->hwDevice();
+ status = hwDevice->releaseAudioPatch(removedPatch->mHalHandle);
}
} break;
case AUDIO_PORT_TYPE_MIX: {
@@ -638,10 +640,13 @@
sp<ThreadBase> thread =
audioflinger->checkPlaybackThread_l(patch->sources[0].ext.mix.handle);
if (thread == 0) {
- ALOGW("releaseAudioPatch() bad playback I/O handle %d",
- patch->sources[0].ext.mix.handle);
- status = BAD_VALUE;
- break;
+ thread = audioflinger->checkMmapThread_l(patch->sources[0].ext.mix.handle);
+ if (thread == 0) {
+ ALOGW("releaseAudioPatch() bad playback I/O handle %d",
+ patch->sources[0].ext.mix.handle);
+ status = BAD_VALUE;
+ break;
+ }
}
status = thread->sendReleaseAudioPatchConfigEvent(removedPatch->mHalHandle);
} break;
@@ -687,13 +692,7 @@
}
AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
- if (audioHwDevice->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
- audio_hw_device_t *hwDevice = audioHwDevice->hwDevice();
- return hwDevice->set_audio_port_config(hwDevice, config);
- } else {
- return INVALID_OPERATION;
- }
- return NO_ERROR;
+ return audioHwDevice->hwDevice()->setAudioPortConfig(config);
}
} // namespace android
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 5601bde..f84ba08 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -32,9 +32,10 @@
void *buffer,
const sp<IMemory>& sharedBuffer,
audio_session_t sessionId,
- int uid,
+ uid_t uid,
audio_output_flags_t flags,
- track_type type);
+ track_type type,
+ audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
virtual ~Track();
virtual status_t initCheck() const;
@@ -58,6 +59,10 @@
bool isOffloaded() const
{ return (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0; }
bool isDirect() const { return (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0; }
+ bool isOffloadedOrDirect() const { return (mFlags
+ & (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD
+ | AUDIO_OUTPUT_FLAG_DIRECT)) != 0; }
+
status_t setParameters(const String8& keyValuePairs);
status_t attachAuxEffect(int EffectId);
void setAuxBuffer(int EffectId, int32_t *buffer);
@@ -75,6 +80,13 @@
virtual bool isFastTrack() const { return (mFlags & AUDIO_OUTPUT_FLAG_FAST) != 0; }
+// implement volume handling.
+ VolumeShaper::Status applyVolumeShaper(
+ const sp<VolumeShaper::Configuration>& configuration,
+ const sp<VolumeShaper::Operation>& operation);
+sp<VolumeShaper::State> getVolumeShaperState(int id);
+ sp<VolumeHandler> getVolumeHandler() { return mVolumeHandler; }
+
protected:
// for numerous
friend class PlaybackThread;
@@ -117,10 +129,9 @@
public:
void triggerEvents(AudioSystem::sync_event_t type);
- void invalidate();
+ virtual void invalidate();
void disable();
- bool isInvalid() const { return mIsInvalid; }
int fastIndex() const { return mFastIndex; }
protected:
@@ -152,6 +163,8 @@
ExtendedTimestamp mSinkTimestamp;
+ sp<VolumeHandler> mVolumeHandler; // handles multiple VolumeShaper configs and operations
+
private:
// The following fields are only for fast tracks, and should be in a subclass
int mFastIndex; // index within FastMixerState::mFastTracks[];
@@ -165,8 +178,7 @@
volatile float mCachedVolume; // combined master volume and stream type volume;
// 'volatile' means accessed without lock or
// barrier, but is read/written atomically
- bool mIsInvalid; // non-resettable latch, set by invalidate()
- AudioTrackServerProxy* mAudioTrackServerProxy;
+ sp<AudioTrackServerProxy> mAudioTrackServerProxy;
bool mResumeToStopping; // track was paused in stopping state.
bool mFlushHwPending; // track requests for thread flush
audio_output_flags_t mFlags;
@@ -188,7 +200,7 @@
audio_format_t format,
audio_channel_mask_t channelMask,
size_t frameCount,
- int uid);
+ uid_t uid);
virtual ~OutputTrack();
virtual status_t start(AudioSystem::sync_event_t event =
@@ -214,8 +226,8 @@
Vector < Buffer* > mBufferQueue;
AudioBufferProvider::Buffer mOutBuffer;
bool mActive;
- DuplicatingThread* const mSourceThread; // for waitTimeMs() in write()
- AudioTrackClientProxy* mClientProxy;
+ DuplicatingThread* const mSourceThread; // for waitTimeMs() in write()
+ sp<AudioTrackClientProxy> mClientProxy;
}; // end of OutputTrack
// playback track, used by PatchPanel
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index 123e033..72ebc93 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -30,9 +30,10 @@
size_t frameCount,
void *buffer,
audio_session_t sessionId,
- int uid,
+ uid_t uid,
audio_input_flags_t flags,
- track_type type);
+ track_type type,
+ audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
virtual ~RecordTrack();
virtual status_t initCheck() const;
@@ -41,7 +42,7 @@
void destroy();
- void invalidate();
+ virtual void invalidate();
// clear the buffer overflow flag
void clearOverflow() { mOverflow = false; }
// set the buffer overflow flag and return previous value
diff --git a/services/audioflinger/SpdifStreamOut.cpp b/services/audioflinger/SpdifStreamOut.cpp
index 004a068..a44ab2a 100644
--- a/services/audioflinger/SpdifStreamOut.cpp
+++ b/services/audioflinger/SpdifStreamOut.cpp
@@ -17,13 +17,12 @@
#define LOG_TAG "AudioFlinger"
//#define LOG_NDEBUG 0
-#include <hardware/audio.h>
+#include <system/audio.h>
#include <utils/Log.h>
#include <audio_utils/spdif/SPDIFEncoder.h>
#include "AudioHwDevice.h"
-#include "AudioStreamOut.h"
#include "SpdifStreamOut.h"
namespace android {
diff --git a/services/audioflinger/SpdifStreamOut.h b/services/audioflinger/SpdifStreamOut.h
index c870250..fc9bb6e 100644
--- a/services/audioflinger/SpdifStreamOut.h
+++ b/services/audioflinger/SpdifStreamOut.h
@@ -23,9 +23,7 @@
#include <system/audio.h>
-#include "AudioHwDevice.h"
#include "AudioStreamOut.h"
-#include "SpdifStreamOut.h"
#include <audio_utils/spdif/SPDIFEncoder.h>
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 52c7899..a6857fe 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -29,17 +29,20 @@
#include <cutils/properties.h>
#include <media/AudioParameter.h>
#include <media/AudioResamplerPublic.h>
+#include <media/RecordBufferConverter.h>
+#include <media/TypeConverter.h>
#include <utils/Log.h>
#include <utils/Trace.h>
#include <private/media/AudioTrackShared.h>
-#include <hardware/audio.h>
-#include <audio_effects/effect_ns.h>
-#include <audio_effects/effect_aec.h>
+#include <private/android_filesystem_config.h>
#include <audio_utils/mono_blend.h>
#include <audio_utils/primitives.h>
#include <audio_utils/format.h>
#include <audio_utils/minifloat.h>
+#include <system/audio_effects/effect_ns.h>
+#include <system/audio_effects/effect_aec.h>
+#include <system/audio.h>
// NBAIO implementations
#include <media/nbaio/AudioStreamInSource.h>
@@ -54,8 +57,6 @@
#include <powermanager/PowerManager.h>
#include "AudioFlinger.h"
-#include "AudioMixer.h"
-#include "BufferProviders.h"
#include "FastMixer.h"
#include "FastCapture.h"
#include "ServiceUtilities.h"
@@ -73,6 +74,9 @@
#include "AutoPark.h"
+#include <pthread.h>
+#include "TypedLogger.h"
+
// ----------------------------------------------------------------------------
// Note: the following macro is used for extremely verbose logging message. In
@@ -143,6 +147,12 @@
// Direct output thread minimum sleep time in idle or active(underrun) state
static const nsecs_t kDirectMinSleepTimeUs = 10000;
+// The universal constant for ubiquitous 20ms value. The value of 20ms seems to provide a good
+// balance between power consumption and latency, and allows threads to be scheduled reliably
+// by the CFS scheduler.
+// FIXME Express other hardcoded references to 20ms with references to this constant and move
+// it appropriately.
+#define FMS_20 20
// Whether to use fast mixer
static const enum {
@@ -437,173 +447,35 @@
return "RECORD";
case OFFLOAD:
return "OFFLOAD";
+ case MMAP:
+ return "MMAP";
default:
return "unknown";
}
}
-String8 devicesToString(audio_devices_t devices)
+std::string devicesToString(audio_devices_t devices)
{
- static const struct mapping {
- audio_devices_t mDevices;
- const char * mString;
- } mappingsOut[] = {
- {AUDIO_DEVICE_OUT_EARPIECE, "EARPIECE"},
- {AUDIO_DEVICE_OUT_SPEAKER, "SPEAKER"},
- {AUDIO_DEVICE_OUT_WIRED_HEADSET, "WIRED_HEADSET"},
- {AUDIO_DEVICE_OUT_WIRED_HEADPHONE, "WIRED_HEADPHONE"},
- {AUDIO_DEVICE_OUT_BLUETOOTH_SCO, "BLUETOOTH_SCO"},
- {AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET, "BLUETOOTH_SCO_HEADSET"},
- {AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT, "BLUETOOTH_SCO_CARKIT"},
- {AUDIO_DEVICE_OUT_BLUETOOTH_A2DP, "BLUETOOTH_A2DP"},
- {AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES,"BLUETOOTH_A2DP_HEADPHONES"},
- {AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER, "BLUETOOTH_A2DP_SPEAKER"},
- {AUDIO_DEVICE_OUT_AUX_DIGITAL, "AUX_DIGITAL"},
- {AUDIO_DEVICE_OUT_HDMI, "HDMI"},
- {AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET,"ANLG_DOCK_HEADSET"},
- {AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET,"DGTL_DOCK_HEADSET"},
- {AUDIO_DEVICE_OUT_USB_ACCESSORY, "USB_ACCESSORY"},
- {AUDIO_DEVICE_OUT_USB_DEVICE, "USB_DEVICE"},
- {AUDIO_DEVICE_OUT_TELEPHONY_TX, "TELEPHONY_TX"},
- {AUDIO_DEVICE_OUT_LINE, "LINE"},
- {AUDIO_DEVICE_OUT_HDMI_ARC, "HDMI_ARC"},
- {AUDIO_DEVICE_OUT_SPDIF, "SPDIF"},
- {AUDIO_DEVICE_OUT_FM, "FM"},
- {AUDIO_DEVICE_OUT_AUX_LINE, "AUX_LINE"},
- {AUDIO_DEVICE_OUT_SPEAKER_SAFE, "SPEAKER_SAFE"},
- {AUDIO_DEVICE_OUT_IP, "IP"},
- {AUDIO_DEVICE_OUT_BUS, "BUS"},
- {AUDIO_DEVICE_NONE, "NONE"}, // must be last
- }, mappingsIn[] = {
- {AUDIO_DEVICE_IN_COMMUNICATION, "COMMUNICATION"},
- {AUDIO_DEVICE_IN_AMBIENT, "AMBIENT"},
- {AUDIO_DEVICE_IN_BUILTIN_MIC, "BUILTIN_MIC"},
- {AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET, "BLUETOOTH_SCO_HEADSET"},
- {AUDIO_DEVICE_IN_WIRED_HEADSET, "WIRED_HEADSET"},
- {AUDIO_DEVICE_IN_AUX_DIGITAL, "AUX_DIGITAL"},
- {AUDIO_DEVICE_IN_VOICE_CALL, "VOICE_CALL"},
- {AUDIO_DEVICE_IN_TELEPHONY_RX, "TELEPHONY_RX"},
- {AUDIO_DEVICE_IN_BACK_MIC, "BACK_MIC"},
- {AUDIO_DEVICE_IN_REMOTE_SUBMIX, "REMOTE_SUBMIX"},
- {AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET, "ANLG_DOCK_HEADSET"},
- {AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET, "DGTL_DOCK_HEADSET"},
- {AUDIO_DEVICE_IN_USB_ACCESSORY, "USB_ACCESSORY"},
- {AUDIO_DEVICE_IN_USB_DEVICE, "USB_DEVICE"},
- {AUDIO_DEVICE_IN_FM_TUNER, "FM_TUNER"},
- {AUDIO_DEVICE_IN_TV_TUNER, "TV_TUNER"},
- {AUDIO_DEVICE_IN_LINE, "LINE"},
- {AUDIO_DEVICE_IN_SPDIF, "SPDIF"},
- {AUDIO_DEVICE_IN_BLUETOOTH_A2DP, "BLUETOOTH_A2DP"},
- {AUDIO_DEVICE_IN_LOOPBACK, "LOOPBACK"},
- {AUDIO_DEVICE_IN_IP, "IP"},
- {AUDIO_DEVICE_IN_BUS, "BUS"},
- {AUDIO_DEVICE_NONE, "NONE"}, // must be last
- };
- String8 result;
- audio_devices_t allDevices = AUDIO_DEVICE_NONE;
- const mapping *entry;
+ std::string result;
if (devices & AUDIO_DEVICE_BIT_IN) {
- devices &= ~AUDIO_DEVICE_BIT_IN;
- entry = mappingsIn;
+ InputDeviceConverter::maskToString(devices, result);
} else {
- entry = mappingsOut;
- }
- for ( ; entry->mDevices != AUDIO_DEVICE_NONE; entry++) {
- allDevices = (audio_devices_t) (allDevices | entry->mDevices);
- if (devices & entry->mDevices) {
- if (!result.isEmpty()) {
- result.append("|");
- }
- result.append(entry->mString);
- }
- }
- if (devices & ~allDevices) {
- if (!result.isEmpty()) {
- result.append("|");
- }
- result.appendFormat("0x%X", devices & ~allDevices);
- }
- if (result.isEmpty()) {
- result.append(entry->mString);
+ OutputDeviceConverter::maskToString(devices, result);
}
return result;
}
-String8 inputFlagsToString(audio_input_flags_t flags)
+std::string inputFlagsToString(audio_input_flags_t flags)
{
- static const struct mapping {
- audio_input_flags_t mFlag;
- const char * mString;
- } mappings[] = {
- {AUDIO_INPUT_FLAG_FAST, "FAST"},
- {AUDIO_INPUT_FLAG_HW_HOTWORD, "HW_HOTWORD"},
- {AUDIO_INPUT_FLAG_RAW, "RAW"},
- {AUDIO_INPUT_FLAG_SYNC, "SYNC"},
- {AUDIO_INPUT_FLAG_NONE, "NONE"}, // must be last
- };
- String8 result;
- audio_input_flags_t allFlags = AUDIO_INPUT_FLAG_NONE;
- const mapping *entry;
- for (entry = mappings; entry->mFlag != AUDIO_INPUT_FLAG_NONE; entry++) {
- allFlags = (audio_input_flags_t) (allFlags | entry->mFlag);
- if (flags & entry->mFlag) {
- if (!result.isEmpty()) {
- result.append("|");
- }
- result.append(entry->mString);
- }
- }
- if (flags & ~allFlags) {
- if (!result.isEmpty()) {
- result.append("|");
- }
- result.appendFormat("0x%X", flags & ~allFlags);
- }
- if (result.isEmpty()) {
- result.append(entry->mString);
- }
+ std::string result;
+ InputFlagConverter::maskToString(flags, result);
return result;
}
-String8 outputFlagsToString(audio_output_flags_t flags)
+std::string outputFlagsToString(audio_output_flags_t flags)
{
- static const struct mapping {
- audio_output_flags_t mFlag;
- const char * mString;
- } mappings[] = {
- {AUDIO_OUTPUT_FLAG_DIRECT, "DIRECT"},
- {AUDIO_OUTPUT_FLAG_PRIMARY, "PRIMARY"},
- {AUDIO_OUTPUT_FLAG_FAST, "FAST"},
- {AUDIO_OUTPUT_FLAG_DEEP_BUFFER, "DEEP_BUFFER"},
- {AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD,"COMPRESS_OFFLOAD"},
- {AUDIO_OUTPUT_FLAG_NON_BLOCKING, "NON_BLOCKING"},
- {AUDIO_OUTPUT_FLAG_HW_AV_SYNC, "HW_AV_SYNC"},
- {AUDIO_OUTPUT_FLAG_RAW, "RAW"},
- {AUDIO_OUTPUT_FLAG_SYNC, "SYNC"},
- {AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO, "IEC958_NONAUDIO"},
- {AUDIO_OUTPUT_FLAG_NONE, "NONE"}, // must be last
- };
- String8 result;
- audio_output_flags_t allFlags = AUDIO_OUTPUT_FLAG_NONE;
- const mapping *entry;
- for (entry = mappings; entry->mFlag != AUDIO_OUTPUT_FLAG_NONE; entry++) {
- allFlags = (audio_output_flags_t) (allFlags | entry->mFlag);
- if (flags & entry->mFlag) {
- if (!result.isEmpty()) {
- result.append("|");
- }
- result.append(entry->mString);
- }
- }
- if (flags & ~allFlags) {
- if (!result.isEmpty()) {
- result.append("|");
- }
- result.appendFormat("0x%X", flags & ~allFlags);
- }
- if (result.isEmpty()) {
- result.append(entry->mString);
- }
+ std::string result;
+ OutputFlagConverter::maskToString(flags, result);
return result;
}
@@ -641,7 +513,7 @@
// mName will be set by concrete (non-virtual) subclass
mDeathRecipient(new PMDeathRecipient(this)),
mSystemReady(systemReady),
- mNotifiedBatteryStart(false)
+ mSignalPending(false)
{
memset(&mPatch, 0, sizeof(struct audio_patch));
}
@@ -663,7 +535,7 @@
{
status_t status = initCheck();
if (status == NO_ERROR) {
- ALOGI("AudioFlinger's thread %p ready to run", this);
+ ALOGI("AudioFlinger's thread %p tid=%d ready to run", this, getTid());
} else {
ALOGE("No working audio driver found.");
}
@@ -744,16 +616,17 @@
sendConfigEvent_l(configEvent);
}
-void AudioFlinger::ThreadBase::sendPrioConfigEvent(pid_t pid, pid_t tid, int32_t prio)
+void AudioFlinger::ThreadBase::sendPrioConfigEvent(pid_t pid, pid_t tid, int32_t prio, bool forApp)
{
Mutex::Autolock _l(mLock);
- sendPrioConfigEvent_l(pid, tid, prio);
+ sendPrioConfigEvent_l(pid, tid, prio, forApp);
}
// sendPrioConfigEvent_l() must be called with ThreadBase::mLock held
-void AudioFlinger::ThreadBase::sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio)
+void AudioFlinger::ThreadBase::sendPrioConfigEvent_l(
+ pid_t pid, pid_t tid, int32_t prio, bool forApp)
{
- sp<ConfigEvent> configEvent = (ConfigEvent *)new PrioConfigEvent(pid, tid, prio);
+ sp<ConfigEvent> configEvent = (ConfigEvent *)new PrioConfigEvent(pid, tid, prio, forApp);
sendConfigEvent_l(configEvent);
}
@@ -763,12 +636,12 @@
sp<ConfigEvent> configEvent;
AudioParameter param(keyValuePair);
int value;
- if (param.getInt(String8(AUDIO_PARAMETER_MONO_OUTPUT), value) == NO_ERROR) {
+ if (param.getInt(String8(AudioParameter::keyMonoOutput), value) == NO_ERROR) {
setMasterMono_l(value != 0);
if (param.size() == 1) {
return NO_ERROR; // should be a solo parameter - we don't pass down
}
- param.remove(String8(AUDIO_PARAMETER_MONO_OUTPUT));
+ param.remove(String8(AudioParameter::keyMonoOutput));
configEvent = new SetParameterConfigEvent(param.toString());
} else {
configEvent = new SetParameterConfigEvent(keyValuePair);
@@ -813,7 +686,7 @@
case CFG_EVENT_PRIO: {
PrioConfigEventData *data = (PrioConfigEventData *)event->mData.get();
// FIXME Need to understand why this has to be done asynchronously
- int err = requestPriority(data->mPid, data->mTid, data->mPrio,
+ int err = requestPriority(data->mPid, data->mTid, data->mPrio, data->mForApp,
true /*asynchronous*/);
if (err != 0) {
ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d",
@@ -828,17 +701,29 @@
SetParameterConfigEventData *data = (SetParameterConfigEventData *)event->mData.get();
if (checkForNewParameter_l(data->mKeyValuePairs, event->mStatus)) {
configChanged = true;
+ mLocalLog.log("CFG_EVENT_SET_PARAMETER: (%s) configuration changed",
+ data->mKeyValuePairs.string());
}
} break;
case CFG_EVENT_CREATE_AUDIO_PATCH: {
+ const audio_devices_t oldDevice = getDevice();
CreateAudioPatchConfigEventData *data =
(CreateAudioPatchConfigEventData *)event->mData.get();
event->mStatus = createAudioPatch_l(&data->mPatch, &data->mHandle);
+ const audio_devices_t newDevice = getDevice();
+ mLocalLog.log("CFG_EVENT_CREATE_AUDIO_PATCH: old device %#x (%s) new device %#x (%s)",
+ (unsigned)oldDevice, devicesToString(oldDevice).c_str(),
+ (unsigned)newDevice, devicesToString(newDevice).c_str());
} break;
case CFG_EVENT_RELEASE_AUDIO_PATCH: {
+ const audio_devices_t oldDevice = getDevice();
ReleaseAudioPatchConfigEventData *data =
(ReleaseAudioPatchConfigEventData *)event->mData.get();
event->mStatus = releaseAudioPatch_l(data->mHandle);
+ const audio_devices_t newDevice = getDevice();
+ mLocalLog.log("CFG_EVENT_RELEASE_AUDIO_PATCH: old device %#x (%s) new device %#x (%s)",
+ (unsigned)oldDevice, devicesToString(oldDevice).c_str(),
+ (unsigned)newDevice, devicesToString(newDevice).c_str());
} break;
default:
ALOG_ASSERT(false, "processConfigEvents_l() unknown event type %d", event->mType);
@@ -926,23 +811,24 @@
char buffer[SIZE];
String8 result;
+ dprintf(fd, "\n%s thread %p, name %s, tid %d, type %d (%s):\n", isOutput() ? "Output" : "Input",
+ this, mThreadName, getTid(), type(), threadTypeToString(type()));
+
bool locked = AudioFlinger::dumpTryLock(mLock);
if (!locked) {
- dprintf(fd, "thread %p may be deadlocked\n", this);
+ dprintf(fd, " Thread may be deadlocked\n");
}
- dprintf(fd, " Thread name: %s\n", mThreadName);
dprintf(fd, " I/O handle: %d\n", mId);
- dprintf(fd, " TID: %d\n", getTid());
dprintf(fd, " Standby: %s\n", mStandby ? "yes" : "no");
dprintf(fd, " Sample rate: %u Hz\n", mSampleRate);
dprintf(fd, " HAL frame count: %zu\n", mFrameCount);
- dprintf(fd, " HAL format: 0x%x (%s)\n", mHALFormat, formatToString(mHALFormat));
+ dprintf(fd, " HAL format: 0x%x (%s)\n", mHALFormat, formatToString(mHALFormat).c_str());
dprintf(fd, " HAL buffer size: %zu bytes\n", mBufferSize);
dprintf(fd, " Channel count: %u\n", mChannelCount);
dprintf(fd, " Channel mask: 0x%08x (%s)\n", mChannelMask,
channelMaskToString(mChannelMask, mType != RECORD).string());
- dprintf(fd, " Processing format: 0x%x (%s)\n", mFormat, formatToString(mFormat));
+ dprintf(fd, " Processing format: 0x%x (%s)\n", mFormat, formatToString(mFormat).c_str());
dprintf(fd, " Processing frame size: %zu bytes\n", mFrameSize);
dprintf(fd, " Pending config events:");
size_t numConfig = mConfigEvents.size();
@@ -955,8 +841,9 @@
} else {
dprintf(fd, " none\n");
}
- dprintf(fd, " Output device: %#x (%s)\n", mOutDevice, devicesToString(mOutDevice).string());
- dprintf(fd, " Input device: %#x (%s)\n", mInDevice, devicesToString(mInDevice).string());
+ // Note: output device may be used by capture threads for effects such as AEC.
+ dprintf(fd, " Output device: %#x (%s)\n", mOutDevice, devicesToString(mOutDevice).c_str());
+ dprintf(fd, " Input device: %#x (%s)\n", mInDevice, devicesToString(mInDevice).c_str());
dprintf(fd, " Audio source: %d (%s)\n", mAudioSource, sourceToString(mAudioSource));
if (locked) {
@@ -982,10 +869,10 @@
}
}
-void AudioFlinger::ThreadBase::acquireWakeLock(int uid)
+void AudioFlinger::ThreadBase::acquireWakeLock()
{
Mutex::Autolock _l(mLock);
- acquireWakeLock_l(uid);
+ acquireWakeLock_l();
}
String16 AudioFlinger::ThreadBase::getWakeLockTag()
@@ -1001,42 +888,31 @@
return String16("AudioIn");
case OFFLOAD:
return String16("AudioOffload");
+ case MMAP:
+ return String16("Mmap");
default:
ALOG_ASSERT(false);
return String16("AudioUnknown");
}
}
-void AudioFlinger::ThreadBase::acquireWakeLock_l(int uid)
+void AudioFlinger::ThreadBase::acquireWakeLock_l()
{
getPowerManager_l();
if (mPowerManager != 0) {
sp<IBinder> binder = new BBinder();
- status_t status;
- if (uid >= 0) {
- status = mPowerManager->acquireWakeLockWithUid(POWERMANAGER_PARTIAL_WAKE_LOCK,
- binder,
- getWakeLockTag(),
- String16("audioserver"),
- uid,
- true /* FIXME force oneway contrary to .aidl */);
- } else {
- status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
+ // Uses AID_AUDIOSERVER for wakelock. updateWakeLockUids_l() updates with client uids.
+ status_t status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
binder,
getWakeLockTag(),
String16("audioserver"),
true /* FIXME force oneway contrary to .aidl */);
- }
if (status == NO_ERROR) {
mWakeLockToken = binder;
}
ALOGV("acquireWakeLock_l() %s status %d", mThreadName, status);
}
- if (!mNotifiedBatteryStart) {
- BatteryNotifier::getInstance().noteStartAudio();
- mNotifiedBatteryStart = true;
- }
gBoottime.acquire(mWakeLockToken);
mTimestamp.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_BOOTTIME] =
gBoottime.getBoottimeOffset();
@@ -1059,16 +935,6 @@
}
mWakeLockToken.clear();
}
-
- if (mNotifiedBatteryStart) {
- BatteryNotifier::getInstance().noteStopAudio();
- mNotifiedBatteryStart = false;
- }
-}
-
-void AudioFlinger::ThreadBase::updateWakeLockUids(const SortedVector<int> &uids) {
- Mutex::Autolock _l(mLock);
- updateWakeLockUids_l(uids);
}
void AudioFlinger::ThreadBase::getPowerManager_l() {
@@ -1085,8 +951,17 @@
}
}
-void AudioFlinger::ThreadBase::updateWakeLockUids_l(const SortedVector<int> &uids) {
+void AudioFlinger::ThreadBase::updateWakeLockUids_l(const SortedVector<uid_t> &uids) {
getPowerManager_l();
+
+#if !LOG_NDEBUG
+ std::stringstream s;
+ for (uid_t uid : uids) {
+ s << uid << " ";
+ }
+ ALOGD("updateWakeLockUids_l %s uids:%s", mThreadName, s.str().c_str());
+#endif
+
if (mWakeLockToken == NULL) { // token may be NULL if AudioFlinger::systemReady() not called.
if (mSystemReady) {
ALOGE("no wake lock to update, but system ready!");
@@ -1096,10 +971,10 @@
return;
}
if (mPowerManager != 0) {
- sp<IBinder> binder = new BBinder();
- status_t status;
- status = mPowerManager->updateWakeLockUids(mWakeLockToken, uids.size(), uids.array(),
- true /* FIXME force oneway contrary to .aidl */);
+ std::vector<int> uidsAsInt(uids.begin(), uids.end()); // powermanager expects uids as ints
+ status_t status = mPowerManager->updateWakeLockUids(
+ mWakeLockToken, uidsAsInt.size(), uidsAsInt.data(),
+ true /* FIXME force oneway contrary to .aidl */);
ALOGV("updateWakeLockUids_l() %s status %d", mThreadName, status);
}
}
@@ -1412,6 +1287,7 @@
bool chainCreated = false;
bool effectCreated = false;
bool effectRegistered = false;
+ audio_unique_id_t effectId = AUDIO_UNIQUE_ID_USE_UNSPECIFIED;
lStatus = initCheck();
if (lStatus != NO_ERROR) {
@@ -1445,15 +1321,16 @@
ALOGV("createEffect_l() got effect %p on chain %p", effect.get(), chain.get());
if (effect == 0) {
- audio_unique_id_t id = mAudioFlinger->nextUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT);
+ effectId = mAudioFlinger->nextUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT);
// Check CPU and memory usage
- lStatus = AudioSystem::registerEffect(desc, mId, chain->strategy(), sessionId, id);
+ lStatus = AudioSystem::registerEffect(
+ desc, mId, chain->strategy(), sessionId, effectId);
if (lStatus != NO_ERROR) {
goto Exit;
}
effectRegistered = true;
// create a new effect module if none present in the chain
- lStatus = chain->createEffect_l(effect, this, desc, id, sessionId, pinned);
+ lStatus = chain->createEffect_l(effect, this, desc, effectId, sessionId, pinned);
if (lStatus != NO_ERROR) {
goto Exit;
}
@@ -1482,7 +1359,7 @@
chain->removeEffect_l(effect);
}
if (effectRegistered) {
- AudioSystem::unregisterEffect(effect->id());
+ AudioSystem::unregisterEffect(effectId);
}
if (chainCreated) {
removeEffectChain_l(chain);
@@ -1669,6 +1546,83 @@
mPendingConfigEvents.clear();
}
+template <typename T>
+ssize_t AudioFlinger::ThreadBase::ActiveTracks<T>::add(const sp<T> &track) {
+ ssize_t index = mActiveTracks.indexOf(track);
+ if (index >= 0) {
+ ALOGW("ActiveTracks<T>::add track %p already there", track.get());
+ return index;
+ }
+ mActiveTracksGeneration++;
+ mLatestActiveTrack = track;
+ ++mBatteryCounter[track->uid()].second;
+ return mActiveTracks.add(track);
+}
+
+template <typename T>
+ssize_t AudioFlinger::ThreadBase::ActiveTracks<T>::remove(const sp<T> &track) {
+ ssize_t index = mActiveTracks.remove(track);
+ if (index < 0) {
+ ALOGW("ActiveTracks<T>::remove nonexistent track %p", track.get());
+ return index;
+ }
+ mActiveTracksGeneration++;
+ --mBatteryCounter[track->uid()].second;
+ // mLatestActiveTrack is not cleared even if is the same as track.
+ return index;
+}
+
+template <typename T>
+void AudioFlinger::ThreadBase::ActiveTracks<T>::clear() {
+ for (const sp<T> &track : mActiveTracks) {
+ BatteryNotifier::getInstance().noteStopAudio(track->uid());
+ }
+ mLastActiveTracksGeneration = mActiveTracksGeneration;
+ mActiveTracks.clear();
+ mLatestActiveTrack.clear();
+ mBatteryCounter.clear();
+}
+
+template <typename T>
+void AudioFlinger::ThreadBase::ActiveTracks<T>::updatePowerState(
+ sp<ThreadBase> thread, bool force) {
+ // Updates ActiveTracks client uids to the thread wakelock.
+ if (mActiveTracksGeneration != mLastActiveTracksGeneration || force) {
+ thread->updateWakeLockUids_l(getWakeLockUids());
+ mLastActiveTracksGeneration = mActiveTracksGeneration;
+ }
+
+ // Updates BatteryNotifier uids
+ for (auto it = mBatteryCounter.begin(); it != mBatteryCounter.end();) {
+ const uid_t uid = it->first;
+ ssize_t &previous = it->second.first;
+ ssize_t &current = it->second.second;
+ if (current > 0) {
+ if (previous == 0) {
+ BatteryNotifier::getInstance().noteStartAudio(uid);
+ }
+ previous = current;
+ ++it;
+ } else if (current == 0) {
+ if (previous > 0) {
+ BatteryNotifier::getInstance().noteStopAudio(uid);
+ }
+ it = mBatteryCounter.erase(it); // std::map<> is stable on iterator erase.
+ } else /* (current < 0) */ {
+ LOG_ALWAYS_FATAL("negative battery count %zd", current);
+ }
+ }
+}
+
+void AudioFlinger::ThreadBase::broadcast_l()
+{
+ // Thread could be blocked waiting for async
+ // so signal it to handle state changes immediately
+ // If threadLoop is currently unlocked a signal of mWaitWorkCV will
+ // be lost so we also flag to prevent it blocking on mWaitWorkCV
+ mSignalPending = true;
+ mWaitWorkCV.broadcast();
+}
// ----------------------------------------------------------------------------
// Playback
@@ -1695,7 +1649,6 @@
mSuspended(0), mBytesWritten(0),
mFramesWritten(0),
mSuspendedFrames(0),
- mActiveTracksGeneration(0),
// mStreamTypes[] initialized in constructor body
mOutput(output),
mLastWriteTime(-1), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
@@ -1707,7 +1660,6 @@
mUseAsyncWrite(false),
mWriteAckSequence(0),
mDrainSequence(0),
- mSignalPending(false),
mScreenState(AudioFlinger::mScreenState),
// index 0 is reserved for normal mixer's submix
mFastTrackAvailMask(((1 << FastMixerState::sMaxFastTracks) - 1) & ~1),
@@ -1758,6 +1710,8 @@
dumpInternals(fd, args);
dumpTracks(fd, args);
dumpEffectChains(fd, args);
+ dprintf(fd, " Local log:\n");
+ mLocalLog.dump(fd, " " /* prefix */, 40 /* lines */);
}
void AudioFlinger::PlaybackThread::dumpTracks(int fd, const Vector<String16>& args __unused)
@@ -1814,8 +1768,8 @@
result.append(buffer);
Track::appendDumpHeader(result);
for (size_t i = 0; i < numactive; ++i) {
- sp<Track> track = mActiveTracks[i].promote();
- if (track != 0 && mTracks.indexOf(track) < 0) {
+ sp<Track> track = mActiveTracks[i];
+ if (mTracks.indexOf(track) < 0) {
track->dump(buffer, SIZE, true);
result.append(buffer);
}
@@ -1827,8 +1781,6 @@
void AudioFlinger::PlaybackThread::dumpInternals(int fd, const Vector<String16>& args)
{
- dprintf(fd, "\nOutput thread %p type %d (%s):\n", this, type(), threadTypeToString(type()));
-
dumpBase(fd, args);
dprintf(fd, " Normal frame count: %zu\n", mNormalFrameCount);
@@ -1845,8 +1797,8 @@
dprintf(fd, " Standby delay ns=%lld\n", (long long)mStandbyDelayNs);
AudioStreamOut *output = mOutput;
audio_output_flags_t flags = output != NULL ? output->flags : AUDIO_OUTPUT_FLAG_NONE;
- String8 flagsAsString = outputFlagsToString(flags);
- dprintf(fd, " AudioStreamOut: %p flags %#x (%s)\n", output, flags, flagsAsString.string());
+ dprintf(fd, " AudioStreamOut: %p flags %#x (%s)\n",
+ output, flags, outputFlagsToString(flags).c_str());
dprintf(fd, " Frames written: %lld\n", (long long)mFramesWritten);
dprintf(fd, " Suspended frames: %lld\n", (long long)mSuspendedFrames);
if (mPipeSink.get() != nullptr) {
@@ -1854,7 +1806,7 @@
}
if (output != nullptr) {
dprintf(fd, " Hal stream dump:\n");
- (void)output->stream->common.dump(&output->stream->common, fd);
+ (void)output->stream->dump(fd);
}
}
@@ -1871,7 +1823,8 @@
ALOGV(" preExit()");
// FIXME this is using hard-coded strings but in the future, this functionality will be
// converted to use audio HAL extensions required to support tunneling
- mOutput->stream->common.set_parameters(&mOutput->stream->common, "exiting=1");
+ status_t result = mOutput->stream->setParameters(String8("exiting=1"));
+ ALOGE_IF(result != OK, "Error when setting parameters on exit: %d", result);
}
// PlaybackThread::createTrack_l() must be called with AudioFlinger::mLock held
@@ -1886,8 +1839,9 @@
audio_session_t sessionId,
audio_output_flags_t *flags,
pid_t tid,
- int uid,
- status_t *status)
+ uid_t uid,
+ status_t *status,
+ audio_port_handle_t portId)
{
size_t frameCount = *pFrameCount;
sp<Track> track;
@@ -1978,7 +1932,12 @@
&& audio_has_proportional_frames(format) && sharedBuffer == 0) {
// this must match AudioTrack.cpp calculateMinFrameCount().
// TODO: Move to a common library
- uint32_t latencyMs = mOutput->stream->get_latency(mOutput->stream);
+ uint32_t latencyMs = 0;
+ lStatus = mOutput->stream->getLatency(&latencyMs);
+ if (lStatus != OK) {
+ ALOGE("Error when retrieving output stream latency: %d", lStatus);
+ goto Exit;
+ }
uint32_t minBufCount = latencyMs / ((1000 * mNormalFrameCount) / mSampleRate);
if (minBufCount < 2) {
minBufCount = 2;
@@ -2064,7 +2023,7 @@
track = new Track(this, client, streamType, sampleRate, format,
channelMask, frameCount, NULL, sharedBuffer,
- sessionId, uid, *flags, TrackBase::TYPE_DEFAULT);
+ sessionId, uid, *flags, TrackBase::TYPE_DEFAULT, portId);
lStatus = track != 0 ? track->initCheck() : (status_t) NO_MEMORY;
if (lStatus != NO_ERROR) {
@@ -2086,7 +2045,7 @@
pid_t callingPid = IPCThreadState::self()->getCallingPid();
// we don't have CAP_SYS_NICE, nor do we want to have it as it's too powerful,
// so ask activity manager to do this on our behalf
- sendPrioConfigEvent_l(callingPid, tid, kPriorityAudioApp);
+ sendPrioConfigEvent_l(callingPid, tid, kPriorityAudioApp, true /*isForApp*/);
}
}
@@ -2109,11 +2068,11 @@
}
uint32_t AudioFlinger::PlaybackThread::latency_l() const
{
- if (initCheck() == NO_ERROR) {
- return correctLatency_l(mOutput->stream->get_latency(mOutput->stream));
- } else {
- return 0;
+ uint32_t latency;
+ if (initCheck() == NO_ERROR && mOutput->stream->getLatency(&latency) == OK) {
+ return correctLatency_l(latency);
}
+ return 0;
}
void AudioFlinger::PlaybackThread::setMasterVolume(float value)
@@ -2130,6 +2089,9 @@
void AudioFlinger::PlaybackThread::setMasterMute(bool muted)
{
+ if (isDuplicating()) {
+ return;
+ }
Mutex::Autolock _l(mLock);
// Don't apply master mute in SW if our HAL can do it for us.
if (mOutput && mOutput->audioHwDev &&
@@ -2212,9 +2174,6 @@
track->mResetDone = false;
track->mPresentationCompleteFrames = 0;
mActiveTracks.add(track);
- mWakeLockUids.add(track->uid());
- mActiveTracksGeneration++;
- mLatestActiveTrack = track;
sp<EffectChain> chain = getEffectChain_l(track->sessionId());
if (chain != 0) {
ALOGV("addTrack_l() starting track on chain %p for session %d", chain.get(),
@@ -2222,6 +2181,10 @@
chain->incActiveTrackCnt();
}
+ char buffer[256];
+ track->dump(buffer, ARRAY_SIZE(buffer), false /* active */);
+ mLocalLog.log("addTrack_l (%p) %s", track.get(), buffer + 4); // log for analysis
+
status = NO_ERROR;
}
@@ -2247,6 +2210,11 @@
void AudioFlinger::PlaybackThread::removeTrack_l(const sp<Track>& track)
{
track->triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
+
+ char buffer[256];
+ track->dump(buffer, ARRAY_SIZE(buffer), false /* active */);
+ mLocalLog.log("removeTrack_l (%p) %s", track.get(), buffer + 4); // log for analysis
+
mTracks.remove(track);
deleteTrackName_l(track->name());
// redundant as track is about to be destroyed, for dumpsys only
@@ -2265,27 +2233,14 @@
}
}
-void AudioFlinger::PlaybackThread::broadcast_l()
-{
- // Thread could be blocked waiting for async
- // so signal it to handle state changes immediately
- // If threadLoop is currently unlocked a signal of mWaitWorkCV will
- // be lost so we also flag to prevent it blocking on mWaitWorkCV
- mSignalPending = true;
- mWaitWorkCV.broadcast();
-}
-
String8 AudioFlinger::PlaybackThread::getParameters(const String8& keys)
{
Mutex::Autolock _l(mLock);
- if (initCheck() != NO_ERROR) {
- return String8();
+ String8 out_s8;
+ if (initCheck() == NO_ERROR && mOutput->stream->getParameters(keys, &out_s8) == OK) {
+ return out_s8;
}
-
- char *s = mOutput->stream->common.get_parameters(&mOutput->stream->common, keys.string());
- const String8 out_s8(s);
- free(s);
- return out_s8;
+ return String8();
}
void AudioFlinger::PlaybackThread::ioConfigChanged(audio_io_config_event event, pid_t pid) {
@@ -2314,21 +2269,18 @@
mAudioFlinger->ioConfigChanged(event, desc, pid);
}
-void AudioFlinger::PlaybackThread::writeCallback()
+void AudioFlinger::PlaybackThread::onWriteReady()
{
- ALOG_ASSERT(mCallbackThread != 0);
mCallbackThread->resetWriteBlocked();
}
-void AudioFlinger::PlaybackThread::drainCallback()
+void AudioFlinger::PlaybackThread::onDrainReady()
{
- ALOG_ASSERT(mCallbackThread != 0);
mCallbackThread->resetDraining();
}
-void AudioFlinger::PlaybackThread::errorCallback()
+void AudioFlinger::PlaybackThread::onError()
{
- ALOG_ASSERT(mCallbackThread != 0);
mCallbackThread->setAsyncError();
}
@@ -2352,30 +2304,6 @@
}
}
-// static
-int AudioFlinger::PlaybackThread::asyncCallback(stream_callback_event_t event,
- void *param __unused,
- void *cookie)
-{
- AudioFlinger::PlaybackThread *me = (AudioFlinger::PlaybackThread *)cookie;
- ALOGV("asyncCallback() event %d", event);
- switch (event) {
- case STREAM_CBK_EVENT_WRITE_READY:
- me->writeCallback();
- break;
- case STREAM_CBK_EVENT_DRAIN_READY:
- me->drainCallback();
- break;
- case STREAM_CBK_EVENT_ERROR:
- me->errorCallback();
- break;
- default:
- ALOGW("asyncCallback() unknown event %d", event);
- break;
- }
- return 0;
-}
-
void AudioFlinger::PlaybackThread::readOutputParameters_l()
{
// unfortunately we have no way of recovering from errors here, hence the LOG_ALWAYS_FATAL
@@ -2392,7 +2320,8 @@
mChannelCount = audio_channel_count_from_out_mask(mChannelMask);
// Get actual HAL format.
- mHALFormat = mOutput->stream->common.get_format(&mOutput->stream->common);
+ status_t result = mOutput->stream->getFormat(&mHALFormat);
+ LOG_ALWAYS_FATAL_IF(result != OK, "Error when retrieving output stream format: %d", result);
// Get format from the shim, which will be different than the HAL format
// if playing compressed audio over HDMI passthrough.
mFormat = mOutput->getFormat();
@@ -2405,17 +2334,17 @@
mFormat);
}
mFrameSize = mOutput->getFrameSize();
- mBufferSize = mOutput->stream->common.get_buffer_size(&mOutput->stream->common);
+ result = mOutput->stream->getBufferSize(&mBufferSize);
+ LOG_ALWAYS_FATAL_IF(result != OK,
+ "Error when retrieving output stream buffer size: %d", result);
mFrameCount = mBufferSize / mFrameSize;
if (mFrameCount & 15) {
ALOGW("HAL output buffer size is %zu frames but AudioMixer requires multiples of 16 frames",
mFrameCount);
}
- if ((mOutput->flags & AUDIO_OUTPUT_FLAG_NON_BLOCKING) &&
- (mOutput->stream->set_callback != NULL)) {
- if (mOutput->stream->set_callback(mOutput->stream,
- AudioFlinger::PlaybackThread::asyncCallback, this) == 0) {
+ if (mOutput->flags & AUDIO_OUTPUT_FLAG_NON_BLOCKING) {
+ if (mOutput->stream->setCallback(this) == OK) {
mUseAsyncWrite = true;
mCallbackThread = new AudioFlinger::AsyncCallbackThread(this);
}
@@ -2423,14 +2352,15 @@
mHwSupportsPause = false;
if (mOutput->flags & AUDIO_OUTPUT_FLAG_DIRECT) {
- if (mOutput->stream->pause != NULL) {
- if (mOutput->stream->resume != NULL) {
+ bool supportsPause = false, supportsResume = false;
+ if (mOutput->stream->supportsPauseAndResume(&supportsPause, &supportsResume) == OK) {
+ if (supportsPause && supportsResume) {
mHwSupportsPause = true;
- } else {
+ } else if (supportsPause) {
ALOGW("direct output implements pause but not resume");
+ } else if (supportsResume) {
+ ALOGW("direct output implements resume but not pause");
}
- } else if (mOutput->stream->resume != NULL) {
- ALOGW("direct output implements resume but not pause");
}
}
if (!mHwSupportsPause && mOutput->flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) {
@@ -2620,12 +2550,12 @@
}
// this method must always be called either with ThreadBase mLock held or inside the thread loop
-audio_stream_t* AudioFlinger::PlaybackThread::stream() const
+sp<StreamHalInterface> AudioFlinger::PlaybackThread::stream() const
{
if (mOutput == NULL) {
return NULL;
}
- return &mOutput->stream->common;
+ return mOutput->stream;
}
uint32_t AudioFlinger::PlaybackThread::activeSleepTimeUs() const
@@ -2763,7 +2693,8 @@
void AudioFlinger::PlaybackThread::threadLoop_drain()
{
- if (mOutput->stream->drain) {
+ bool supportsDrain = false;
+ if (mOutput->stream->supportsDrain(&supportsDrain) == OK && supportsDrain) {
ALOGV("draining %s", (mMixerStatus == MIXER_DRAIN_TRACK) ? "early" : "full");
if (mUseAsyncWrite) {
ALOGW_IF(mDrainSequence & 1, "threadLoop_drain(): out of sequence drain request");
@@ -2771,9 +2702,8 @@
ALOG_ASSERT(mCallbackThread != 0);
mCallbackThread->setDraining(mDrainSequence);
}
- mOutput->stream->drain(mOutput->stream,
- (mMixerStatus == MIXER_DRAIN_TRACK) ? AUDIO_DRAIN_EARLY_NOTIFY
- : AUDIO_DRAIN_ALL);
+ status_t result = mOutput->stream->drain(mMixerStatus == MIXER_DRAIN_TRACK);
+ ALOGE_IF(result != OK, "Error when draining stream: %d", result);
}
}
@@ -2785,6 +2715,11 @@
sp<Track> track = mTracks[i];
track->invalidate();
}
+ // Clear ActiveTracks to update BatteryNotifier in case active tracks remain.
+ // After we exit there are no more track changes sent to BatteryNotifier
+ // because that requires an active threadLoop.
+ // TODO: should we decActiveTrackCnt() of the cleared track effect chain?
+ mActiveTracks.clear();
}
}
@@ -2849,9 +2784,14 @@
status_t AudioFlinger::PlaybackThread::addEffectChain_l(const sp<EffectChain>& chain)
{
audio_session_t session = chain->sessionId();
- int16_t* buffer = reinterpret_cast<int16_t*>(mEffectBufferEnabled
- ? mEffectBuffer : mSinkBuffer);
- bool ownsBuffer = false;
+ sp<EffectBufferHalInterface> halInBuffer, halOutBuffer;
+ status_t result = EffectBufferHalInterface::mirror(
+ mEffectBufferEnabled ? mEffectBuffer : mSinkBuffer,
+ mEffectBufferEnabled ? mEffectBufferSize : mSinkBufferSize,
+ &halInBuffer);
+ if (result != OK) return result;
+ halOutBuffer = halInBuffer;
+ int16_t *buffer = reinterpret_cast<int16_t*>(halInBuffer->externalData());
ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session);
if (session > AUDIO_SESSION_OUTPUT_MIX) {
@@ -2859,10 +2799,13 @@
// the sink buffer as input
if (mType != DIRECT) {
size_t numSamples = mNormalFrameCount * mChannelCount;
- buffer = new int16_t[numSamples];
- memset(buffer, 0, numSamples * sizeof(int16_t));
- ALOGV("addEffectChain_l() creating new input buffer %p session %d", buffer, session);
- ownsBuffer = true;
+ status_t result = EffectBufferHalInterface::allocate(
+ numSamples * sizeof(int16_t),
+ &halInBuffer);
+ if (result != OK) return result;
+ buffer = halInBuffer->audioBuffer()->s16;
+ ALOGV("addEffectChain_l() creating new input buffer %p session %d",
+ buffer, session);
}
// Attach all tracks with same session ID to this chain.
@@ -2877,11 +2820,7 @@
}
// indicate all active tracks in the chain
- for (size_t i = 0 ; i < mActiveTracks.size() ; ++i) {
- sp<Track> track = mActiveTracks[i].promote();
- if (track == 0) {
- continue;
- }
+ for (const sp<Track> &track : mActiveTracks) {
if (session == track->sessionId()) {
ALOGV("addEffectChain_l() activating track %p on session %d", track.get(), session);
chain->incActiveTrackCnt();
@@ -2889,9 +2828,8 @@
}
}
chain->setThread(this);
- chain->setInBuffer(buffer, ownsBuffer);
- chain->setOutBuffer(reinterpret_cast<int16_t*>(mEffectBufferEnabled
- ? mEffectBuffer : mSinkBuffer));
+ chain->setInBuffer(halInBuffer);
+ chain->setOutBuffer(halOutBuffer);
// Effect chain for session AUDIO_SESSION_OUTPUT_STAGE is inserted at end of effect
// chains list in order to be processed last as it contains output stage effects.
// Effect chain for session AUDIO_SESSION_OUTPUT_MIX is inserted before
@@ -2928,11 +2866,7 @@
if (chain == mEffectChains[i]) {
mEffectChains.removeAt(i);
// detach all active tracks from the chain
- for (size_t i = 0 ; i < mActiveTracks.size() ; ++i) {
- sp<Track> track = mActiveTracks[i].promote();
- if (track == 0) {
- continue;
- }
+ for (const sp<Track> &track : mActiveTracks) {
if (session == track->sessionId()) {
ALOGV("removeEffectChain_l(): stopping track on chain %p for session Id: %d",
chain.get(), session);
@@ -2996,6 +2930,8 @@
bool AudioFlinger::PlaybackThread::threadLoop()
{
+ logWriterTLS = mNBLogWriter.get();
+
Vector< sp<Track> > tracksToRemove;
mStandbyTimeNs = systemTime();
@@ -3009,8 +2945,6 @@
// FIXME could this be made local to while loop?
writeFrames = 0;
- int lastGeneration = 0;
-
cacheParameters_l();
mSleepTimeUs = mIdleSleepTimeUs;
@@ -3028,10 +2962,20 @@
// and then that string will be logged at the next convenient opportunity.
const char *logString = NULL;
- checkSilentMode_l();
+ // Estimated time for next buffer to be written to hal. This is used only on
+ // suspended mode (for now) to help schedule the wait time until next iteration.
+ nsecs_t timeLoopNextNs = 0;
+ checkSilentMode_l();
+#if 0
+ int z = 0; // used in logFormat example
+#endif
while (!exitPending())
{
+ // Log merge requests are performed during AudioFlinger binder transactions, but
+ // that does not cover audio playback. It's requested here for that reason.
+ mAudioFlinger->requestLogMerge();
+
cpuStats.sample(myName);
Vector< sp<EffectChain> > effectChains;
@@ -3108,10 +3052,9 @@
mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = mLastWriteTime == -1
? systemTime() : mLastWriteTime;
}
- const size_t size = mActiveTracks.size();
- for (size_t i = 0; i < size; ++i) {
- sp<Track> t = mActiveTracks[i].promote();
- if (t != 0 && !t->isFastTrack()) {
+
+ for (const sp<Track> &t : mActiveTracks) {
+ if (!t->isFastTrack()) {
t->updateTrackFrameInfo(
t->mAudioTrackServerProxy->framesReleased(),
mFramesWritten,
@@ -3119,7 +3062,17 @@
}
}
}
-
+#if 0
+ // logFormat example
+ if (z % 100 == 0) {
+ timespec ts;
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ LOGT("This is an integer %d, this is a float %f, this is my "
+ "pid %p %% %s %t", 42, 3.14, "and this is a timestamp", ts);
+ LOGT("A deceptive null-terminated string %\0");
+ }
+ ++z;
+#endif
saveOutputTracks();
if (mSignalPending) {
// A signal was raised while we were unlocked
@@ -3132,11 +3085,14 @@
if (!keepWakeLock()) {
releaseWakeLock_l();
released = true;
- mWakeLockUids.clear();
- mActiveTracksGeneration++;
}
- ALOGV("wait async completion");
- mWaitWorkCV.wait(mLock);
+
+ const int64_t waitNs = computeWaitTimeNs_l();
+ ALOGV("wait async completion (wait time: %lld)", (long long)waitNs);
+ status_t status = mWaitWorkCV.waitRelative(mLock, waitNs);
+ if (status == TIMED_OUT) {
+ mSignalPending = true; // if timeout recheck everything
+ }
ALOGV("async completion/wake");
if (released) {
acquireWakeLock_l();
@@ -3167,8 +3123,6 @@
}
releaseWakeLock_l();
- mWakeLockUids.clear();
- mActiveTracksGeneration++;
// wait until we have something to do...
ALOGV("%s going to sleep", myName.string());
mWaitWorkCV.wait(mLock);
@@ -3193,12 +3147,7 @@
// mMixerStatusIgnoringFastTracks is also updated internally
mMixerStatus = prepareTracks_l(&tracksToRemove);
- // compare with previously applied list
- if (lastGeneration != mActiveTracksGeneration) {
- // update wakelock
- updateWakeLockUids_l(mWakeLockUids);
- lastGeneration = mActiveTracksGeneration;
- }
+ mActiveTracks.updatePowerState(this);
// prevent any changes in effect chain list and in each effect chain
// during mixing and effect process as the audio buffers could be deleted
@@ -3382,6 +3331,29 @@
} else {
ATRACE_BEGIN("sleep");
Mutex::Autolock _l(mLock);
+ // suspended requires accurate metering of sleep time.
+ if (isSuspended()) {
+ // advance by expected sleepTime
+ timeLoopNextNs += microseconds((nsecs_t)mSleepTimeUs);
+ const nsecs_t nowNs = systemTime();
+
+ // compute expected next time vs current time.
+ // (negative deltas are treated as delays).
+ nsecs_t deltaNs = timeLoopNextNs - nowNs;
+ if (deltaNs < -kMaxNextBufferDelayNs) {
+ // Delays longer than the max allowed trigger a reset.
+ ALOGV("DelayNs: %lld, resetting timeLoopNextNs", (long long) deltaNs);
+ deltaNs = microseconds((nsecs_t)mSleepTimeUs);
+ timeLoopNextNs = nowNs + deltaNs;
+ } else if (deltaNs < 0) {
+ // Delays within the max delay allowed: zero the delta/sleepTime
+ // to help the system catch up in the next iteration(s)
+ ALOGV("DelayNs: %lld, catching-up", (long long) deltaNs);
+ deltaNs = 0;
+ }
+ // update sleep time (which is >= 0)
+ mSleepTimeUs = deltaNs / 1000;
+ }
if (!mSignalPending && mConfigEvents.isEmpty() && !exitPending()) {
mWaitWorkCV.waitRelative(mLock, microseconds((nsecs_t)mSleepTimeUs));
}
@@ -3416,8 +3388,6 @@
}
releaseWakeLock();
- mWakeLockUids.clear();
- mActiveTracksGeneration++;
ALOGV("Thread %p type %d exiting", this, mType);
return false;
@@ -3431,8 +3401,6 @@
for (size_t i=0 ; i<count ; i++) {
const sp<Track>& track = tracksToRemove.itemAt(i);
mActiveTracks.remove(track);
- mWakeLockUids.remove(track->uid());
- mActiveTracksGeneration++;
ALOGV("removeTracks_l removing track on session %d", track->sessionId());
sp<EffectChain> chain = getEffectChain_l(track->sessionId());
if (chain != 0) {
@@ -3442,6 +3410,10 @@
}
if (track->isTerminated()) {
removeTrack_l(track);
+ } else { // inactive but not terminated
+ char buffer[256];
+ track->dump(buffer, ARRAY_SIZE(buffer), false /* active */);
+ mLocalLog.log("removeTracks_l(%p) %s", track.get(), buffer + 4);
}
}
}
@@ -3458,11 +3430,9 @@
}
return status;
}
- if ((mType == OFFLOAD || mType == DIRECT)
- && mOutput != NULL && mOutput->stream->get_presentation_position) {
+ if ((mType == OFFLOAD || mType == DIRECT) && mOutput != NULL) {
uint64_t position64;
- int ret = mOutput->getPresentationPosition(&position64, ×tamp.mTime);
- if (ret == 0) {
+ if (mOutput->getPresentationPosition(&position64, ×tamp.mTime) == OK) {
timestamp.mPosition = (uint32_t)position64;
return NO_ERROR;
}
@@ -3529,14 +3499,13 @@
mOutDevice = type;
mPatch = *patch;
- if (mOutput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
- audio_hw_device_t *hwDevice = mOutput->audioHwDev->hwDevice();
- status = hwDevice->create_audio_patch(hwDevice,
- patch->num_sources,
- patch->sources,
- patch->num_sinks,
- patch->sinks,
- handle);
+ if (mOutput->audioHwDev->supportsAudioPatches()) {
+ sp<DeviceHalInterface> hwDevice = mOutput->audioHwDev->hwDevice();
+ status = hwDevice->createAudioPatch(patch->num_sources,
+ patch->sources,
+ patch->num_sinks,
+ patch->sinks,
+ handle);
} else {
char *address;
if (strcmp(patch->sinks[0].ext.device.address, "") != 0) {
@@ -3549,9 +3518,8 @@
}
AudioParameter param = AudioParameter(String8(address));
free(address);
- param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), (int)type);
- status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
- param.toString().string());
+ param.addInt(String8(AudioParameter::keyRouting), (int)type);
+ status = mOutput->stream->setParameters(param.toString());
*handle = AUDIO_PATCH_HANDLE_NONE;
}
if (configChanged) {
@@ -3581,14 +3549,13 @@
mOutDevice = AUDIO_DEVICE_NONE;
- if (mOutput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
- audio_hw_device_t *hwDevice = mOutput->audioHwDev->hwDevice();
- status = hwDevice->release_audio_patch(hwDevice, handle);
+ if (mOutput->audioHwDev->supportsAudioPatches()) {
+ sp<DeviceHalInterface> hwDevice = mOutput->audioHwDev->hwDevice();
+ status = hwDevice->releaseAudioPatch(handle);
} else {
AudioParameter param;
- param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), 0);
- status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
- param.toString().string());
+ param.addInt(String8(AudioParameter::keyRouting), 0);
+ status = mOutput->stream->setParameters(param.toString());
}
return status;
}
@@ -3662,9 +3629,17 @@
break;
case FastMixer_Static:
case FastMixer_Dynamic:
- initFastMixer = mFrameCount < mNormalFrameCount;
+ // FastMixer was designed to operate with a HAL that pulls at a regular rate,
+ // where the period is less than an experimentally determined threshold that can be
+ // scheduled reliably with CFS. However, the BT A2DP HAL is
+ // bursty (does not pull at a regular rate) and so cannot operate with FastMixer.
+ initFastMixer = mFrameCount < mNormalFrameCount
+ && (mOutDevice & AUDIO_DEVICE_OUT_ALL_A2DP) == 0;
break;
}
+ ALOGW_IF(initFastMixer == false && mFrameCount < mNormalFrameCount,
+ "FastMixer is preferred for this sink as frameCount %zu is less than threshold %zu",
+ mFrameCount, mNormalFrameCount);
if (initFastMixer) {
audio_format_t fastMixerFormat;
if (mMixerBufferEnabled && mEffectBufferEnabled) {
@@ -3763,7 +3738,8 @@
// start the fast mixer
mFastMixer->run("FastMixer", PRIORITY_URGENT_AUDIO);
pid_t tid = mFastMixer->getTid();
- sendPrioConfigEvent(getpid_cached, tid, kPriorityFastMixer);
+ sendPrioConfigEvent(getpid_cached, tid, kPriorityFastMixer, false);
+ stream()->setHalThreadPriority(kPriorityFastMixer);
#ifdef AUDIO_WATCHDOG
// create and start the watchdog
@@ -3893,6 +3869,15 @@
FastMixerStateQueue *sq = mFastMixer->sq();
FastMixerState *state = sq->begin();
if (!(state->mCommand & FastMixerState::IDLE)) {
+ // Report any frames trapped in the Monopipe
+ MonoPipe *monoPipe = (MonoPipe *)mPipeSink.get();
+ const long long pipeFrames = monoPipe->maxFrames() - monoPipe->availableToWrite();
+ mLocalLog.log("threadLoop_standby: framesWritten:%lld suspendedFrames:%lld "
+ "monoPipeWritten:%lld monoPipeLeft:%lld",
+ (long long)mFramesWritten, (long long)mSuspendedFrames,
+ (long long)mPipeSink->framesWritten(), pipeFrames);
+ mLocalLog.log("threadLoop_standby: %s", mTimestamp.toString().c_str());
+
state->mCommand = FastMixerState::COLD_IDLE;
state->mColdFutexAddr = &mFastMixerFutex;
state->mColdGen++;
@@ -4047,19 +4032,18 @@
FastMixerState *state = NULL;
bool didModify = false;
FastMixerStateQueue::block_t block = FastMixerStateQueue::BLOCK_UNTIL_PUSHED;
+ bool coldIdle = false;
if (mFastMixer != 0) {
sq = mFastMixer->sq();
state = sq->begin();
+ coldIdle = state->mCommand == FastMixerState::COLD_IDLE;
}
mMixerBufferValid = false; // mMixerBuffer has no valid data until appropriate tracks found.
mEffectBufferValid = false; // mEffectBuffer has no valid data until tracks found.
for (size_t i=0 ; i<count ; i++) {
- const sp<Track> t = mActiveTracks[i].promote();
- if (t == 0) {
- continue;
- }
+ const sp<Track> t = mActiveTracks[i];
// this const just means the local variable doesn't change
Track* const track = t.get();
@@ -4154,8 +4138,11 @@
// We have consumed all the buffers of this track.
// This would be incomplete if we auto-paused on underrun
{
- size_t audioHALFrames =
- (mOutput->stream->get_latency(mOutput->stream)*mSampleRate) / 1000;
+ uint32_t latency = 0;
+ status_t result = mOutput->stream->getLatency(&latency);
+ ALOGE_IF(result != OK,
+ "Error when retrieving output stream latency: %d", result);
+ size_t audioHALFrames = (latency * mSampleRate) / 1000;
int64_t framesWritten = mBytesWritten / mFrameSize;
if (!(mStandby || track->presentationComplete(framesWritten, audioHALFrames))) {
// track stays in active list until presentation is complete
@@ -4194,7 +4181,11 @@
}
// cache the combined master volume and stream type volume for fast mixer; this
// lacks any synchronization or barrier so VolumeProvider may read a stale value
- track->mCachedVolume = masterVolume * mStreamTypes[track->streamType()].volume;
+ const float vh = track->getVolumeHandler()->getVolume(
+ track->mAudioTrackServerProxy->framesReleased()).first;
+ track->mCachedVolume = masterVolume
+ * mStreamTypes[track->streamType()].volume
+ * vh;
++fastTracks;
} else {
// was it previously active?
@@ -4323,7 +4314,7 @@
// read original volumes with volume control
float typeVolume = mStreamTypes[track->streamType()].volume;
float v = masterVolume * typeVolume;
- AudioTrackServerProxy *proxy = track->mAudioTrackServerProxy;
+ sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
gain_minifloat_packed_t vlr = proxy->getVolumeLR();
vlf = float_from_gain(gain_minifloat_unpack_left(vlr));
vrf = float_from_gain(gain_minifloat_unpack_right(vlr));
@@ -4336,9 +4327,11 @@
ALOGV("Track right volume out of range: %.3g", vrf);
vrf = GAIN_FLOAT_UNITY;
}
- // now apply the master volume and stream type volume
- vlf *= v;
- vrf *= v;
+ const float vh = track->getVolumeHandler()->getVolume(
+ track->mAudioTrackServerProxy->framesReleased()).first;
+ // now apply the master volume and stream type volume and shaper volume
+ vlf *= v * vh;
+ vrf *= v * vh;
// assuming master volume and stream type volume each go up to 1.0,
// then derive vl and vr as U8.24 versions for the effect chain
const float scaleto8_24 = MAX_GAIN_INT * MAX_GAIN_INT;
@@ -4539,7 +4532,15 @@
}
if (sq != NULL) {
sq->end(didModify);
- sq->push(block);
+ // No need to block if the FastMixer is in COLD_IDLE as the FastThread
+ // is not active. (We BLOCK_UNTIL_ACKED when entering COLD_IDLE
+ // when bringing the output sink into standby.)
+ //
+ // We will get the latest FastMixer state when we come out of COLD_IDLE.
+ //
+ // This occurs with BT suspend when we idle the FastMixer with
+ // active tracks, which may be added or removed.
+ sq->push(coldIdle ? FastMixerStateQueue::BLOCK_NEVER : block);
}
#ifdef AUDIO_WATCHDOG
if (pauseAudioWatchdog && mAudioWatchdog != 0) {
@@ -4552,11 +4553,7 @@
size_t i = __builtin_ctz(resetMask);
ALOG_ASSERT(i < count);
resetMask &= ~(1 << i);
- sp<Track> t = mActiveTracks[i].promote();
- if (t == 0) {
- continue;
- }
- Track* track = t.get();
+ sp<Track> track = mActiveTracks[i];
ALOG_ASSERT(track->isFastTrack() && track->isStopped());
track->reset();
}
@@ -4608,7 +4605,7 @@
{
uint32_t trackCount = 0;
for (size_t i = 0; i < mTracks.size() ; i++) {
- if (mTracks[i]->uid() == (int)uid) {
+ if (mTracks[i]->uid() == uid) {
trackCount++;
}
}
@@ -4711,14 +4708,12 @@
}
if (status == NO_ERROR) {
- status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
- keyValuePair.string());
+ status = mOutput->stream->setParameters(keyValuePair);
if (!mStandby && status == INVALID_OPERATION) {
mOutput->standby();
mStandby = true;
mBytesWritten = 0;
- status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
- keyValuePair.string());
+ status = mOutput->stream->setParameters(keyValuePair);
}
if (status == NO_ERROR && reconfig) {
readOutputParameters_l();
@@ -4747,34 +4742,42 @@
dprintf(fd, " AudioMixer tracks: 0x%08x\n", mAudioMixer->trackNames());
dprintf(fd, " Master mono: %s\n", mMasterMono ? "on" : "off");
- // Make a non-atomic copy of fast mixer dump state so it won't change underneath us
- // while we are dumping it. It may be inconsistent, but it won't mutate!
- // This is a large object so we place it on the heap.
- // FIXME 25972958: Need an intelligent copy constructor that does not touch unused pages.
- const FastMixerDumpState *copy = new FastMixerDumpState(mFastMixerDumpState);
- copy->dump(fd);
- delete copy;
+ if (hasFastMixer()) {
+ dprintf(fd, " FastMixer thread %p tid=%d", mFastMixer.get(), mFastMixer->getTid());
+
+ // Make a non-atomic copy of fast mixer dump state so it won't change underneath us
+ // while we are dumping it. It may be inconsistent, but it won't mutate!
+ // This is a large object so we place it on the heap.
+ // FIXME 25972958: Need an intelligent copy constructor that does not touch unused pages.
+ const FastMixerDumpState *copy = new FastMixerDumpState(mFastMixerDumpState);
+ copy->dump(fd);
+ delete copy;
#ifdef STATE_QUEUE_DUMP
- // Similar for state queue
- StateQueueObserverDump observerCopy = mStateQueueObserverDump;
- observerCopy.dump(fd);
- StateQueueMutatorDump mutatorCopy = mStateQueueMutatorDump;
- mutatorCopy.dump(fd);
+ // Similar for state queue
+ StateQueueObserverDump observerCopy = mStateQueueObserverDump;
+ observerCopy.dump(fd);
+ StateQueueMutatorDump mutatorCopy = mStateQueueMutatorDump;
+ mutatorCopy.dump(fd);
#endif
+#ifdef AUDIO_WATCHDOG
+ if (mAudioWatchdog != 0) {
+ // Make a non-atomic copy of audio watchdog dump so it won't change underneath us
+ AudioWatchdogDump wdCopy = mAudioWatchdogDump;
+ wdCopy.dump(fd);
+ }
+#endif
+
+ } else {
+ dprintf(fd, " No FastMixer\n");
+ }
+
#ifdef TEE_SINK
// Write the tee output to a .wav file
dumpTee(fd, mTeeSource, mId);
#endif
-#ifdef AUDIO_WATCHDOG
- if (mAudioWatchdog != 0) {
- // Make a non-atomic copy of audio watchdog dump so it won't change underneath us
- AudioWatchdogDump wdCopy = mAudioWatchdogDump;
- wdCopy.dump(fd);
- }
-#endif
}
uint32_t AudioFlinger::MixerThread::idleSleepTimeUs() const
@@ -4812,6 +4815,7 @@
ThreadBase::type_t type, bool systemReady)
: PlaybackThread(audioFlinger, output, id, device, type, systemReady)
// mLeftVolFloat, mRightVolFloat
+ , mVolumeShaperActive(false)
{
}
@@ -4828,7 +4832,15 @@
} else {
float typeVolume = mStreamTypes[track->streamType()].volume;
float v = mMasterVolume * typeVolume;
- AudioTrackServerProxy *proxy = track->mAudioTrackServerProxy;
+ sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
+
+ // Get volumeshaper scaling
+ std::pair<float /* volume */, bool /* active */>
+ vh = track->getVolumeHandler()->getVolume(
+ track->mAudioTrackServerProxy->framesReleased());
+ v *= vh.first;
+ mVolumeShaperActive = vh.second;
+
gain_minifloat_packed_t vlr = proxy->getVolumeLR();
left = float_from_gain(gain_minifloat_unpack_left(vlr));
if (left > GAIN_FLOAT_UNITY) {
@@ -4859,9 +4871,8 @@
left = (float)vl / (1 << 24);
right = (float)vr / (1 << 24);
}
- if (mOutput->stream->set_volume) {
- mOutput->stream->set_volume(mOutput->stream, left, right);
- }
+ status_t result = mOutput->stream->setVolume(left, right);
+ ALOGE_IF(result != OK, "Error when setting output stream volume: %d", result);
}
}
}
@@ -4869,7 +4880,7 @@
void AudioFlinger::DirectOutputThread::onAddNewTrack_l()
{
sp<Track> previousTrack = mPreviousTrack.promote();
- sp<Track> latestTrack = mLatestActiveTrack.promote();
+ sp<Track> latestTrack = mActiveTracks.getLatest();
if (previousTrack != 0 && latestTrack != 0) {
if (mType == DIRECT) {
@@ -4895,13 +4906,7 @@
bool doHwResume = false;
// find out which tracks need to be processed
- for (size_t i = 0; i < count; i++) {
- sp<Track> t = mActiveTracks[i].promote();
- // The track died recently
- if (t == 0) {
- continue;
- }
-
+ for (const sp<Track> &t : mActiveTracks) {
if (t->isInvalid()) {
ALOGW("An invalidated track shouldn't be in active list");
tracksToRemove->add(t);
@@ -4916,7 +4921,7 @@
// In theory an older track could underrun and restart after the new one starts
// but as we only care about the transition phase between two tracks on a
// direct output, it is not a problem to ignore the underrun case.
- sp<Track> l = mLatestActiveTrack.promote();
+ sp<Track> l = mActiveTracks.getLatest();
bool last = l.get() == track;
if (track->isPausing()) {
@@ -5071,13 +5076,15 @@
// if resume is received before pause is executed.
if (mHwSupportsPause && !mStandby &&
(doHwPause || (mFlushPending && !mHwPaused && (count != 0)))) {
- mOutput->stream->pause(mOutput->stream);
+ status_t result = mOutput->stream->pause();
+ ALOGE_IF(result != OK, "Error when pausing output stream: %d", result);
}
if (mFlushPending) {
flushHw_l();
}
if (mHwSupportsPause && !mStandby && doHwResume) {
- mOutput->stream->resume(mOutput->stream);
+ status_t result = mOutput->stream->resume();
+ ALOGE_IF(result != OK, "Error when resuming output stream: %d", result);
}
// remove all the tracks that need to be...
removeTracks_l(*tracksToRemove);
@@ -5218,14 +5225,12 @@
}
}
if (status == NO_ERROR) {
- status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
- keyValuePair.string());
+ status = mOutput->stream->setParameters(keyValuePair);
if (!mStandby && status == INVALID_OPERATION) {
mOutput->standby();
mStandby = true;
mBytesWritten = 0;
- status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
- keyValuePair.string());
+ status = mOutput->stream->setParameters(keyValuePair);
}
if (status == NO_ERROR && reconfig) {
readOutputParameters_l();
@@ -5292,6 +5297,13 @@
mFlushPending = false;
}
+int64_t AudioFlinger::DirectOutputThread::computeWaitTimeNs_l() const {
+ // If a VolumeShaper is active, we must wake up periodically to update volume.
+ const int64_t NS_PER_MS = 1000000;
+ return mVolumeShaperActive ?
+ kMinNormalSinkBufferSizeMs * NS_PER_MS : PlaybackThread::computeWaitTimeNs_l();
+}
+
// ----------------------------------------------------------------------------
AudioFlinger::AsyncCallbackThread::AsyncCallbackThread(
@@ -5450,12 +5462,7 @@
ALOGV("OffloadThread::prepareTracks_l active tracks %zu", count);
// find out which tracks need to be processed
- for (size_t i = 0; i < count; i++) {
- sp<Track> t = mActiveTracks[i].promote();
- // The track died recently
- if (t == 0) {
- continue;
- }
+ for (const sp<Track> &t : mActiveTracks) {
Track* const track = t.get();
#ifdef VERY_VERY_VERBOSE_LOGGING
audio_track_cblk_t* cblk = track->cblk();
@@ -5464,7 +5471,7 @@
// In theory an older track could underrun and restart after the new one starts
// but as we only care about the transition phase between two tracks on a
// direct output, it is not a problem to ignore the underrun case.
- sp<Track> l = mLatestActiveTrack.promote();
+ sp<Track> l = mActiveTracks.getLatest();
bool last = l.get() == track;
if (track->isInvalid()) {
@@ -5612,8 +5619,11 @@
// Drain has completed or we are in standby, signal presentation complete
if (!(mDrainSequence & 1) || !last || mStandby) {
track->mState = TrackBase::STOPPED;
- size_t audioHALFrames =
- (mOutput->stream->get_latency(mOutput->stream)*mSampleRate) / 1000;
+ uint32_t latency = 0;
+ status_t result = mOutput->stream->getLatency(&latency);
+ ALOGE_IF(result != OK,
+ "Error when retrieving output stream latency: %d", result);
+ size_t audioHALFrames = (latency * mSampleRate) / 1000;
int64_t framesWritten =
mBytesWritten / mOutput->getFrameSize();
track->presentationComplete(framesWritten, audioHALFrames);
@@ -5625,16 +5635,15 @@
// fill a buffer, then remove it from active list.
if (--(track->mRetryCount) <= 0) {
bool running = false;
- if (mOutput->stream->get_presentation_position != nullptr) {
- uint64_t position = 0;
- struct timespec unused;
- // The running check restarts the retry counter at least once.
- int ret = mOutput->stream->get_presentation_position(
- mOutput->stream, &position, &unused);
- if (ret == NO_ERROR && position != mOffloadUnderrunPosition) {
- running = true;
- mOffloadUnderrunPosition = position;
- }
+ uint64_t position = 0;
+ struct timespec unused;
+ // The running check restarts the retry counter at least once.
+ status_t ret = mOutput->stream->getPresentationPosition(&position, &unused);
+ if (ret == NO_ERROR && position != mOffloadUnderrunPosition) {
+ running = true;
+ mOffloadUnderrunPosition = position;
+ }
+ if (ret == NO_ERROR) {
ALOGVV("underrun counter, running(%d): %lld vs %lld", running,
(long long)position, (long long)mOffloadUnderrunPosition);
}
@@ -5644,7 +5653,7 @@
ALOGV("OffloadThread: BUFFER TIMEOUT: remove(%d) from active list",
track->name());
tracksToRemove->add(track);
- // indicate to client process that the track was disabled because of underrun;
+ // tell client process that the track was disabled because of underrun;
// it will then automatically call start() when data is available
track->disable();
}
@@ -5662,13 +5671,15 @@
// before flush and then resume HW. This can happen in case of pause/flush/resume
// if resume is received before pause is executed.
if (!mStandby && (doHwPause || (mFlushPending && !mHwPaused && (count != 0)))) {
- mOutput->stream->pause(mOutput->stream);
+ status_t result = mOutput->stream->pause();
+ ALOGE_IF(result != OK, "Error when pausing output stream: %d", result);
}
if (mFlushPending) {
flushHw_l();
}
if (!mStandby && doHwResume) {
- mOutput->stream->resume(mOutput->stream);
+ status_t result = mOutput->stream->resume();
+ ALOGE_IF(result != OK, "Error when resuming output stream: %d", result);
}
// remove all the tracks that need to be...
@@ -5904,6 +5915,7 @@
MixerThread::cacheParameters_l();
}
+
// ----------------------------------------------------------------------------
// Record
// ----------------------------------------------------------------------------
@@ -5919,8 +5931,8 @@
#endif
) :
ThreadBase(audioFlinger, id, outDevice, inDevice, RECORD, systemReady),
- mInput(input), mActiveTracksGen(0), mRsmpInBuffer(NULL),
- // mRsmpInFrames and mRsmpInFramesP2 are set by readInputParameters_l()
+ mInput(input), mRsmpInBuffer(NULL),
+ // mRsmpInFrames, mRsmpInFramesP2, and mRsmpInFramesOA are set by readInputParameters_l()
mRsmpInRear(0)
#ifdef TEE_SINK
, mTeeSink(teeSink)
@@ -5972,7 +5984,8 @@
if (initFastCapture) {
// create a Pipe for FastCapture to write to, and for us and fast tracks to read from
NBAIO_Format format = mInputSource->format();
- size_t pipeFramesP2 = roundup(mSampleRate / 25); // double-buffering of 20 ms each
+ // quadruple-buffering of 20 ms each; this ensures we can sleep for 20ms in RecordThread
+ size_t pipeFramesP2 = roundup(4 * FMS_20 * mSampleRate / 1000);
size_t pipeSize = pipeFramesP2 * Format_frameSize(format);
void *pipeBuffer;
const sp<MemoryDealer> roHeap(readOnlyHeap());
@@ -6029,7 +6042,8 @@
// start the fast capture
mFastCapture->run("FastCapture", ANDROID_PRIORITY_URGENT_AUDIO);
pid_t tid = mFastCapture->getTid();
- sendPrioConfigEvent(getpid_cached, tid, kPriorityFastCapture);
+ sendPrioConfigEvent(getpid_cached, tid, kPriorityFastCapture, false);
+ stream()->setHalThreadPriority(kPriorityFastCapture);
#ifdef AUDIO_WATCHDOG
// FIXME
#endif
@@ -6068,6 +6082,18 @@
run(mThreadName, PRIORITY_URGENT_AUDIO);
}
+void AudioFlinger::RecordThread::preExit()
+{
+ ALOGV(" preExit()");
+ Mutex::Autolock _l(mLock);
+ for (size_t i = 0; i < mTracks.size(); i++) {
+ sp<RecordTrack> track = mTracks[i];
+ track->invalidate();
+ }
+ mActiveTracks.clear();
+ mStartStopCond.broadcast();
+}
+
bool AudioFlinger::RecordThread::threadLoop()
{
nsecs_t lastWarning = 0;
@@ -6076,25 +6102,9 @@
reacquire_wakelock:
sp<RecordTrack> activeTrack;
- int activeTracksGen;
{
Mutex::Autolock _l(mLock);
- size_t size = mActiveTracks.size();
- activeTracksGen = mActiveTracksGen;
- if (size > 0) {
- // FIXME an arbitrary choice
- activeTrack = mActiveTracks[0];
- acquireWakeLock_l(activeTrack->uid());
- if (size > 1) {
- SortedVector<int> tmp;
- for (size_t i = 0; i < size; i++) {
- tmp.add(mActiveTracks[i]->uid());
- }
- updateWakeLockUids_l(tmp);
- }
- } else {
- acquireWakeLock_l(-1);
- }
+ acquireWakeLock_l();
}
// used to request a deferred sleep, to be executed later while mutex is unlocked
@@ -6146,15 +6156,6 @@
goto reacquire_wakelock;
}
- if (mActiveTracksGen != activeTracksGen) {
- activeTracksGen = mActiveTracksGen;
- SortedVector<int> tmp;
- for (size_t i = 0; i < size; i++) {
- tmp.add(mActiveTracks[i]->uid());
- }
- updateWakeLockUids_l(tmp);
- }
-
bool doBroadcast = false;
bool allStopped = true;
for (size_t i = 0; i < size; ) {
@@ -6167,7 +6168,6 @@
}
removeTrack_l(activeTrack);
mActiveTracks.remove(activeTrack);
- mActiveTracksGen++;
size--;
continue;
}
@@ -6177,7 +6177,6 @@
case TrackBase::PAUSING:
mActiveTracks.remove(activeTrack);
- mActiveTracksGen++;
doBroadcast = true;
size--;
continue;
@@ -6217,6 +6216,8 @@
}
}
+ mActiveTracks.updatePowerState(this);
+
if (allStopped) {
standbyIfNotAlreadyInStandby();
}
@@ -6302,20 +6303,41 @@
// If an NBAIO source is present, use it to read the normal capture's data
if (mPipeSource != 0) {
size_t framesToRead = mBufferSize / mFrameSize;
+ framesToRead = min(mRsmpInFramesOA - rear, mRsmpInFramesP2 / 2);
framesRead = mPipeSource->read((uint8_t*)mRsmpInBuffer + rear * mFrameSize,
framesToRead);
- if (framesRead == 0) {
- // since pipe is non-blocking, simulate blocking input
- sleepUs = (framesToRead * 1000000LL) / mSampleRate;
+ // since pipe is non-blocking, simulate blocking input by waiting for 1/2 of
+ // buffer size or at least for 20ms.
+ size_t sleepFrames = max(
+ min(mPipeFramesP2, mRsmpInFramesP2) / 2, FMS_20 * mSampleRate / 1000);
+ if (framesRead <= (ssize_t) sleepFrames) {
+ sleepUs = (sleepFrames * 1000000LL) / mSampleRate;
+ }
+ if (framesRead < 0) {
+ status_t status = (status_t) framesRead;
+ switch (status) {
+ case OVERRUN:
+ ALOGW("overrun on read from pipe");
+ framesRead = 0;
+ break;
+ case NEGOTIATE:
+ ALOGE("re-negotiation is needed");
+ framesRead = -1; // Will cause an attempt to recover.
+ break;
+ default:
+ ALOGE("unknown error %d on read from pipe", status);
+ break;
+ }
}
// otherwise use the HAL / AudioStreamIn directly
} else {
ATRACE_BEGIN("read");
- ssize_t bytesRead = mInput->stream->read(mInput->stream,
- (uint8_t*)mRsmpInBuffer + rear * mFrameSize, mBufferSize);
+ size_t bytesRead;
+ status_t result = mInput->stream->read(
+ (uint8_t*)mRsmpInBuffer + rear * mFrameSize, mBufferSize, &bytesRead);
ATRACE_END();
- if (bytesRead < 0) {
- framesRead = bytesRead;
+ if (result < 0) {
+ framesRead = result;
} else {
framesRead = bytesRead / mFrameSize;
}
@@ -6327,10 +6349,9 @@
mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = systemTime();
// Update server timestamp with kernel stats
- if (mInput->stream->get_capture_position != nullptr
- && mPipeSource.get() == nullptr /* don't obtain for FastCapture, could block */) {
+ if (mPipeSource.get() == nullptr /* don't obtain for FastCapture, could block */) {
int64_t position, time;
- int ret = mInput->stream->get_capture_position(mInput->stream, &position, &time);
+ int ret = mInput->stream->getCapturePosition(&position, &time);
if (ret == NO_ERROR) {
mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = position;
mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] = time;
@@ -6495,7 +6516,6 @@
track->invalidate();
}
mActiveTracks.clear();
- mActiveTracksGen++;
mStartStopCond.broadcast();
}
@@ -6539,7 +6559,18 @@
sq->end(false /*didModify*/);
}
}
- mInput->stream->common.standby(&mInput->stream->common);
+ status_t result = mInput->stream->standby();
+ ALOGE_IF(result != OK, "Error when putting input stream into standby: %d", result);
+
+ // If going into standby, flush the pipe source.
+ if (mPipeSource.get() != nullptr) {
+ const ssize_t flushed = mPipeSource->flush();
+ if (flushed > 0) {
+ ALOGV("Input standby flushed PipeSource %zd frames", flushed);
+ mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER] += flushed;
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = systemTime();
+ }
+ }
}
// RecordThread::createRecordTrack_l() must be called with AudioFlinger::mLock held
@@ -6551,10 +6582,11 @@
size_t *pFrameCount,
audio_session_t sessionId,
size_t *notificationFrames,
- int uid,
+ uid_t uid,
audio_input_flags_t *flags,
pid_t tid,
- status_t *status)
+ status_t *status,
+ audio_port_handle_t portId)
{
size_t frameCount = *pFrameCount;
sp<RecordTrack> track;
@@ -6662,7 +6694,7 @@
track = new RecordTrack(this, client, sampleRate,
format, channelMask, frameCount, NULL, sessionId, uid,
- *flags, TrackBase::TYPE_DEFAULT);
+ *flags, TrackBase::TYPE_DEFAULT, portId);
lStatus = track->initCheck();
if (lStatus != NO_ERROR) {
@@ -6682,7 +6714,7 @@
pid_t callingPid = IPCThreadState::self()->getCallingPid();
// we don't have CAP_SYS_NICE, nor do we want to have it as it's too powerful,
// so ask activity manager to do this on our behalf
- sendPrioConfigEvent_l(callingPid, tid, kPriorityAudioApp);
+ sendPrioConfigEvent_l(callingPid, tid, kPriorityAudioApp, true);
}
}
@@ -6738,7 +6770,6 @@
// or using a separate command thread
recordTrack->mState = TrackBase::STARTING_1;
mActiveTracks.add(recordTrack);
- mActiveTracksGen++;
status_t status = NO_ERROR;
if (recordTrack->isExternalTrack()) {
mLock.unlock();
@@ -6747,7 +6778,6 @@
// FIXME should verify that recordTrack is still in mActiveTracks
if (status != NO_ERROR) {
mActiveTracks.remove(recordTrack);
- mActiveTracksGen++;
recordTrack->clearSyncStartEvent();
ALOGV("RecordThread::start error %d", status);
return status;
@@ -6797,7 +6827,7 @@
bool AudioFlinger::RecordThread::stop(RecordThread::RecordTrack* recordTrack) {
ALOGV("RecordThread::stop");
AutoMutex _l(mLock);
- if (mActiveTracks.indexOf(recordTrack) != 0 || recordTrack->mState == TrackBase::PAUSING) {
+ if (mActiveTracks.indexOf(recordTrack) < 0 || recordTrack->mState == TrackBase::PAUSING) {
return false;
}
// note that threadLoop may still be processing the track at this point [without lock]
@@ -6811,7 +6841,7 @@
// FIXME incorrect usage of wait: no explicit predicate or loop
mStartStopCond.wait(mLock);
// if we have been restarted, recordTrack is in mActiveTracks here
- if (exitPending() || mActiveTracks.indexOf(recordTrack) != 0) {
+ if (exitPending() || mActiveTracks.indexOf(recordTrack) < 0) {
ALOGV("Record stopped OK");
return true;
}
@@ -6878,10 +6908,12 @@
void AudioFlinger::RecordThread::dumpInternals(int fd, const Vector<String16>& args)
{
- dprintf(fd, "\nInput thread %p:\n", this);
-
dumpBase(fd, args);
+ AudioStreamIn *input = mInput;
+ audio_input_flags_t flags = input != NULL ? input->flags : AUDIO_INPUT_FLAG_NONE;
+ dprintf(fd, " AudioStreamIn: %p flags %#x (%s)\n",
+ input, flags, inputFlagsToString(flags).c_str());
if (mActiveTracks.size() == 0) {
dprintf(fd, " No active record clients\n");
}
@@ -7040,252 +7072,6 @@
buffer->frameCount = 0;
}
-AudioFlinger::RecordThread::RecordBufferConverter::RecordBufferConverter(
- audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
- uint32_t srcSampleRate,
- audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
- uint32_t dstSampleRate) :
- mSrcChannelMask(AUDIO_CHANNEL_INVALID), // updateParameters will set following vars
- // mSrcFormat
- // mSrcSampleRate
- // mDstChannelMask
- // mDstFormat
- // mDstSampleRate
- // mSrcChannelCount
- // mDstChannelCount
- // mDstFrameSize
- mBuf(NULL), mBufFrames(0), mBufFrameSize(0),
- mResampler(NULL),
- mIsLegacyDownmix(false),
- mIsLegacyUpmix(false),
- mRequiresFloat(false),
- mInputConverterProvider(NULL)
-{
- (void)updateParameters(srcChannelMask, srcFormat, srcSampleRate,
- dstChannelMask, dstFormat, dstSampleRate);
-}
-
-AudioFlinger::RecordThread::RecordBufferConverter::~RecordBufferConverter() {
- free(mBuf);
- delete mResampler;
- delete mInputConverterProvider;
-}
-
-size_t AudioFlinger::RecordThread::RecordBufferConverter::convert(void *dst,
- AudioBufferProvider *provider, size_t frames)
-{
- if (mInputConverterProvider != NULL) {
- mInputConverterProvider->setBufferProvider(provider);
- provider = mInputConverterProvider;
- }
-
- if (mResampler == NULL) {
- ALOGVV("NO RESAMPLING sampleRate:%u mSrcFormat:%#x mDstFormat:%#x",
- mSrcSampleRate, mSrcFormat, mDstFormat);
-
- AudioBufferProvider::Buffer buffer;
- for (size_t i = frames; i > 0; ) {
- buffer.frameCount = i;
- status_t status = provider->getNextBuffer(&buffer);
- if (status != OK || buffer.frameCount == 0) {
- frames -= i; // cannot fill request.
- break;
- }
- // format convert to destination buffer
- convertNoResampler(dst, buffer.raw, buffer.frameCount);
-
- dst = (int8_t*)dst + buffer.frameCount * mDstFrameSize;
- i -= buffer.frameCount;
- provider->releaseBuffer(&buffer);
- }
- } else {
- ALOGVV("RESAMPLING mSrcSampleRate:%u mDstSampleRate:%u mSrcFormat:%#x mDstFormat:%#x",
- mSrcSampleRate, mDstSampleRate, mSrcFormat, mDstFormat);
-
- // reallocate buffer if needed
- if (mBufFrameSize != 0 && mBufFrames < frames) {
- free(mBuf);
- mBufFrames = frames;
- (void)posix_memalign(&mBuf, 32, mBufFrames * mBufFrameSize);
- }
- // resampler accumulates, but we only have one source track
- memset(mBuf, 0, frames * mBufFrameSize);
- frames = mResampler->resample((int32_t*)mBuf, frames, provider);
- // format convert to destination buffer
- convertResampler(dst, mBuf, frames);
- }
- return frames;
-}
-
-status_t AudioFlinger::RecordThread::RecordBufferConverter::updateParameters(
- audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
- uint32_t srcSampleRate,
- audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
- uint32_t dstSampleRate)
-{
- // quick evaluation if there is any change.
- if (mSrcFormat == srcFormat
- && mSrcChannelMask == srcChannelMask
- && mSrcSampleRate == srcSampleRate
- && mDstFormat == dstFormat
- && mDstChannelMask == dstChannelMask
- && mDstSampleRate == dstSampleRate) {
- return NO_ERROR;
- }
-
- ALOGV("RecordBufferConverter updateParameters srcMask:%#x dstMask:%#x"
- " srcFormat:%#x dstFormat:%#x srcRate:%u dstRate:%u",
- srcChannelMask, dstChannelMask, srcFormat, dstFormat, srcSampleRate, dstSampleRate);
- const bool valid =
- audio_is_input_channel(srcChannelMask)
- && audio_is_input_channel(dstChannelMask)
- && audio_is_valid_format(srcFormat) && audio_is_linear_pcm(srcFormat)
- && audio_is_valid_format(dstFormat) && audio_is_linear_pcm(dstFormat)
- && (srcSampleRate <= dstSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX)
- ; // no upsampling checks for now
- if (!valid) {
- return BAD_VALUE;
- }
-
- mSrcFormat = srcFormat;
- mSrcChannelMask = srcChannelMask;
- mSrcSampleRate = srcSampleRate;
- mDstFormat = dstFormat;
- mDstChannelMask = dstChannelMask;
- mDstSampleRate = dstSampleRate;
-
- // compute derived parameters
- mSrcChannelCount = audio_channel_count_from_in_mask(srcChannelMask);
- mDstChannelCount = audio_channel_count_from_in_mask(dstChannelMask);
- mDstFrameSize = mDstChannelCount * audio_bytes_per_sample(mDstFormat);
-
- // do we need to resample?
- delete mResampler;
- mResampler = NULL;
- if (mSrcSampleRate != mDstSampleRate) {
- mResampler = AudioResampler::create(AUDIO_FORMAT_PCM_FLOAT,
- mSrcChannelCount, mDstSampleRate);
- mResampler->setSampleRate(mSrcSampleRate);
- mResampler->setVolume(AudioMixer::UNITY_GAIN_FLOAT, AudioMixer::UNITY_GAIN_FLOAT);
- }
-
- // are we running legacy channel conversion modes?
- mIsLegacyDownmix = (mSrcChannelMask == AUDIO_CHANNEL_IN_STEREO
- || mSrcChannelMask == AUDIO_CHANNEL_IN_FRONT_BACK)
- && mDstChannelMask == AUDIO_CHANNEL_IN_MONO;
- mIsLegacyUpmix = mSrcChannelMask == AUDIO_CHANNEL_IN_MONO
- && (mDstChannelMask == AUDIO_CHANNEL_IN_STEREO
- || mDstChannelMask == AUDIO_CHANNEL_IN_FRONT_BACK);
-
- // do we need to process in float?
- mRequiresFloat = mResampler != NULL || mIsLegacyDownmix || mIsLegacyUpmix;
-
- // do we need a staging buffer to convert for destination (we can still optimize this)?
- // we use mBufFrameSize > 0 to indicate both frame size as well as buffer necessity
- if (mResampler != NULL) {
- mBufFrameSize = max(mSrcChannelCount, FCC_2)
- * audio_bytes_per_sample(AUDIO_FORMAT_PCM_FLOAT);
- } else if (mIsLegacyUpmix || mIsLegacyDownmix) { // legacy modes always float
- mBufFrameSize = mDstChannelCount * audio_bytes_per_sample(AUDIO_FORMAT_PCM_FLOAT);
- } else if (mSrcChannelMask != mDstChannelMask && mDstFormat != mSrcFormat) {
- mBufFrameSize = mDstChannelCount * audio_bytes_per_sample(mSrcFormat);
- } else {
- mBufFrameSize = 0;
- }
- mBufFrames = 0; // force the buffer to be resized.
-
- // do we need an input converter buffer provider to give us float?
- delete mInputConverterProvider;
- mInputConverterProvider = NULL;
- if (mRequiresFloat && mSrcFormat != AUDIO_FORMAT_PCM_FLOAT) {
- mInputConverterProvider = new ReformatBufferProvider(
- audio_channel_count_from_in_mask(mSrcChannelMask),
- mSrcFormat,
- AUDIO_FORMAT_PCM_FLOAT,
- 256 /* provider buffer frame count */);
- }
-
- // do we need a remixer to do channel mask conversion
- if (!mIsLegacyDownmix && !mIsLegacyUpmix && mSrcChannelMask != mDstChannelMask) {
- (void) memcpy_by_index_array_initialization_from_channel_mask(
- mIdxAry, ARRAY_SIZE(mIdxAry), mDstChannelMask, mSrcChannelMask);
- }
- return NO_ERROR;
-}
-
-void AudioFlinger::RecordThread::RecordBufferConverter::convertNoResampler(
- void *dst, const void *src, size_t frames)
-{
- // src is native type unless there is legacy upmix or downmix, whereupon it is float.
- if (mBufFrameSize != 0 && mBufFrames < frames) {
- free(mBuf);
- mBufFrames = frames;
- (void)posix_memalign(&mBuf, 32, mBufFrames * mBufFrameSize);
- }
- // do we need to do legacy upmix and downmix?
- if (mIsLegacyUpmix || mIsLegacyDownmix) {
- void *dstBuf = mBuf != NULL ? mBuf : dst;
- if (mIsLegacyUpmix) {
- upmix_to_stereo_float_from_mono_float((float *)dstBuf,
- (const float *)src, frames);
- } else /*mIsLegacyDownmix */ {
- downmix_to_mono_float_from_stereo_float((float *)dstBuf,
- (const float *)src, frames);
- }
- if (mBuf != NULL) {
- memcpy_by_audio_format(dst, mDstFormat, mBuf, AUDIO_FORMAT_PCM_FLOAT,
- frames * mDstChannelCount);
- }
- return;
- }
- // do we need to do channel mask conversion?
- if (mSrcChannelMask != mDstChannelMask) {
- void *dstBuf = mBuf != NULL ? mBuf : dst;
- memcpy_by_index_array(dstBuf, mDstChannelCount,
- src, mSrcChannelCount, mIdxAry, audio_bytes_per_sample(mSrcFormat), frames);
- if (dstBuf == dst) {
- return; // format is the same
- }
- }
- // convert to destination buffer
- const void *convertBuf = mBuf != NULL ? mBuf : src;
- memcpy_by_audio_format(dst, mDstFormat, convertBuf, mSrcFormat,
- frames * mDstChannelCount);
-}
-
-void AudioFlinger::RecordThread::RecordBufferConverter::convertResampler(
- void *dst, /*not-a-const*/ void *src, size_t frames)
-{
- // src buffer format is ALWAYS float when entering this routine
- if (mIsLegacyUpmix) {
- ; // mono to stereo already handled by resampler
- } else if (mIsLegacyDownmix
- || (mSrcChannelMask == mDstChannelMask && mSrcChannelCount == 1)) {
- // the resampler outputs stereo for mono input channel (a feature?)
- // must convert to mono
- downmix_to_mono_float_from_stereo_float((float *)src,
- (const float *)src, frames);
- } else if (mSrcChannelMask != mDstChannelMask) {
- // convert to mono channel again for channel mask conversion (could be skipped
- // with further optimization).
- if (mSrcChannelCount == 1) {
- downmix_to_mono_float_from_stereo_float((float *)src,
- (const float *)src, frames);
- }
- // convert to destination format (in place, OK as float is larger than other types)
- if (mDstFormat != AUDIO_FORMAT_PCM_FLOAT) {
- memcpy_by_audio_format(src, mDstFormat, src, AUDIO_FORMAT_PCM_FLOAT,
- frames * mSrcChannelCount);
- }
- // channel convert and save to dst
- memcpy_by_index_array(dst, mDstChannelCount,
- src, mSrcChannelCount, mIdxAry, audio_bytes_per_sample(mDstFormat), frames);
- return;
- }
- // convert to destination format and save to dst
- memcpy_by_audio_format(dst, mDstFormat, src, AUDIO_FORMAT_PCM_FLOAT,
- frames * mDstChannelCount);
-}
bool AudioFlinger::RecordThread::checkForNewParameter_l(const String8& keyValuePair,
status_t& status)
@@ -7382,22 +7168,22 @@
}
if (status == NO_ERROR) {
- status = mInput->stream->common.set_parameters(&mInput->stream->common,
- keyValuePair.string());
+ status = mInput->stream->setParameters(keyValuePair);
if (status == INVALID_OPERATION) {
inputStandBy();
- status = mInput->stream->common.set_parameters(&mInput->stream->common,
- keyValuePair.string());
+ status = mInput->stream->setParameters(keyValuePair);
}
if (reconfig) {
- if (status == BAD_VALUE &&
- audio_is_linear_pcm(mInput->stream->common.get_format(&mInput->stream->common)) &&
- audio_is_linear_pcm(reqFormat) &&
- (mInput->stream->common.get_sample_rate(&mInput->stream->common)
- <= (AUDIO_RESAMPLER_DOWN_RATIO_MAX * samplingRate)) &&
- audio_channel_count_from_in_mask(
- mInput->stream->common.get_channels(&mInput->stream->common)) <= FCC_8) {
- status = NO_ERROR;
+ if (status == BAD_VALUE) {
+ uint32_t sRate;
+ audio_channel_mask_t channelMask;
+ audio_format_t format;
+ if (mInput->stream->getAudioProperties(&sRate, &channelMask, &format) == OK &&
+ audio_is_linear_pcm(format) && audio_is_linear_pcm(reqFormat) &&
+ sRate <= (AUDIO_RESAMPLER_DOWN_RATIO_MAX * samplingRate) &&
+ audio_channel_count_from_in_mask(channelMask) <= FCC_8) {
+ status = NO_ERROR;
+ }
}
if (status == NO_ERROR) {
readInputParameters_l();
@@ -7412,14 +7198,13 @@
String8 AudioFlinger::RecordThread::getParameters(const String8& keys)
{
Mutex::Autolock _l(mLock);
- if (initCheck() != NO_ERROR) {
- return String8();
+ if (initCheck() == NO_ERROR) {
+ String8 out_s8;
+ if (mInput->stream->getParameters(keys, &out_s8) == OK) {
+ return out_s8;
+ }
}
-
- char *s = mInput->stream->common.get_parameters(&mInput->stream->common, keys.string());
- const String8 out_s8(s);
- free(s);
- return out_s8;
+ return String8();
}
void AudioFlinger::RecordThread::ioConfigChanged(audio_io_config_event event, pid_t pid) {
@@ -7448,19 +7233,16 @@
void AudioFlinger::RecordThread::readInputParameters_l()
{
- mSampleRate = mInput->stream->common.get_sample_rate(&mInput->stream->common);
- mChannelMask = mInput->stream->common.get_channels(&mInput->stream->common);
+ status_t result = mInput->stream->getAudioProperties(&mSampleRate, &mChannelMask, &mHALFormat);
+ LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving audio properties from HAL: %d", result);
mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
- if (mChannelCount > FCC_8) {
- ALOGE("HAL channel count %d > %d", mChannelCount, FCC_8);
- }
- mHALFormat = mInput->stream->common.get_format(&mInput->stream->common);
+ LOG_ALWAYS_FATAL_IF(mChannelCount > FCC_8, "HAL channel count %d > %d", mChannelCount, FCC_8);
mFormat = mHALFormat;
- if (!audio_is_linear_pcm(mFormat)) {
- ALOGE("HAL format %#x is not linear pcm", mFormat);
- }
- mFrameSize = audio_stream_in_frame_size(mInput->stream);
- mBufferSize = mInput->stream->common.get_buffer_size(&mInput->stream->common);
+ LOG_ALWAYS_FATAL_IF(!audio_is_linear_pcm(mFormat), "HAL format %#x is not linear pcm", mFormat);
+ result = mInput->stream->getFrameSize(&mFrameSize);
+ LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving frame size from HAL: %d", result);
+ result = mInput->stream->getBufferSize(&mBufferSize);
+ LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving buffer size from HAL: %d", result);
mFrameCount = mBufferSize / mFrameSize;
// This is the formula for calculating the temporary buffer size.
// With 7 HAL buffers, we can guarantee ability to down-sample the input by ratio of 6:1 to
@@ -7483,9 +7265,10 @@
// The current value is higher than necessary. However it should not add to latency.
// Over-allocate beyond mRsmpInFramesP2 to permit a HAL read past end of buffer
- size_t bufferSize = (mRsmpInFramesP2 + mFrameCount - 1) * mFrameSize;
- (void)posix_memalign(&mRsmpInBuffer, 32, bufferSize);
- memset(mRsmpInBuffer, 0, bufferSize); // if posix_memalign fails, will segv here.
+ mRsmpInFramesOA = mRsmpInFramesP2 + mFrameCount - 1;
+ (void)posix_memalign(&mRsmpInBuffer, 32, mRsmpInFramesOA * mFrameSize);
+ // if posix_memalign fails, will segv here.
+ memset(mRsmpInBuffer, 0, mRsmpInFramesOA * mFrameSize);
// AudioRecord mSampleRate and mChannelCount are constant due to AudioRecord API constraints.
// But if thread's mSampleRate or mChannelCount changes, how will that affect active tracks?
@@ -7494,11 +7277,11 @@
uint32_t AudioFlinger::RecordThread::getInputFramesLost()
{
Mutex::Autolock _l(mLock);
- if (initCheck() != NO_ERROR) {
- return 0;
+ uint32_t result;
+ if (initCheck() == NO_ERROR && mInput->stream->getInputFramesLost(&result) == OK) {
+ return result;
}
-
- return mInput->stream->get_input_frames_lost(mInput->stream);
+ return 0;
}
// hasAudioSession_l() must be called with ThreadBase::mLock held
@@ -7545,12 +7328,12 @@
}
// this method must always be called either with ThreadBase mLock held or inside the thread loop
-audio_stream_t* AudioFlinger::RecordThread::stream() const
+sp<StreamHalInterface> AudioFlinger::RecordThread::stream() const
{
if (mInput == NULL) {
return NULL;
}
- return &mInput->stream->common;
+ return mInput->stream;
}
status_t AudioFlinger::RecordThread::addEffectChain_l(const sp<EffectChain>& chain)
@@ -7620,14 +7403,13 @@
}
}
- if (mInput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
- audio_hw_device_t *hwDevice = mInput->audioHwDev->hwDevice();
- status = hwDevice->create_audio_patch(hwDevice,
- patch->num_sources,
- patch->sources,
- patch->num_sinks,
- patch->sinks,
- handle);
+ if (mInput->audioHwDev->supportsAudioPatches()) {
+ sp<DeviceHalInterface> hwDevice = mInput->audioHwDev->hwDevice();
+ status = hwDevice->createAudioPatch(patch->num_sources,
+ patch->sources,
+ patch->num_sinks,
+ patch->sinks,
+ handle);
} else {
char *address;
if (strcmp(patch->sources[0].ext.device.address, "") != 0) {
@@ -7639,12 +7421,11 @@
}
AudioParameter param = AudioParameter(String8(address));
free(address);
- param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING),
+ param.addInt(String8(AudioParameter::keyRouting),
(int)patch->sources[0].ext.device.type);
- param.addInt(String8(AUDIO_PARAMETER_STREAM_INPUT_SOURCE),
+ param.addInt(String8(AudioParameter::keyInputSource),
(int)patch->sinks[0].ext.mix.usecase.source);
- status = mInput->stream->common.set_parameters(&mInput->stream->common,
- param.toString().string());
+ status = mInput->stream->setParameters(param.toString());
*handle = AUDIO_PATCH_HANDLE_NONE;
}
@@ -7662,14 +7443,13 @@
mInDevice = AUDIO_DEVICE_NONE;
- if (mInput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
- audio_hw_device_t *hwDevice = mInput->audioHwDev->hwDevice();
- status = hwDevice->release_audio_patch(hwDevice, handle);
+ if (mInput->audioHwDev->supportsAudioPatches()) {
+ sp<DeviceHalInterface> hwDevice = mInput->audioHwDev->hwDevice();
+ status = hwDevice->releaseAudioPatch(handle);
} else {
AudioParameter param;
- param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), 0);
- status = mInput->stream->common.set_parameters(&mInput->stream->common,
- param.toString().string());
+ param.addInt(String8(AudioParameter::keyRouting), 0);
+ status = mInput->stream->setParameters(param.toString());
}
return status;
}
@@ -7694,4 +7474,919 @@
config->ext.mix.usecase.source = mAudioSource;
}
+// ----------------------------------------------------------------------------
+// Mmap
+// ----------------------------------------------------------------------------
+
+AudioFlinger::MmapThreadHandle::MmapThreadHandle(const sp<MmapThread>& thread)
+ : mThread(thread)
+{
+}
+
+AudioFlinger::MmapThreadHandle::~MmapThreadHandle()
+{
+ MmapThread *thread = mThread.get();
+ // clear our strong reference before disconnecting the thread: the last strong reference
+ // will be removed when closeInput/closeOutput is executed upon call from audio policy manager
+ // and the thread removed from mMMapThreads list causing the thread destruction.
+ mThread.clear();
+ if (thread != nullptr) {
+ thread->disconnect();
+ }
+}
+
+status_t AudioFlinger::MmapThreadHandle::createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info)
+{
+ if (mThread == 0) {
+ return NO_INIT;
+ }
+ return mThread->createMmapBuffer(minSizeFrames, info);
+}
+
+status_t AudioFlinger::MmapThreadHandle::getMmapPosition(struct audio_mmap_position *position)
+{
+ if (mThread == 0) {
+ return NO_INIT;
+ }
+ return mThread->getMmapPosition(position);
+}
+
+status_t AudioFlinger::MmapThreadHandle::start(const MmapStreamInterface::Client& client,
+ audio_port_handle_t *handle)
+
+{
+ if (mThread == 0) {
+ return NO_INIT;
+ }
+ return mThread->start(client, handle);
+}
+
+status_t AudioFlinger::MmapThreadHandle::stop(audio_port_handle_t handle)
+{
+ if (mThread == 0) {
+ return NO_INIT;
+ }
+ return mThread->stop(handle);
+}
+
+status_t AudioFlinger::MmapThreadHandle::standby()
+{
+ if (mThread == 0) {
+ return NO_INIT;
+ }
+ return mThread->standby();
+}
+
+
+AudioFlinger::MmapThread::MmapThread(
+ const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
+ AudioHwDevice *hwDev, sp<StreamHalInterface> stream,
+ audio_devices_t outDevice, audio_devices_t inDevice, bool systemReady)
+ : ThreadBase(audioFlinger, id, outDevice, inDevice, MMAP, systemReady),
+ mHalStream(stream), mHalDevice(hwDev->hwDevice()), mAudioHwDev(hwDev)
+{
+ mStandby = true;
+ readHalParameters_l();
+}
+
+AudioFlinger::MmapThread::~MmapThread()
+{
+ releaseWakeLock_l();
+}
+
+void AudioFlinger::MmapThread::onFirstRef()
+{
+ run(mThreadName, ANDROID_PRIORITY_URGENT_AUDIO);
+}
+
+void AudioFlinger::MmapThread::disconnect()
+{
+ for (const sp<MmapTrack> &t : mActiveTracks) {
+ stop(t->portId());
+ }
+ // this will cause the destruction of this thread.
+ if (isOutput()) {
+ AudioSystem::releaseOutput(mId, streamType(), mSessionId);
+ } else {
+ AudioSystem::releaseInput(mId, mSessionId);
+ }
+}
+
+
+void AudioFlinger::MmapThread::configure(const audio_attributes_t *attr,
+ audio_stream_type_t streamType __unused,
+ audio_session_t sessionId,
+ const sp<MmapStreamCallback>& callback,
+ audio_port_handle_t portId)
+{
+ mAttr = *attr;
+ mSessionId = sessionId;
+ mCallback = callback;
+ mPortId = portId;
+}
+
+status_t AudioFlinger::MmapThread::createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info)
+{
+ if (mHalStream == 0) {
+ return NO_INIT;
+ }
+ mStandby = true;
+ acquireWakeLock();
+ return mHalStream->createMmapBuffer(minSizeFrames, info);
+}
+
+status_t AudioFlinger::MmapThread::getMmapPosition(struct audio_mmap_position *position)
+{
+ if (mHalStream == 0) {
+ return NO_INIT;
+ }
+ return mHalStream->getMmapPosition(position);
+}
+
+status_t AudioFlinger::MmapThread::start(const MmapStreamInterface::Client& client,
+ audio_port_handle_t *handle)
+{
+ ALOGV("%s clientUid %d mStandby %d", __FUNCTION__, client.clientUid, mStandby);
+ if (mHalStream == 0) {
+ return NO_INIT;
+ }
+
+ status_t ret;
+ audio_session_t sessionId;
+ audio_port_handle_t portId;
+
+ if (mActiveTracks.size() == 0) {
+ // for the first track, reuse portId and session allocated when the stream was opened
+ ret = mHalStream->start();
+ if (ret != NO_ERROR) {
+ ALOGE("%s: error mHalStream->start() = %d for first track", __FUNCTION__, ret);
+ return ret;
+ }
+ portId = mPortId;
+ sessionId = mSessionId;
+ mStandby = false;
+ } else {
+ // for other tracks than first one, get a new port ID from APM.
+ sessionId = (audio_session_t)mAudioFlinger->newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
+ audio_io_handle_t io;
+ if (isOutput()) {
+ audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+ config.sample_rate = mSampleRate;
+ config.channel_mask = mChannelMask;
+ config.format = mFormat;
+ audio_stream_type_t stream = streamType();
+ audio_output_flags_t flags =
+ (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_MMAP_NOIRQ | AUDIO_OUTPUT_FLAG_DIRECT);
+ ret = AudioSystem::getOutputForAttr(&mAttr, &io,
+ sessionId,
+ &stream,
+ client.clientUid,
+ &config,
+ flags,
+ AUDIO_PORT_HANDLE_NONE,
+ &portId);
+ } else {
+ audio_config_base_t config;
+ config.sample_rate = mSampleRate;
+ config.channel_mask = mChannelMask;
+ config.format = mFormat;
+ ret = AudioSystem::getInputForAttr(&mAttr, &io,
+ sessionId,
+ client.clientPid,
+ client.clientUid,
+ &config,
+ AUDIO_INPUT_FLAG_MMAP_NOIRQ,
+ AUDIO_PORT_HANDLE_NONE,
+ &portId);
+ }
+        // APM should not choose a different input or output stream for the same set of attributes
+        // and audio configuration
+ if (ret != NO_ERROR || io != mId) {
+ ALOGE("%s: error getting output or input from APM (error %d, io %d expected io %d)",
+ __FUNCTION__, ret, io, mId);
+ return BAD_VALUE;
+ }
+ }
+
+ if (isOutput()) {
+ ret = AudioSystem::startOutput(mId, streamType(), sessionId);
+ } else {
+ ret = AudioSystem::startInput(mId, sessionId);
+ }
+
+ // abort if start is rejected by audio policy manager
+ if (ret != NO_ERROR) {
+ ALOGE("%s: error start rejected by AudioPolicyManager = %d", __FUNCTION__, ret);
+ if (mActiveTracks.size() != 0) {
+ if (isOutput()) {
+ AudioSystem::releaseOutput(mId, streamType(), sessionId);
+ } else {
+ AudioSystem::releaseInput(mId, sessionId);
+ }
+ } else {
+ mHalStream->stop();
+ }
+ return PERMISSION_DENIED;
+ }
+
+ sp<MmapTrack> track = new MmapTrack(this, mSampleRate, mFormat, mChannelMask, sessionId,
+ client.clientUid, portId);
+
+ mActiveTracks.add(track);
+ sp<EffectChain> chain = getEffectChain_l(sessionId);
+ if (chain != 0) {
+ chain->setStrategy(AudioSystem::getStrategyForStream(streamType()));
+ chain->incTrackCnt();
+ chain->incActiveTrackCnt();
+ }
+
+ *handle = portId;
+
+ broadcast_l();
+
+ ALOGV("%s DONE handle %d stream %p", __FUNCTION__, portId, mHalStream.get());
+
+ return NO_ERROR;
+}
+
+status_t AudioFlinger::MmapThread::stop(audio_port_handle_t handle)
+{
+ ALOGV("%s handle %d", __FUNCTION__, handle);
+
+ if (mHalStream == 0) {
+ return NO_INIT;
+ }
+
+ sp<MmapTrack> track;
+ for (const sp<MmapTrack> &t : mActiveTracks) {
+ if (handle == t->portId()) {
+ track = t;
+ break;
+ }
+ }
+ if (track == 0) {
+ return BAD_VALUE;
+ }
+
+ mActiveTracks.remove(track);
+
+ if (isOutput()) {
+ AudioSystem::stopOutput(mId, streamType(), track->sessionId());
+ if (mActiveTracks.size() != 0) {
+ AudioSystem::releaseOutput(mId, streamType(), track->sessionId());
+ }
+ } else {
+ AudioSystem::stopInput(mId, track->sessionId());
+ if (mActiveTracks.size() != 0) {
+ AudioSystem::releaseInput(mId, track->sessionId());
+ }
+ }
+
+ sp<EffectChain> chain = getEffectChain_l(track->sessionId());
+ if (chain != 0) {
+ chain->decActiveTrackCnt();
+ chain->decTrackCnt();
+ }
+
+ broadcast_l();
+
+ if (mActiveTracks.size() == 0) {
+ mHalStream->stop();
+ }
+ return NO_ERROR;
+}
+
+status_t AudioFlinger::MmapThread::standby()
+{
+ ALOGV("%s", __FUNCTION__);
+
+ if (mHalStream == 0) {
+ return NO_INIT;
+ }
+ if (mActiveTracks.size() != 0) {
+ return INVALID_OPERATION;
+ }
+ mHalStream->standby();
+ mStandby = true;
+ releaseWakeLock();
+ return NO_ERROR;
+}
+
+
+void AudioFlinger::MmapThread::readHalParameters_l()
+{
+ status_t result = mHalStream->getAudioProperties(&mSampleRate, &mChannelMask, &mHALFormat);
+ LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving audio properties from HAL: %d", result);
+ mFormat = mHALFormat;
+ LOG_ALWAYS_FATAL_IF(!audio_is_linear_pcm(mFormat), "HAL format %#x is not linear pcm", mFormat);
+ result = mHalStream->getFrameSize(&mFrameSize);
+ LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving frame size from HAL: %d", result);
+ result = mHalStream->getBufferSize(&mBufferSize);
+ LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving buffer size from HAL: %d", result);
+ mFrameCount = mBufferSize / mFrameSize;
+}
+
+bool AudioFlinger::MmapThread::threadLoop()
+{
+ checkSilentMode_l();
+
+ const String8 myName(String8::format("thread %p type %d TID %d", this, mType, gettid()));
+
+ while (!exitPending())
+ {
+ Mutex::Autolock _l(mLock);
+ Vector< sp<EffectChain> > effectChains;
+
+ if (mSignalPending) {
+ // A signal was raised while we were unlocked
+ mSignalPending = false;
+ } else {
+ if (mConfigEvents.isEmpty()) {
+ // we're about to wait, flush the binder command buffer
+ IPCThreadState::self()->flushCommands();
+
+ if (exitPending()) {
+ break;
+ }
+
+ // wait until we have something to do...
+ ALOGV("%s going to sleep", myName.string());
+ mWaitWorkCV.wait(mLock);
+ ALOGV("%s waking up", myName.string());
+
+ checkSilentMode_l();
+
+ continue;
+ }
+ }
+
+ processConfigEvents_l();
+
+ processVolume_l();
+
+ checkInvalidTracks_l();
+
+ mActiveTracks.updatePowerState(this);
+
+ lockEffectChains_l(effectChains);
+ for (size_t i = 0; i < effectChains.size(); i ++) {
+ effectChains[i]->process_l();
+ }
+ // enable changes in effect chain
+ unlockEffectChains(effectChains);
+ // Effect chains will be actually deleted here if they were removed from
+ // mEffectChains list during mixing or effects processing
+ }
+
+ threadLoop_exit();
+
+ if (!mStandby) {
+ threadLoop_standby();
+ mStandby = true;
+ }
+
+ ALOGV("Thread %p type %d exiting", this, mType);
+ return false;
+}
+
+// checkForNewParameter_l() must be called with ThreadBase::mLock held
+bool AudioFlinger::MmapThread::checkForNewParameter_l(const String8& keyValuePair,
+ status_t& status)
+{
+ AudioParameter param = AudioParameter(keyValuePair);
+ int value;
+ if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) {
+ // forward device change to effects that have requested to be
+ // aware of attached audio device.
+ if (value != AUDIO_DEVICE_NONE) {
+ mOutDevice = value;
+ for (size_t i = 0; i < mEffectChains.size(); i++) {
+ mEffectChains[i]->setDevice_l(mOutDevice);
+ }
+ }
+ }
+ status = mHalStream->setParameters(keyValuePair);
+
+ return false;
+}
+
+String8 AudioFlinger::MmapThread::getParameters(const String8& keys)
+{
+ Mutex::Autolock _l(mLock);
+ String8 out_s8;
+ if (initCheck() == NO_ERROR && mHalStream->getParameters(keys, &out_s8) == OK) {
+ return out_s8;
+ }
+ return String8();
+}
+
+void AudioFlinger::MmapThread::ioConfigChanged(audio_io_config_event event, pid_t pid) {
+ sp<AudioIoDescriptor> desc = new AudioIoDescriptor();
+
+ desc->mIoHandle = mId;
+
+ switch (event) {
+ case AUDIO_INPUT_OPENED:
+ case AUDIO_INPUT_CONFIG_CHANGED:
+ case AUDIO_OUTPUT_OPENED:
+ case AUDIO_OUTPUT_CONFIG_CHANGED:
+ desc->mPatch = mPatch;
+ desc->mChannelMask = mChannelMask;
+ desc->mSamplingRate = mSampleRate;
+ desc->mFormat = mFormat;
+ desc->mFrameCount = mFrameCount;
+ desc->mFrameCountHAL = mFrameCount;
+ desc->mLatency = 0;
+ break;
+
+ case AUDIO_INPUT_CLOSED:
+ case AUDIO_OUTPUT_CLOSED:
+ default:
+ break;
+ }
+ mAudioFlinger->ioConfigChanged(event, desc, pid);
+}
+
+status_t AudioFlinger::MmapThread::createAudioPatch_l(const struct audio_patch *patch,
+ audio_patch_handle_t *handle)
+{
+ status_t status = NO_ERROR;
+
+ // store new device and send to effects
+ audio_devices_t type = AUDIO_DEVICE_NONE;
+ audio_port_handle_t deviceId;
+ if (isOutput()) {
+ for (unsigned int i = 0; i < patch->num_sinks; i++) {
+ type |= patch->sinks[i].ext.device.type;
+ }
+ deviceId = patch->sinks[0].id;
+ } else {
+ type = patch->sources[0].ext.device.type;
+ deviceId = patch->sources[0].id;
+ }
+
+ for (size_t i = 0; i < mEffectChains.size(); i++) {
+ mEffectChains[i]->setDevice_l(type);
+ }
+
+ if (isOutput()) {
+ mOutDevice = type;
+ } else {
+ mInDevice = type;
+ // store new source and send to effects
+ if (mAudioSource != patch->sinks[0].ext.mix.usecase.source) {
+ mAudioSource = patch->sinks[0].ext.mix.usecase.source;
+ for (size_t i = 0; i < mEffectChains.size(); i++) {
+ mEffectChains[i]->setAudioSource_l(mAudioSource);
+ }
+ }
+ }
+
+ if (mAudioHwDev->supportsAudioPatches()) {
+ status = mHalDevice->createAudioPatch(patch->num_sources,
+ patch->sources,
+ patch->num_sinks,
+ patch->sinks,
+ handle);
+ } else {
+ char *address;
+ if (strcmp(patch->sinks[0].ext.device.address, "") != 0) {
+ //FIXME: we only support address on first sink with HAL version < 3.0
+ address = audio_device_address_to_parameter(
+ patch->sinks[0].ext.device.type,
+ patch->sinks[0].ext.device.address);
+ } else {
+ address = (char *)calloc(1, 1);
+ }
+ AudioParameter param = AudioParameter(String8(address));
+ free(address);
+ param.addInt(String8(AudioParameter::keyRouting), (int)type);
+ if (!isOutput()) {
+ param.addInt(String8(AudioParameter::keyInputSource),
+ (int)patch->sinks[0].ext.mix.usecase.source);
+ }
+ status = mHalStream->setParameters(param.toString());
+ *handle = AUDIO_PATCH_HANDLE_NONE;
+ }
+
+ if (isOutput() && mPrevOutDevice != mOutDevice) {
+ mPrevOutDevice = type;
+ sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED);
+ sp<MmapStreamCallback> callback = mCallback.promote();
+ if (callback != 0) {
+ callback->onRoutingChanged(deviceId);
+ }
+ }
+ if (!isOutput() && mPrevInDevice != mInDevice) {
+ mPrevInDevice = type;
+ sendIoConfigEvent_l(AUDIO_INPUT_CONFIG_CHANGED);
+ sp<MmapStreamCallback> callback = mCallback.promote();
+ if (callback != 0) {
+ callback->onRoutingChanged(deviceId);
+ }
+ }
+ return status;
+}
+
+status_t AudioFlinger::MmapThread::releaseAudioPatch_l(const audio_patch_handle_t handle)
+{
+ status_t status = NO_ERROR;
+
+ mInDevice = AUDIO_DEVICE_NONE;
+
+ bool supportsAudioPatches = mHalDevice->supportsAudioPatches(&supportsAudioPatches) == OK ?
+ supportsAudioPatches : false;
+
+ if (supportsAudioPatches) {
+ status = mHalDevice->releaseAudioPatch(handle);
+ } else {
+ AudioParameter param;
+ param.addInt(String8(AudioParameter::keyRouting), 0);
+ status = mHalStream->setParameters(param.toString());
+ }
+ return status;
+}
+
+void AudioFlinger::MmapThread::getAudioPortConfig(struct audio_port_config *config)
+{
+ ThreadBase::getAudioPortConfig(config);
+ if (isOutput()) {
+ config->role = AUDIO_PORT_ROLE_SOURCE;
+ config->ext.mix.hw_module = mAudioHwDev->handle();
+ config->ext.mix.usecase.stream = AUDIO_STREAM_DEFAULT;
+ } else {
+ config->role = AUDIO_PORT_ROLE_SINK;
+ config->ext.mix.hw_module = mAudioHwDev->handle();
+ config->ext.mix.usecase.source = mAudioSource;
+ }
+}
+
+status_t AudioFlinger::MmapThread::addEffectChain_l(const sp<EffectChain>& chain)
+{
+ audio_session_t session = chain->sessionId();
+
+ ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session);
+ // Attach all tracks with same session ID to this chain.
+ // indicate all active tracks in the chain
+ for (const sp<MmapTrack> &track : mActiveTracks) {
+ if (session == track->sessionId()) {
+ chain->incTrackCnt();
+ chain->incActiveTrackCnt();
+ }
+ }
+
+ chain->setThread(this);
+ chain->setInBuffer(nullptr);
+ chain->setOutBuffer(nullptr);
+ chain->syncHalEffectsState();
+
+ mEffectChains.add(chain);
+ checkSuspendOnAddEffectChain_l(chain);
+ return NO_ERROR;
+}
+
+size_t AudioFlinger::MmapThread::removeEffectChain_l(const sp<EffectChain>& chain)
+{
+ audio_session_t session = chain->sessionId();
+
+ ALOGV("removeEffectChain_l() %p from thread %p for session %d", chain.get(), this, session);
+
+ for (size_t i = 0; i < mEffectChains.size(); i++) {
+ if (chain == mEffectChains[i]) {
+ mEffectChains.removeAt(i);
+ // detach all active tracks from the chain
+ // detach all tracks with same session ID from this chain
+ for (const sp<MmapTrack> &track : mActiveTracks) {
+ if (session == track->sessionId()) {
+ chain->decActiveTrackCnt();
+ chain->decTrackCnt();
+ }
+ }
+ break;
+ }
+ }
+ return mEffectChains.size();
+}
+
+// hasAudioSession_l() must be called with ThreadBase::mLock held
+uint32_t AudioFlinger::MmapThread::hasAudioSession_l(audio_session_t sessionId) const
+{
+ uint32_t result = 0;
+ if (getEffectChain_l(sessionId) != 0) {
+ result = EFFECT_SESSION;
+ }
+
+ for (size_t i = 0; i < mActiveTracks.size(); i++) {
+ sp<MmapTrack> track = mActiveTracks[i];
+ if (sessionId == track->sessionId()) {
+ result |= TRACK_SESSION;
+ if (track->isFastTrack()) {
+ result |= FAST_SESSION;
+ }
+ break;
+ }
+ }
+
+ return result;
+}
+
+void AudioFlinger::MmapThread::threadLoop_standby()
+{
+ mHalStream->standby();
+}
+
+void AudioFlinger::MmapThread::threadLoop_exit()
+{
+ sp<MmapStreamCallback> callback = mCallback.promote();
+ if (callback != 0) {
+ callback->onTearDown();
+ }
+}
+
+status_t AudioFlinger::MmapThread::setSyncEvent(const sp<SyncEvent>& event __unused)
+{
+ return BAD_VALUE;
+}
+
+bool AudioFlinger::MmapThread::isValidSyncEvent(const sp<SyncEvent>& event __unused) const
+{
+ return false;
+}
+
+status_t AudioFlinger::MmapThread::checkEffectCompatibility_l(
+ const effect_descriptor_t *desc, audio_session_t sessionId)
+{
+ // No global effect sessions on mmap threads
+ if (sessionId == AUDIO_SESSION_OUTPUT_MIX || sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
+ ALOGW("checkEffectCompatibility_l(): global effect %s on record thread %s",
+ desc->name, mThreadName);
+ return BAD_VALUE;
+ }
+
+ if (!isOutput() && ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_PRE_PROC)) {
+ ALOGW("checkEffectCompatibility_l(): non pre processing effect %s on capture mmap thread",
+ desc->name);
+ return BAD_VALUE;
+ }
+ if (isOutput() && ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC)) {
+ ALOGW("checkEffectCompatibility_l(): pre processing effect %s created on playback mmap "
+ "thread", desc->name);
+ return BAD_VALUE;
+ }
+
+ // Only allow effects without processing load or latency
+ if ((desc->flags & EFFECT_FLAG_NO_PROCESS_MASK) != EFFECT_FLAG_NO_PROCESS) {
+ return BAD_VALUE;
+ }
+
+ return NO_ERROR;
+
+}
+
+void AudioFlinger::MmapThread::checkInvalidTracks_l()
+{
+ for (const sp<MmapTrack> &track : mActiveTracks) {
+ if (track->isInvalid()) {
+ sp<MmapStreamCallback> callback = mCallback.promote();
+ if (callback != 0) {
+ callback->onTearDown();
+ }
+ break;
+ }
+ }
+}
+
+void AudioFlinger::MmapThread::dump(int fd, const Vector<String16>& args)
+{
+ dumpInternals(fd, args);
+ dumpTracks(fd, args);
+ dumpEffectChains(fd, args);
+}
+
+void AudioFlinger::MmapThread::dumpInternals(int fd, const Vector<String16>& args)
+{
+ dumpBase(fd, args);
+
+ dprintf(fd, " Attributes: content type %d usage %d source %d\n",
+ mAttr.content_type, mAttr.usage, mAttr.source);
+ dprintf(fd, " Session: %d port Id: %d\n", mSessionId, mPortId);
+ if (mActiveTracks.size() == 0) {
+ dprintf(fd, " No active clients\n");
+ }
+}
+
+void AudioFlinger::MmapThread::dumpTracks(int fd, const Vector<String16>& args __unused)
+{
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+
+ size_t numtracks = mActiveTracks.size();
+ dprintf(fd, " %zu Tracks", numtracks);
+ if (numtracks) {
+ MmapTrack::appendDumpHeader(result);
+ for (size_t i = 0; i < numtracks ; ++i) {
+ sp<MmapTrack> track = mActiveTracks[i];
+ track->dump(buffer, SIZE);
+ result.append(buffer);
+ }
+ } else {
+ dprintf(fd, "\n");
+ }
+ write(fd, result.string(), result.size());
+}
+
+AudioFlinger::MmapPlaybackThread::MmapPlaybackThread(
+ const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
+ AudioHwDevice *hwDev, AudioStreamOut *output,
+ audio_devices_t outDevice, audio_devices_t inDevice, bool systemReady)
+ : MmapThread(audioFlinger, id, hwDev, output->stream, outDevice, inDevice, systemReady),
+ mStreamType(AUDIO_STREAM_MUSIC),
+ mStreamVolume(1.0), mStreamMute(false), mOutput(output)
+{
+ snprintf(mThreadName, kThreadNameLength, "AudioMmapOut_%X", id);
+ mChannelCount = audio_channel_count_from_out_mask(mChannelMask);
+ mMasterVolume = audioFlinger->masterVolume_l();
+ mMasterMute = audioFlinger->masterMute_l();
+ if (mAudioHwDev) {
+ if (mAudioHwDev->canSetMasterVolume()) {
+ mMasterVolume = 1.0;
+ }
+
+ if (mAudioHwDev->canSetMasterMute()) {
+ mMasterMute = false;
+ }
+ }
+}
+
+void AudioFlinger::MmapPlaybackThread::configure(const audio_attributes_t *attr,
+ audio_stream_type_t streamType,
+ audio_session_t sessionId,
+ const sp<MmapStreamCallback>& callback,
+ audio_port_handle_t portId)
+{
+ MmapThread::configure(attr, streamType, sessionId, callback, portId);
+ mStreamType = streamType;
+}
+
+AudioStreamOut* AudioFlinger::MmapPlaybackThread::clearOutput()
+{
+ Mutex::Autolock _l(mLock);
+ AudioStreamOut *output = mOutput;
+ mOutput = NULL;
+ return output;
+}
+
+void AudioFlinger::MmapPlaybackThread::setMasterVolume(float value)
+{
+ Mutex::Autolock _l(mLock);
+ // Don't apply master volume in SW if our HAL can do it for us.
+ if (mAudioHwDev &&
+ mAudioHwDev->canSetMasterVolume()) {
+ mMasterVolume = 1.0;
+ } else {
+ mMasterVolume = value;
+ }
+}
+
+void AudioFlinger::MmapPlaybackThread::setMasterMute(bool muted)
+{
+ Mutex::Autolock _l(mLock);
+ // Don't apply master mute in SW if our HAL can do it for us.
+ if (mAudioHwDev && mAudioHwDev->canSetMasterMute()) {
+ mMasterMute = false;
+ } else {
+ mMasterMute = muted;
+ }
+}
+
+void AudioFlinger::MmapPlaybackThread::setStreamVolume(audio_stream_type_t stream, float value)
+{
+ Mutex::Autolock _l(mLock);
+ if (stream == mStreamType) {
+ mStreamVolume = value;
+ broadcast_l();
+ }
+}
+
+float AudioFlinger::MmapPlaybackThread::streamVolume(audio_stream_type_t stream) const
+{
+ Mutex::Autolock _l(mLock);
+ if (stream == mStreamType) {
+ return mStreamVolume;
+ }
+ return 0.0f;
+}
+
+void AudioFlinger::MmapPlaybackThread::setStreamMute(audio_stream_type_t stream, bool muted)
+{
+ Mutex::Autolock _l(mLock);
+ if (stream == mStreamType) {
+ mStreamMute= muted;
+ broadcast_l();
+ }
+}
+
+void AudioFlinger::MmapPlaybackThread::invalidateTracks(audio_stream_type_t streamType)
+{
+ Mutex::Autolock _l(mLock);
+ if (streamType == mStreamType) {
+ for (const sp<MmapTrack> &track : mActiveTracks) {
+ track->invalidate();
+ }
+ broadcast_l();
+ }
+}
+
+void AudioFlinger::MmapPlaybackThread::processVolume_l()
+{
+ float volume;
+
+ if (mMasterMute || mStreamMute) {
+ volume = 0;
+ } else {
+ volume = mMasterVolume * mStreamVolume;
+ }
+
+ if (volume != mHalVolFloat) {
+ mHalVolFloat = volume;
+
+ // Convert volumes from float to 8.24
+ uint32_t vol = (uint32_t)(volume * (1 << 24));
+
+ // Delegate volume control to effect in track effect chain if needed
+ // only one effect chain can be present on DirectOutputThread, so if
+ // there is one, the track is connected to it
+ if (!mEffectChains.isEmpty()) {
+ mEffectChains[0]->setVolume_l(&vol, &vol);
+ volume = (float)vol / (1 << 24);
+ }
+ // Try to use HW volume control and fall back to SW control if not implemented
+ if (mOutput->stream->setVolume(volume, volume) != NO_ERROR) {
+ sp<MmapStreamCallback> callback = mCallback.promote();
+ if (callback != 0) {
+ int channelCount;
+ if (isOutput()) {
+ channelCount = audio_channel_count_from_out_mask(mChannelMask);
+ } else {
+ channelCount = audio_channel_count_from_in_mask(mChannelMask);
+ }
+ Vector<float> values;
+ for (int i = 0; i < channelCount; i++) {
+ values.add(volume);
+ }
+ callback->onVolumeChanged(mChannelMask, values);
+ } else {
+ ALOGW("Could not set MMAP stream volume: no volume callback!");
+ }
+ }
+ }
+}
+
+void AudioFlinger::MmapPlaybackThread::checkSilentMode_l()
+{
+ if (!mMasterMute) {
+ char value[PROPERTY_VALUE_MAX];
+ if (property_get("ro.audio.silent", value, "0") > 0) {
+ char *endptr;
+ unsigned long ul = strtoul(value, &endptr, 0);
+ if (*endptr == '\0' && ul != 0) {
+ ALOGD("Silence is golden");
+ // The setprop command will not allow a property to be changed after
+ // the first time it is set, so we don't have to worry about un-muting.
+ setMasterMute_l(true);
+ }
+ }
+ }
+}
+
+void AudioFlinger::MmapPlaybackThread::dumpInternals(int fd, const Vector<String16>& args)
+{
+ MmapThread::dumpInternals(fd, args);
+
+ dprintf(fd, " Stream type: %d Stream volume: %f HAL volume: %f Stream mute %d\n",
+ mStreamType, mStreamVolume, mHalVolFloat, mStreamMute);
+ dprintf(fd, " Master volume: %f Master mute %d\n", mMasterVolume, mMasterMute);
+}
+
+AudioFlinger::MmapCaptureThread::MmapCaptureThread(
+ const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
+ AudioHwDevice *hwDev, AudioStreamIn *input,
+ audio_devices_t outDevice, audio_devices_t inDevice, bool systemReady)
+ : MmapThread(audioFlinger, id, hwDev, input->stream, outDevice, inDevice, systemReady),
+ mInput(input)
+{
+ snprintf(mThreadName, kThreadNameLength, "AudioMmapIn_%X", id);
+ mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
+}
+
+AudioFlinger::AudioStreamIn* AudioFlinger::MmapCaptureThread::clearInput()
+{
+ Mutex::Autolock _l(mLock);
+ AudioStreamIn *input = mInput;
+ mInput = NULL;
+ return input;
+}
} // namespace android
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index b3b4cf2..80b368e 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -29,7 +29,9 @@
DIRECT, // Thread class is DirectOutputThread
DUPLICATING, // Thread class is DuplicatingThread
RECORD, // Thread class is RecordThread
- OFFLOAD // Thread class is OffloadThread
+ OFFLOAD, // Thread class is OffloadThread
+ MMAP // control thread for MMAP stream
+ // If you add any values here, also update ThreadBase::threadTypeToString()
};
static const char *threadTypeToString(type_t type);
@@ -126,23 +128,25 @@
class PrioConfigEventData : public ConfigEventData {
public:
- PrioConfigEventData(pid_t pid, pid_t tid, int32_t prio) :
- mPid(pid), mTid(tid), mPrio(prio) {}
+ PrioConfigEventData(pid_t pid, pid_t tid, int32_t prio, bool forApp) :
+ mPid(pid), mTid(tid), mPrio(prio), mForApp(forApp) {}
virtual void dump(char *buffer, size_t size) {
- snprintf(buffer, size, "Prio event: pid %d, tid %d, prio %d\n", mPid, mTid, mPrio);
+ snprintf(buffer, size, "Prio event: pid %d, tid %d, prio %d, for app? %d\n",
+ mPid, mTid, mPrio, mForApp);
}
const pid_t mPid;
const pid_t mTid;
const int32_t mPrio;
+ const bool mForApp;
};
class PrioConfigEvent : public ConfigEvent {
public:
- PrioConfigEvent(pid_t pid, pid_t tid, int32_t prio) :
+ PrioConfigEvent(pid_t pid, pid_t tid, int32_t prio, bool forApp) :
ConfigEvent(CFG_EVENT_PRIO, true) {
- mData = new PrioConfigEventData(pid, tid, prio);
+ mData = new PrioConfigEventData(pid, tid, prio, forApp);
}
virtual ~PrioConfigEvent() {}
};
@@ -267,8 +271,8 @@
status_t sendConfigEvent_l(sp<ConfigEvent>& event);
void sendIoConfigEvent(audio_io_config_event event, pid_t pid = 0);
void sendIoConfigEvent_l(audio_io_config_event event, pid_t pid = 0);
- void sendPrioConfigEvent(pid_t pid, pid_t tid, int32_t prio);
- void sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio);
+ void sendPrioConfigEvent(pid_t pid, pid_t tid, int32_t prio, bool forApp);
+ void sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio, bool forApp);
status_t sendSetParameterConfigEvent_l(const String8& keyValuePair);
status_t sendCreateAudioPatchConfigEvent(const struct audio_patch *patch,
audio_patch_handle_t *handle);
@@ -285,8 +289,11 @@
bool standby() const { return mStandby; }
audio_devices_t outDevice() const { return mOutDevice; }
audio_devices_t inDevice() const { return mInDevice; }
+ audio_devices_t getDevice() const { return isOutput() ? mOutDevice : mInDevice; }
- virtual audio_stream_t* stream() const = 0;
+ virtual bool isOutput() const = 0;
+
+ virtual sp<StreamHalInterface> stream() const = 0;
sp<EffectHandle> createEffect_l(
const sp<AudioFlinger::Client>& client,
@@ -390,6 +397,8 @@
virtual status_t checkEffectCompatibility_l(const effect_descriptor_t *desc,
audio_session_t sessionId) = 0;
+ void broadcast_l();
+
mutable Mutex mLock;
protected:
@@ -403,12 +412,11 @@
effect_uuid_t mType; // effect type UUID
};
- void acquireWakeLock(int uid = -1);
- virtual void acquireWakeLock_l(int uid = -1);
+ void acquireWakeLock();
+ virtual void acquireWakeLock_l();
void releaseWakeLock();
void releaseWakeLock_l();
- void updateWakeLockUids(const SortedVector<int> &uids);
- void updateWakeLockUids_l(const SortedVector<int> &uids);
+ void updateWakeLockUids_l(const SortedVector<uid_t> &uids);
void getPowerManager_l();
void setEffectSuspended_l(const effect_uuid_t *type,
bool suspend,
@@ -482,12 +490,115 @@
static const size_t kLogSize = 4 * 1024;
sp<NBLog::Writer> mNBLogWriter;
bool mSystemReady;
- bool mNotifiedBatteryStart;
ExtendedTimestamp mTimestamp;
+ // A condition that must be evaluated by the thread loop has changed and
+ // we must not wait for async write callback in the thread loop before evaluating it
+ bool mSignalPending;
+
+ // ActiveTracks is a sorted vector of track type T representing the
+ // active tracks of threadLoop() to be considered by the locked prepare portion.
+ // ActiveTracks should be accessed with the ThreadBase lock held.
+ //
+ // During processing and I/O, the threadLoop does not hold the lock;
+ // hence it does not directly use ActiveTracks. Care should be taken
+ // to hold local strong references or defer removal of tracks
+ // if the threadLoop may still be accessing those tracks due to mix, etc.
+ //
+ // This class updates power information appropriately.
+ //
+
+ template <typename T>
+ class ActiveTracks {
+ public:
+ ActiveTracks()
+ : mActiveTracksGeneration(0)
+ , mLastActiveTracksGeneration(0)
+ { }
+
+ ~ActiveTracks() {
+ ALOGW_IF(!mActiveTracks.isEmpty(),
+ "ActiveTracks should be empty in destructor");
+ }
+ // returns the last track added (even though it may have been
+ // subsequently removed from ActiveTracks).
+ //
+ // Used for DirectOutputThread to ensure a flush is called when transitioning
+ // to a new track (even though it may be on the same session).
+ // Used for OffloadThread to ensure that volume and mixer state is
+ // taken from the latest track added.
+ //
+ // The latest track is saved with a weak pointer to prevent keeping an
+ // otherwise useless track alive. Thus the function will return nullptr
+ // if the latest track has subsequently been removed and destroyed.
+ sp<T> getLatest() {
+ return mLatestActiveTrack.promote();
+ }
+
+ // SortedVector methods
+ ssize_t add(const sp<T> &track);
+ ssize_t remove(const sp<T> &track);
+ size_t size() const {
+ return mActiveTracks.size();
+ }
+ ssize_t indexOf(const sp<T>& item) {
+ return mActiveTracks.indexOf(item);
+ }
+ sp<T> operator[](size_t index) const {
+ return mActiveTracks[index];
+ }
+ typename SortedVector<sp<T>>::iterator begin() {
+ return mActiveTracks.begin();
+ }
+ typename SortedVector<sp<T>>::iterator end() {
+ return mActiveTracks.end();
+ }
+
+ // Due to Binder recursion optimization, clear() and updatePowerState()
+ // cannot be called from a Binder thread because they may call back into
+ // the original calling process (system server) for BatteryNotifier
+ // (which requires a Java environment that may not be present).
+ // Hence, call clear() and updatePowerState() only from the
+ // ThreadBase thread.
+ void clear();
+ // periodically called in the threadLoop() to update power state uids.
+ void updatePowerState(sp<ThreadBase> thread, bool force = false);
+
+ private:
+ SortedVector<uid_t> getWakeLockUids() {
+ SortedVector<uid_t> wakeLockUids;
+ for (const sp<T> &track : mActiveTracks) {
+ wakeLockUids.add(track->uid());
+ }
+ return wakeLockUids; // moved by underlying SharedBuffer
+ }
+
+ std::map<uid_t, std::pair<ssize_t /* previous */, ssize_t /* current */>>
+ mBatteryCounter;
+ SortedVector<sp<T>> mActiveTracks;
+ int mActiveTracksGeneration;
+ int mLastActiveTracksGeneration;
+ wp<T> mLatestActiveTrack; // latest track added to ActiveTracks
+ };
+
+ SimpleLog mLocalLog;
+};
+
+class VolumeInterface {
+ public:
+
+ virtual ~VolumeInterface() {}
+
+ virtual void setMasterVolume(float value) = 0;
+ virtual void setMasterMute(bool muted) = 0;
+ virtual void setStreamVolume(audio_stream_type_t stream, float value) = 0;
+ virtual void setStreamMute(audio_stream_type_t stream, bool muted) = 0;
+ virtual float streamVolume(audio_stream_type_t stream) const = 0;
+
};
// --- PlaybackThread ---
-class PlaybackThread : public ThreadBase {
+class PlaybackThread : public ThreadBase, public StreamOutHalInterfaceCallback,
+ public VolumeInterface {
public:
#include "PlaybackTracks.h"
@@ -512,6 +623,12 @@
// 14 tracks max per client allows for 2 misbehaving application leaving 4 available tracks.
static const uint32_t kMaxTracksPerUid = 14;
+ // Maximum delay (in nanoseconds) for upcoming buffers in suspend mode, otherwise
+ // if delay is greater, the estimated time for timeLoopNextNs is reset.
+ // This allows for catch-up to be done for small delays, while resetting the estimate
+ // for initial conditions or large delays.
+ static const nsecs_t kMaxNextBufferDelayNs = 100000000;
+
PlaybackThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
audio_io_handle_t id, audio_devices_t device, type_t type, bool systemReady);
virtual ~PlaybackThread();
@@ -544,13 +661,13 @@
virtual mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove) = 0;
void removeTracks_l(const Vector< sp<Track> >& tracksToRemove);
- void writeCallback();
- void resetWriteBlocked(uint32_t sequence);
- void drainCallback();
- void resetDraining(uint32_t sequence);
- void errorCallback();
+ // StreamOutHalInterfaceCallback implementation
+ virtual void onWriteReady();
+ virtual void onDrainReady();
+ virtual void onError();
- static int asyncCallback(stream_callback_event_t event, void *param, void *cookie);
+ void resetWriteBlocked(uint32_t sequence);
+ void resetDraining(uint32_t sequence);
virtual bool waitingAsyncCallback();
virtual bool waitingAsyncCallback_l();
@@ -562,6 +679,10 @@
virtual void preExit();
virtual bool keepWakeLock() const { return true; }
+ virtual void acquireWakeLock_l() {
+ ThreadBase::acquireWakeLock_l();
+ mActiveTracks.updatePowerState(this, true /* force */);
+ }
public:
@@ -572,13 +693,12 @@
// same, but lock must already be held
uint32_t latency_l() const;
- void setMasterVolume(float value);
- void setMasterMute(bool muted);
-
- void setStreamVolume(audio_stream_type_t stream, float value);
- void setStreamMute(audio_stream_type_t stream, bool muted);
-
- float streamVolume(audio_stream_type_t stream) const;
+ // VolumeInterface
+ virtual void setMasterVolume(float value);
+ virtual void setMasterMute(bool muted);
+ virtual void setStreamVolume(audio_stream_type_t stream, float value);
+ virtual void setStreamMute(audio_stream_type_t stream, bool muted);
+ virtual float streamVolume(audio_stream_type_t stream) const;
sp<Track> createTrack_l(
const sp<AudioFlinger::Client>& client,
@@ -591,12 +711,13 @@
audio_session_t sessionId,
audio_output_flags_t *flags,
pid_t tid,
- int uid,
- status_t *status /*non-NULL*/);
+ uid_t uid,
+ status_t *status /*non-NULL*/,
+ audio_port_handle_t portId);
AudioStreamOut* getOutput() const;
AudioStreamOut* clearOutput();
- virtual audio_stream_t* stream() const;
+ virtual sp<StreamHalInterface> stream() const;
// a very large number of suspend() will eventually wraparound, but unlikely
void suspend() { (void) android_atomic_inc(&mSuspended); }
@@ -648,6 +769,11 @@
virtual void getAudioPortConfig(struct audio_port_config *config);
+ // Return the asynchronous signal wait time.
+ virtual int64_t computeWaitTimeNs_l() const { return INT64_MAX; }
+
+ virtual bool isOutput() const override { return true; }
+
protected:
// updated by readOutputParameters_l()
size_t mNormalFrameCount; // normal mixer and effects
@@ -734,10 +860,7 @@
bool mMasterMute;
void setMasterMute_l(bool muted) { mMasterMute = muted; }
protected:
- SortedVector< wp<Track> > mActiveTracks; // FIXME check if this could be sp<>
- SortedVector<int> mWakeLockUids;
- int mActiveTracksGeneration;
- wp<Track> mLatestActiveTrack; // latest track added to mActiveTracks
+ ActiveTracks<Track> mActiveTracks;
// Allocate a track name for a given channel mask.
// Returns name >= 0 if successful, -1 on failure.
@@ -783,7 +906,6 @@
status_t addTrack_l(const sp<Track>& track);
bool destroyTrack_l(const sp<Track>& track);
void removeTrack_l(const sp<Track>& track);
- void broadcast_l();
void readOutputParameters_l();
@@ -845,9 +967,6 @@
// Bit 0 is reset by the async callback thread calling resetDraining(). Out of sequence
// callbacks are ignored.
uint32_t mDrainSequence;
- // A condition that must be evaluated by prepareTrack_l() has changed and we must not wait
- // for async write callback in the thread loop before evaluating it
- bool mSignalPending;
sp<AsyncCallbackThread> mCallbackThread;
private:
@@ -865,6 +984,8 @@
uint32_t mScreenState; // cached copy of gScreenState
static const size_t kFastMixerLogSize = 4 * 1024;
sp<NBLog::Writer> mFastMixerNBLogWriter;
+
+
public:
virtual bool hasFastMixer() const = 0;
virtual FastTrackUnderruns getFastTrackUnderruns(size_t fastIndex __unused) const
@@ -903,8 +1024,8 @@
virtual uint32_t suspendSleepTimeUs() const;
virtual void cacheParameters_l();
- virtual void acquireWakeLock_l(int uid = -1) {
- PlaybackThread::acquireWakeLock_l(uid);
+ virtual void acquireWakeLock_l() {
+ PlaybackThread::acquireWakeLock_l();
if (hasFastMixer()) {
mFastMixer->setBoottimeOffset(
mTimestamp.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_BOOTTIME]);
@@ -996,6 +1117,7 @@
// volumes last sent to audio HAL with stream->set_volume()
float mLeftVolFloat;
float mRightVolFloat;
+ bool mVolumeShaperActive;
DirectOutputThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
audio_io_handle_t id, uint32_t device, ThreadBase::type_t type,
@@ -1009,6 +1131,8 @@
public:
virtual bool hasFastMixer() const { return false; }
+
+ virtual int64_t computeWaitTimeNs_l() const override;
};
class OffloadThread : public DirectOutputThread {
@@ -1113,7 +1237,6 @@
virtual bool hasFastMixer() const { return false; }
};
-
// record thread
class RecordThread : public ThreadBase
{
@@ -1162,92 +1285,6 @@
// rolling counter that is never cleared
};
- /* The RecordBufferConverter is used for format, channel, and sample rate
- * conversion for a RecordTrack.
- *
- * TODO: Self contained, so move to a separate file later.
- *
- * RecordBufferConverter uses the convert() method rather than exposing a
- * buffer provider interface; this is to save a memory copy.
- */
- class RecordBufferConverter
- {
- public:
- RecordBufferConverter(
- audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
- uint32_t srcSampleRate,
- audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
- uint32_t dstSampleRate);
-
- ~RecordBufferConverter();
-
- /* Converts input data from an AudioBufferProvider by format, channelMask,
- * and sampleRate to a destination buffer.
- *
- * Parameters
- * dst: buffer to place the converted data.
- * provider: buffer provider to obtain source data.
- * frames: number of frames to convert
- *
- * Returns the number of frames converted.
- */
- size_t convert(void *dst, AudioBufferProvider *provider, size_t frames);
-
- // returns NO_ERROR if constructor was successful
- status_t initCheck() const {
- // mSrcChannelMask set on successful updateParameters
- return mSrcChannelMask != AUDIO_CHANNEL_INVALID ? NO_ERROR : NO_INIT;
- }
-
- // allows dynamic reconfigure of all parameters
- status_t updateParameters(
- audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
- uint32_t srcSampleRate,
- audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
- uint32_t dstSampleRate);
-
- // called to reset resampler buffers on record track discontinuity
- void reset() {
- if (mResampler != NULL) {
- mResampler->reset();
- }
- }
-
- private:
- // format conversion when not using resampler
- void convertNoResampler(void *dst, const void *src, size_t frames);
-
- // format conversion when using resampler; modifies src in-place
- void convertResampler(void *dst, /*not-a-const*/ void *src, size_t frames);
-
- // user provided information
- audio_channel_mask_t mSrcChannelMask;
- audio_format_t mSrcFormat;
- uint32_t mSrcSampleRate;
- audio_channel_mask_t mDstChannelMask;
- audio_format_t mDstFormat;
- uint32_t mDstSampleRate;
-
- // derived information
- uint32_t mSrcChannelCount;
- uint32_t mDstChannelCount;
- size_t mDstFrameSize;
-
- // format conversion buffer
- void *mBuf;
- size_t mBufFrames;
- size_t mBufFrameSize;
-
- // resampler info
- AudioResampler *mResampler;
-
- bool mIsLegacyDownmix; // legacy stereo to mono conversion needed
- bool mIsLegacyUpmix; // legacy mono to stereo conversion needed
- bool mRequiresFloat; // data processing requires float (e.g. resampler)
- PassthruBufferProvider *mInputConverterProvider; // converts input to float
- int8_t mIdxAry[sizeof(uint32_t) * 8]; // used for channel mask conversion
- };
-
#include "RecordTracks.h"
RecordThread(const sp<AudioFlinger>& audioFlinger,
@@ -1271,6 +1308,7 @@
// Thread virtuals
virtual bool threadLoop();
+ virtual void preExit();
// RefBase
virtual void onFirstRef();
@@ -1289,10 +1327,11 @@
size_t *pFrameCount,
audio_session_t sessionId,
size_t *notificationFrames,
- int uid,
+ uid_t uid,
audio_input_flags_t *flags,
pid_t tid,
- status_t *status /*non-NULL*/);
+ status_t *status /*non-NULL*/,
+ audio_port_handle_t portId);
status_t start(RecordTrack* recordTrack,
AudioSystem::sync_event_t event,
@@ -1304,7 +1343,7 @@
void dump(int fd, const Vector<String16>& args);
AudioStreamIn* clearInput();
- virtual audio_stream_t* stream() const;
+ virtual sp<StreamHalInterface> stream() const;
virtual bool checkForNewParameter_l(const String8& keyValuePair,
@@ -1343,6 +1382,12 @@
virtual status_t checkEffectCompatibility_l(const effect_descriptor_t *desc,
audio_session_t sessionId);
+ virtual void acquireWakeLock_l() {
+ ThreadBase::acquireWakeLock_l();
+ mActiveTracks.updatePowerState(this, true /* force */);
+ }
+ virtual bool isOutput() const override { return false; }
+
private:
// Enter standby if not already in standby, and set mStandby flag
void standbyIfNotAlreadyInStandby();
@@ -1354,15 +1399,15 @@
SortedVector < sp<RecordTrack> > mTracks;
// mActiveTracks has dual roles: it indicates the current active track(s), and
// is used together with mStartStopCond to indicate start()/stop() progress
- SortedVector< sp<RecordTrack> > mActiveTracks;
- // generation counter for mActiveTracks
- int mActiveTracksGen;
+ ActiveTracks<RecordTrack> mActiveTracks;
+
Condition mStartStopCond;
// resampler converts input at HAL Hz to output at AudioRecord client Hz
- void *mRsmpInBuffer; //
+ void *mRsmpInBuffer; // size = mRsmpInFramesOA
size_t mRsmpInFrames; // size of resampler input in frames
size_t mRsmpInFramesP2;// size rounded up to a power-of-2
+ size_t mRsmpInFramesOA;// mRsmpInFramesP2 + over-allocation
// rolling index that is never cleared
int32_t mRsmpInRear; // last filled frame + 1
@@ -1410,3 +1455,153 @@
bool mFastTrackAvail; // true if fast track available
};
+
+class MmapThread : public ThreadBase
+{
+ public:
+
+#include "MmapTracks.h"
+
+ MmapThread(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
+ AudioHwDevice *hwDev, sp<StreamHalInterface> stream,
+ audio_devices_t outDevice, audio_devices_t inDevice, bool systemReady);
+ virtual ~MmapThread();
+
+ virtual void configure(const audio_attributes_t *attr,
+ audio_stream_type_t streamType,
+ audio_session_t sessionId,
+ const sp<MmapStreamCallback>& callback,
+ audio_port_handle_t portId);
+
+ void disconnect();
+
+ // MmapStreamInterface
+ status_t createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info);
+ status_t getMmapPosition(struct audio_mmap_position *position);
+ status_t start(const MmapStreamInterface::Client& client, audio_port_handle_t *handle);
+ status_t stop(audio_port_handle_t handle);
+ status_t standby();
+
+ // RefBase
+ virtual void onFirstRef();
+
+ // Thread virtuals
+ virtual bool threadLoop();
+
+ virtual void threadLoop_exit();
+ virtual void threadLoop_standby();
+ virtual bool shouldStandby_l() { return false; }
+
+ virtual status_t initCheck() const { return (mHalStream == 0) ? NO_INIT : NO_ERROR; }
+ virtual size_t frameCount() const { return mFrameCount; }
+ virtual bool checkForNewParameter_l(const String8& keyValuePair,
+ status_t& status);
+ virtual String8 getParameters(const String8& keys);
+ virtual void ioConfigChanged(audio_io_config_event event, pid_t pid = 0);
+ void readHalParameters_l();
+ virtual void cacheParameters_l() {}
+ virtual status_t createAudioPatch_l(const struct audio_patch *patch,
+ audio_patch_handle_t *handle);
+ virtual status_t releaseAudioPatch_l(const audio_patch_handle_t handle);
+ virtual void getAudioPortConfig(struct audio_port_config *config);
+
+ virtual sp<StreamHalInterface> stream() const { return mHalStream; }
+ virtual status_t addEffectChain_l(const sp<EffectChain>& chain);
+ virtual size_t removeEffectChain_l(const sp<EffectChain>& chain);
+ virtual status_t checkEffectCompatibility_l(const effect_descriptor_t *desc,
+ audio_session_t sessionId);
+
+ virtual uint32_t hasAudioSession_l(audio_session_t sessionId) const;
+ virtual status_t setSyncEvent(const sp<SyncEvent>& event);
+ virtual bool isValidSyncEvent(const sp<SyncEvent>& event) const;
+
+ virtual void checkSilentMode_l() {}
+ virtual void processVolume_l() {}
+ void checkInvalidTracks_l();
+
+ virtual audio_stream_type_t streamType() { return AUDIO_STREAM_DEFAULT; }
+
+ virtual void invalidateTracks(audio_stream_type_t streamType __unused) {}
+
+ void dump(int fd, const Vector<String16>& args);
+ virtual void dumpInternals(int fd, const Vector<String16>& args);
+ void dumpTracks(int fd, const Vector<String16>& args);
+
+ protected:
+
+ audio_attributes_t mAttr;
+ audio_session_t mSessionId;
+ audio_port_handle_t mPortId;
+
+ wp<MmapStreamCallback> mCallback;
+ sp<StreamHalInterface> mHalStream;
+ sp<DeviceHalInterface> mHalDevice;
+ AudioHwDevice* const mAudioHwDev;
+ ActiveTracks<MmapTrack> mActiveTracks;
+};
+
+class MmapPlaybackThread : public MmapThread, public VolumeInterface
+{
+
+public:
+ MmapPlaybackThread(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
+ AudioHwDevice *hwDev, AudioStreamOut *output,
+ audio_devices_t outDevice, audio_devices_t inDevice, bool systemReady);
+ virtual ~MmapPlaybackThread() {}
+
+ virtual void configure(const audio_attributes_t *attr,
+ audio_stream_type_t streamType,
+ audio_session_t sessionId,
+ const sp<MmapStreamCallback>& callback,
+ audio_port_handle_t portId);
+
+ AudioStreamOut* clearOutput();
+
+ // VolumeInterface
+ virtual void setMasterVolume(float value);
+ virtual void setMasterMute(bool muted);
+ virtual void setStreamVolume(audio_stream_type_t stream, float value);
+ virtual void setStreamMute(audio_stream_type_t stream, bool muted);
+ virtual float streamVolume(audio_stream_type_t stream) const;
+
+ void setMasterMute_l(bool muted) { mMasterMute = muted; }
+
+ virtual void invalidateTracks(audio_stream_type_t streamType);
+
+ virtual audio_stream_type_t streamType() { return mStreamType; }
+ virtual void checkSilentMode_l();
+ virtual void processVolume_l();
+
+ virtual void dumpInternals(int fd, const Vector<String16>& args);
+
+ virtual bool isOutput() const override { return true; }
+
+protected:
+
+ audio_stream_type_t mStreamType;
+ float mMasterVolume;
+ float mStreamVolume;
+ bool mMasterMute;
+ bool mStreamMute;
+ float mHalVolFloat;
+ AudioStreamOut* mOutput;
+};
+
+class MmapCaptureThread : public MmapThread
+{
+
+public:
+ MmapCaptureThread(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
+ AudioHwDevice *hwDev, AudioStreamIn *input,
+ audio_devices_t outDevice, audio_devices_t inDevice, bool systemReady);
+ virtual ~MmapCaptureThread() {}
+
+ AudioStreamIn* clearInput();
+
+ virtual bool isOutput() const override { return false; }
+
+protected:
+
+ AudioStreamIn* mInput;
+};
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index 6b97246..e0c09f7 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -62,10 +62,11 @@
size_t frameCount,
void *buffer,
audio_session_t sessionId,
- int uid,
+ uid_t uid,
bool isOut,
alloc_type alloc = ALLOC_CBLK,
- track_type type = TYPE_DEFAULT);
+ track_type type = TYPE_DEFAULT,
+ audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
virtual ~TrackBase();
virtual status_t initCheck() const;
@@ -75,7 +76,8 @@
sp<IMemory> getCblk() const { return mCblkMemory; }
audio_track_cblk_t* cblk() const { return mCblk; }
audio_session_t sessionId() const { return mSessionId; }
- int uid() const { return mUid; }
+ uid_t uid() const { return mUid; }
+ audio_port_handle_t portId() const { return mPortId; }
virtual status_t setSyncEvent(const sp<SyncEvent>& event);
sp<IMemory> getBuffers() const { return mBufferMemory; }
@@ -85,6 +87,10 @@
bool isPatchTrack() const { return (mType == TYPE_PATCH); }
bool isExternalTrack() const { return !isOutputTrack() && !isPatchTrack(); }
+ virtual void invalidate() { mIsInvalid = true; }
+ bool isInvalid() const { return mIsInvalid; }
+
+
protected:
TrackBase(const TrackBase&);
TrackBase& operator = (const TrackBase&);
@@ -153,16 +159,18 @@
// openRecord(), and then adjusted as needed
const audio_session_t mSessionId;
- int mUid;
+ uid_t mUid;
Vector < sp<SyncEvent> >mSyncEvents;
const bool mIsOut;
- ServerProxy* mServerProxy;
+ sp<ServerProxy> mServerProxy;
const int mId;
sp<NBAIO_Sink> mTeeSink;
sp<NBAIO_Source> mTeeSource;
bool mTerminated;
track_type mType; // must be one of TYPE_DEFAULT, TYPE_OUTPUT, TYPE_PATCH ...
audio_io_handle_t mThreadIoHandle; // I/O handle of the thread the track is attached to
+ audio_port_handle_t mPortId; // unique ID for this track used by audio policy
+ bool mIsInvalid; // non-resettable latch, set by invalidate()
};
// PatchProxyBufferProvider interface is implemented by PatchTrack and PatchRecord.
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index ba6e6e5..301510c 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -27,12 +27,12 @@
#include <private/media/AudioTrackShared.h>
-#include "AudioMixer.h"
#include "AudioFlinger.h"
#include "ServiceUtilities.h"
#include <media/nbaio/Pipe.h>
#include <media/nbaio/PipeReader.h>
+#include <media/RecordBufferConverter.h>
#include <audio_utils/minifloat.h>
// ----------------------------------------------------------------------------
@@ -52,7 +52,7 @@
// TODO move to a common header (Also shared with AudioTrack.cpp)
#define NANOS_PER_SECOND 1000000000
-#define TIME_TO_NANOS(time) ((uint64_t)time.tv_sec * NANOS_PER_SECOND + time.tv_nsec)
+#define TIME_TO_NANOS(time) ((uint64_t)(time).tv_sec * NANOS_PER_SECOND + (time).tv_nsec)
namespace android {
@@ -72,10 +72,11 @@
size_t frameCount,
void *buffer,
audio_session_t sessionId,
- int clientUid,
+ uid_t clientUid,
bool isOut,
alloc_type alloc,
- track_type type)
+ track_type type,
+ audio_port_handle_t portId)
: RefBase(),
mThread(thread),
mClient(client),
@@ -93,26 +94,42 @@
mFrameCount(frameCount),
mSessionId(sessionId),
mIsOut(isOut),
- mServerProxy(NULL),
mId(android_atomic_inc(&nextTrackId)),
mTerminated(false),
mType(type),
- mThreadIoHandle(thread->id())
+ mThreadIoHandle(thread->id()),
+ mPortId(portId),
+ mIsInvalid(false)
{
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
- if (!isTrustedCallingUid(callingUid) || clientUid == -1) {
- ALOGW_IF(clientUid != -1 && clientUid != (int)callingUid,
+ if (!isTrustedCallingUid(callingUid) || clientUid == AUDIO_UID_INVALID) {
+ ALOGW_IF(clientUid != AUDIO_UID_INVALID && clientUid != callingUid,
"%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid, clientUid);
- clientUid = (int)callingUid;
+ clientUid = callingUid;
}
// clientUid contains the uid of the app that is responsible for this track, so we can blame
// battery usage on it.
mUid = clientUid;
// ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
+
+ size_t bufferSize = buffer == NULL ? roundup(frameCount) : frameCount;
+ // check overflow when computing bufferSize due to multiplication by mFrameSize.
+ if (bufferSize < frameCount // roundup rounds down for values above UINT_MAX / 2
+ || mFrameSize == 0 // format needs to be correct
+ || bufferSize > SIZE_MAX / mFrameSize) {
+ android_errorWriteLog(0x534e4554, "34749571");
+ return;
+ }
+ bufferSize *= mFrameSize;
+
size_t size = sizeof(audio_track_cblk_t);
- size_t bufferSize = (buffer == NULL ? roundup(frameCount) : frameCount) * mFrameSize;
if (buffer == NULL && alloc == ALLOC_CBLK) {
+ // check overflow when computing allocation size for streaming tracks.
+ if (size > SIZE_MAX - bufferSize) {
+ android_errorWriteLog(0x534e4554, "34749571");
+ return;
+ }
size += bufferSize;
}
@@ -126,9 +143,11 @@
return;
}
} else {
- // this syntax avoids calling the audio_track_cblk_t constructor twice
- mCblk = (audio_track_cblk_t *) new uint8_t[size];
- // assume mCblk != NULL
+ mCblk = (audio_track_cblk_t *) malloc(size);
+ if (mCblk == NULL) {
+ ALOGE("not enough memory for AudioTrack size=%zu", size);
+ return;
+ }
}
// construct the shared structure in-place.
@@ -218,12 +237,11 @@
dumpTee(-1, mTeeSource, mId);
#endif
// delete the proxy before deleting the shared memory it refers to, to avoid dangling reference
- delete mServerProxy;
+ mServerProxy.clear();
if (mCblk != NULL) {
+ mCblk->~audio_track_cblk_t(); // destroy our shared-structure.
if (mClient == 0) {
- delete mCblk;
- } else {
- mCblk->~audio_track_cblk_t(); // destroy our shared-structure.
+ free(mCblk);
}
}
mCblkMemory.clear(); // free the shared memory before releasing the heap it belongs to
@@ -311,6 +329,16 @@
return mTrack->setParameters(keyValuePairs);
}
+VolumeShaper::Status AudioFlinger::TrackHandle::applyVolumeShaper(
+ const sp<VolumeShaper::Configuration>& configuration,
+ const sp<VolumeShaper::Operation>& operation) {
+ return mTrack->applyVolumeShaper(configuration, operation);
+}
+
+sp<VolumeShaper::State> AudioFlinger::TrackHandle::getVolumeShaperState(int id) {
+ return mTrack->getVolumeShaperState(id);
+}
+
status_t AudioFlinger::TrackHandle::getTimestamp(AudioTimestamp& timestamp)
{
return mTrack->getTimestamp(timestamp);
@@ -342,14 +370,15 @@
void *buffer,
const sp<IMemory>& sharedBuffer,
audio_session_t sessionId,
- int uid,
+ uid_t uid,
audio_output_flags_t flags,
- track_type type)
+ track_type type,
+ audio_port_handle_t portId)
: TrackBase(thread, client, sampleRate, format, channelMask, frameCount,
(sharedBuffer != 0) ? sharedBuffer->pointer() : buffer,
sessionId, uid, true /*isOut*/,
(type == TYPE_PATCH) ? ( buffer == NULL ? ALLOC_LOCAL : ALLOC_NONE) : ALLOC_CBLK,
- type),
+ type, portId),
mFillingUpStatus(FS_INVALID),
// mRetryCount initialized later when needed
mSharedBuffer(sharedBuffer),
@@ -360,11 +389,10 @@
mAuxEffectId(0), mHasVolumeController(false),
mPresentationCompleteFrames(0),
mFrameMap(16 /* sink-frame-to-track-frame map memory */),
+ mVolumeHandler(new VolumeHandler(sampleRate)),
// mSinkTimestamp
mFastIndex(-1),
mCachedVolume(1.0),
- mIsInvalid(false),
- mAudioTrackServerProxy(NULL),
mResumeToStopping(false),
mFlushHwPending(false),
mFlags(flags)
@@ -383,6 +411,21 @@
mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
mFrameSize, !isExternalTrack(), sampleRate);
} else {
+ // Is the shared buffer of sufficient size?
+ // (frameCount * mFrameSize) is <= SIZE_MAX, checked in TrackBase.
+ if (sharedBuffer->size() < frameCount * mFrameSize) {
+ // Workaround: clear out mCblk to indicate track hasn't been properly created.
+ mCblk->~audio_track_cblk_t(); // destroy our shared-structure.
+ if (mClient == 0) {
+ free(mCblk);
+ }
+ mCblk = NULL;
+
+ mSharedBuffer.clear(); // release shared buffer early
+ android_errorWriteLog(0x534e4554, "38340117");
+ return;
+ }
+
mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
mFrameSize);
}
@@ -461,7 +504,7 @@
/*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
{
result.append(" Name Active Client Type Fmt Chn mask Session fCount S F SRate "
- "L dB R dB Server Main buf Aux Buf Flags UndFrmCnt\n");
+ "L dB R dB VS dB Server Main buf Aux buf Flags UndFrmCnt Flushed\n");
}
void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size, bool active)
@@ -527,8 +570,11 @@
nowInUnderrun = '?';
break;
}
- snprintf(&buffer[8], size-8, " %6s %6u %4u %08X %08X %7u %6zu %1c %1d %5u %5.2g %5.2g "
- "%08X %p %p 0x%03X %9u%c\n",
+
+ std::pair<float /* volume */, bool /* active */> vsVolume = mVolumeHandler->getLastVolume();
+ snprintf(&buffer[8], size - 8, " %6s %6u %4u %08X %08X %7u %6zu %1c %1d %5u "
+ "%5.2g %5.2g %5.2g%c "
+ "%08X %08zX %08zX 0x%03X %9u%c %7u\n",
active ? "yes" : "no",
(mClient == 0) ? getpid_cached : mClient->pid(),
mStreamType,
@@ -541,12 +587,15 @@
mAudioTrackServerProxy->getSampleRate(),
20.0 * log10(float_from_gain(gain_minifloat_unpack_left(vlr))),
20.0 * log10(float_from_gain(gain_minifloat_unpack_right(vlr))),
+ 20.0 * log10(vsVolume.first), // VolumeShaper(s) total volume
+ vsVolume.second ? 'A' : ' ', // if any VolumeShapers active
mCblk->mServer,
- mMainBuffer,
- mAuxBuffer,
+ (size_t)mMainBuffer, // use %zX as %p appends 0x
+ (size_t)mAuxBuffer, // use %zX as %p appends 0x
mCblk->mFlags,
mAudioTrackServerProxy->getUnderrunFrames(),
- nowInUnderrun);
+ nowInUnderrun,
+ (unsigned)mAudioTrackServerProxy->framesFlushed() % 10000000); // 7 digits
}
uint32_t AudioFlinger::PlaybackThread::Track::sampleRate() const {
@@ -563,7 +612,9 @@
status_t status = mServerProxy->obtainBuffer(&buf);
buffer->frameCount = buf.mFrameCount;
buffer->raw = buf.mRaw;
- if (buf.mFrameCount == 0) {
+ if (buf.mFrameCount == 0 && !isStopping() && !isStopped() && !isPaused()) {
+ ALOGV("underrun, framesReady(%zu) < framesDesired(%zd), state: %d",
+ buf.mFrameCount, desiredFrames, mState);
mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
} else {
mAudioTrackServerProxy->tallyUnderrunFrames(0);
@@ -872,6 +923,47 @@
}
}
+VolumeShaper::Status AudioFlinger::PlaybackThread::Track::applyVolumeShaper(
+ const sp<VolumeShaper::Configuration>& configuration,
+ const sp<VolumeShaper::Operation>& operation)
+{
+ sp<VolumeShaper::Configuration> newConfiguration;
+
+ if (isOffloadedOrDirect()) {
+ const VolumeShaper::Configuration::OptionFlag optionFlag
+ = configuration->getOptionFlags();
+ if ((optionFlag & VolumeShaper::Configuration::OPTION_FLAG_CLOCK_TIME) == 0) {
+ ALOGW("%s tracks do not support frame counted VolumeShaper,"
+ " using clock time instead", isOffloaded() ? "Offload" : "Direct");
+ newConfiguration = new VolumeShaper::Configuration(*configuration);
+ newConfiguration->setOptionFlags(
+ VolumeShaper::Configuration::OptionFlag(optionFlag
+ | VolumeShaper::Configuration::OPTION_FLAG_CLOCK_TIME));
+ }
+ }
+
+ VolumeShaper::Status status = mVolumeHandler->applyVolumeShaper(
+ (newConfiguration.get() != nullptr ? newConfiguration : configuration), operation);
+
+ if (isOffloadedOrDirect()) {
+ // Signal thread to fetch new volume.
+ sp<ThreadBase> thread = mThread.promote();
+ if (thread != 0) {
+ Mutex::Autolock _l(thread->mLock);
+ thread->broadcast_l();
+ }
+ }
+ return status;
+}
+
+sp<VolumeShaper::State> AudioFlinger::PlaybackThread::Track::getVolumeShaperState(int id)
+{
+ // Note: We don't check if Thread exists.
+
+ // mVolumeHandler is thread safe.
+ return mVolumeHandler->getVolumeShaperState(id);
+}
+
status_t AudioFlinger::PlaybackThread::Track::getTimestamp(AudioTimestamp& timestamp)
{
if (!isOffloaded() && !isDirect()) {
@@ -1040,8 +1132,8 @@
void AudioFlinger::PlaybackThread::Track::invalidate()
{
+ TrackBase::invalidate();
signalClientFlag(CBLK_INVALID);
- mIsInvalid = true;
}
void AudioFlinger::PlaybackThread::Track::disable()
@@ -1140,12 +1232,12 @@
audio_format_t format,
audio_channel_mask_t channelMask,
size_t frameCount,
- int uid)
+ uid_t uid)
: Track(playbackThread, NULL, AUDIO_STREAM_PATCH,
sampleRate, format, channelMask, frameCount,
NULL, 0, AUDIO_SESSION_NONE, uid, AUDIO_OUTPUT_FLAG_NONE,
TYPE_OUTPUT),
- mActive(false), mSourceThread(sourceThread), mClientProxy(NULL)
+ mActive(false), mSourceThread(sourceThread)
{
if (mCblk != NULL) {
@@ -1170,7 +1262,6 @@
AudioFlinger::PlaybackThread::OutputTrack::~OutputTrack()
{
clearBufferQueue();
- delete mClientProxy;
// superclass destructor will now delete the server proxy and shared memory both refer to
}
@@ -1477,15 +1568,16 @@
size_t frameCount,
void *buffer,
audio_session_t sessionId,
- int uid,
+ uid_t uid,
audio_input_flags_t flags,
- track_type type)
+ track_type type,
+ audio_port_handle_t portId)
: TrackBase(thread, client, sampleRate, format,
channelMask, frameCount, buffer, sessionId, uid, false /*isOut*/,
(type == TYPE_DEFAULT) ?
((flags & AUDIO_INPUT_FLAG_FAST) ? ALLOC_PIPE : ALLOC_CBLK) :
((buffer == NULL) ? ALLOC_LOCAL : ALLOC_NONE),
- type),
+ type, portId),
mOverflow(false),
mFramesToDrop(0),
mResamplerBufferProvider(NULL), // initialize in case of early constructor exit
@@ -1597,6 +1689,7 @@
void AudioFlinger::RecordThread::RecordTrack::invalidate()
{
+ TrackBase::invalidate();
// FIXME should use proxy, and needs work
audio_track_cblk_t* cblk = mCblk;
android_atomic_or(CBLK_INVALID, &cblk->mFlags);
@@ -1733,4 +1826,76 @@
mProxy->releaseBuffer(buffer);
}
+
+
+AudioFlinger::MmapThread::MmapTrack::MmapTrack(ThreadBase *thread,
+ uint32_t sampleRate,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ audio_session_t sessionId,
+ uid_t uid,
+ audio_port_handle_t portId)
+ : TrackBase(thread, NULL, sampleRate, format,
+ channelMask, 0, NULL, sessionId, uid, false,
+ ALLOC_NONE,
+ TYPE_DEFAULT, portId)
+{
+}
+
+AudioFlinger::MmapThread::MmapTrack::~MmapTrack()
+{
+}
+
+status_t AudioFlinger::MmapThread::MmapTrack::initCheck() const
+{
+ return NO_ERROR;
+}
+
+status_t AudioFlinger::MmapThread::MmapTrack::start(AudioSystem::sync_event_t event __unused,
+ audio_session_t triggerSession __unused)
+{
+ return NO_ERROR;
+}
+
+void AudioFlinger::MmapThread::MmapTrack::stop()
+{
+}
+
+// AudioBufferProvider interface
+status_t AudioFlinger::MmapThread::MmapTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer)
+{
+ buffer->frameCount = 0;
+ buffer->raw = nullptr;
+ return INVALID_OPERATION;
+}
+
+// ExtendedAudioBufferProvider interface
+size_t AudioFlinger::MmapThread::MmapTrack::framesReady() const {
+ return 0;
+}
+
+int64_t AudioFlinger::MmapThread::MmapTrack::framesReleased() const
+{
+ return 0;
+}
+
+void AudioFlinger::MmapThread::MmapTrack::onTimestamp(const ExtendedTimestamp ×tamp __unused)
+{
+}
+
+/*static*/ void AudioFlinger::MmapThread::MmapTrack::appendDumpHeader(String8& result)
+{
+ result.append(" Client Fmt Chn mask SRate\n");
+}
+
+void AudioFlinger::MmapThread::MmapTrack::dump(char* buffer, size_t size)
+{
+ snprintf(buffer, size, " %6u %3u %08X %5u\n",
+ mUid,
+ mFormat,
+ mChannelMask,
+ mSampleRate);
+
+}
+
} // namespace android
diff --git a/services/audioflinger/TypedLogger.cpp b/services/audioflinger/TypedLogger.cpp
new file mode 100644
index 0000000..e08f6f6
--- /dev/null
+++ b/services/audioflinger/TypedLogger.cpp
@@ -0,0 +1,27 @@
+/*
+ *
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioFlinger"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <pthread.h>
+#include "TypedLogger.h"
+
+namespace android {
+thread_local NBLog::Writer *logWriterTLS;
+}
diff --git a/services/audioflinger/TypedLogger.h b/services/audioflinger/TypedLogger.h
new file mode 100644
index 0000000..0b23c7c
--- /dev/null
+++ b/services/audioflinger/TypedLogger.h
@@ -0,0 +1,30 @@
+/*
+ *
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_TYPED_LOGGER_H
+#define ANDROID_TYPED_LOGGER_H
+
+#include <media/nbaio/NBLog.h>
+#define LOGT(fmt, ...) logWriterTLS->logFormat(fmt, ##__VA_ARGS__) // TODO: check null pointer
+
+namespace android {
+extern "C" {
+extern thread_local NBLog::Writer *logWriterTLS;
+}
+} // namespace android
+
+#endif // ANDROID_TYPED_LOGGER_H
diff --git a/services/audioflinger/test-resample.cpp b/services/audioflinger/test-resample.cpp
deleted file mode 100644
index bae3c5b..0000000
--- a/services/audioflinger/test-resample.cpp
+++ /dev/null
@@ -1,515 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <unistd.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <fcntl.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <errno.h>
-#include <inttypes.h>
-#include <time.h>
-#include <math.h>
-#include <audio_utils/primitives.h>
-#include <audio_utils/sndfile.h>
-#include <utils/Vector.h>
-#include <media/AudioBufferProvider.h>
-#include "AudioResampler.h"
-
-using namespace android;
-
-static bool gVerbose = false;
-
-static int usage(const char* name) {
- fprintf(stderr,"Usage: %s [-p] [-f] [-F] [-v] [-c channels]"
- " [-q {dq|lq|mq|hq|vhq|dlq|dmq|dhq}]"
- " [-i input-sample-rate] [-o output-sample-rate]"
- " [-O csv] [-P csv] [<input-file>]"
- " <output-file>\n", name);
- fprintf(stderr," -p enable profiling\n");
- fprintf(stderr," -f enable filter profiling\n");
- fprintf(stderr," -F enable floating point -q {dlq|dmq|dhq} only");
- fprintf(stderr," -v verbose : log buffer provider calls\n");
- fprintf(stderr," -c # channels (1-2 for lq|mq|hq; 1-8 for dlq|dmq|dhq)\n");
- fprintf(stderr," -q resampler quality\n");
- fprintf(stderr," dq : default quality\n");
- fprintf(stderr," lq : low quality\n");
- fprintf(stderr," mq : medium quality\n");
- fprintf(stderr," hq : high quality\n");
- fprintf(stderr," vhq : very high quality\n");
- fprintf(stderr," dlq : dynamic low quality\n");
- fprintf(stderr," dmq : dynamic medium quality\n");
- fprintf(stderr," dhq : dynamic high quality\n");
- fprintf(stderr," -i input file sample rate (ignored if input file is specified)\n");
- fprintf(stderr," -o output file sample rate\n");
- fprintf(stderr," -O # frames output per call to resample() in CSV format\n");
- fprintf(stderr," -P # frames provided per call to resample() in CSV format\n");
- return -1;
-}
-
-// Convert a list of integers in CSV format to a Vector of those values.
-// Returns the number of elements in the list, or -1 on error.
-int parseCSV(const char *string, Vector<int>& values)
-{
- // pass 1: count the number of values and do syntax check
- size_t numValues = 0;
- bool hadDigit = false;
- for (const char *p = string; ; ) {
- switch (*p++) {
- case '0': case '1': case '2': case '3': case '4':
- case '5': case '6': case '7': case '8': case '9':
- hadDigit = true;
- break;
- case '\0':
- if (hadDigit) {
- // pass 2: allocate and initialize vector of values
- values.resize(++numValues);
- values.editItemAt(0) = atoi(p = optarg);
- for (size_t i = 1; i < numValues; ) {
- if (*p++ == ',') {
- values.editItemAt(i++) = atoi(p);
- }
- }
- return numValues;
- }
- // fall through
- case ',':
- if (hadDigit) {
- hadDigit = false;
- numValues++;
- break;
- }
- // fall through
- default:
- return -1;
- }
- }
-}
-
-int main(int argc, char* argv[]) {
- const char* const progname = argv[0];
- bool profileResample = false;
- bool profileFilter = false;
- bool useFloat = false;
- int channels = 1;
- int input_freq = 0;
- int output_freq = 0;
- AudioResampler::src_quality quality = AudioResampler::DEFAULT_QUALITY;
- Vector<int> Ovalues;
- Vector<int> Pvalues;
-
- int ch;
- while ((ch = getopt(argc, argv, "pfFvc:q:i:o:O:P:")) != -1) {
- switch (ch) {
- case 'p':
- profileResample = true;
- break;
- case 'f':
- profileFilter = true;
- break;
- case 'F':
- useFloat = true;
- break;
- case 'v':
- gVerbose = true;
- break;
- case 'c':
- channels = atoi(optarg);
- break;
- case 'q':
- if (!strcmp(optarg, "dq"))
- quality = AudioResampler::DEFAULT_QUALITY;
- else if (!strcmp(optarg, "lq"))
- quality = AudioResampler::LOW_QUALITY;
- else if (!strcmp(optarg, "mq"))
- quality = AudioResampler::MED_QUALITY;
- else if (!strcmp(optarg, "hq"))
- quality = AudioResampler::HIGH_QUALITY;
- else if (!strcmp(optarg, "vhq"))
- quality = AudioResampler::VERY_HIGH_QUALITY;
- else if (!strcmp(optarg, "dlq"))
- quality = AudioResampler::DYN_LOW_QUALITY;
- else if (!strcmp(optarg, "dmq"))
- quality = AudioResampler::DYN_MED_QUALITY;
- else if (!strcmp(optarg, "dhq"))
- quality = AudioResampler::DYN_HIGH_QUALITY;
- else {
- usage(progname);
- return -1;
- }
- break;
- case 'i':
- input_freq = atoi(optarg);
- break;
- case 'o':
- output_freq = atoi(optarg);
- break;
- case 'O':
- if (parseCSV(optarg, Ovalues) < 0) {
- fprintf(stderr, "incorrect syntax for -O option\n");
- return -1;
- }
- break;
- case 'P':
- if (parseCSV(optarg, Pvalues) < 0) {
- fprintf(stderr, "incorrect syntax for -P option\n");
- return -1;
- }
- break;
- case '?':
- default:
- usage(progname);
- return -1;
- }
- }
-
- if (channels < 1
- || channels > (quality < AudioResampler::DYN_LOW_QUALITY ? 2 : 8)) {
- fprintf(stderr, "invalid number of audio channels %d\n", channels);
- return -1;
- }
- if (useFloat && quality < AudioResampler::DYN_LOW_QUALITY) {
- fprintf(stderr, "float processing is only possible for dynamic resamplers\n");
- return -1;
- }
-
- argc -= optind;
- argv += optind;
-
- const char* file_in = NULL;
- const char* file_out = NULL;
- if (argc == 1) {
- file_out = argv[0];
- } else if (argc == 2) {
- file_in = argv[0];
- file_out = argv[1];
- } else {
- usage(progname);
- return -1;
- }
-
- // ----------------------------------------------------------
-
- size_t input_size;
- void* input_vaddr;
- if (argc == 2) {
- SF_INFO info;
- info.format = 0;
- SNDFILE *sf = sf_open(file_in, SFM_READ, &info);
- if (sf == NULL) {
- perror(file_in);
- return EXIT_FAILURE;
- }
- input_size = info.frames * info.channels * sizeof(short);
- input_vaddr = malloc(input_size);
- (void) sf_readf_short(sf, (short *) input_vaddr, info.frames);
- sf_close(sf);
- channels = info.channels;
- input_freq = info.samplerate;
- } else {
- // data for testing is exactly (input sampling rate/1000)/2 seconds
- // so 44.1khz input is 22.05 seconds
- double k = 1000; // Hz / s
- double time = (input_freq / 2) / k;
- size_t input_frames = size_t(input_freq * time);
- input_size = channels * sizeof(int16_t) * input_frames;
- input_vaddr = malloc(input_size);
- int16_t* in = (int16_t*)input_vaddr;
- for (size_t i=0 ; i<input_frames ; i++) {
- double t = double(i) / input_freq;
- double y = sin(M_PI * k * t * t);
- int16_t yi = floor(y * 32767.0 + 0.5);
- for (int j = 0; j < channels; j++) {
- in[i*channels + j] = yi / (1 + j);
- }
- }
- }
- size_t input_framesize = channels * sizeof(int16_t);
- size_t input_frames = input_size / input_framesize;
-
- // For float processing, convert input int16_t to float array
- if (useFloat) {
- void *new_vaddr;
-
- input_framesize = channels * sizeof(float);
- input_size = input_frames * input_framesize;
- new_vaddr = malloc(input_size);
- memcpy_to_float_from_i16(reinterpret_cast<float*>(new_vaddr),
- reinterpret_cast<int16_t*>(input_vaddr), input_frames * channels);
- free(input_vaddr);
- input_vaddr = new_vaddr;
- }
-
- // ----------------------------------------------------------
-
- class Provider: public AudioBufferProvider {
- const void* mAddr; // base address
- const size_t mNumFrames; // total frames
- const size_t mFrameSize; // size of each frame in bytes
- size_t mNextFrame; // index of next frame to provide
- size_t mUnrel; // number of frames not yet released
- const Vector<int> mPvalues; // number of frames provided per call
- size_t mNextPidx; // index of next entry in mPvalues to use
- public:
- Provider(const void* addr, size_t frames, size_t frameSize, const Vector<int>& Pvalues)
- : mAddr(addr),
- mNumFrames(frames),
- mFrameSize(frameSize),
- mNextFrame(0), mUnrel(0), mPvalues(Pvalues), mNextPidx(0) {
- }
- virtual status_t getNextBuffer(Buffer* buffer) {
- size_t requestedFrames = buffer->frameCount;
- if (requestedFrames > mNumFrames - mNextFrame) {
- buffer->frameCount = mNumFrames - mNextFrame;
- }
- if (!mPvalues.isEmpty()) {
- size_t provided = mPvalues[mNextPidx++];
- printf("mPvalue[%zu]=%zu not %zu\n", mNextPidx-1, provided, buffer->frameCount);
- if (provided < buffer->frameCount) {
- buffer->frameCount = provided;
- }
- if (mNextPidx >= mPvalues.size()) {
- mNextPidx = 0;
- }
- }
- if (gVerbose) {
- printf("getNextBuffer() requested %zu frames out of %zu frames available,"
- " and returned %zu frames\n",
- requestedFrames, (size_t) (mNumFrames - mNextFrame), buffer->frameCount);
- }
- mUnrel = buffer->frameCount;
- if (buffer->frameCount > 0) {
- buffer->raw = (char *)mAddr + mFrameSize * mNextFrame;
- return NO_ERROR;
- } else {
- buffer->raw = NULL;
- return NOT_ENOUGH_DATA;
- }
- }
- virtual void releaseBuffer(Buffer* buffer) {
- if (buffer->frameCount > mUnrel) {
- fprintf(stderr, "ERROR releaseBuffer() released %zu frames but only %zu available "
- "to release\n", buffer->frameCount, mUnrel);
- mNextFrame += mUnrel;
- mUnrel = 0;
- } else {
- if (gVerbose) {
- printf("releaseBuffer() released %zu frames out of %zu frames available "
- "to release\n", buffer->frameCount, mUnrel);
- }
- mNextFrame += buffer->frameCount;
- mUnrel -= buffer->frameCount;
- }
- buffer->frameCount = 0;
- buffer->raw = NULL;
- }
- void reset() {
- mNextFrame = 0;
- }
- } provider(input_vaddr, input_frames, input_framesize, Pvalues);
-
- if (gVerbose) {
- printf("%zu input frames\n", input_frames);
- }
-
- audio_format_t format = useFloat ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
- int output_channels = channels > 2 ? channels : 2; // output is at least stereo samples
- size_t output_framesize = output_channels * (useFloat ? sizeof(float) : sizeof(int32_t));
- size_t output_frames = ((int64_t) input_frames * output_freq) / input_freq;
- size_t output_size = output_frames * output_framesize;
-
- if (profileFilter) {
- // Check how fast sample rate changes are that require filter changes.
- // The delta sample rate changes must indicate a downsampling ratio,
- // and must be larger than 10% changes.
- //
- // On fast devices, filters should be generated between 0.1ms - 1ms.
- // (single threaded).
- AudioResampler* resampler = AudioResampler::create(format, channels,
- 8000, quality);
- int looplimit = 100;
- timespec start, end;
- clock_gettime(CLOCK_MONOTONIC, &start);
- for (int i = 0; i < looplimit; ++i) {
- resampler->setSampleRate(9000);
- resampler->setSampleRate(12000);
- resampler->setSampleRate(20000);
- resampler->setSampleRate(30000);
- }
- clock_gettime(CLOCK_MONOTONIC, &end);
- int64_t start_ns = start.tv_sec * 1000000000LL + start.tv_nsec;
- int64_t end_ns = end.tv_sec * 1000000000LL + end.tv_nsec;
- int64_t time = end_ns - start_ns;
- printf("%.2f sample rate changes with filter calculation/sec\n",
- looplimit * 4 / (time / 1e9));
-
- // Check how fast sample rate changes are without filter changes.
- // This should be very fast, probably 0.1us - 1us per sample rate
- // change.
- resampler->setSampleRate(1000);
- looplimit = 1000;
- clock_gettime(CLOCK_MONOTONIC, &start);
- for (int i = 0; i < looplimit; ++i) {
- resampler->setSampleRate(1000+i);
- }
- clock_gettime(CLOCK_MONOTONIC, &end);
- start_ns = start.tv_sec * 1000000000LL + start.tv_nsec;
- end_ns = end.tv_sec * 1000000000LL + end.tv_nsec;
- time = end_ns - start_ns;
- printf("%.2f sample rate changes without filter calculation/sec\n",
- looplimit / (time / 1e9));
- resampler->reset();
- delete resampler;
- }
-
- void* output_vaddr = malloc(output_size);
- AudioResampler* resampler = AudioResampler::create(format, channels,
- output_freq, quality);
-
- resampler->setSampleRate(input_freq);
- resampler->setVolume(AudioResampler::UNITY_GAIN_FLOAT, AudioResampler::UNITY_GAIN_FLOAT);
-
- if (profileResample) {
- /*
- * For profiling on mobile devices, upon experimentation
- * it is better to run a few trials with a shorter loop limit,
- * and take the minimum time.
- *
- * Long tests can cause CPU temperature to build up and thermal throttling
- * to reduce CPU frequency.
- *
- * For frequency checks (index=0, or 1, etc.):
- * "cat /sys/devices/system/cpu/cpu${index}/cpufreq/scaling_*_freq"
- *
- * For temperature checks (index=0, or 1, etc.):
- * "cat /sys/class/thermal/thermal_zone${index}/temp"
- *
- * Another way to avoid thermal throttling is to fix the CPU frequency
- * at a lower level which prevents excessive temperatures.
- */
- const int trials = 4;
- const int looplimit = 4;
- timespec start, end;
- int64_t time = 0;
-
- for (int n = 0; n < trials; ++n) {
- clock_gettime(CLOCK_MONOTONIC, &start);
- for (int i = 0; i < looplimit; ++i) {
- resampler->resample((int*) output_vaddr, output_frames, &provider);
- provider.reset(); // during benchmarking reset only the provider
- }
- clock_gettime(CLOCK_MONOTONIC, &end);
- int64_t start_ns = start.tv_sec * 1000000000LL + start.tv_nsec;
- int64_t end_ns = end.tv_sec * 1000000000LL + end.tv_nsec;
- int64_t diff_ns = end_ns - start_ns;
- if (n == 0 || diff_ns < time) {
- time = diff_ns; // save the best out of our trials.
- }
- }
- // Mfrms/s is "Millions of output frames per second".
- printf("quality: %d channels: %d msec: %" PRId64 " Mfrms/s: %.2lf\n",
- quality, channels, time/1000000, output_frames * looplimit / (time / 1e9) / 1e6);
- resampler->reset();
-
- // TODO fix legacy bug: reset does not clear buffers.
- // delete and recreate resampler here.
- delete resampler;
- resampler = AudioResampler::create(format, channels,
- output_freq, quality);
- resampler->setSampleRate(input_freq);
- resampler->setVolume(AudioResampler::UNITY_GAIN_FLOAT, AudioResampler::UNITY_GAIN_FLOAT);
- }
-
- memset(output_vaddr, 0, output_size);
- if (gVerbose) {
- printf("resample() %zu output frames\n", output_frames);
- }
- if (Ovalues.isEmpty()) {
- Ovalues.push(output_frames);
- }
- for (size_t i = 0, j = 0; i < output_frames; ) {
- size_t thisFrames = Ovalues[j++];
- if (j >= Ovalues.size()) {
- j = 0;
- }
- if (thisFrames == 0 || thisFrames > output_frames - i) {
- thisFrames = output_frames - i;
- }
- resampler->resample((int*) output_vaddr + output_channels*i, thisFrames, &provider);
- i += thisFrames;
- }
- if (gVerbose) {
- printf("resample() complete\n");
- }
- resampler->reset();
- if (gVerbose) {
- printf("reset() complete\n");
- }
- delete resampler;
- resampler = NULL;
-
- // For float processing, convert output format from float to Q4.27,
- // which is then converted to int16_t for final storage.
- if (useFloat) {
- memcpy_to_q4_27_from_float(reinterpret_cast<int32_t*>(output_vaddr),
- reinterpret_cast<float*>(output_vaddr), output_frames * output_channels);
- }
-
- // mono takes left channel only (out of stereo output pair)
- // stereo and multichannel preserve all channels.
- int32_t* out = (int32_t*) output_vaddr;
- int16_t* convert = (int16_t*) malloc(output_frames * channels * sizeof(int16_t));
-
- const int volumeShift = 12; // shift requirement for Q4.27 to Q.15
- // round to half towards zero and saturate at int16 (non-dithered)
- const int roundVal = (1<<(volumeShift-1)) - 1; // volumePrecision > 0
-
- for (size_t i = 0; i < output_frames; i++) {
- for (int j = 0; j < channels; j++) {
- int32_t s = out[i * output_channels + j] + roundVal; // add offset here
- if (s < 0) {
- s = (s + 1) >> volumeShift; // round to 0
- if (s < -32768) {
- s = -32768;
- }
- } else {
- s = s >> volumeShift;
- if (s > 32767) {
- s = 32767;
- }
- }
- convert[i * channels + j] = int16_t(s);
- }
- }
-
- // write output to disk
- SF_INFO info;
- info.frames = 0;
- info.samplerate = output_freq;
- info.channels = channels;
- info.format = SF_FORMAT_WAV | SF_FORMAT_PCM_16;
- SNDFILE *sf = sf_open(file_out, SFM_WRITE, &info);
- if (sf == NULL) {
- perror(file_out);
- return EXIT_FAILURE;
- }
- (void) sf_writef_short(sf, convert, output_frames);
- sf_close(sf);
-
- return EXIT_SUCCESS;
-}
diff --git a/services/audioflinger/tests/Android.mk b/services/audioflinger/tests/Android.mk
deleted file mode 100644
index 3505e0f..0000000
--- a/services/audioflinger/tests/Android.mk
+++ /dev/null
@@ -1,68 +0,0 @@
-# Build the unit tests for audioflinger
-
-#
-# resampler unit test
-#
-LOCAL_PATH:= $(call my-dir)
-include $(CLEAR_VARS)
-
-LOCAL_SHARED_LIBRARIES := \
- liblog \
- libutils \
- libcutils \
- libaudioutils \
- libaudioresampler
-
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-utils) \
- frameworks/av/services/audioflinger
-
-LOCAL_SRC_FILES := \
- resampler_tests.cpp
-
-LOCAL_MODULE := resampler_tests
-LOCAL_MODULE_TAGS := tests
-
-LOCAL_CFLAGS := -Werror -Wall
-
-include $(BUILD_NATIVE_TEST)
-
-#
-# audio mixer test tool
-#
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= \
- test-mixer.cpp \
- ../AudioMixer.cpp.arm \
- ../BufferProviders.cpp
-
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-effects) \
- $(call include-path-for, audio-utils) \
- frameworks/av/services/audioflinger \
- external/sonic
-
-LOCAL_STATIC_LIBRARIES := \
- libsndfile
-
-LOCAL_SHARED_LIBRARIES := \
- libeffects \
- libnbaio \
- libaudioresampler \
- libaudioutils \
- libdl \
- libcutils \
- libutils \
- liblog \
- libsonic
-
-LOCAL_MODULE:= test-mixer
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_CXX_STL := libc++
-
-LOCAL_CFLAGS := -Werror -Wall
-
-include $(BUILD_EXECUTABLE)
diff --git a/services/audioflinger/tests/README b/services/audioflinger/tests/README
deleted file mode 100644
index 508e960..0000000
--- a/services/audioflinger/tests/README
+++ /dev/null
@@ -1,13 +0,0 @@
-For libsonic dependency:
-pushd external/sonic
-mm
-popd
-
-To build resampler library:
-pushd ..
-Optionally uncomment USE_NEON=false in Android.mk
-mm
-popd
-
-Then build here:
-mm
diff --git a/services/audioflinger/tests/build_and_run_all_unit_tests.sh b/services/audioflinger/tests/build_and_run_all_unit_tests.sh
deleted file mode 100755
index 7f4d456..0000000
--- a/services/audioflinger/tests/build_and_run_all_unit_tests.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-if [ -z "$ANDROID_BUILD_TOP" ]; then
- echo "Android build environment not set"
- exit -1
-fi
-
-# ensure we have mm
-. $ANDROID_BUILD_TOP/build/envsetup.sh
-
-pushd $ANDROID_BUILD_TOP/frameworks/av/services/audioflinger/
-pwd
-mm
-
-echo "waiting for device"
-adb root && adb wait-for-device remount
-adb push $OUT/system/lib/libaudioresampler.so /system/lib
-adb push $OUT/data/nativetest/resampler_tests /system/bin
-
-sh $ANDROID_BUILD_TOP/frameworks/av/services/audioflinger/tests/run_all_unit_tests.sh
-
-popd
diff --git a/services/audioflinger/tests/mixer_to_wav_tests.sh b/services/audioflinger/tests/mixer_to_wav_tests.sh
deleted file mode 100755
index d0482a1..0000000
--- a/services/audioflinger/tests/mixer_to_wav_tests.sh
+++ /dev/null
@@ -1,144 +0,0 @@
-#!/bin/bash
-#
-# This script uses test-mixer to generate WAV files
-# for evaluation of the AudioMixer component.
-#
-# Sine and chirp signals are used for input because they
-# show up as clear lines, either horizontal or diagonal,
-# on a spectrogram. This means easy verification of multiple
-# track mixing.
-#
-# After execution, look for created subdirectories like
-# mixer_i_i
-# mixer_i_f
-# mixer_f_f
-#
-# Recommend using a program such as audacity to evaluate
-# the output WAV files, e.g.
-#
-# cd testdir
-# audacity *.wav
-#
-# Using Audacity:
-#
-# Under "Waveform" view mode you can zoom into the
-# start of the WAV file to verify proper ramping.
-#
-# Select "Spectrogram" to see verify the lines
-# (sine = horizontal, chirp = diagonal) which should
-# be clear (except for around the start as the volume
-# ramping causes spectral distortion).
-
-if [ -z "$ANDROID_BUILD_TOP" ]; then
- echo "Android build environment not set"
- exit -1
-fi
-
-# ensure we have mm
-. $ANDROID_BUILD_TOP/build/envsetup.sh
-
-pushd $ANDROID_BUILD_TOP/frameworks/av/services/audioflinger/
-
-# build
-pwd
-mm
-
-# send to device
-echo "waiting for device"
-adb root && adb wait-for-device remount
-adb push $OUT/system/lib/libaudioresampler.so /system/lib
-adb push $OUT/system/bin/test-mixer /system/bin
-
-# createwav creates a series of WAV files testing various
-# mixer settings
-# $1 = flags
-# $2 = directory
-function createwav() {
-# create directory if it doesn't exist
- if [ ! -d $2 ]; then
- mkdir $2
- fi
-
-# Test:
-# process__genericResampling with mixed integer and float track input
-# track__Resample / track__genericResample
- adb shell test-mixer $1 -s 48000 \
- -o /sdcard/tm48000grif.wav \
- sine:2,4000,7520 chirp:2,9200 sine:1,3000,18000 \
- sine:f,6,6000,19000 chirp:i,4,30000
- adb pull /sdcard/tm48000grif.wav $2
-
-# Test:
-# process__genericResampling
-# track__Resample / track__genericResample
- adb shell test-mixer $1 -s 48000 \
- -o /sdcard/tm48000gr.wav \
- sine:2,4000,7520 chirp:2,9200 sine:1,3000,18000 \
- sine:6,6000,19000
- adb pull /sdcard/tm48000gr.wav $2
-
-# Test:
-# process__genericResample
-# track__Resample / track__genericResample
-# track__NoResample / track__16BitsStereo / track__16BitsMono
-# Aux buffer
- adb shell test-mixer $1 -c 5 -s 9307 \
- -a /sdcard/aux9307gra.wav -o /sdcard/tm9307gra.wav \
- sine:4,1000,3000 sine:1,2000,9307 chirp:3,9307
- adb pull /sdcard/tm9307gra.wav $2
- adb pull /sdcard/aux9307gra.wav $2
-
-# Test:
-# process__genericNoResampling
-# track__NoResample / track__16BitsStereo / track__16BitsMono
- adb shell test-mixer $1 -s 32000 \
- -o /sdcard/tm32000gnr.wav \
- sine:2,1000,32000 chirp:2,32000 sine:1,3000,32000
- adb pull /sdcard/tm32000gnr.wav $2
-
-# Test:
-# process__genericNoResampling
-# track__NoResample / track__16BitsStereo / track__16BitsMono
-# Aux buffer
- adb shell test-mixer $1 -s 32000 \
- -a /sdcard/aux32000gnra.wav -o /sdcard/tm32000gnra.wav \
- sine:2,1000,32000 chirp:2,32000 sine:1,3000,32000
- adb pull /sdcard/tm32000gnra.wav $2
- adb pull /sdcard/aux32000gnra.wav $2
-
-# Test:
-# process__NoResampleOneTrack / process__OneTrack16BitsStereoNoResampling
-# Downmixer
- adb shell test-mixer $1 -s 32000 \
- -o /sdcard/tm32000nrot.wav \
- sine:6,1000,32000
- adb pull /sdcard/tm32000nrot.wav $2
-
-# Test:
-# process__NoResampleOneTrack / OneTrack16BitsStereoNoResampling
-# Aux buffer
- adb shell test-mixer $1 -s 44100 \
- -a /sdcard/aux44100nrota.wav -o /sdcard/tm44100nrota.wav \
- sine:2,2000,44100
- adb pull /sdcard/tm44100nrota.wav $2
- adb pull /sdcard/aux44100nrota.wav $2
-}
-
-#
-# Call createwav to generate WAV files in various combinations
-#
-# i_i = integer input track, integer mixer output
-# f_f = float input track, float mixer output
-# i_f = integer input track, float_mixer output
-#
-# If the mixer output is float, then the output WAV file is pcm float.
-#
-# TODO: create a "snr" like "diff" to automatically
-# compare files in these directories together.
-#
-
-createwav "" "tests/mixer_i_i"
-createwav "-f -m" "tests/mixer_f_f"
-createwav "-m" "tests/mixer_i_f"
-
-popd
diff --git a/services/audioflinger/tests/resampler_tests.cpp b/services/audioflinger/tests/resampler_tests.cpp
deleted file mode 100644
index b0d384d..0000000
--- a/services/audioflinger/tests/resampler_tests.cpp
+++ /dev/null
@@ -1,417 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "audioflinger_resampler_tests"
-
-#include <errno.h>
-#include <fcntl.h>
-#include <math.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <time.h>
-#include <unistd.h>
-
-#include <iostream>
-#include <utility>
-#include <vector>
-
-#include <gtest/gtest.h>
-#include <log/log.h>
-#include <media/AudioBufferProvider.h>
-
-#include "AudioResampler.h"
-#include "test_utils.h"
-
-void resample(int channels, void *output,
- size_t outputFrames, const std::vector<size_t> &outputIncr,
- android::AudioBufferProvider *provider, android::AudioResampler *resampler)
-{
- for (size_t i = 0, j = 0; i < outputFrames; ) {
- size_t thisFrames = outputIncr[j++];
- if (j >= outputIncr.size()) {
- j = 0;
- }
- if (thisFrames == 0 || thisFrames > outputFrames - i) {
- thisFrames = outputFrames - i;
- }
- size_t framesResampled = resampler->resample(
- (int32_t*) output + channels*i, thisFrames, provider);
- // we should have enough buffer space, so there is no short count.
- ASSERT_EQ(thisFrames, framesResampled);
- i += thisFrames;
- }
-}
-
-void buffercmp(const void *reference, const void *test,
- size_t outputFrameSize, size_t outputFrames)
-{
- for (size_t i = 0; i < outputFrames; ++i) {
- int check = memcmp((const char*)reference + i * outputFrameSize,
- (const char*)test + i * outputFrameSize, outputFrameSize);
- if (check) {
- ALOGE("Failure at frame %zu", i);
- ASSERT_EQ(check, 0); /* fails */
- }
- }
-}
-
-void testBufferIncrement(size_t channels, bool useFloat,
- unsigned inputFreq, unsigned outputFreq,
- enum android::AudioResampler::src_quality quality)
-{
- const audio_format_t format = useFloat ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
- // create the provider
- std::vector<int> inputIncr;
- SignalProvider provider;
- if (useFloat) {
- provider.setChirp<float>(channels,
- 0., outputFreq/2., outputFreq, outputFreq/2000.);
- } else {
- provider.setChirp<int16_t>(channels,
- 0., outputFreq/2., outputFreq, outputFreq/2000.);
- }
- provider.setIncr(inputIncr);
-
- // calculate the output size
- size_t outputFrames = ((int64_t) provider.getNumFrames() * outputFreq) / inputFreq;
- size_t outputFrameSize = channels * (useFloat ? sizeof(float) : sizeof(int32_t));
- size_t outputSize = outputFrameSize * outputFrames;
- outputSize &= ~7;
-
- // create the resampler
- android::AudioResampler* resampler;
-
- resampler = android::AudioResampler::create(format, channels, outputFreq, quality);
- resampler->setSampleRate(inputFreq);
- resampler->setVolume(android::AudioResampler::UNITY_GAIN_FLOAT,
- android::AudioResampler::UNITY_GAIN_FLOAT);
-
- // set up the reference run
- std::vector<size_t> refIncr;
- refIncr.push_back(outputFrames);
- void* reference = malloc(outputSize);
- resample(channels, reference, outputFrames, refIncr, &provider, resampler);
-
- provider.reset();
-
-#if 0
- /* this test will fail - API interface issue: reset() does not clear internal buffers */
- resampler->reset();
-#else
- delete resampler;
- resampler = android::AudioResampler::create(format, channels, outputFreq, quality);
- resampler->setSampleRate(inputFreq);
- resampler->setVolume(android::AudioResampler::UNITY_GAIN_FLOAT,
- android::AudioResampler::UNITY_GAIN_FLOAT);
-#endif
-
- // set up the test run
- std::vector<size_t> outIncr;
- outIncr.push_back(1);
- outIncr.push_back(2);
- outIncr.push_back(3);
- void* test = malloc(outputSize);
- inputIncr.push_back(1);
- inputIncr.push_back(3);
- provider.setIncr(inputIncr);
- resample(channels, test, outputFrames, outIncr, &provider, resampler);
-
- // check
- buffercmp(reference, test, outputFrameSize, outputFrames);
-
- free(reference);
- free(test);
- delete resampler;
-}
-
-template <typename T>
-inline double sqr(T v)
-{
- double dv = static_cast<double>(v);
- return dv * dv;
-}
-
-template <typename T>
-double signalEnergy(T *start, T *end, unsigned stride)
-{
- double accum = 0;
-
- for (T *p = start; p < end; p += stride) {
- accum += sqr(*p);
- }
- unsigned count = (end - start + stride - 1) / stride;
- return accum / count;
-}
-
-// TI = resampler input type, int16_t or float
-// TO = resampler output type, int32_t or float
-template <typename TI, typename TO>
-void testStopbandDownconversion(size_t channels,
- unsigned inputFreq, unsigned outputFreq,
- unsigned passband, unsigned stopband,
- enum android::AudioResampler::src_quality quality)
-{
- // create the provider
- std::vector<int> inputIncr;
- SignalProvider provider;
- provider.setChirp<TI>(channels,
- 0., inputFreq/2., inputFreq, inputFreq/2000.);
- provider.setIncr(inputIncr);
-
- // calculate the output size
- size_t outputFrames = ((int64_t) provider.getNumFrames() * outputFreq) / inputFreq;
- size_t outputFrameSize = channels * sizeof(TO);
- size_t outputSize = outputFrameSize * outputFrames;
- outputSize &= ~7;
-
- // create the resampler
- android::AudioResampler* resampler;
-
- resampler = android::AudioResampler::create(
- is_same<TI, int16_t>::value ? AUDIO_FORMAT_PCM_16_BIT : AUDIO_FORMAT_PCM_FLOAT,
- channels, outputFreq, quality);
- resampler->setSampleRate(inputFreq);
- resampler->setVolume(android::AudioResampler::UNITY_GAIN_FLOAT,
- android::AudioResampler::UNITY_GAIN_FLOAT);
-
- // set up the reference run
- std::vector<size_t> refIncr;
- refIncr.push_back(outputFrames);
- void* reference = malloc(outputSize);
- resample(channels, reference, outputFrames, refIncr, &provider, resampler);
-
- TO *out = reinterpret_cast<TO *>(reference);
-
- // check signal energy in passband
- const unsigned passbandFrame = passband * outputFreq / 1000.;
- const unsigned stopbandFrame = stopband * outputFreq / 1000.;
-
- // check each channel separately
- for (size_t i = 0; i < channels; ++i) {
- double passbandEnergy = signalEnergy(out, out + passbandFrame * channels, channels);
- double stopbandEnergy = signalEnergy(out + stopbandFrame * channels,
- out + outputFrames * channels, channels);
- double dbAtten = -10. * log10(stopbandEnergy / passbandEnergy);
- ASSERT_GT(dbAtten, 60.);
-
-#if 0
- // internal verification
- printf("if:%d of:%d pbf:%d sbf:%d sbe: %f pbe: %f db: %.2f\n",
- provider.getNumFrames(), outputFrames,
- passbandFrame, stopbandFrame, stopbandEnergy, passbandEnergy, dbAtten);
- for (size_t i = 0; i < 10; ++i) {
- std::cout << out[i+passbandFrame*channels] << std::endl;
- }
- for (size_t i = 0; i < 10; ++i) {
- std::cout << out[i+stopbandFrame*channels] << std::endl;
- }
-#endif
- }
-
- free(reference);
- delete resampler;
-}
-
-/* Buffer increment test
- *
- * We compare a reference output, where we consume and process the entire
- * buffer at a time, and a test output, where we provide small chunks of input
- * data and process small chunks of output (which may not be equivalent in size).
- *
- * Two subtests - fixed phase (3:2 down) and interpolated phase (147:320 up)
- */
-TEST(audioflinger_resampler, bufferincrement_fixedphase) {
- // all of these work
- static const enum android::AudioResampler::src_quality kQualityArray[] = {
- android::AudioResampler::LOW_QUALITY,
- android::AudioResampler::MED_QUALITY,
- android::AudioResampler::HIGH_QUALITY,
- android::AudioResampler::VERY_HIGH_QUALITY,
- android::AudioResampler::DYN_LOW_QUALITY,
- android::AudioResampler::DYN_MED_QUALITY,
- android::AudioResampler::DYN_HIGH_QUALITY,
- };
-
- for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
- testBufferIncrement(2, false, 48000, 32000, kQualityArray[i]);
- }
-}
-
-TEST(audioflinger_resampler, bufferincrement_interpolatedphase) {
- // all of these work except low quality
- static const enum android::AudioResampler::src_quality kQualityArray[] = {
-// android::AudioResampler::LOW_QUALITY,
- android::AudioResampler::MED_QUALITY,
- android::AudioResampler::HIGH_QUALITY,
- android::AudioResampler::VERY_HIGH_QUALITY,
- android::AudioResampler::DYN_LOW_QUALITY,
- android::AudioResampler::DYN_MED_QUALITY,
- android::AudioResampler::DYN_HIGH_QUALITY,
- };
-
- for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
- testBufferIncrement(2, false, 22050, 48000, kQualityArray[i]);
- }
-}
-
-TEST(audioflinger_resampler, bufferincrement_fixedphase_multi) {
- // only dynamic quality
- static const enum android::AudioResampler::src_quality kQualityArray[] = {
- android::AudioResampler::DYN_LOW_QUALITY,
- android::AudioResampler::DYN_MED_QUALITY,
- android::AudioResampler::DYN_HIGH_QUALITY,
- };
-
- for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
- testBufferIncrement(4, false, 48000, 32000, kQualityArray[i]);
- }
-}
-
-TEST(audioflinger_resampler, bufferincrement_interpolatedphase_multi_float) {
- // only dynamic quality
- static const enum android::AudioResampler::src_quality kQualityArray[] = {
- android::AudioResampler::DYN_LOW_QUALITY,
- android::AudioResampler::DYN_MED_QUALITY,
- android::AudioResampler::DYN_HIGH_QUALITY,
- };
-
- for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
- testBufferIncrement(8, true, 22050, 48000, kQualityArray[i]);
- }
-}
-
-/* Simple aliasing test
- *
- * This checks stopband response of the chirp signal to make sure frequencies
- * are properly suppressed. It uses downsampling because the stopband can be
- * clearly isolated by input frequencies exceeding the output sample rate (nyquist).
- */
-TEST(audioflinger_resampler, stopbandresponse_integer) {
- // not all of these may work (old resamplers fail on downsampling)
- static const enum android::AudioResampler::src_quality kQualityArray[] = {
- //android::AudioResampler::LOW_QUALITY,
- //android::AudioResampler::MED_QUALITY,
- //android::AudioResampler::HIGH_QUALITY,
- //android::AudioResampler::VERY_HIGH_QUALITY,
- android::AudioResampler::DYN_LOW_QUALITY,
- android::AudioResampler::DYN_MED_QUALITY,
- android::AudioResampler::DYN_HIGH_QUALITY,
- };
-
- // in this test we assume a maximum transition band between 12kHz and 20kHz.
- // there must be at least 60dB relative attenuation between stopband and passband.
- for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
- testStopbandDownconversion<int16_t, int32_t>(
- 2, 48000, 32000, 12000, 20000, kQualityArray[i]);
- }
-
- // in this test we assume a maximum transition band between 7kHz and 15kHz.
- // there must be at least 60dB relative attenuation between stopband and passband.
- // (the weird ratio triggers interpolative resampling)
- for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
- testStopbandDownconversion<int16_t, int32_t>(
- 2, 48000, 22101, 7000, 15000, kQualityArray[i]);
- }
-}
-
-TEST(audioflinger_resampler, stopbandresponse_integer_multichannel) {
- // not all of these may work (old resamplers fail on downsampling)
- static const enum android::AudioResampler::src_quality kQualityArray[] = {
- //android::AudioResampler::LOW_QUALITY,
- //android::AudioResampler::MED_QUALITY,
- //android::AudioResampler::HIGH_QUALITY,
- //android::AudioResampler::VERY_HIGH_QUALITY,
- android::AudioResampler::DYN_LOW_QUALITY,
- android::AudioResampler::DYN_MED_QUALITY,
- android::AudioResampler::DYN_HIGH_QUALITY,
- };
-
- // in this test we assume a maximum transition band between 12kHz and 20kHz.
- // there must be at least 60dB relative attenuation between stopband and passband.
- for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
- testStopbandDownconversion<int16_t, int32_t>(
- 8, 48000, 32000, 12000, 20000, kQualityArray[i]);
- }
-
- // in this test we assume a maximum transition band between 7kHz and 15kHz.
- // there must be at least 60dB relative attenuation between stopband and passband.
- // (the weird ratio triggers interpolative resampling)
- for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
- testStopbandDownconversion<int16_t, int32_t>(
- 8, 48000, 22101, 7000, 15000, kQualityArray[i]);
- }
-}
-
-TEST(audioflinger_resampler, stopbandresponse_float) {
- // not all of these may work (old resamplers fail on downsampling)
- static const enum android::AudioResampler::src_quality kQualityArray[] = {
- //android::AudioResampler::LOW_QUALITY,
- //android::AudioResampler::MED_QUALITY,
- //android::AudioResampler::HIGH_QUALITY,
- //android::AudioResampler::VERY_HIGH_QUALITY,
- android::AudioResampler::DYN_LOW_QUALITY,
- android::AudioResampler::DYN_MED_QUALITY,
- android::AudioResampler::DYN_HIGH_QUALITY,
- };
-
- // in this test we assume a maximum transition band between 12kHz and 20kHz.
- // there must be at least 60dB relative attenuation between stopband and passband.
- for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
- testStopbandDownconversion<float, float>(
- 2, 48000, 32000, 12000, 20000, kQualityArray[i]);
- }
-
- // in this test we assume a maximum transition band between 7kHz and 15kHz.
- // there must be at least 60dB relative attenuation between stopband and passband.
- // (the weird ratio triggers interpolative resampling)
- for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
- testStopbandDownconversion<float, float>(
- 2, 48000, 22101, 7000, 15000, kQualityArray[i]);
- }
-}
-
-TEST(audioflinger_resampler, stopbandresponse_float_multichannel) {
- // not all of these may work (old resamplers fail on downsampling)
- static const enum android::AudioResampler::src_quality kQualityArray[] = {
- //android::AudioResampler::LOW_QUALITY,
- //android::AudioResampler::MED_QUALITY,
- //android::AudioResampler::HIGH_QUALITY,
- //android::AudioResampler::VERY_HIGH_QUALITY,
- android::AudioResampler::DYN_LOW_QUALITY,
- android::AudioResampler::DYN_MED_QUALITY,
- android::AudioResampler::DYN_HIGH_QUALITY,
- };
-
- // in this test we assume a maximum transition band between 12kHz and 20kHz.
- // there must be at least 60dB relative attenuation between stopband and passband.
- for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
- testStopbandDownconversion<float, float>(
- 8, 48000, 32000, 12000, 20000, kQualityArray[i]);
- }
-
- // in this test we assume a maximum transition band between 7kHz and 15kHz.
- // there must be at least 60dB relative attenuation between stopband and passband.
- // (the weird ratio triggers interpolative resampling)
- for (size_t i = 0; i < ARRAY_SIZE(kQualityArray); ++i) {
- testStopbandDownconversion<float, float>(
- 8, 48000, 22101, 7000, 15000, kQualityArray[i]);
- }
-}
-
diff --git a/services/audioflinger/tests/run_all_unit_tests.sh b/services/audioflinger/tests/run_all_unit_tests.sh
deleted file mode 100755
index 113f39e..0000000
--- a/services/audioflinger/tests/run_all_unit_tests.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-if [ -z "$ANDROID_BUILD_TOP" ]; then
- echo "Android build environment not set"
- exit -1
-fi
-
-echo "waiting for device"
-adb root && adb wait-for-device remount
-
-#adb shell /system/bin/resampler_tests
-adb shell /data/nativetest/resampler_tests/resampler_tests
diff --git a/services/audioflinger/tests/test-mixer.cpp b/services/audioflinger/tests/test-mixer.cpp
deleted file mode 100644
index 65e22da..0000000
--- a/services/audioflinger/tests/test-mixer.cpp
+++ /dev/null
@@ -1,328 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <stdio.h>
-#include <inttypes.h>
-#include <math.h>
-#include <vector>
-#include <audio_utils/primitives.h>
-#include <audio_utils/sndfile.h>
-#include <media/AudioBufferProvider.h>
-#include "AudioMixer.h"
-#include "test_utils.h"
-
-/* Testing is typically through creation of an output WAV file from several
- * source inputs, to be later analyzed by an audio program such as Audacity.
- *
- * Sine or chirp functions are typically more useful as input to the mixer
- * as they show up as straight lines on a spectrogram if successfully mixed.
- *
- * A sample shell script is provided: mixer_to_wave_tests.sh
- */
-
-using namespace android;
-
-static void usage(const char* name) {
- fprintf(stderr, "Usage: %s [-f] [-m] [-c channels]"
- " [-s sample-rate] [-o <output-file>] [-a <aux-buffer-file>] [-P csv]"
- " (<input-file> | <command>)+\n", name);
- fprintf(stderr, " -f enable floating point input track by default\n");
- fprintf(stderr, " -m enable floating point mixer output\n");
- fprintf(stderr, " -c number of mixer output channels\n");
- fprintf(stderr, " -s mixer sample-rate\n");
- fprintf(stderr, " -o <output-file> WAV file, pcm16 (or float if -m specified)\n");
- fprintf(stderr, " -a <aux-buffer-file>\n");
- fprintf(stderr, " -P # frames provided per call to resample() in CSV format\n");
- fprintf(stderr, " <input-file> is a WAV file\n");
- fprintf(stderr, " <command> can be 'sine:[(i|f),]<channels>,<frequency>,<samplerate>'\n");
- fprintf(stderr, " 'chirp:[(i|f),]<channels>,<samplerate>'\n");
-}
-
-static int writeFile(const char *filename, const void *buffer,
- uint32_t sampleRate, uint32_t channels, size_t frames, bool isBufferFloat) {
- if (filename == NULL) {
- return 0; // ok to pass in NULL filename
- }
- // write output to file.
- SF_INFO info;
- info.frames = 0;
- info.samplerate = sampleRate;
- info.channels = channels;
- info.format = SF_FORMAT_WAV | (isBufferFloat ? SF_FORMAT_FLOAT : SF_FORMAT_PCM_16);
- printf("saving file:%s channels:%u samplerate:%u frames:%zu\n",
- filename, info.channels, info.samplerate, frames);
- SNDFILE *sf = sf_open(filename, SFM_WRITE, &info);
- if (sf == NULL) {
- perror(filename);
- return EXIT_FAILURE;
- }
- if (isBufferFloat) {
- (void) sf_writef_float(sf, (float*)buffer, frames);
- } else {
- (void) sf_writef_short(sf, (short*)buffer, frames);
- }
- sf_close(sf);
- return EXIT_SUCCESS;
-}
-
-const char *parseFormat(const char *s, bool *useFloat) {
- if (!strncmp(s, "f,", 2)) {
- *useFloat = true;
- return s + 2;
- }
- if (!strncmp(s, "i,", 2)) {
- *useFloat = false;
- return s + 2;
- }
- return s;
-}
-
-int main(int argc, char* argv[]) {
- const char* const progname = argv[0];
- bool useInputFloat = false;
- bool useMixerFloat = false;
- bool useRamp = true;
- uint32_t outputSampleRate = 48000;
- uint32_t outputChannels = 2; // stereo for now
- std::vector<int> Pvalues;
- const char* outputFilename = NULL;
- const char* auxFilename = NULL;
- std::vector<int32_t> names;
- std::vector<SignalProvider> providers;
- std::vector<audio_format_t> formats;
-
- for (int ch; (ch = getopt(argc, argv, "fmc:s:o:a:P:")) != -1;) {
- switch (ch) {
- case 'f':
- useInputFloat = true;
- break;
- case 'm':
- useMixerFloat = true;
- break;
- case 'c':
- outputChannels = atoi(optarg);
- break;
- case 's':
- outputSampleRate = atoi(optarg);
- break;
- case 'o':
- outputFilename = optarg;
- break;
- case 'a':
- auxFilename = optarg;
- break;
- case 'P':
- if (parseCSV(optarg, Pvalues) < 0) {
- fprintf(stderr, "incorrect syntax for -P option\n");
- return EXIT_FAILURE;
- }
- break;
- case '?':
- default:
- usage(progname);
- return EXIT_FAILURE;
- }
- }
- argc -= optind;
- argv += optind;
-
- if (argc == 0) {
- usage(progname);
- return EXIT_FAILURE;
- }
- if ((unsigned)argc > AudioMixer::MAX_NUM_TRACKS) {
- fprintf(stderr, "too many tracks: %d > %u", argc, AudioMixer::MAX_NUM_TRACKS);
- return EXIT_FAILURE;
- }
-
- size_t outputFrames = 0;
-
- // create providers for each track
- names.resize(argc);
- providers.resize(argc);
- formats.resize(argc);
- for (int i = 0; i < argc; ++i) {
- static const char chirp[] = "chirp:";
- static const char sine[] = "sine:";
- static const double kSeconds = 1;
- bool useFloat = useInputFloat;
-
- if (!strncmp(argv[i], chirp, strlen(chirp))) {
- std::vector<int> v;
- const char *s = parseFormat(argv[i] + strlen(chirp), &useFloat);
-
- parseCSV(s, v);
- if (v.size() == 2) {
- printf("creating chirp(%d %d)\n", v[0], v[1]);
- if (useFloat) {
- providers[i].setChirp<float>(v[0], 0, v[1]/2, v[1], kSeconds);
- formats[i] = AUDIO_FORMAT_PCM_FLOAT;
- } else {
- providers[i].setChirp<int16_t>(v[0], 0, v[1]/2, v[1], kSeconds);
- formats[i] = AUDIO_FORMAT_PCM_16_BIT;
- }
- providers[i].setIncr(Pvalues);
- } else {
- fprintf(stderr, "malformed input '%s'\n", argv[i]);
- }
- } else if (!strncmp(argv[i], sine, strlen(sine))) {
- std::vector<int> v;
- const char *s = parseFormat(argv[i] + strlen(sine), &useFloat);
-
- parseCSV(s, v);
- if (v.size() == 3) {
- printf("creating sine(%d %d %d)\n", v[0], v[1], v[2]);
- if (useFloat) {
- providers[i].setSine<float>(v[0], v[1], v[2], kSeconds);
- formats[i] = AUDIO_FORMAT_PCM_FLOAT;
- } else {
- providers[i].setSine<int16_t>(v[0], v[1], v[2], kSeconds);
- formats[i] = AUDIO_FORMAT_PCM_16_BIT;
- }
- providers[i].setIncr(Pvalues);
- } else {
- fprintf(stderr, "malformed input '%s'\n", argv[i]);
- }
- } else {
- printf("creating filename(%s)\n", argv[i]);
- if (useInputFloat) {
- providers[i].setFile<float>(argv[i]);
- formats[i] = AUDIO_FORMAT_PCM_FLOAT;
- } else {
- providers[i].setFile<short>(argv[i]);
- formats[i] = AUDIO_FORMAT_PCM_16_BIT;
- }
- providers[i].setIncr(Pvalues);
- }
- // calculate the number of output frames
- size_t nframes = (int64_t) providers[i].getNumFrames() * outputSampleRate
- / providers[i].getSampleRate();
- if (i == 0 || outputFrames > nframes) { // choose minimum for outputFrames
- outputFrames = nframes;
- }
- }
-
- // create the output buffer.
- const size_t outputFrameSize = outputChannels
- * (useMixerFloat ? sizeof(float) : sizeof(int16_t));
- const size_t outputSize = outputFrames * outputFrameSize;
- const audio_channel_mask_t outputChannelMask =
- audio_channel_out_mask_from_count(outputChannels);
- void *outputAddr = NULL;
- (void) posix_memalign(&outputAddr, 32, outputSize);
- memset(outputAddr, 0, outputSize);
-
- // create the aux buffer, if needed.
- const size_t auxFrameSize = sizeof(int32_t); // Q4.27 always
- const size_t auxSize = outputFrames * auxFrameSize;
- void *auxAddr = NULL;
- if (auxFilename) {
- (void) posix_memalign(&auxAddr, 32, auxSize);
- memset(auxAddr, 0, auxSize);
- }
-
- // create the mixer.
- const size_t mixerFrameCount = 320; // typical numbers may range from 240 or 960
- AudioMixer *mixer = new AudioMixer(mixerFrameCount, outputSampleRate);
- audio_format_t mixerFormat = useMixerFloat
- ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
- float f = AudioMixer::UNITY_GAIN_FLOAT / providers.size(); // normalize volume by # tracks
- static float f0; // zero
-
- // set up the tracks.
- for (size_t i = 0; i < providers.size(); ++i) {
- //printf("track %d out of %d\n", i, providers.size());
- uint32_t channelMask = audio_channel_out_mask_from_count(providers[i].getNumChannels());
- int32_t name = mixer->getTrackName(channelMask,
- formats[i], AUDIO_SESSION_OUTPUT_MIX);
- ALOG_ASSERT(name >= 0);
- names[i] = name;
- mixer->setBufferProvider(name, &providers[i]);
- mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
- (void *)outputAddr);
- mixer->setParameter(
- name,
- AudioMixer::TRACK,
- AudioMixer::MIXER_FORMAT,
- (void *)(uintptr_t)mixerFormat);
- mixer->setParameter(
- name,
- AudioMixer::TRACK,
- AudioMixer::FORMAT,
- (void *)(uintptr_t)formats[i]);
- mixer->setParameter(
- name,
- AudioMixer::TRACK,
- AudioMixer::MIXER_CHANNEL_MASK,
- (void *)(uintptr_t)outputChannelMask);
- mixer->setParameter(
- name,
- AudioMixer::TRACK,
- AudioMixer::CHANNEL_MASK,
- (void *)(uintptr_t)channelMask);
- mixer->setParameter(
- name,
- AudioMixer::RESAMPLE,
- AudioMixer::SAMPLE_RATE,
- (void *)(uintptr_t)providers[i].getSampleRate());
- if (useRamp) {
- mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &f0);
- mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &f0);
- mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::VOLUME0, &f);
- mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::VOLUME1, &f);
- } else {
- mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &f);
- mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &f);
- }
- if (auxFilename) {
- mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::AUX_BUFFER,
- (void *) auxAddr);
- mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::AUXLEVEL, &f0);
- mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::AUXLEVEL, &f);
- }
- mixer->enable(name);
- }
-
- // pump the mixer to process data.
- size_t i;
- for (i = 0; i < outputFrames - mixerFrameCount; i += mixerFrameCount) {
- for (size_t j = 0; j < names.size(); ++j) {
- mixer->setParameter(names[j], AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
- (char *) outputAddr + i * outputFrameSize);
- if (auxFilename) {
- mixer->setParameter(names[j], AudioMixer::TRACK, AudioMixer::AUX_BUFFER,
- (char *) auxAddr + i * auxFrameSize);
- }
- }
- mixer->process();
- }
- outputFrames = i; // reset output frames to the data actually produced.
-
- // write to files
- writeFile(outputFilename, outputAddr,
- outputSampleRate, outputChannels, outputFrames, useMixerFloat);
- if (auxFilename) {
- // Aux buffer is always in q4_27 format for now.
- // memcpy_to_i16_from_q4_27(), but with stereo frame count (not sample count)
- ditherAndClamp((int32_t*)auxAddr, (int32_t*)auxAddr, outputFrames >> 1);
- writeFile(auxFilename, auxAddr, outputSampleRate, 1, outputFrames, false);
- }
-
- delete mixer;
- free(outputAddr);
- free(auxAddr);
- return EXIT_SUCCESS;
-}
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
index 9f6b300..ad340e5 100644
--- a/services/audiopolicy/Android.mk
+++ b/services/audiopolicy/Android.mk
@@ -4,23 +4,12 @@
LOCAL_SRC_FILES:= \
service/AudioPolicyService.cpp \
- service/AudioPolicyEffects.cpp
-
-ifeq ($(USE_LEGACY_AUDIO_POLICY), 1)
-LOCAL_SRC_FILES += \
- service/AudioPolicyInterfaceImplLegacy.cpp \
- service/AudioPolicyClientImplLegacy.cpp
-
- LOCAL_CFLAGS += -DUSE_LEGACY_AUDIO_POLICY
-else
-LOCAL_SRC_FILES += \
+ service/AudioPolicyEffects.cpp \
service/AudioPolicyInterfaceImpl.cpp \
service/AudioPolicyClientImpl.cpp
-endif
LOCAL_C_INCLUDES := \
frameworks/av/services/audioflinger \
- $(call include-path-for, audio-effects) \
$(call include-path-for, audio-utils) \
frameworks/av/services/audiopolicy/common/include \
frameworks/av/services/audiopolicy/engine/interface \
@@ -33,15 +22,11 @@
libbinder \
libaudioclient \
libhardware_legacy \
- libserviceutility
-
-ifneq ($(USE_LEGACY_AUDIO_POLICY), 1)
-LOCAL_SHARED_LIBRARIES += \
- libaudiopolicymanager
-endif
+ libserviceutility \
+ libaudiopolicymanager \
+ libmedia_helper
LOCAL_STATIC_LIBRARIES := \
- libmedia_helper \
libaudiopolicycomponents
LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
@@ -53,8 +38,6 @@
include $(BUILD_SHARED_LIBRARY)
-ifneq ($(USE_LEGACY_AUDIO_POLICY), 1)
-
include $(CLEAR_VARS)
LOCAL_SRC_FILES:= managerdefault/AudioPolicyManager.cpp
@@ -91,13 +74,12 @@
frameworks/av/services/audiopolicy/utilities
LOCAL_STATIC_LIBRARIES := \
- libmedia_helper \
libaudiopolicycomponents
-ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
-LOCAL_STATIC_LIBRARIES += libxml2
+LOCAL_SHARED_LIBRARIES += libmedia_helper
-LOCAL_SHARED_LIBRARIES += libicuuc
+ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
+LOCAL_SHARED_LIBRARIES += libicuuc libxml2
LOCAL_CFLAGS += -DUSE_XML_AUDIO_POLICY_CONF
endif #ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
@@ -136,7 +118,6 @@
include $(BUILD_SHARED_LIBRARY)
endif
-endif
#######################################################################
# Recursive call sub-folder Android.mk
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index d1b86da..60ed1d6 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -21,8 +21,6 @@
#include <media/AudioPolicy.h>
#include <utils/String8.h>
-#include <hardware/audio_policy.h>
-
namespace android {
// ----------------------------------------------------------------------------
@@ -67,6 +65,16 @@
API_INPUT_TELEPHONY_RX, // used for capture from telephony RX path
} input_type_t;
+ enum {
+ API_INPUT_CONCURRENCY_NONE = 0,
+ API_INPUT_CONCURRENCY_CALL = (1 << 0), // Concurrency with a call
+ API_INPUT_CONCURRENCY_CAPTURE = (1 << 1), // Concurrency with another capture
+
+ API_INPUT_CONCURRENCY_ALL = (API_INPUT_CONCURRENCY_CALL | API_INPUT_CONCURRENCY_CAPTURE),
+ };
+
+ typedef uint32_t concurrency_type__mask_t;
+
public:
virtual ~AudioPolicyInterface() {}
//
@@ -112,12 +120,10 @@
audio_session_t session,
audio_stream_type_t *stream,
uid_t uid,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
+ const audio_config_t *config,
audio_output_flags_t flags,
int selectedDeviceId,
- const audio_offload_info_t *offloadInfo) = 0;
+ audio_port_handle_t *portId) = 0;
// indicates to the audio policy manager that the output starts being used by corresponding stream.
virtual status_t startOutput(audio_io_handle_t output,
audio_stream_type_t stream,
@@ -136,15 +142,15 @@
audio_io_handle_t *input,
audio_session_t session,
uid_t uid,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
+ const audio_config_base_t *config,
audio_input_flags_t flags,
audio_port_handle_t selectedDeviceId,
- input_type_t *inputType) = 0;
+ input_type_t *inputType,
+ audio_port_handle_t *portId) = 0;
// indicates to the audio policy manager that the input starts being used.
virtual status_t startInput(audio_io_handle_t input,
- audio_session_t session) = 0;
+ audio_session_t session,
+ concurrency_type__mask_t *concurrency) = 0;
// indicates to the audio policy manager that the input stops being used.
virtual status_t stopInput(audio_io_handle_t input,
audio_session_t session) = 0;
@@ -229,9 +235,9 @@
virtual status_t startAudioSource(const struct audio_port_config *source,
const audio_attributes_t *attributes,
- audio_io_handle_t *handle,
+ audio_patch_handle_t *handle,
uid_t uid) = 0;
- virtual status_t stopAudioSource(audio_io_handle_t handle) = 0;
+ virtual status_t stopAudioSource(audio_patch_handle_t handle) = 0;
virtual status_t setMasterMono(bool mono) = 0;
virtual status_t getMasterMono(bool *mono) = 0;
diff --git a/services/audiopolicy/common/include/Volume.h b/services/audiopolicy/common/include/Volume.h
old mode 100755
new mode 100644
index d091179..1239fe0
--- a/services/audiopolicy/common/include/Volume.h
+++ b/services/audiopolicy/common/include/Volume.h
@@ -124,16 +124,16 @@
case AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET:
case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP:
case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES:
+ case AUDIO_DEVICE_OUT_USB_HEADSET:
return DEVICE_CATEGORY_HEADSET;
case AUDIO_DEVICE_OUT_LINE:
case AUDIO_DEVICE_OUT_AUX_DIGITAL:
- /*USB? Remote submix?*/
+ case AUDIO_DEVICE_OUT_USB_DEVICE:
return DEVICE_CATEGORY_EXT_MEDIA;
case AUDIO_DEVICE_OUT_SPEAKER:
case AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT:
case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER:
case AUDIO_DEVICE_OUT_USB_ACCESSORY:
- case AUDIO_DEVICE_OUT_USB_DEVICE:
case AUDIO_DEVICE_OUT_REMOTE_SUBMIX:
default:
return DEVICE_CATEGORY_SPEAKER;
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
old mode 100755
new mode 100644
index 55ee91f..31f0550
--- a/services/audiopolicy/common/include/policy.h
+++ b/services/audiopolicy/common/include/policy.h
@@ -33,9 +33,9 @@
/**
* A device mask for all audio input devices that are considered "virtual" when evaluating
- * active inputs in getActiveInput()
+ * active inputs in getActiveInputs()
*/
-#define APM_AUDIO_IN_DEVICE_VIRTUAL_ALL (AUDIO_DEVICE_IN_REMOTE_SUBMIX|AUDIO_DEVICE_IN_FM_TUNER)
+#define APM_AUDIO_IN_DEVICE_VIRTUAL_ALL (AUDIO_DEVICE_IN_REMOTE_SUBMIX)
/**
@@ -47,16 +47,6 @@
#define APM_AUDIO_DEVICE_IN_MATCH_ADDRESS_ALL (AUDIO_DEVICE_IN_REMOTE_SUBMIX|AUDIO_DEVICE_IN_BUS)
/**
- * Stub audio output device. Used in policy configuration file on platforms without audio outputs.
- * This alias value to AUDIO_DEVICE_OUT_DEFAULT is only used in the audio policy context.
- */
-#define AUDIO_DEVICE_OUT_STUB AUDIO_DEVICE_OUT_DEFAULT
-/**
- * Stub audio input device. Used in policy configuration file on platforms without audio inputs.
- * This alias value to AUDIO_DEVICE_IN_DEFAULT is only used in the audio policy context.
- */
-#define AUDIO_DEVICE_IN_STUB AUDIO_DEVICE_IN_DEFAULT
-/**
* Alias to AUDIO_DEVICE_OUT_DEFAULT defined for clarification when this value is used by volume
* control APIs (e.g setStreamVolumeIndex().
*/
@@ -109,6 +99,44 @@
((device & APM_AUDIO_DEVICE_OUT_MATCH_ADDRESS_ALL) != 0));
}
+/**
+ * Returns the priority of a given audio source for capture. The priority is used when more than one
+ * capture session is active on a given input stream to determine which session drives routing and
+ * effect configuration.
+ *
+ * @param[in] inputSource to consider. Valid sources are:
+ * - AUDIO_SOURCE_VOICE_COMMUNICATION
+ * - AUDIO_SOURCE_CAMCORDER
+ * - AUDIO_SOURCE_MIC
+ * - AUDIO_SOURCE_FM_TUNER
+ * - AUDIO_SOURCE_VOICE_RECOGNITION
+ * - AUDIO_SOURCE_HOTWORD
+ *
+ * @return the corresponding input source priority or 0 if priority is irrelevant for this source.
+ * This happens when the specified source cannot share a given input stream (e.g remote submix)
+ * The higher the value, the higher the priority.
+ */
+static inline int32_t source_priority(audio_source_t inputSource)
+{
+ switch (inputSource) {
+ case AUDIO_SOURCE_VOICE_COMMUNICATION:
+ return 6;
+ case AUDIO_SOURCE_CAMCORDER:
+ return 5;
+ case AUDIO_SOURCE_MIC:
+ return 4;
+ case AUDIO_SOURCE_FM_TUNER:
+ return 3;
+ case AUDIO_SOURCE_VOICE_RECOGNITION:
+ return 2;
+ case AUDIO_SOURCE_HOTWORD:
+ return 1;
+ default:
+ break;
+ }
+ return 0;
+}
+
/* Indicates if audio formats are equivalent when considering a match between
* audio HAL supported formats and client requested formats
*/
diff --git a/services/audiopolicy/common/managerdefinitions/Android.mk b/services/audiopolicy/common/managerdefinitions/Android.mk
index 96591df..e263c0c 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.mk
+++ b/services/audiopolicy/common/managerdefinitions/Android.mk
@@ -41,9 +41,7 @@
LOCAL_SRC_FILES += src/Serializer.cpp
-LOCAL_STATIC_LIBRARIES += libxml2
-
-LOCAL_SHARED_LIBRARIES += libicuuc
+LOCAL_SHARED_LIBRARIES += libicuuc libxml2
LOCAL_C_INCLUDES += \
external/libxml2/include \
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index e689320..b169bac 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -57,13 +57,15 @@
void clearPreemptedSessions();
bool isActive() const;
bool isSourceActive(audio_source_t source) const;
- audio_source_t inputSource() const;
+ audio_source_t inputSource(bool activeOnly = false) const;
bool isSoundTrigger() const;
status_t addAudioSession(audio_session_t session,
const sp<AudioSession>& audioSession);
status_t removeAudioSession(audio_session_t session);
sp<AudioSession> getAudioSession(audio_session_t session) const;
- AudioSessionCollection getActiveAudioSessions() const;
+ AudioSessionCollection getAudioSessions(bool activeOnly) const;
+ size_t getAudioSessionCount(bool activeOnly) const;
+ audio_source_t getHighestPrioritySource(bool activeOnly) const;
// implementation of AudioSessionInfoProvider
virtual audio_config_base_t getConfig() const;
@@ -102,7 +104,7 @@
* Only considers inputs from physical devices (e.g. main mic, headset mic) when
* ignoreVirtualInputs is true.
*/
- audio_io_handle_t getActiveInput(bool ignoreVirtualInputs = true);
+ Vector<sp <AudioInputDescriptor> > getActiveInputs(bool ignoreVirtualInputs = true);
audio_devices_t getSupportedDevices(audio_io_handle_t handle) const;
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index 35bb021..c09cb5a 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -16,6 +16,8 @@
#pragma once
+#include <sys/types.h>
+
#include "AudioPort.h"
#include <RoutingStrategy.h>
#include <utils/Errors.h>
@@ -128,6 +130,7 @@
sp<SwAudioOutputDescriptor> mOutput1; // used by duplicated outputs: first output
sp<SwAudioOutputDescriptor> mOutput2; // used by duplicated outputs: second output
uint32_t mDirectOpenCount; // number of clients using this output (direct outputs only)
+ audio_session_t mDirectClientSession; // session id of the direct output client
uint32_t mGlobalRefCount; // non-stream-specific ref count
};
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
index c9652de..0bacef7 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
@@ -19,7 +19,7 @@
#include <utils/RefBase.h>
#include <media/AudioPolicy.h>
#include <utils/KeyedVector.h>
-#include <hardware/audio.h>
+#include <system/audio.h>
#include <utils/String8.h>
namespace android {
@@ -43,6 +43,8 @@
void setMix(AudioMix &mix);
+ status_t dump(int fd, int spaces, int index) const;
+
private:
AudioMix mMix; // Audio policy mix descriptor
sp<SwAudioOutputDescriptor> mOutput; // Corresponding output stream
@@ -77,6 +79,8 @@
AudioMix **policyMix);
status_t getInputMixForAttr(audio_attributes_t attr, AudioMix **policyMix);
+
+ status_t dump(int fd) const;
};
}; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
index d00d49f..4f79ed2 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
@@ -71,7 +71,7 @@
virtual void toAudioPort(struct audio_port *port) const;
- virtual void importAudioPort(const sp<AudioPort>& port);
+ virtual void importAudioPort(const sp<AudioPort>& port, bool force = false);
void addAudioProfile(const sp<AudioProfile> &profile) { mProfiles.add(profile); }
@@ -118,7 +118,7 @@
audio_format_t targetFormat);
audio_module_handle_t getModuleHandle() const;
- uint32_t getModuleVersion() const;
+ uint32_t getModuleVersionMajor() const;
const char *getModuleName() const;
bool useInputChannelMask() const
@@ -166,6 +166,10 @@
virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
const struct audio_port_config *srcConfig = NULL) const = 0;
virtual sp<AudioPort> getAudioPort() const = 0;
+ virtual bool hasSameHwModuleAs(const sp<AudioPortConfig>& other) const {
+ return (other != 0) &&
+ (other->getAudioPort()->getModuleHandle() == getAudioPort()->getModuleHandle());
+ }
uint32_t mSamplingRate;
audio_format_t mFormat;
audio_channel_mask_t mChannelMask;
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioSession.h b/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
index 388c25d..18fba25 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
@@ -91,8 +91,10 @@
uint32_t getOpenCount() const;
AudioSessionCollection getActiveSessions() const;
+ size_t getActiveSessionCount() const;
bool hasActiveSession() const;
bool isSourceActive(audio_source_t source) const;
+ audio_source_t getHighestPrioritySource(bool activeOnly) const;
// implementation of AudioSessionInfoUpdateListener
virtual void onSessionInfoUpdate() const;
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h
index 4ab7cf0..7e1e24d 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioSourceDescriptor.h
@@ -50,7 +50,7 @@
};
class AudioSourceCollection :
- public DefaultKeyedVector< audio_io_handle_t, sp<AudioSourceDescriptor> >
+ public DefaultKeyedVector< audio_patch_handle_t, sp<AudioSourceDescriptor> >
{
public:
status_t dump(int fd) const;
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index 9a52d22..1a644d7 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -48,7 +48,7 @@
// AudioPort
virtual void attach(const sp<HwModule>& module);
virtual void toAudioPort(struct audio_port *port) const;
- virtual void importAudioPort(const sp<AudioPort>& port);
+ virtual void importAudioPort(const sp<AudioPort>& port, bool force = false);
audio_port_handle_t getId() const;
status_t dump(int fd, int spaces, int index, bool verbose = true) const;
diff --git a/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
index ab650c0..9ea0aea 100644
--- a/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
@@ -17,7 +17,7 @@
#pragma once
#include <RoutingStrategy.h>
-#include <hardware/audio_effect.h>
+#include <system/audio_effect.h>
#include <utils/KeyedVector.h>
#include <utils/RefBase.h>
#include <utils/Errors.h>
diff --git a/services/audiopolicy/common/managerdefinitions/include/HwModule.h b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
index 3a31672..29b6b9c 100644
--- a/services/audiopolicy/common/managerdefinitions/include/HwModule.h
+++ b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
@@ -18,7 +18,6 @@
#include "DeviceDescriptor.h"
#include "AudioRoute.h"
-#include <hardware/audio.h>
#include <utils/RefBase.h>
#include <utils/String8.h>
#include <utils/Errors.h>
@@ -40,7 +39,7 @@
class HwModule : public RefBase
{
public:
- explicit HwModule(const char *name, uint32_t halVersion = AUDIO_DEVICE_API_VERSION_MIN);
+ explicit HwModule(const char *name, uint32_t halVersionMajor = 0, uint32_t halVersionMinor = 0);
~HwModule();
const char *getName() const { return mName.string(); }
@@ -55,8 +54,11 @@
void setProfiles(const IOProfileCollection &profiles);
- void setHalVersion(uint32_t halVersion) { mHalVersion = halVersion; }
- uint32_t getHalVersion() const { return mHalVersion; }
+ void setHalVersion(uint32_t major, uint32_t minor) {
+ mHalVersion = (major << 8) | (minor & 0xff);
+ }
+ uint32_t getHalVersionMajor() const { return mHalVersion >> 8; }
+ uint32_t getHalVersionMinor() const { return mHalVersion & 0xff; }
sp<DeviceDescriptor> getRouteSinkDevice(const sp<AudioRoute> &route) const;
DeviceVector getRouteSourceDevices(const sp<AudioRoute> &route) const;
diff --git a/services/audiopolicy/common/managerdefinitions/include/StreamDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/StreamDescriptor.h
index 424df84..8822927 100644
--- a/services/audiopolicy/common/managerdefinitions/include/StreamDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/StreamDescriptor.h
@@ -20,7 +20,7 @@
#include <utils/KeyedVector.h>
#include <utils/StrongPointer.h>
#include <utils/SortedVector.h>
-#include <hardware/audio.h>
+#include <system/audio.h>
namespace android {
diff --git a/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h b/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
index 579eb31..fc95eb9 100644
--- a/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
+++ b/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
@@ -16,201 +16,46 @@
#pragma once
+#include <media/TypeConverter.h>
+
#include "policy.h"
#include <Volume.h>
-#include <system/audio.h>
-#include <convert/convert.h>
-#include <utils/Log.h>
-#include <string>
-#include <utils/Vector.h>
-#include <utils/SortedVector.h>
namespace android {
-struct SampleRateTraits
-{
- typedef uint32_t Type;
- typedef SortedVector<Type> Collection;
-};
-struct DeviceTraits
-{
- typedef audio_devices_t Type;
- typedef Vector<Type> Collection;
-};
-struct OutputFlagTraits
-{
- typedef audio_output_flags_t Type;
- typedef Vector<Type> Collection;
-};
-struct InputFlagTraits
-{
- typedef audio_input_flags_t Type;
- typedef Vector<Type> Collection;
-};
-struct FormatTraits
-{
- typedef audio_format_t Type;
- typedef Vector<Type> Collection;
-};
-struct ChannelTraits
-{
- typedef audio_channel_mask_t Type;
- typedef SortedVector<Type> Collection;
-};
-struct OutputChannelTraits : public ChannelTraits {};
-struct InputChannelTraits : public ChannelTraits {};
-struct ChannelIndexTraits : public ChannelTraits {};
-struct GainModeTraits
-{
- typedef audio_gain_mode_t Type;
- typedef Vector<Type> Collection;
-};
-struct StreamTraits
-{
- typedef audio_stream_type_t Type;
- typedef Vector<Type> Collection;
-};
struct DeviceCategoryTraits
{
- typedef device_category Type;
- typedef Vector<Type> Collection;
+ typedef device_category Type;
+ typedef Vector<Type> Collection;
};
-template <typename T>
-struct DefaultTraits
+struct MixTypeTraits
{
- typedef T Type;
- typedef Vector<Type> Collection;
+ typedef int32_t Type;
+ typedef Vector<Type> Collection;
+};
+struct RouteFlagTraits
+{
+ typedef uint32_t Type;
+ typedef Vector<Type> Collection;
+};
+struct RuleTraits
+{
+ typedef uint32_t Type;
+ typedef Vector<Type> Collection;
};
-template <class Traits>
-static void collectionFromString(const std::string &str, typename Traits::Collection &collection,
- const char *del = "|")
-{
- char *literal = strdup(str.c_str());
- for (const char *cstr = strtok(literal, del); cstr != NULL; cstr = strtok(NULL, del)) {
- typename Traits::Type value;
- if (utilities::convertTo<std::string, typename Traits::Type >(cstr, value)) {
- collection.add(value);
- }
- }
- free(literal);
-}
-
-template <class Traits>
-class TypeConverter
-{
-public:
- static bool toString(const typename Traits::Type &value, std::string &str);
-
- static bool fromString(const std::string &str, typename Traits::Type &result);
-
- static void collectionFromString(const std::string &str,
- typename Traits::Collection &collection,
- const char *del = "|");
-
- static uint32_t maskFromString(const std::string &str, const char *del = "|");
-
-protected:
- struct Table {
- const char *literal;
- typename Traits::Type value;
- };
-
- static const Table mTable[];
- static const size_t mSize;
-};
-
-typedef TypeConverter<DeviceTraits> DeviceConverter;
-typedef TypeConverter<OutputFlagTraits> OutputFlagConverter;
-typedef TypeConverter<InputFlagTraits> InputFlagConverter;
-typedef TypeConverter<FormatTraits> FormatConverter;
-typedef TypeConverter<OutputChannelTraits> OutputChannelConverter;
-typedef TypeConverter<InputChannelTraits> InputChannelConverter;
-typedef TypeConverter<ChannelIndexTraits> ChannelIndexConverter;
-typedef TypeConverter<GainModeTraits> GainModeConverter;
-typedef TypeConverter<StreamTraits> StreamTypeConverter;
typedef TypeConverter<DeviceCategoryTraits> DeviceCategoryConverter;
+typedef TypeConverter<MixTypeTraits> MixTypeConverter;
+typedef TypeConverter<RouteFlagTraits> RouteFlagTypeConverter;
+typedef TypeConverter<RuleTraits> RuleTypeConverter;
-template<> const DeviceConverter::Table DeviceConverter::mTable[];
-template<> const OutputFlagConverter::Table OutputFlagConverter::mTable[];
-template<> const InputFlagConverter::Table InputFlagConverter::mTable[];
-template<> const FormatConverter::Table FormatConverter::mTable[];
-template<> const OutputChannelConverter::Table OutputChannelConverter::mTable[];
-template<> const InputChannelConverter::Table InputChannelConverter::mTable[];
-template<> const ChannelIndexConverter::Table ChannelIndexConverter::mTable[];
-template<> const GainModeConverter::Table GainModeConverter::mTable[];
-template<> const DeviceCategoryConverter::Table DeviceCategoryConverter::mTable[];
-template<> const StreamTypeConverter::Table StreamTypeConverter::mTable[];
-
-inline
-static SampleRateTraits::Collection samplingRatesFromString(const std::string &samplingRates,
- const char *del = "|")
-{
- SampleRateTraits::Collection samplingRateCollection;
- collectionFromString<SampleRateTraits>(samplingRates, samplingRateCollection, del);
- return samplingRateCollection;
-}
-
-inline
-static FormatTraits::Collection formatsFromString(const std::string &formats, const char *del = "|")
-{
- FormatTraits::Collection formatCollection;
- FormatConverter::collectionFromString(formats, formatCollection, del);
- return formatCollection;
-}
-
-inline
-static audio_format_t formatFromString(const std::string &literalFormat)
-{
- audio_format_t format;
- if (literalFormat.empty()) {
- return gDynamicFormat;
- }
- FormatConverter::fromString(literalFormat, format);
- return format;
-}
-
-inline
-static audio_channel_mask_t channelMaskFromString(const std::string &literalChannels)
-{
- audio_channel_mask_t channels;
- if (!OutputChannelConverter::fromString(literalChannels, channels) ||
- !InputChannelConverter::fromString(literalChannels, channels)) {
- return AUDIO_CHANNEL_INVALID;
- }
- return channels;
-}
-
-inline
-static ChannelTraits::Collection channelMasksFromString(const std::string &channels,
- const char *del = "|")
-{
- ChannelTraits::Collection channelMaskCollection;
- OutputChannelConverter::collectionFromString(channels, channelMaskCollection, del);
- InputChannelConverter::collectionFromString(channels, channelMaskCollection, del);
- ChannelIndexConverter::collectionFromString(channels, channelMaskCollection, del);
- return channelMaskCollection;
-}
-
-inline
-static InputChannelTraits::Collection inputChannelMasksFromString(const std::string &inChannels,
- const char *del = "|")
-{
- InputChannelTraits::Collection inputChannelMaskCollection;
- InputChannelConverter::collectionFromString(inChannels, inputChannelMaskCollection, del);
- ChannelIndexConverter::collectionFromString(inChannels, inputChannelMaskCollection, del);
- return inputChannelMaskCollection;
-}
-
-inline
-static OutputChannelTraits::Collection outputChannelMasksFromString(const std::string &outChannels,
- const char *del = "|")
-{
- OutputChannelTraits::Collection outputChannelMaskCollection;
- OutputChannelConverter::collectionFromString(outChannels, outputChannelMaskCollection, del);
- ChannelIndexConverter::collectionFromString(outChannels, outputChannelMaskCollection, del);
- return outputChannelMaskCollection;
-}
+template <>
+const DeviceCategoryConverter::Table DeviceCategoryConverter::mTable[];
+template <>
+const MixTypeConverter::Table MixTypeConverter::mTable[];
+template <>
+const RouteFlagTypeConverter::Table RouteFlagTypeConverter::mTable[];
+template <>
+const RuleTypeConverter::Table RuleTypeConverter::mTable[];
}; // namespace android
-
diff --git a/services/audiopolicy/common/managerdefinitions/include/VolumeCurve.h b/services/audiopolicy/common/managerdefinitions/include/VolumeCurve.h
index 10f0766..e7fcefc 100644
--- a/services/audiopolicy/common/managerdefinitions/include/VolumeCurve.h
+++ b/services/audiopolicy/common/managerdefinitions/include/VolumeCurve.h
@@ -18,7 +18,6 @@
#include "IVolumeCurvesCollection.h"
#include <policy.h>
-#include <hardware/audio.h>
#include <utils/RefBase.h>
#include <utils/String8.h>
#include <utils/SortedVector.h>
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index c7d2ee4..2492ed6 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -63,11 +63,9 @@
return mId;
}
-audio_source_t AudioInputDescriptor::inputSource() const
+audio_source_t AudioInputDescriptor::inputSource(bool activeOnly) const
{
- // TODO: return highest priority input source
- return mSessions.size() > 0 ? mSessions.valueAt(0)->inputSource() :
- AUDIO_SOURCE_DEFAULT;
+ return getHighestPrioritySource(activeOnly);
}
void AudioInputDescriptor::toAudioPortConfig(struct audio_port_config *dstConfig,
@@ -132,6 +130,12 @@
return mSessions.isSourceActive(source);
}
+audio_source_t AudioInputDescriptor::getHighestPrioritySource(bool activeOnly) const
+{
+
+ return mSessions.getHighestPrioritySource(activeOnly);
+}
+
bool AudioInputDescriptor::isSoundTrigger() const {
// sound trigger and non sound trigger sessions are not mixed
// on a given input
@@ -143,9 +147,22 @@
return mSessions.valueFor(session);
}
-AudioSessionCollection AudioInputDescriptor::getActiveAudioSessions() const
+AudioSessionCollection AudioInputDescriptor::getAudioSessions(bool activeOnly) const
{
- return mSessions.getActiveSessions();
+ if (activeOnly) {
+ return mSessions.getActiveSessions();
+ } else {
+ return mSessions;
+ }
+}
+
+size_t AudioInputDescriptor::getAudioSessionCount(bool activeOnly) const
+{
+ if (activeOnly) {
+ return mSessions.getActiveSessionCount();
+ } else {
+ return mSessions.size();
+ }
}
status_t AudioInputDescriptor::addAudioSession(audio_session_t session,
@@ -236,17 +253,19 @@
return count;
}
-audio_io_handle_t AudioInputCollection::getActiveInput(bool ignoreVirtualInputs)
+Vector<sp <AudioInputDescriptor> > AudioInputCollection::getActiveInputs(bool ignoreVirtualInputs)
{
+ Vector<sp <AudioInputDescriptor> > activeInputs;
+
for (size_t i = 0; i < size(); i++) {
const sp<AudioInputDescriptor> inputDescriptor = valueAt(i);
if ((inputDescriptor->isActive())
&& (!ignoreVirtualInputs ||
!is_virtual_input_device(inputDescriptor->mDevice))) {
- return keyAt(i);
+ activeInputs.add(inputDescriptor);
}
}
- return 0;
+ return activeInputs;
}
audio_devices_t AudioInputCollection::getSupportedDevices(audio_io_handle_t handle) const
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 1dbc3d0..8593444 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -81,7 +81,7 @@
return sharesHwModuleWith(outputDesc->subOutput1()) ||
sharesHwModuleWith(outputDesc->subOutput2());
} else {
- return (getModuleHandle() == outputDesc->getModuleHandle());
+ return hasSameHwModuleAs(outputDesc);
}
}
@@ -223,7 +223,8 @@
: AudioOutputDescriptor(profile, clientInterface),
mProfile(profile), mIoHandle(0), mLatency(0),
mFlags((audio_output_flags_t)0), mPolicyMix(NULL),
- mOutput1(0), mOutput2(0), mDirectOpenCount(0), mGlobalRefCount(0)
+ mOutput1(0), mOutput2(0), mDirectOpenCount(0),
+ mDirectClientSession(AUDIO_SESSION_NONE), mGlobalRefCount(0)
{
if (profile != NULL) {
mFlags = (audio_output_flags_t)profile->getFlags();
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
index 6059009..32606ea 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
@@ -22,6 +22,7 @@
#include "TypeConverter.h"
#include <log/log.h>
+#include <cutils/atomic.h>
#include <utils/String8.h>
namespace android {
@@ -55,7 +56,7 @@
for (size_t i = 0; i < mPatch.num_sources; i++) {
if (mPatch.sources[i].type == AUDIO_PORT_TYPE_DEVICE) {
std::string device;
- DeviceConverter::toString(mPatch.sources[i].ext.device.type, device);
+ deviceToString(mPatch.sources[i].ext.device.type, device);
snprintf(buffer, SIZE, "%*s- Device ID %d %s\n", spaces + 2, "",
mPatch.sources[i].id,
device.c_str());
@@ -70,7 +71,7 @@
for (size_t i = 0; i < mPatch.num_sinks; i++) {
if (mPatch.sinks[i].type == AUDIO_PORT_TYPE_DEVICE) {
std::string device;
- DeviceConverter::toString(mPatch.sinks[i].ext.device.type, device);
+ deviceToString(mPatch.sinks[i].ext.device.type, device);
snprintf(buffer, SIZE, "%*s- Device ID %d %s\n", spaces + 2, "",
mPatch.sinks[i].id,
device.c_str());
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index 02833a9..08930f1 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -18,6 +18,7 @@
//#define LOG_NDEBUG 0
#include "AudioPolicyMix.h"
+#include "TypeConverter.h"
#include "HwModule.h"
#include "AudioPort.h"
#include "IOProfile.h"
@@ -51,6 +52,66 @@
return &mMix;
}
+status_t AudioPolicyMix::dump(int fd, int spaces, int index) const
+{
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+
+ snprintf(buffer, SIZE, "%*sAudio Policy Mix %d:\n", spaces, "", index+1);
+ result.append(buffer);
+ std::string mixTypeLiteral;
+ if (!MixTypeConverter::toString(mMix.mMixType, mixTypeLiteral)) {
+ ALOGE("%s: failed to convert mix type %d", __FUNCTION__, mMix.mMixType);
+ return BAD_VALUE;
+ }
+ snprintf(buffer, SIZE, "%*s- mix type: %s\n", spaces, "", mixTypeLiteral.c_str());
+ result.append(buffer);
+ std::string routeFlagLiteral;
+ RouteFlagTypeConverter::maskToString(mMix.mRouteFlags, routeFlagLiteral);
+ snprintf(buffer, SIZE, "%*s- Route Flags: %s\n", spaces, "", routeFlagLiteral.c_str());
+ result.append(buffer);
+ std::string deviceLiteral;
+ deviceToString(mMix.mDeviceType, deviceLiteral);
+ snprintf(buffer, SIZE, "%*s- device type: %s\n", spaces, "", deviceLiteral.c_str());
+ result.append(buffer);
+ snprintf(buffer, SIZE, "%*s- device address: %s\n", spaces, "", mMix.mDeviceAddress.string());
+ result.append(buffer);
+
+ int indexCriterion = 0;
+ for (const auto &criterion : mMix.mCriteria) {
+ snprintf(buffer, SIZE, "%*s- Criterion %d:\n", spaces + 2, "", indexCriterion++);
+ result.append(buffer);
+ std::string usageLiteral;
+ if (!UsageTypeConverter::toString(criterion.mValue.mUsage, usageLiteral)) {
+ ALOGE("%s: failed to convert usage %d", __FUNCTION__, criterion.mValue.mUsage);
+ return BAD_VALUE;
+ }
+ snprintf(buffer, SIZE, "%*s- Usage:%s\n", spaces + 4, "", usageLiteral.c_str());
+ result.append(buffer);
+ if (mMix.mMixType == MIX_TYPE_RECORDERS) {
+ std::string sourceLiteral;
+ if (!SourceTypeConverter::toString(criterion.mValue.mSource, sourceLiteral)) {
+ ALOGE("%s: failed to convert source %d", __FUNCTION__, criterion.mValue.mSource);
+ return BAD_VALUE;
+ }
+ snprintf(buffer, SIZE, "%*s- Source:%s\n", spaces + 4, "", sourceLiteral.c_str());
+ result.append(buffer);
+ }
+ snprintf(buffer, SIZE, "%*s- Uid:%d\n", spaces + 4, "", criterion.mValue.mUid);
+ result.append(buffer);
+ std::string ruleLiteral;
+ if (!RuleTypeConverter::toString(criterion.mRule, ruleLiteral)) {
+ ALOGE("%s: failed to convert rule %d", __FUNCTION__, criterion.mRule);
+ return BAD_VALUE;
+ }
+ snprintf(buffer, SIZE, "%*s- Rule:%s\n", spaces + 4, "", ruleLiteral.c_str());
+ result.append(buffer);
+ }
+ write(fd, result.string(), result.size());
+ return NO_ERROR;
+}
+
status_t AudioPolicyMixCollection::registerMix(const String8& address, AudioMix mix,
sp<SwAudioOutputDescriptor> desc)
{
@@ -288,4 +349,14 @@
return NO_ERROR;
}
+status_t AudioPolicyMixCollection::dump(int fd) const
+{
+ std::string log("\nAudio Policy Mix:\n");
+ write(fd, log.c_str(), log.size());
+ for (size_t i = 0; i < size(); i++) {
+ valueAt(i)->dump(fd, 2, i);
+ }
+ return NO_ERROR;
+}
+
}; //namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
index 31bf95c..fcf9070 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
@@ -21,6 +21,7 @@
#include "HwModule.h"
#include "AudioGain.h"
#include <policy.h>
+#include <cutils/atomic.h>
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
@@ -50,12 +51,12 @@
return mModule->mHandle;
}
-uint32_t AudioPort::getModuleVersion() const
+uint32_t AudioPort::getModuleVersionMajor() const
{
if (mModule == 0) {
return 0;
}
- return mModule->getHalVersion();
+ return mModule->getHalVersionMajor();
}
const char *AudioPort::getModuleName() const
@@ -127,7 +128,7 @@
port->num_gains = i;
}
-void AudioPort::importAudioPort(const sp<AudioPort>& port)
+void AudioPort::importAudioPort(const sp<AudioPort>& port, bool force __unused)
{
size_t indexToImport;
for (indexToImport = 0; indexToImport < port->mProfiles.size(); indexToImport++) {
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
index a246c3d..bea9f4f 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
@@ -18,6 +18,7 @@
//#define LOG_NDEBUG 0
#include <AudioPolicyInterface.h>
+#include "policy.h"
#include "AudioSession.h"
#include "AudioGain.h"
#include "TypeConverter.h"
@@ -90,8 +91,10 @@
AUDIO_CONFIG_BASE_INITIALIZER;
const audio_patch_handle_t patchHandle = (provider != NULL) ? provider->getPatchHandle() :
AUDIO_PATCH_HANDLE_NONE;
- mClientInterface->onRecordingConfigurationUpdate(event, mSession, mInputSource,
- &mConfig, &deviceConfig, patchHandle);
+ if (patchHandle != AUDIO_PATCH_HANDLE_NONE) {
+ mClientInterface->onRecordingConfigurationUpdate(event, mSession, mInputSource,
+ &mConfig, &deviceConfig, patchHandle);
+ }
}
return mActiveCount;
@@ -125,9 +128,11 @@
AUDIO_CONFIG_BASE_INITIALIZER;
const audio_patch_handle_t patchHandle = (provider != NULL) ? provider->getPatchHandle() :
AUDIO_PATCH_HANDLE_NONE;
- mClientInterface->onRecordingConfigurationUpdate(RECORD_CONFIG_EVENT_START,
- mSession, mInputSource,
- &mConfig, &deviceConfig, patchHandle);
+ if (patchHandle != AUDIO_PATCH_HANDLE_NONE) {
+ mClientInterface->onRecordingConfigurationUpdate(RECORD_CONFIG_EVENT_START,
+ mSession, mInputSource,
+ &mConfig, &deviceConfig, patchHandle);
+ }
}
}
@@ -215,9 +220,20 @@
return activeSessions;
}
+size_t AudioSessionCollection::getActiveSessionCount() const
+{
+ size_t activeCount = 0;
+ for (size_t i = 0; i < size(); i++) {
+ if (valueAt(i)->activeCount() != 0) {
+ activeCount++;
+ }
+ }
+ return activeCount;
+}
+
bool AudioSessionCollection::hasActiveSession() const
{
- return getActiveSessions().size() != 0;
+ return getActiveSessionCount() != 0;
}
bool AudioSessionCollection::isSourceActive(audio_source_t source) const
@@ -237,6 +253,25 @@
return false;
}
+audio_source_t AudioSessionCollection::getHighestPrioritySource(bool activeOnly) const
+{
+ audio_source_t source = AUDIO_SOURCE_DEFAULT;
+ int32_t priority = -1;
+
+ for (size_t i = 0; i < size(); i++) {
+ const sp<AudioSession> audioSession = valueAt(i);
+ if (activeOnly && audioSession->activeCount() == 0) {
+ continue;
+ }
+ int32_t curPriority = source_priority(audioSession->inputSource());
+ if (curPriority > priority) {
+ priority = curPriority;
+ source = audioSession->inputSource();
+ }
+ }
+ return source;
+}
+
void AudioSessionCollection::onSessionInfoUpdate() const
{
for (size_t i = 0; i < size(); i++) {
@@ -244,7 +279,6 @@
}
}
-
status_t AudioSessionCollection::dump(int fd, int spaces) const
{
const size_t SIZE = 256;
diff --git a/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp b/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
index a3536e5..e5888e2 100644
--- a/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
@@ -18,11 +18,11 @@
//#define LOG_NDEBUG 0
#include "ConfigParsingUtils.h"
-#include <convert/convert.h>
#include "AudioGain.h"
#include "IOProfile.h"
-#include "TypeConverter.h"
-#include <hardware/audio.h>
+#include <system/audio.h>
+#include <media/AudioParameter.h>
+#include <media/TypeConverter.h>
#include <utils/Log.h>
#include <cutils/misc.h>
@@ -105,7 +105,7 @@
audio_devices_t type = AUDIO_DEVICE_NONE;
while (node) {
if (strcmp(node->name, APM_DEVICE_TYPE) == 0) {
- DeviceConverter::fromString(node->value, type);
+ deviceFromString(node->value, type);
break;
}
node = node->next;
@@ -289,11 +289,11 @@
const DeviceVector &declaredDevices)
{
char *tagLiteral = strndup(tag, strlen(tag));
- char *devTag = strtok(tagLiteral, "|");
+ char *devTag = strtok(tagLiteral, AudioParameter::valueListSeparator);
while (devTag != NULL) {
if (strlen(devTag) != 0) {
audio_devices_t type;
- if (DeviceConverter::fromString(devTag, type)) {
+ if (deviceFromString(devTag, type)) {
uint32_t inBit = type & AUDIO_DEVICE_BIT_IN;
type &= ~AUDIO_DEVICE_BIT_IN;
while (type) {
@@ -311,7 +311,7 @@
}
}
}
- devTag = strtok(NULL, "|");
+ devTag = strtok(NULL, AudioParameter::valueListSeparator);
}
free(tagLiteral);
}
@@ -340,7 +340,7 @@
config.addAvailableOutputDevices(availableOutputDevices);
} else if (strcmp(DEFAULT_OUTPUT_DEVICE_TAG, node->name) == 0) {
audio_devices_t device = AUDIO_DEVICE_NONE;
- DeviceConverter::fromString(node->value, device);
+ deviceFromString(node->value, device);
if (device != AUDIO_DEVICE_NONE) {
sp<DeviceDescriptor> defaultOutputDevice = new DeviceDescriptor(device);
config.setDefaultOutputDevice(defaultOutputDevice);
@@ -356,9 +356,8 @@
} else if (strcmp(AUDIO_HAL_VERSION_TAG, node->name) == 0) {
uint32_t major, minor;
sscanf((char *)node->value, "%u.%u", &major, &minor);
- module->setHalVersion(HARDWARE_DEVICE_API_VERSION(major, minor));
- ALOGV("loadGlobalConfig() mHalVersion = %04x major %u minor %u",
- module->getHalVersion(), major, minor);
+ module->setHalVersion(major, minor);
+ ALOGV("loadGlobalConfig() mHalVersion = major %u minor %u", major, minor);
}
node = node->next;
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index ba2b9e3..a2c1165 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -263,7 +263,10 @@
strncpy(port->ext.device.address, mAddress.string(), AUDIO_DEVICE_MAX_ADDRESS_LEN);
}
-void DeviceDescriptor::importAudioPort(const sp<AudioPort>& port) {
+void DeviceDescriptor::importAudioPort(const sp<AudioPort>& port, bool force) {
+ if (!force && !port->hasDynamicAudioProfile()) {
+ return;
+ }
AudioPort::importAudioPort(port);
port->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
}
@@ -285,7 +288,7 @@
result.append(buffer);
}
std::string deviceLiteral;
- if (DeviceConverter::toString(mDeviceType, deviceLiteral)) {
+ if (deviceToString(mDeviceType, deviceLiteral)) {
snprintf(buffer, SIZE, "%*s- type: %-48s\n", spaces, "", deviceLiteral.c_str());
result.append(buffer);
}
@@ -302,7 +305,7 @@
void DeviceDescriptor::log() const
{
std::string device;
- DeviceConverter::toString(mDeviceType, device);
+ deviceToString(mDeviceType, device);
ALOGI("Device id:%d type:0x%X:%s, addr:%s", mId, mDeviceType, device.c_str(),
mAddress.string());
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index 7a942cd..cc56fb8 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -20,16 +20,16 @@
#include "HwModule.h"
#include "IOProfile.h"
#include "AudioGain.h"
-#include <hardware/audio.h>
#include <policy.h>
+#include <system/audio.h>
namespace android {
-HwModule::HwModule(const char *name, uint32_t halVersion)
+HwModule::HwModule(const char *name, uint32_t halVersionMajor, uint32_t halVersionMinor)
: mName(String8(name)),
- mHandle(AUDIO_MODULE_HANDLE_NONE),
- mHalVersion(halVersion)
+ mHandle(AUDIO_MODULE_HANDLE_NONE)
{
+ setHalVersion(halVersionMajor, halVersionMinor);
}
HwModule::~HwModule()
@@ -42,8 +42,8 @@
}
}
-status_t HwModule::addOutputProfile(const String8 &name, const audio_config_t *config,
- audio_devices_t device, const String8 &address)
+status_t HwModule::addOutputProfile(const String8& name, const audio_config_t *config,
+ audio_devices_t device, const String8& address)
{
sp<IOProfile> profile = new OutputProfile(name);
@@ -227,7 +227,7 @@
result.append(buffer);
snprintf(buffer, SIZE, " - handle: %d\n", mHandle);
result.append(buffer);
- snprintf(buffer, SIZE, " - version: %u.%u\n", mHalVersion >> 8, mHalVersion & 0xFF);
+ snprintf(buffer, SIZE, " - version: %u.%u\n", getHalVersionMajor(), getHalVersionMinor());
result.append(buffer);
write(fd, result.string(), result.size());
if (mOutputProfiles.size()) {
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index 57f2534..74ef4ec 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -108,8 +108,18 @@
AudioPort::dump(fd, 4);
- snprintf(buffer, SIZE, " - flags: 0x%04x\n", getFlags());
+ snprintf(buffer, SIZE, " - flags: 0x%04x", getFlags());
result.append(buffer);
+ std::string flagsLiteral;
+ if (getRole() == AUDIO_PORT_ROLE_SINK) {
+ InputFlagConverter::maskToString(getFlags(), flagsLiteral);
+ } else if (getRole() == AUDIO_PORT_ROLE_SOURCE) {
+ OutputFlagConverter::maskToString(getFlags(), flagsLiteral);
+ }
+ if (!flagsLiteral.empty()) {
+ result.appendFormat(" (%s)", flagsLiteral.c_str());
+ }
+ result.append("\n");
write(fd, result.string(), result.size());
mSupportedDevices.dump(fd, String8("Supported"), 4, false);
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index 2ecd6b1..a224004 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -18,7 +18,7 @@
//#define LOG_NDEBUG 0
#include "Serializer.h"
-#include <convert/convert.h>
+#include <media/convert.h>
#include "TypeConverter.h"
#include <libxml/parser.h>
#include <libxml/xinclude.h>
@@ -199,7 +199,8 @@
string format = getXmlAttribute(root, Attributes::format);
string channels = getXmlAttribute(root, Attributes::channelMasks);
- profile = new Element(formatFromString(format), channelMasksFromString(channels, ","),
+ profile = new Element(formatFromString(format, gDynamicFormat),
+ channelMasksFromString(channels, ","),
samplingRatesFromString(samplingRates, ","));
profile->setDynamicFormat(profile->getFormat() == gDynamicFormat);
@@ -300,7 +301,7 @@
AUDIO_PORT_ROLE_SOURCE : AUDIO_PORT_ROLE_SINK;
audio_devices_t type = AUDIO_DEVICE_NONE;
- if (!DeviceConverter::fromString(typeName, type) ||
+ if (!deviceFromString(typeName, type) ||
(!audio_is_input_device(type) && portRole == AUDIO_PORT_ROLE_SOURCE) ||
(!audio_is_output_devices(type) && portRole == AUDIO_PORT_ROLE_SINK)) {
ALOGW("%s: bad type %08x", __FUNCTION__, type);
@@ -419,19 +420,17 @@
ALOGE("%s: No %s found", __FUNCTION__, Attributes::name);
return BAD_VALUE;
}
- uint32_t version = AUDIO_DEVICE_API_VERSION_MIN;
+ uint32_t versionMajor = 0, versionMinor = 0;
string versionLiteral = getXmlAttribute(root, Attributes::version);
if (!versionLiteral.empty()) {
- uint32_t major, minor;
- sscanf(versionLiteral.c_str(), "%u.%u", &major, &minor);
- version = HARDWARE_DEVICE_API_VERSION(major, minor);
- ALOGV("%s: mHalVersion = %04x major %u minor %u", __FUNCTION__,
- version, major, minor);
+ sscanf(versionLiteral.c_str(), "%u.%u", &versionMajor, &versionMinor);
+ ALOGV("%s: mHalVersion = major %u minor %u", __FUNCTION__,
+ versionMajor, versionMinor);
}
ALOGV("%s: %s %s=%s", __FUNCTION__, tag, Attributes::name, name.c_str());
- module = new Element(name.c_str(), version);
+ module = new Element(name.c_str(), versionMajor, versionMinor);
// Deserialize childrens: Audio Mix Port, Audio Device Ports (Source/Sink), Audio Routes
MixPortTraits::Collection mixPorts;
diff --git a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
index 48bfd79..0362037 100644
--- a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
@@ -14,290 +14,55 @@
* limitations under the License.
*/
+#include <media/AudioPolicy.h>
+
#include "TypeConverter.h"
namespace android {
#define MAKE_STRING_FROM_ENUM(string) { #string, string }
-
-template <>
-const DeviceConverter::Table DeviceConverter::mTable[] = {
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_EARPIECE),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_SPEAKER),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_SPEAKER_SAFE),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADSET),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADPHONE),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ALL_SCO),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ALL_A2DP),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_AUX_DIGITAL),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_HDMI),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_USB_ACCESSORY),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_USB_DEVICE),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ALL_USB),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_REMOTE_SUBMIX),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_TELEPHONY_TX),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_LINE),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_HDMI_ARC),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_SPDIF),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_FM),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_AUX_LINE),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_IP),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BUS),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_STUB),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_AMBIENT),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BUILTIN_MIC),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_ALL_SCO),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_WIRED_HEADSET),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_AUX_DIGITAL),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_HDMI),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_TELEPHONY_RX),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_VOICE_CALL),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BACK_MIC),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_REMOTE_SUBMIX),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_USB_ACCESSORY),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_USB_DEVICE),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_FM_TUNER),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_TV_TUNER),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_LINE),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_SPDIF),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_A2DP),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_LOOPBACK),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_IP),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BUS),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_STUB),
-};
-
-template<>
-const size_t DeviceConverter::mSize = sizeof(DeviceConverter::mTable) /
- sizeof(DeviceConverter::mTable[0]);
-
-
-template <>
-const OutputFlagConverter::Table OutputFlagConverter::mTable[] = {
- MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_DIRECT),
- MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_PRIMARY),
- MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_FAST),
- MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_DEEP_BUFFER),
- MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD),
- MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_NON_BLOCKING),
- MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_HW_AV_SYNC),
- MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_TTS),
- MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_RAW),
- MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_SYNC),
-};
-template<>
-const size_t OutputFlagConverter::mSize = sizeof(OutputFlagConverter::mTable) /
- sizeof(OutputFlagConverter::mTable[0]);
-
-
-template <>
-const InputFlagConverter::Table InputFlagConverter::mTable[] = {
- MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_FAST),
- MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_HW_HOTWORD),
- MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_RAW),
- MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_SYNC),
-};
-template<>
-const size_t InputFlagConverter::mSize = sizeof(InputFlagConverter::mTable) /
- sizeof(InputFlagConverter::mTable[0]);
-
-
-template <>
-const FormatConverter::Table FormatConverter::mTable[] = {
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_16_BIT),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_8_BIT),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_32_BIT),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_8_24_BIT),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_FLOAT),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_PCM_24_BIT_PACKED),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_MP3),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_MAIN),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LC),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_SSR),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LTP),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_HE_V1),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_SCALABLE),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ERLC),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LD),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_HE_V2),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_ELD),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_VORBIS),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_HE_AAC_V1),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_HE_AAC_V2),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_OPUS),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AC3),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_E_AC3),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_DTS),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_DTS_HD),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_IEC61937),
- MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_DOLBY_TRUEHD),
-};
-template<>
-const size_t FormatConverter::mSize = sizeof(FormatConverter::mTable) /
- sizeof(FormatConverter::mTable[0]);
-
-
-template <>
-const OutputChannelConverter::Table OutputChannelConverter::mTable[] = {
- MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_MONO),
- MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_STEREO),
- MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_QUAD),
- MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_5POINT1),
- MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_7POINT1),
-};
-template<>
-const size_t OutputChannelConverter::mSize = sizeof(OutputChannelConverter::mTable) /
- sizeof(OutputChannelConverter::mTable[0]);
-
-
-template <>
-const InputChannelConverter::Table InputChannelConverter::mTable[] = {
- MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_MONO),
- MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_STEREO),
- MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_FRONT_BACK),
-};
-template<>
-const size_t InputChannelConverter::mSize = sizeof(InputChannelConverter::mTable) /
- sizeof(InputChannelConverter::mTable[0]);
-
-template <>
-const ChannelIndexConverter::Table ChannelIndexConverter::mTable[] = {
- {"AUDIO_CHANNEL_INDEX_MASK_1", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_1)},
- {"AUDIO_CHANNEL_INDEX_MASK_2", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_2)},
- {"AUDIO_CHANNEL_INDEX_MASK_3", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_3)},
- {"AUDIO_CHANNEL_INDEX_MASK_4", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_4)},
- {"AUDIO_CHANNEL_INDEX_MASK_5", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_5)},
- {"AUDIO_CHANNEL_INDEX_MASK_6", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_6)},
- {"AUDIO_CHANNEL_INDEX_MASK_7", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_7)},
- {"AUDIO_CHANNEL_INDEX_MASK_8", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_8)},
-};
-template<>
-const size_t ChannelIndexConverter::mSize = sizeof(ChannelIndexConverter::mTable) /
- sizeof(ChannelIndexConverter::mTable[0]);
-
-
-template <>
-const GainModeConverter::Table GainModeConverter::mTable[] = {
- MAKE_STRING_FROM_ENUM(AUDIO_GAIN_MODE_JOINT),
- MAKE_STRING_FROM_ENUM(AUDIO_GAIN_MODE_CHANNELS),
- MAKE_STRING_FROM_ENUM(AUDIO_GAIN_MODE_RAMP),
-};
-
-template<>
-const size_t GainModeConverter::mSize = sizeof(GainModeConverter::mTable) /
- sizeof(GainModeConverter::mTable[0]);
+#define TERMINATOR { .literal = nullptr }
template <>
const DeviceCategoryConverter::Table DeviceCategoryConverter::mTable[] = {
MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_HEADSET),
MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_SPEAKER),
MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_EARPIECE),
- MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_EXT_MEDIA)
+ MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_EXT_MEDIA),
+ TERMINATOR
};
-template<>
-const size_t DeviceCategoryConverter::mSize = sizeof(DeviceCategoryConverter::mTable) /
- sizeof(DeviceCategoryConverter::mTable[0]);
-
template <>
-const StreamTypeConverter::Table StreamTypeConverter::mTable[] = {
- MAKE_STRING_FROM_ENUM(AUDIO_STREAM_VOICE_CALL),
- MAKE_STRING_FROM_ENUM(AUDIO_STREAM_SYSTEM),
- MAKE_STRING_FROM_ENUM(AUDIO_STREAM_RING),
- MAKE_STRING_FROM_ENUM(AUDIO_STREAM_MUSIC),
- MAKE_STRING_FROM_ENUM(AUDIO_STREAM_ALARM),
- MAKE_STRING_FROM_ENUM(AUDIO_STREAM_NOTIFICATION),
- MAKE_STRING_FROM_ENUM(AUDIO_STREAM_BLUETOOTH_SCO ),
- MAKE_STRING_FROM_ENUM(AUDIO_STREAM_ENFORCED_AUDIBLE),
- MAKE_STRING_FROM_ENUM(AUDIO_STREAM_DTMF),
- MAKE_STRING_FROM_ENUM(AUDIO_STREAM_TTS),
- MAKE_STRING_FROM_ENUM(AUDIO_STREAM_ACCESSIBILITY),
- MAKE_STRING_FROM_ENUM(AUDIO_STREAM_REROUTING),
- MAKE_STRING_FROM_ENUM(AUDIO_STREAM_PATCH),
+const MixTypeConverter::Table MixTypeConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(MIX_TYPE_INVALID),
+ MAKE_STRING_FROM_ENUM(MIX_TYPE_PLAYERS),
+ MAKE_STRING_FROM_ENUM(MIX_TYPE_RECORDERS),
+ TERMINATOR
};
-template<>
-const size_t StreamTypeConverter::mSize = sizeof(StreamTypeConverter::mTable) /
- sizeof(StreamTypeConverter::mTable[0]);
+template <>
+const RouteFlagTypeConverter::Table RouteFlagTypeConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(MIX_ROUTE_FLAG_RENDER),
+ MAKE_STRING_FROM_ENUM(MIX_ROUTE_FLAG_LOOP_BACK),
+ MAKE_STRING_FROM_ENUM(MIX_ROUTE_FLAG_ALL),
+ TERMINATOR
+};
-template <class Traits>
-bool TypeConverter<Traits>::toString(const typename Traits::Type &value, std::string &str)
-{
- for (size_t i = 0; i < mSize; i++) {
- if (mTable[i].value == value) {
- str = mTable[i].literal;
- return true;
- }
- }
- return false;
-}
+template <>
+const RuleTypeConverter::Table RuleTypeConverter::mTable[] = {
+ MAKE_STRING_FROM_ENUM(RULE_EXCLUSION_MASK),
+ MAKE_STRING_FROM_ENUM(RULE_MATCH_ATTRIBUTE_USAGE),
+ MAKE_STRING_FROM_ENUM(RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET),
+ MAKE_STRING_FROM_ENUM(RULE_MATCH_UID),
+ MAKE_STRING_FROM_ENUM(RULE_EXCLUDE_ATTRIBUTE_USAGE),
+ MAKE_STRING_FROM_ENUM(RULE_EXCLUDE_ATTRIBUTE_CAPTURE_PRESET),
+ MAKE_STRING_FROM_ENUM(RULE_EXCLUDE_UID),
+ TERMINATOR
+};
-template <class Traits>
-bool TypeConverter<Traits>::fromString(const std::string &str, typename Traits::Type &result)
-{
- for (size_t i = 0; i < mSize; i++) {
- if (strcmp(mTable[i].literal, str.c_str()) == 0) {
- ALOGV("stringToEnum() found %s", mTable[i].literal);
- result = mTable[i].value;
- return true;
- }
- }
- return false;
-}
-
-template <class Traits>
-void TypeConverter<Traits>::collectionFromString(const std::string &str,
- typename Traits::Collection &collection,
- const char *del)
-{
- char *literal = strdup(str.c_str());
-
- for (const char *cstr = strtok(literal, del); cstr != NULL; cstr = strtok(NULL, del)) {
- typename Traits::Type value;
- if (fromString(cstr, value)) {
- collection.add(value);
- }
- }
- free(literal);
-}
-
-template <class Traits>
-uint32_t TypeConverter<Traits>::maskFromString(const std::string &str, const char *del)
-{
- char *literal = strdup(str.c_str());
- uint32_t value = 0;
- for (const char *cstr = strtok(literal, del); cstr != NULL; cstr = strtok(NULL, del)) {
- typename Traits::Type type;
- if (fromString(cstr, type)) {
- value |= static_cast<uint32_t>(type);
- }
- }
- free(literal);
- return value;
-}
-
-template class TypeConverter<DeviceTraits>;
-template class TypeConverter<OutputFlagTraits>;
-template class TypeConverter<InputFlagTraits>;
-template class TypeConverter<FormatTraits>;
-template class TypeConverter<OutputChannelTraits>;
-template class TypeConverter<InputChannelTraits>;
-template class TypeConverter<ChannelIndexTraits>;
-template class TypeConverter<GainModeTraits>;
-template class TypeConverter<StreamTraits>;
template class TypeConverter<DeviceCategoryTraits>;
+template class TypeConverter<MixTypeTraits>;
+template class TypeConverter<RouteFlagTraits>;
+template class TypeConverter<RuleTraits>;
}; // namespace android
-
diff --git a/services/audiopolicy/config/audio_policy_configuration_generic.xml b/services/audiopolicy/config/audio_policy_configuration_generic.xml
new file mode 100644
index 0000000..58768c3
--- /dev/null
+++ b/services/audiopolicy/config/audio_policy_configuration_generic.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2017 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+ <!-- version section contains a “version” tag in the form “major.minor” e.g version=”1.0” -->
+
+ <!-- Global configuration Declaration -->
+ <globalConfiguration speaker_drc_enabled="false"/>
+
+ <modules>
+ <!-- Primary Audio HAL -->
+ <xi:include href="primary_audio_policy_configuration.xml"/>
+
+ <!-- Remote Submix Audio HAL -->
+ <xi:include href="r_submix_audio_policy_configuration.xml"/>
+
+ </modules>
+ <!-- End of Modules section -->
+
+ <!-- Volume section -->
+
+ <xi:include href="audio_policy_volumes.xml"/>
+ <xi:include href="default_volume_tables.xml"/>
+
+ <!-- End of Volume section -->
+
+</audioPolicyConfiguration>
diff --git a/services/audiopolicy/config/primary_audio_policy_configuration.xml b/services/audiopolicy/config/primary_audio_policy_configuration.xml
new file mode 100644
index 0000000..bf508ac
--- /dev/null
+++ b/services/audiopolicy/config/primary_audio_policy_configuration.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Default Primary Audio HAL Module Audio Policy Configuration include file -->
+<module name="primary" halVersion="2.0">
+ <attachedDevices>
+ <item>Speaker</item>
+ <item>Built-In Mic</item>
+ </attachedDevices>
+ <defaultOutputDevice>Speaker</defaultOutputDevice>
+ <mixPorts>
+ <mixPort name="primary output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="primary input" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000, 16000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <devicePort tagName="Speaker" type="AUDIO_DEVICE_OUT_SPEAKER" role="sink">
+ </devicePort>
+
+ <devicePort tagName="Built-In Mic" type="AUDIO_DEVICE_IN_BUILTIN_MIC" role="source">
+ </devicePort>
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="Speaker"
+ sources="primary output"/>
+ <route type="mix" sink="primary input"
+ sources="Built-In Mic"/>
+ </routes>
+</module>
diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h b/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/Android.mk b/services/audiopolicy/engineconfigurable/Android.mk
old mode 100755
new mode 100644
index 1924d1d..c2105e9
--- a/services/audiopolicy/engineconfigurable/Android.mk
+++ b/services/audiopolicy/engineconfigurable/Android.mk
@@ -39,18 +39,20 @@
LOCAL_MODULE := libaudiopolicyengineconfigurable
LOCAL_MODULE_TAGS := optional
+
LOCAL_STATIC_LIBRARIES := \
- libmedia_helper \
libaudiopolicypfwwrapper \
- libaudiopolicycomponents \
- libxml2
+ libaudiopolicycomponents
LOCAL_SHARED_LIBRARIES := \
liblog \
libcutils \
libutils \
+ liblog \
libaudioutils \
- libparameter
+ libparameter \
+ libmedia_helper \
+ libxml2
include $(BUILD_SHARED_LIBRARY)
diff --git a/services/audiopolicy/engineconfigurable/include/AudioPolicyEngineInstance.h b/services/audiopolicy/engineconfigurable/include/AudioPolicyEngineInstance.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/include/EngineDefinition.h b/services/audiopolicy/engineconfigurable/include/EngineDefinition.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h b/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/ParameterFrameworkConfigurationPolicy.xml.in b/services/audiopolicy/engineconfigurable/parameter-framework/examples/ParameterFrameworkConfigurationPolicy.xml.in
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/PolicyConfigurableDomains.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/PolicyConfigurableDomains.xml
index aa2af0f..b43f83b 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/PolicyConfigurableDomains.xml
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/PolicyConfigurableDomains.xml
@@ -3062,7 +3062,7 @@
<CompoundRule Type="All">
<SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
<SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
- <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="Is" Value="ForceNoBtA2dp"/>
+ <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
<SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dp"/>
</CompoundRule>
</Configuration>
@@ -3070,7 +3070,7 @@
<CompoundRule Type="All">
<SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
<SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
- <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="Is" Value="ForceNoBtA2dp"/>
+ <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
<SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpHeadphones"/>
</CompoundRule>
</Configuration>
@@ -3078,7 +3078,7 @@
<CompoundRule Type="All">
<SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
<SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
- <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="Is" Value="ForceNoBtA2dp"/>
+ <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
<SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpSpeaker"/>
</CompoundRule>
</Configuration>
@@ -6472,7 +6472,7 @@
<SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
<SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
<SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="UsbAccessory"/>
- <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceSpeaker"/>
+ <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
</CompoundRule>
</CompoundRule>
<SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="UsbDevice"/>
@@ -8416,6 +8416,7 @@
<ConfigurableElement Path="/Policy/policy/usages/assistance_navigation_guidance/applicable_strategy/strategy"/>
<ConfigurableElement Path="/Policy/policy/usages/assistance_sonification/applicable_strategy/strategy"/>
<ConfigurableElement Path="/Policy/policy/usages/game/applicable_strategy/strategy"/>
+ <ConfigurableElement Path="/Policy/policy/usages/assistant/applicable_strategy/strategy"/>
</ConfigurableElements>
<Settings>
<Configuration Name="Calibration">
@@ -8461,6 +8462,9 @@
<ConfigurableElement Path="/Policy/policy/usages/game/applicable_strategy/strategy">
<EnumParameter Name="strategy">media</EnumParameter>
</ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/usages/assistant/applicable_strategy/strategy">
+ <EnumParameter Name="strategy">media</EnumParameter>
+ </ConfigurableElement>
</Configuration>
</Settings>
</ConfigurableDomain>
@@ -8738,6 +8742,7 @@
<ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/loopback"/>
<ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/ip"/>
<ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/bus"/>
+ <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/stub"/>
</ConfigurableElements>
<Settings>
<Configuration Name="Calibration">
@@ -9428,6 +9433,9 @@
<ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/bus">
<BitParameter Name="bus">0</BitParameter>
</ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/stub">
+ <BitParameter Name="stub">0</BitParameter>
+ </ConfigurableElement>
</Configuration>
</Settings>
</ConfigurableDomain>
@@ -9758,7 +9766,7 @@
</Configuration>
</Settings>
</ConfigurableDomain>
- <ConfigurableDomain Name="DeviceForInputSource.VoiceRecognitionAndHotword" SequenceAware="false">
+ <ConfigurableDomain Name="DeviceForInputSource.VoiceRecognitionAndUnprocessedAndHotword" SequenceAware="false">
<Configurations>
<Configuration Name="ScoHeadset">
<CompoundRule Type="All">
@@ -9790,6 +9798,10 @@
<ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/wired_headset"/>
<ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/usb_device"/>
<ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic"/>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bluetooth_sco_headset"/>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/wired_headset"/>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/usb_device"/>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/builtin_mic"/>
<ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset"/>
<ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/wired_headset"/>
<ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/usb_device"/>
@@ -9809,6 +9821,18 @@
<ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic">
<BitParameter Name="builtin_mic">0</BitParameter>
</ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bluetooth_sco_headset">
+ <BitParameter Name="bluetooth_sco_headset">1</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/wired_headset">
+ <BitParameter Name="wired_headset">0</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/usb_device">
+ <BitParameter Name="usb_device">0</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/builtin_mic">
+ <BitParameter Name="builtin_mic">0</BitParameter>
+ </ConfigurableElement>
<ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset">
<BitParameter Name="bluetooth_sco_headset">1</BitParameter>
</ConfigurableElement>
@@ -9835,6 +9859,18 @@
<ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic">
<BitParameter Name="builtin_mic">0</BitParameter>
</ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bluetooth_sco_headset">
+ <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/wired_headset">
+ <BitParameter Name="wired_headset">1</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/usb_device">
+ <BitParameter Name="usb_device">0</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/builtin_mic">
+ <BitParameter Name="builtin_mic">0</BitParameter>
+ </ConfigurableElement>
<ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset">
<BitParameter Name="bluetooth_sco_headset">0</BitParameter>
</ConfigurableElement>
@@ -9861,6 +9897,18 @@
<ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic">
<BitParameter Name="builtin_mic">0</BitParameter>
</ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bluetooth_sco_headset">
+ <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/wired_headset">
+ <BitParameter Name="wired_headset">0</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/usb_device">
+ <BitParameter Name="usb_device">1</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/builtin_mic">
+ <BitParameter Name="builtin_mic">0</BitParameter>
+ </ConfigurableElement>
<ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset">
<BitParameter Name="bluetooth_sco_headset">0</BitParameter>
</ConfigurableElement>
@@ -9887,6 +9935,18 @@
<ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic">
<BitParameter Name="builtin_mic">1</BitParameter>
</ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bluetooth_sco_headset">
+ <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/wired_headset">
+ <BitParameter Name="wired_headset">0</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/usb_device">
+ <BitParameter Name="usb_device">0</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/builtin_mic">
+ <BitParameter Name="builtin_mic">1</BitParameter>
+ </ConfigurableElement>
<ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset">
<BitParameter Name="bluetooth_sco_headset">0</BitParameter>
</ConfigurableElement>
@@ -9913,6 +9973,18 @@
<ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic">
<BitParameter Name="builtin_mic">0</BitParameter>
</ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bluetooth_sco_headset">
+ <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/wired_headset">
+ <BitParameter Name="wired_headset">0</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/usb_device">
+ <BitParameter Name="usb_device">0</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/builtin_mic">
+ <BitParameter Name="builtin_mic">0</BitParameter>
+ </ConfigurableElement>
<ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset">
<BitParameter Name="bluetooth_sco_headset">0</BitParameter>
</ConfigurableElement>
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw
index ecd56b0..eb11980 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw
@@ -375,7 +375,7 @@
TelephonyMode IsNot InCall
TelephonyMode IsNot InCommunication
AvailableOutputDevices Excludes UsbAccessory
- ForceUseForCommunication Is ForceSpeaker
+ ForceUseForMedia IsNot ForceSpeaker
AvailableOutputDevices Includes UsbDevice
component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification_respectful.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification_respectful.pfw
index b30aa4c..cee7cd1 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification_respectful.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification_respectful.pfw
@@ -78,7 +78,7 @@
#
TelephonyMode IsNot InCall
TelephonyMode IsNot InCommunication
- ForceUseForMedia Is ForceNoBtA2dp
+ ForceUseForMedia IsNot ForceNoBtA2dp
AvailableOutputDevices Includes BluetoothA2dp
component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
@@ -105,7 +105,7 @@
#
TelephonyMode IsNot InCall
TelephonyMode IsNot InCommunication
- ForceUseForMedia Is ForceNoBtA2dp
+ ForceUseForMedia IsNot ForceNoBtA2dp
AvailableOutputDevices Includes BluetoothA2dpHeadphones
component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
@@ -132,7 +132,7 @@
#
TelephonyMode IsNot InCall
TelephonyMode IsNot InCommunication
- ForceUseForMedia Is ForceNoBtA2dp
+ ForceUseForMedia IsNot ForceNoBtA2dp
AvailableOutputDevices Includes BluetoothA2dpSpeaker
component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_stream.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_stream.pfw
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_usage.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_usage.pfw
index 3f5da13..b3115e7 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_usage.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_usage.pfw
@@ -16,6 +16,7 @@
/Policy/policy/usages/assistance_navigation_guidance/applicable_strategy/strategy = media
/Policy/policy/usages/assistance_sonification/applicable_strategy/strategy = media
/Policy/policy/usages/game/applicable_strategy/strategy = media
+ /Policy/policy/usages/assistant/applicable_strategy/strategy = media
domain: AssistanceAccessibility
conf: Sonification
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicyClass.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicyClass.xml
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem-CommonTypes.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem-CommonTypes.xml
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml
old mode 100755
new mode 100644
index 71b2b62..ad9c356
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml
@@ -68,7 +68,7 @@
<!--#################### USAGE BEGIN ####################-->
- <ComponentType Name="Usages" Description="associated to audio_stream_type_t definition,
+ <ComponentType Name="Usages" Description="associated to audio_usage_t definition,
identifier mapping must match the value of the enum">
<Component Name="unknown" Type="Usage" Mapping="Amend1:Unknown,Identifier:0"/>
<Component Name="media" Type="Usage" Mapping="Amend1:Media,Identifier:1"/>
@@ -97,6 +97,7 @@
<Component Name="game" Type="Usage" Mapping="Amend1:BluetoothSco,Identifier:14"/>
<Component Name="virtual_source" Type="Usage"
Mapping="Amend1:VirtualSource,Identifier:15"/>
+ <Component Name="assistant" Type="Usage" Mapping="Amend1:Assistant,Identifier:16"/>
</ComponentType>
<!--#################### USAGE END ####################-->
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/policy_criteria.txt b/services/audiopolicy/engineconfigurable/parameter-framework/examples/policy_criteria.txt
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicyMappingKeys.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicyMappingKeys.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystemBuilder.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystemBuilder.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/src/Collection.h b/services/audiopolicy/engineconfigurable/src/Collection.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/src/Element.h b/services/audiopolicy/engineconfigurable/src/Element.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.cpp b/services/audiopolicy/engineconfigurable/src/Engine.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.h b/services/audiopolicy/engineconfigurable/src/Engine.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/src/EngineInstance.cpp b/services/audiopolicy/engineconfigurable/src/EngineInstance.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/src/InputSource.cpp b/services/audiopolicy/engineconfigurable/src/InputSource.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/src/InputSource.h b/services/audiopolicy/engineconfigurable/src/InputSource.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/src/Strategy.cpp b/services/audiopolicy/engineconfigurable/src/Strategy.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/src/Strategy.h b/services/audiopolicy/engineconfigurable/src/Strategy.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/src/Stream.cpp b/services/audiopolicy/engineconfigurable/src/Stream.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/src/Stream.h b/services/audiopolicy/engineconfigurable/src/Stream.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/src/Usage.cpp b/services/audiopolicy/engineconfigurable/src/Usage.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/src/Usage.h b/services/audiopolicy/engineconfigurable/src/Usage.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/wrapper/Android.mk b/services/audiopolicy/engineconfigurable/wrapper/Android.mk
index 9788e0e..36e0f42 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/Android.mk
+++ b/services/audiopolicy/engineconfigurable/wrapper/Android.mk
@@ -14,11 +14,9 @@
LOCAL_SRC_FILES:= ParameterManagerWrapper.cpp
-LOCAL_STATIC_LIBRARIES := \
- libmedia_helper \
-
LOCAL_SHARED_LIBRARIES := \
libparameter \
+ libmedia_helper
LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
diff --git a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
old mode 100755
new mode 100644
index fe15d86..9b0442e
--- a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
+++ b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
@@ -22,7 +22,7 @@
#include <ParameterMgrPlatformConnector.h>
#include <SelectionCriterionTypeInterface.h>
#include <SelectionCriterionInterface.h>
-#include <convert.h>
+#include <media/convert.h>
#include <algorithm>
#include <cutils/config_utils.h>
#include <cutils/misc.h>
diff --git a/services/audiopolicy/engineconfigurable/wrapper/audio_policy_criteria_conf.h b/services/audiopolicy/engineconfigurable/wrapper/audio_policy_criteria_conf.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/wrapper/config/audio_policy_criteria.conf b/services/audiopolicy/engineconfigurable/wrapper/config/audio_policy_criteria.conf
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h b/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/enginedefault/Android.mk b/services/audiopolicy/enginedefault/Android.mk
old mode 100755
new mode 100644
index 6a5655d..cbbe306
--- a/services/audiopolicy/enginedefault/Android.mk
+++ b/services/audiopolicy/enginedefault/Android.mk
@@ -35,13 +35,12 @@
LOCAL_MODULE_TAGS := optional
LOCAL_STATIC_LIBRARIES := \
- libmedia_helper \
libaudiopolicycomponents \
- libxml2
LOCAL_SHARED_LIBRARIES += \
liblog \
libcutils \
libutils \
+ libmedia_helper
include $(BUILD_SHARED_LIBRARY)
diff --git a/services/audiopolicy/enginedefault/include/AudioPolicyEngineInstance.h b/services/audiopolicy/enginedefault/include/AudioPolicyEngineInstance.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
old mode 100755
new mode 100644
index 44683df..096ffd1
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -202,6 +202,7 @@
case AUDIO_USAGE_MEDIA:
case AUDIO_USAGE_GAME:
+ case AUDIO_USAGE_ASSISTANT:
case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
return STRATEGY_MEDIA;
@@ -320,8 +321,7 @@
if (((availableInputDevices.types() &
AUDIO_DEVICE_IN_TELEPHONY_RX & ~AUDIO_DEVICE_BIT_IN) == 0) ||
(((txDevice & availPrimaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
- (primaryOutput->getAudioPort()->getModuleVersion() <
- AUDIO_DEVICE_API_VERSION_3_0))) {
+ (primaryOutput->getAudioPort()->getModuleVersionMajor() < 3))) {
availableOutputDevicesType = availPrimaryOutputDevices;
}
}
@@ -356,6 +356,8 @@
if (device) break;
device = availableOutputDevicesType & AUDIO_DEVICE_OUT_LINE;
if (device) break;
+ device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_HEADSET;
+ if (device) break;
device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_DEVICE;
if (device) break;
if (!isInCall()) {
@@ -509,6 +511,9 @@
device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADSET;
}
if (device2 == AUDIO_DEVICE_NONE) {
+ device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_HEADSET;
+ }
+ if (device2 == AUDIO_DEVICE_NONE) {
device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_ACCESSORY;
}
if (device2 == AUDIO_DEVICE_NONE) {
@@ -591,6 +596,8 @@
device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
} else if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
device = AUDIO_DEVICE_IN_WIRED_HEADSET;
+ } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_HEADSET) {
+ device = AUDIO_DEVICE_IN_USB_HEADSET;
} else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
device = AUDIO_DEVICE_IN_USB_DEVICE;
} else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
@@ -621,6 +628,8 @@
default: // FORCE_NONE
if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
device = AUDIO_DEVICE_IN_WIRED_HEADSET;
+ } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_HEADSET) {
+ device = AUDIO_DEVICE_IN_USB_HEADSET;
} else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
device = AUDIO_DEVICE_IN_USB_DEVICE;
} else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
@@ -646,6 +655,8 @@
device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
} else if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
device = AUDIO_DEVICE_IN_WIRED_HEADSET;
+ } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_HEADSET) {
+ device = AUDIO_DEVICE_IN_USB_HEADSET;
} else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
device = AUDIO_DEVICE_IN_USB_DEVICE;
} else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
diff --git a/services/audiopolicy/enginedefault/src/Engine.h b/services/audiopolicy/enginedefault/src/Engine.h
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/enginedefault/src/EngineInstance.cpp b/services/audiopolicy/enginedefault/src/EngineInstance.cpp
old mode 100755
new mode 100644
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 8cb5e0f..d9859ea 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -24,20 +24,22 @@
#define ALOGVV(a...) do { } while(0)
#endif
-#define AUDIO_POLICY_XML_CONFIG_FILE "/system/etc/audio_policy_configuration.xml"
+#define AUDIO_POLICY_XML_CONFIG_FILE_PATH_MAX_LENGTH 128
+#define AUDIO_POLICY_XML_CONFIG_FILE_NAME "audio_policy_configuration.xml"
#include <inttypes.h>
#include <math.h>
#include <AudioPolicyManagerInterface.h>
#include <AudioPolicyEngineInstance.h>
+#include <cutils/atomic.h>
#include <cutils/properties.h>
#include <utils/Log.h>
-#include <hardware/audio.h>
-#include <hardware/audio_effect.h>
#include <media/AudioParameter.h>
#include <media/AudioPolicyHelper.h>
#include <soundtrigger/SoundTrigger.h>
+#include <system/audio.h>
+#include <audio_policy_conf.h>
#include "AudioPolicyManager.h"
#ifndef USE_XML_AUDIO_POLICY_CONF
#include <ConfigParsingUtils.h>
@@ -70,7 +72,7 @@
{
AudioParameter param(device_address);
const String8 key(state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE ?
- AUDIO_PARAMETER_DEVICE_CONNECT : AUDIO_PARAMETER_DEVICE_DISCONNECT);
+ AudioParameter::keyStreamConnect : AudioParameter::keyStreamDisconnect);
param.addInt(key, device);
mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
}
@@ -492,15 +494,17 @@
// FIXME: would be better to refine to only inputs whose profile connects to the
// call TX device but this information is not in the audio patch and logic here must be
// symmetric to the one in startInput()
- audio_io_handle_t activeInput = mInputs.getActiveInput();
- if (activeInput != 0) {
- sp<AudioInputDescriptor> activeDesc = mInputs.valueFor(activeInput);
- if (activeDesc->getModuleHandle() == txSourceDeviceDesc->getModuleHandle()) {
- //FIXME: consider all active sessions
- AudioSessionCollection activeSessions = activeDesc->getActiveAudioSessions();
- audio_session_t activeSession = activeSessions.keyAt(0);
- stopInput(activeInput, activeSession);
- releaseInput(activeInput, activeSession);
+ Vector<sp <AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
+ for (size_t i = 0; i < activeInputs.size(); i++) {
+ sp<AudioInputDescriptor> activeDesc = activeInputs[i];
+ if (activeDesc->hasSameHwModuleAs(txSourceDeviceDesc)) {
+ AudioSessionCollection activeSessions =
+ activeDesc->getAudioSessions(true /*activeOnly*/);
+ for (size_t j = 0; j < activeSessions.size(); j++) {
+ audio_session_t activeSession = activeSessions.keyAt(j);
+ stopInput(activeDesc->mIoHandle, activeSession);
+ releaseInput(activeDesc->mIoHandle, activeSession);
+ }
}
}
@@ -634,6 +638,9 @@
audio_policy_forced_cfg_t config)
{
ALOGV("setForceUse() usage %d, config %d, mPhoneState %d", usage, config, mEngine->getPhoneState());
+ if (config == mEngine->getForceUse(usage)) {
+ return;
+ }
if (mEngine->setForceUse(usage, config) != NO_ERROR) {
ALOGW("setForceUse() could not set force cfg %d for usage %d", config, usage);
@@ -671,15 +678,16 @@
}
}
- audio_io_handle_t activeInput = mInputs.getActiveInput();
- if (activeInput != 0) {
- sp<AudioInputDescriptor> activeDesc = mInputs.valueFor(activeInput);
- audio_devices_t newDevice = getNewInputDevice(activeInput);
+ Vector<sp <AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
+ for (size_t i = 0; i < activeInputs.size(); i++) {
+ sp<AudioInputDescriptor> activeDesc = activeInputs[i];
+ audio_devices_t newDevice = getNewInputDevice(activeDesc);
// Force new input selection if the new device can not be reached via current input
- if (activeDesc->mProfile->getSupportedDevices().types() & (newDevice & ~AUDIO_DEVICE_BIT_IN)) {
- setInputDevice(activeInput, newDevice);
+ if (activeDesc->mProfile->getSupportedDevices().types() &
+ (newDevice & ~AUDIO_DEVICE_BIT_IN)) {
+ setInputDevice(activeDesc->mIoHandle, newDevice);
} else {
- closeInput(activeInput);
+ closeInput(activeDesc->mIoHandle);
}
}
}
@@ -749,9 +757,8 @@
ALOGV("getOutput() device %d, stream %d, samplingRate %d, format %x, channelMask %x, flags %x",
device, stream, samplingRate, format, channelMask, flags);
- return getOutputForDevice(device, AUDIO_SESSION_ALLOCATE,
- stream, samplingRate,format, channelMask,
- flags, offloadInfo);
+ return getOutputForDevice(device, AUDIO_SESSION_ALLOCATE, stream, samplingRate, format,
+ channelMask, flags, offloadInfo);
}
status_t AudioPolicyManager::getOutputForAttr(const audio_attributes_t *attr,
@@ -759,12 +766,10 @@
audio_session_t session,
audio_stream_type_t *stream,
uid_t uid,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
+ const audio_config_t *config,
audio_output_flags_t flags,
audio_port_handle_t selectedDeviceId,
- const audio_offload_info_t *offloadInfo)
+ audio_port_handle_t *portId)
{
audio_attributes_t attributes;
if (attr != NULL) {
@@ -782,10 +787,16 @@
}
stream_type_to_audio_attributes(*stream, &attributes);
}
+
+ // TODO: check for existing client for this port ID
+ if (*portId == AUDIO_PORT_HANDLE_NONE) {
+ *portId = AudioPort::getNextUniqueId();
+ }
+
sp<SwAudioOutputDescriptor> desc;
if (mPolicyMixes.getOutputForAttr(attributes, uid, desc) == NO_ERROR) {
ALOG_ASSERT(desc != 0, "Invalid desc returned by getOutputForAttr");
- if (!audio_has_proportional_frames(format)) {
+ if (!audio_has_proportional_frames(config->format)) {
return BAD_VALUE;
}
*stream = streamTypefromAttributesInt(&attributes);
@@ -823,11 +834,11 @@
}
ALOGV("getOutputForAttr() device 0x%x, samplingRate %d, format %x, channelMask %x, flags %x",
- device, samplingRate, format, channelMask, flags);
+ device, config->sample_rate, config->format, config->channel_mask, flags);
*output = getOutputForDevice(device, session, *stream,
- samplingRate, format, channelMask,
- flags, offloadInfo);
+ config->sample_rate, config->format, config->channel_mask,
+ flags, &config->offload_info);
if (*output == AUDIO_IO_HANDLE_NONE) {
mOutputRoutes.removeRoute(session);
return INVALID_OPERATION;
@@ -838,7 +849,7 @@
audio_io_handle_t AudioPolicyManager::getOutputForDevice(
audio_devices_t device,
- audio_session_t session __unused,
+ audio_session_t session,
audio_stream_type_t stream,
uint32_t samplingRate,
audio_format_t format,
@@ -947,13 +958,22 @@
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
if (!desc->isDuplicated() && (profile == desc->mProfile)) {
outputDesc = desc;
- // reuse direct output if currently open and configured with same parameters
+ // reuse direct output if currently open by the same client
+ // and configured with same parameters
if ((samplingRate == outputDesc->mSamplingRate) &&
- audio_formats_match(format, outputDesc->mFormat) &&
- (channelMask == outputDesc->mChannelMask)) {
- outputDesc->mDirectOpenCount++;
- ALOGV("getOutput() reusing direct output %d", mOutputs.keyAt(i));
- return mOutputs.keyAt(i);
+ audio_formats_match(format, outputDesc->mFormat) &&
+ (channelMask == outputDesc->mChannelMask)) {
+ if (session == outputDesc->mDirectClientSession) {
+ outputDesc->mDirectOpenCount++;
+ ALOGV("getOutput() reusing direct output %d for session %d",
+ mOutputs.keyAt(i), session);
+ return mOutputs.keyAt(i);
+ } else {
+ ALOGV("getOutput() do not reuse direct output because current client (%d) "
+ "is not the same as requesting client (%d)",
+ outputDesc->mDirectClientSession, session);
+ goto non_direct_output;
+ }
}
}
}
@@ -989,11 +1009,14 @@
if (offloadInfo != NULL) {
config.offload_info = *offloadInfo;
}
+ DeviceVector outputDevices = mAvailableOutputDevices.getDevicesFromType(device);
+ String8 address = outputDevices.size() > 0 ? outputDevices.itemAt(0)->mAddress
+ : String8("");
status = mpClientInterface->openOutput(profile->getModuleHandle(),
&output,
&config,
&outputDesc->mDevice,
- String8(""),
+ address,
&outputDesc->mLatency,
outputDesc->mFlags);
@@ -1021,13 +1044,9 @@
outputDesc->mRefCount[stream] = 0;
outputDesc->mStopTime[stream] = 0;
outputDesc->mDirectOpenCount = 1;
+ outputDesc->mDirectClientSession = session;
- audio_io_handle_t srcOutput = getOutputForEffect();
addOutput(output, outputDesc);
- audio_io_handle_t dstOutput = getOutputForEffect();
- if (dstOutput == output) {
- mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX, srcOutput, dstOutput);
- }
mPreviousOutputs = mOutputs;
ALOGV("getOutput() returns new direct output %d", output);
mpClientInterface->onAudioPortListUpdate();
@@ -1235,11 +1254,16 @@
// necessary for a correct control of hardware output routing by startOutput() and stopOutput()
outputDesc->changeRefCount(stream, 1);
+ if (stream == AUDIO_STREAM_MUSIC) {
+ selectOutputForMusicEffects();
+ }
+
if (outputDesc->mRefCount[stream] == 1 || device != AUDIO_DEVICE_NONE) {
// starting an output being rerouted?
if (device == AUDIO_DEVICE_NONE) {
device = getNewOutputDevice(outputDesc, false /*fromCache*/);
}
+
routing_strategy strategy = getStrategy(stream);
bool shouldWait = (strategy == STRATEGY_SONIFICATION) ||
(strategy == STRATEGY_SONIFICATION_RESPECTFUL) ||
@@ -1392,6 +1416,9 @@
// update the outputs if stopping one with a stream that can affect notification routing
handleNotificationRoutingForStream(stream);
}
+ if (stream == AUDIO_STREAM_MUSIC) {
+ selectOutputForMusicEffects();
+ }
return NO_ERROR;
} else {
ALOGW("stopOutput() refcount is already 0");
@@ -1435,13 +1462,6 @@
}
if (--desc->mDirectOpenCount == 0) {
closeOutput(output);
- // If effects where present on the output, audioflinger moved them to the primary
- // output by default: move them back to the appropriate output.
- audio_io_handle_t dstOutput = getOutputForEffect();
- if (hasPrimaryOutput() && dstOutput != mPrimaryOutput->mIoHandle) {
- mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX,
- mPrimaryOutput->mIoHandle, dstOutput);
- }
mpClientInterface->onAudioPortListUpdate();
}
}
@@ -1452,19 +1472,19 @@
audio_io_handle_t *input,
audio_session_t session,
uid_t uid,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
+ const audio_config_base_t *config,
audio_input_flags_t flags,
audio_port_handle_t selectedDeviceId,
- input_type_t *inputType)
+ input_type_t *inputType,
+ audio_port_handle_t *portId)
{
ALOGV("getInputForAttr() source %d, samplingRate %d, format %d, channelMask %x,"
"session %d, flags %#x",
- attr->source, samplingRate, format, channelMask, session, flags);
+ attr->source, config->sample_rate, config->format, config->channel_mask, session, flags);
*input = AUDIO_IO_HANDLE_NONE;
*inputType = API_INPUT_INVALID;
+
audio_devices_t device;
// handle legacy remote submix case where the address was not always specified
String8 address = String8("");
@@ -1477,6 +1497,11 @@
}
halInputSource = inputSource;
+ // TODO: check for existing client for this port ID
+ if (*portId == AUDIO_PORT_HANDLE_NONE) {
+ *portId = AudioPort::getNextUniqueId();
+ }
+
// Explicit routing?
sp<DeviceDescriptor> deviceDesc;
for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
@@ -1526,12 +1551,13 @@
}
*input = getInputForDevice(device, address, session, uid, inputSource,
- samplingRate, format, channelMask, flags,
+ config->sample_rate, config->format, config->channel_mask, flags,
policyMix);
if (*input == AUDIO_IO_HANDLE_NONE) {
mInputRoutes.removeRoute(session);
return INVALID_OPERATION;
}
+
ALOGV("getInputForAttr() returns input type = %d", *inputType);
return NO_ERROR;
}
@@ -1608,14 +1634,24 @@
isSoundTrigger,
policyMix, mpClientInterface);
-// TODO enable input reuse
+// FIXME: disable concurrent capture until UI is ready
#if 0
// reuse an open input if possible
+ sp<AudioInputDescriptor> reusedInputDesc;
for (size_t i = 0; i < mInputs.size(); i++) {
sp<AudioInputDescriptor> desc = mInputs.valueAt(i);
- // reuse input if it shares the same profile and same sound trigger attribute
- if (profile == desc->mProfile &&
- isSoundTrigger == desc->isSoundTrigger()) {
+ // reuse input if:
+ // - it shares the same profile
+ // AND
+ // - it is not a reroute submix input
+ // AND
+ // - it is: not used for sound trigger
+ // OR
+ // used for sound trigger and all clients use the same session ID
+ //
+ if ((profile == desc->mProfile) &&
+ (isSoundTrigger == desc->isSoundTrigger()) &&
+ !is_virtual_input_device(device)) {
sp<AudioSession> as = desc->getAudioSession(session);
if (as != 0) {
@@ -1625,13 +1661,41 @@
} else {
ALOGW("getInputForDevice() record with different attributes"
" exists for session %d", session);
- return input;
+ continue;
}
+ } else if (isSoundTrigger) {
+ continue;
+ }
+
+ // Reuse the already opened input stream on this profile if:
+ // - the new capture source is background OR
+ // - the path requested configurations match OR
+ // - the new source priority is less than the highest source priority on this input
+ // If the input stream cannot be reused, close it before opening a new stream
+ // on the same profile for the new client so that the requested path configuration
+ // can be selected.
+ if (!isConcurrentSource(inputSource) &&
+ ((desc->mSamplingRate != samplingRate ||
+ desc->mChannelMask != channelMask ||
+ !audio_formats_match(desc->mFormat, format)) &&
+ (source_priority(desc->getHighestPrioritySource(false /*activeOnly*/)) <
+ source_priority(inputSource)))) {
+ reusedInputDesc = desc;
+ continue;
} else {
desc->addAudioSession(session, audioSession);
+ ALOGV("%s: reusing input %d", __FUNCTION__, mInputs.keyAt(i));
+ return mInputs.keyAt(i);
}
- ALOGV("getInputForDevice() reusing input %d", mInputs.keyAt(i));
- return mInputs.keyAt(i);
+ }
+ }
+
+ if (reusedInputDesc != 0) {
+ AudioSessionCollection sessions = reusedInputDesc->getAudioSessions(false /*activeOnly*/);
+ for (size_t j = 0; j < sessions.size(); j++) {
+ audio_session_t currentSession = sessions.keyAt(j);
+ stopInput(reusedInputDesc->mIoHandle, currentSession);
+ releaseInput(reusedInputDesc->mIoHandle, currentSession);
}
}
#endif
@@ -1641,6 +1705,12 @@
config.channel_mask = profileChannelMask;
config.format = profileFormat;
+ if (address == "") {
+ DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromType(device);
+ // the inputs vector must be of size 1, but we don't want to crash here
+ address = inputDevices.size() > 0 ? inputDevices.itemAt(0)->mAddress : String8("");
+ }
+
status_t status = mpClientInterface->openInput(profile->getModuleHandle(),
&input,
&config,
@@ -1677,10 +1747,56 @@
return input;
}
+//static
+bool AudioPolicyManager::isConcurrentSource(audio_source_t source)
+{
+ return (source == AUDIO_SOURCE_HOTWORD) ||
+ (source == AUDIO_SOURCE_VOICE_RECOGNITION) ||
+ (source == AUDIO_SOURCE_FM_TUNER);
+}
+
+bool AudioPolicyManager::isConcurentCaptureAllowed(const sp<AudioInputDescriptor>& inputDesc,
+ const sp<AudioSession>& audioSession)
+{
+ // Do not allow capture if an active voice call is using a software patch and
+ // the call TX source device is on the same HW module.
+ // FIXME: would be better to refine to only inputs whose profile connects to the
+ // call TX device but this information is not in the audio patch
+ if (mCallTxPatch != 0 &&
+ inputDesc->getModuleHandle() == mCallTxPatch->mPatch.sources[0].ext.device.hw_module) {
+ return false;
+ }
+
+ // starting concurrent capture is enabled if:
+ // 1) capturing for re-routing
+ // 2) capturing for HOTWORD source
+ // 3) capturing for FM TUNER source
+ // 3) All other active captures are either for re-routing or HOTWORD
+
+ if (is_virtual_input_device(inputDesc->mDevice) ||
+ isConcurrentSource(audioSession->inputSource())) {
+ return true;
+ }
+
+ Vector< sp<AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
+ for (size_t i = 0; i < activeInputs.size(); i++) {
+ sp<AudioInputDescriptor> activeInput = activeInputs[i];
+ if (!isConcurrentSource(activeInput->inputSource(true)) &&
+ !is_virtual_input_device(activeInput->mDevice)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+
status_t AudioPolicyManager::startInput(audio_io_handle_t input,
- audio_session_t session)
+ audio_session_t session,
+ concurrency_type__mask_t *concurrency)
{
ALOGV("startInput() input %d", input);
+ *concurrency = API_INPUT_CONCURRENCY_NONE;
ssize_t index = mInputs.indexOfKey(input);
if (index < 0) {
ALOGW("startInput() unknown input %d", input);
@@ -1694,86 +1810,132 @@
return BAD_VALUE;
}
- // virtual input devices are compatible with other input devices
+// FIXME: disable concurrent capture until UI is ready
+#if 0
+ if (!isConcurentCaptureAllowed(inputDesc, audioSession)) {
+ ALOGW("startInput(%d) failed: other input already started", input);
+ return INVALID_OPERATION;
+ }
+
+ if (isInCall()) {
+ *concurrency |= API_INPUT_CONCURRENCY_CALL;
+ }
+ if (mInputs.activeInputsCountOnDevices() != 0) {
+ *concurrency |= API_INPUT_CONCURRENCY_CAPTURE;
+ }
+#else
if (!is_virtual_input_device(inputDesc->mDevice)) {
+ if (mCallTxPatch != 0 &&
+ inputDesc->getModuleHandle() == mCallTxPatch->mPatch.sources[0].ext.device.hw_module) {
+ ALOGW("startInput(%d) failed: call in progress", input);
+ return INVALID_OPERATION;
+ }
- // for a non-virtual input device, check if there is another (non-virtual) active input
- audio_io_handle_t activeInput = mInputs.getActiveInput();
- if (activeInput != 0 && activeInput != input) {
+ Vector< sp<AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
+ for (size_t i = 0; i < activeInputs.size(); i++) {
+ sp<AudioInputDescriptor> activeDesc = activeInputs[i];
- // If the already active input uses AUDIO_SOURCE_HOTWORD then it is closed,
- // otherwise the active input continues and the new input cannot be started.
- sp<AudioInputDescriptor> activeDesc = mInputs.valueFor(activeInput);
- if ((activeDesc->inputSource() == AUDIO_SOURCE_HOTWORD) &&
- !activeDesc->hasPreemptedSession(session)) {
- ALOGW("startInput(%d) preempting low-priority input %d", input, activeInput);
- //FIXME: consider all active sessions
- AudioSessionCollection activeSessions = activeDesc->getActiveAudioSessions();
- audio_session_t activeSession = activeSessions.keyAt(0);
- SortedVector<audio_session_t> sessions =
- activeDesc->getPreemptedSessions();
- sessions.add(activeSession);
- inputDesc->setPreemptedSessions(sessions);
- stopInput(activeInput, activeSession);
- releaseInput(activeInput, activeSession);
+ if (is_virtual_input_device(activeDesc->mDevice)) {
+ continue;
+ }
+
+ audio_source_t activeSource = activeDesc->inputSource(true);
+ if (audioSession->inputSource() == AUDIO_SOURCE_HOTWORD) {
+ if (activeSource == AUDIO_SOURCE_HOTWORD) {
+ if (activeDesc->hasPreemptedSession(session)) {
+ ALOGW("startInput(%d) failed for HOTWORD: "
+ "other input %d already started for HOTWORD",
+ input, activeDesc->mIoHandle);
+ return INVALID_OPERATION;
+ }
+ } else {
+ ALOGV("startInput(%d) failed for HOTWORD: other input %d already started",
+ input, activeDesc->mIoHandle);
+ return INVALID_OPERATION;
+ }
} else {
- ALOGE("startInput(%d) failed: other input %d already started", input, activeInput);
- return INVALID_OPERATION;
+ if (activeSource != AUDIO_SOURCE_HOTWORD) {
+ ALOGW("startInput(%d) failed: other input %d already started",
+ input, activeDesc->mIoHandle);
+ return INVALID_OPERATION;
+ }
}
}
- // Do not allow capture if an active voice call is using a software patch and
- // the call TX source device is on the same HW module.
- // FIXME: would be better to refine to only inputs whose profile connects to the
- // call TX device but this information is not in the audio patch
- if (mCallTxPatch != 0 &&
- inputDesc->getModuleHandle() == mCallTxPatch->mPatch.sources[0].ext.device.hw_module) {
- return INVALID_OPERATION;
+ // if capture is allowed, preempt currently active HOTWORD captures
+ for (size_t i = 0; i < activeInputs.size(); i++) {
+ sp<AudioInputDescriptor> activeDesc = activeInputs[i];
+
+ if (is_virtual_input_device(activeDesc->mDevice)) {
+ continue;
+ }
+
+ audio_source_t activeSource = activeDesc->inputSource(true);
+ if (activeSource == AUDIO_SOURCE_HOTWORD) {
+ AudioSessionCollection activeSessions =
+ activeDesc->getAudioSessions(true /*activeOnly*/);
+ audio_session_t activeSession = activeSessions.keyAt(0);
+ audio_io_handle_t activeHandle = activeDesc->mIoHandle;
+ SortedVector<audio_session_t> sessions = activeDesc->getPreemptedSessions();
+ sessions.add(activeSession);
+ inputDesc->setPreemptedSessions(sessions);
+ stopInput(activeHandle, activeSession);
+ releaseInput(activeHandle, activeSession);
+ ALOGV("startInput(%d) for HOTWORD preempting HOTWORD input %d",
+ input, activeDesc->mIoHandle);
+ }
}
}
+#endif
+
+ // increment activity count before calling getNewInputDevice() below as only active sessions
+ // are considered for device selection
+ audioSession->changeActiveCount(1);
// Routing?
mInputRoutes.incRouteActivity(session);
- if (!inputDesc->isActive() || mInputRoutes.hasRouteChanged(session)) {
- // if input maps to a dynamic policy with an activity listener, notify of state change
- if ((inputDesc->mPolicyMix != NULL)
- && ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
- mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mDeviceAddress,
- MIX_STATE_MIXING);
- }
-
+ if (audioSession->activeCount() == 1 || mInputRoutes.hasRouteChanged(session)) {
// indicate active capture to sound trigger service if starting capture from a mic on
// primary HW module
- audio_devices_t device = getNewInputDevice(input);
- audio_devices_t primaryInputDevices = availablePrimaryInputDevices();
- if (((device & primaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
- mInputs.activeInputsCountOnDevices(primaryInputDevices) == 0) {
- SoundTrigger::setCaptureState(true);
- }
+ audio_devices_t device = getNewInputDevice(inputDesc);
setInputDevice(input, device, true /* force */);
- // automatically enable the remote submix output when input is started if not
- // used by a policy mix of type MIX_TYPE_RECORDERS
- // For remote submix (a virtual device), we open only one input per capture request.
- if (audio_is_remote_submix_device(inputDesc->mDevice)) {
- String8 address = String8("");
- if (inputDesc->mPolicyMix == NULL) {
- address = String8("0");
- } else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
- address = inputDesc->mPolicyMix->mDeviceAddress;
+ if (inputDesc->getAudioSessionCount(true/*activeOnly*/) == 1) {
+ // if input maps to a dynamic policy with an activity listener, notify of state change
+ if ((inputDesc->mPolicyMix != NULL)
+ && ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
+ mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mDeviceAddress,
+ MIX_STATE_MIXING);
}
- if (address != "") {
- setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
- AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
- address, "remote-submix");
+
+ audio_devices_t primaryInputDevices = availablePrimaryInputDevices();
+ if (((device & primaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
+ mInputs.activeInputsCountOnDevices(primaryInputDevices) == 1) {
+ SoundTrigger::setCaptureState(true);
+ }
+
+ // automatically enable the remote submix output when input is started if not
+ // used by a policy mix of type MIX_TYPE_RECORDERS
+ // For remote submix (a virtual device), we open only one input per capture request.
+ if (audio_is_remote_submix_device(inputDesc->mDevice)) {
+ String8 address = String8("");
+ if (inputDesc->mPolicyMix == NULL) {
+ address = String8("0");
+ } else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
+ address = inputDesc->mPolicyMix->mDeviceAddress;
+ }
+ if (address != "") {
+ setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+ AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+ address, "remote-submix");
+ }
}
}
}
ALOGV("AudioPolicyManager::startInput() input source = %d", audioSession->inputSource());
- audioSession->changeActiveCount(1);
return NO_ERROR;
}
@@ -1804,41 +1966,46 @@
// Routing?
mInputRoutes.decRouteActivity(session);
- if (!inputDesc->isActive()) {
- // if input maps to a dynamic policy with an activity listener, notify of state change
- if ((inputDesc->mPolicyMix != NULL)
- && ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
- mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mDeviceAddress,
- MIX_STATE_IDLE);
- }
+ if (audioSession->activeCount() == 0) {
- // automatically disable the remote submix output when input is stopped if not
- // used by a policy mix of type MIX_TYPE_RECORDERS
- if (audio_is_remote_submix_device(inputDesc->mDevice)) {
- String8 address = String8("");
- if (inputDesc->mPolicyMix == NULL) {
- address = String8("0");
- } else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
- address = inputDesc->mPolicyMix->mDeviceAddress;
+ if (inputDesc->isActive()) {
+ setInputDevice(input, getNewInputDevice(inputDesc), false /* force */);
+ } else {
+ // if input maps to a dynamic policy with an activity listener, notify of state change
+ if ((inputDesc->mPolicyMix != NULL)
+ && ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
+ mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mDeviceAddress,
+ MIX_STATE_IDLE);
}
- if (address != "") {
- setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
- AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
- address, "remote-submix");
+
+ // automatically disable the remote submix output when input is stopped if not
+ // used by a policy mix of type MIX_TYPE_RECORDERS
+ if (audio_is_remote_submix_device(inputDesc->mDevice)) {
+ String8 address = String8("");
+ if (inputDesc->mPolicyMix == NULL) {
+ address = String8("0");
+ } else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
+ address = inputDesc->mPolicyMix->mDeviceAddress;
+ }
+ if (address != "") {
+ setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+ AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+ address, "remote-submix");
+ }
}
- }
- audio_devices_t device = inputDesc->mDevice;
- resetInputDevice(input);
+ audio_devices_t device = inputDesc->mDevice;
+ resetInputDevice(input);
- // indicate inactive capture to sound trigger service if stopping capture from a mic on
- // primary HW module
- audio_devices_t primaryInputDevices = availablePrimaryInputDevices();
- if (((device & primaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
- mInputs.activeInputsCountOnDevices(primaryInputDevices) == 0) {
- SoundTrigger::setCaptureState(false);
+ // indicate inactive capture to sound trigger service if stopping capture from a mic on
+ // primary HW module
+ audio_devices_t primaryInputDevices = availablePrimaryInputDevices();
+ if (((device & primaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
+ mInputs.activeInputsCountOnDevices(primaryInputDevices) == 0) {
+ SoundTrigger::setCaptureState(false);
+ }
+ inputDesc->clearPreemptedSessions();
}
- inputDesc->clearPreemptedSessions();
}
return NO_ERROR;
}
@@ -1861,7 +2028,7 @@
ALOG_ASSERT(inputDesc != 0);
sp<AudioSession> audioSession = inputDesc->getAudioSession(session);
- if (index < 0) {
+ if (audioSession == 0) {
ALOGW("releaseInput() unknown session %d on input %d", session, input);
return;
}
@@ -2025,8 +2192,7 @@
return NO_ERROR;
}
-audio_io_handle_t AudioPolicyManager::selectOutputForEffects(
- const SortedVector<audio_io_handle_t>& outputs)
+audio_io_handle_t AudioPolicyManager::selectOutputForMusicEffects()
{
// select one output among several suitable for global effects.
// The priority is as follows:
@@ -2034,53 +2200,68 @@
// AudioFlinger will invalidate the track and the offloaded output
// will be closed causing the effect to be moved to a PCM output.
// 2: A deep buffer output
- // 3: the first output in the list
-
- if (outputs.size() == 0) {
- return 0;
- }
-
- audio_io_handle_t outputOffloaded = 0;
- audio_io_handle_t outputDeepBuffer = 0;
-
- for (size_t i = 0; i < outputs.size(); i++) {
- sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(outputs[i]);
- ALOGV("selectOutputForEffects outputs[%zu] flags %x", i, desc->mFlags);
- if ((desc->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
- outputOffloaded = outputs[i];
- }
- if ((desc->mFlags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) {
- outputDeepBuffer = outputs[i];
- }
- }
-
- ALOGV("selectOutputForEffects outputOffloaded %d outputDeepBuffer %d",
- outputOffloaded, outputDeepBuffer);
- if (outputOffloaded != 0) {
- return outputOffloaded;
- }
- if (outputDeepBuffer != 0) {
- return outputDeepBuffer;
- }
-
- return outputs[0];
-}
-
-audio_io_handle_t AudioPolicyManager::getOutputForEffect(const effect_descriptor_t *desc)
-{
- // apply simple rule where global effects are attached to the same output as MUSIC streams
+ // 3: The primary output
+ // 4: the first output in the list
routing_strategy strategy = getStrategy(AUDIO_STREAM_MUSIC);
audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
- SortedVector<audio_io_handle_t> dstOutputs = getOutputsForDevice(device, mOutputs);
+ SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
- audio_io_handle_t output = selectOutputForEffects(dstOutputs);
- ALOGV("getOutputForEffect() got output %d for fx %s flags %x",
- output, (desc == NULL) ? "unspecified" : desc->name, (desc == NULL) ? 0 : desc->flags);
+ if (outputs.size() == 0) {
+ return AUDIO_IO_HANDLE_NONE;
+ }
+ audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
+ bool activeOnly = true;
+
+ while (output == AUDIO_IO_HANDLE_NONE) {
+ audio_io_handle_t outputOffloaded = AUDIO_IO_HANDLE_NONE;
+ audio_io_handle_t outputDeepBuffer = AUDIO_IO_HANDLE_NONE;
+ audio_io_handle_t outputPrimary = AUDIO_IO_HANDLE_NONE;
+
+ for (size_t i = 0; i < outputs.size(); i++) {
+ sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(outputs[i]);
+ if (activeOnly && !desc->isStreamActive(AUDIO_STREAM_MUSIC)) {
+ continue;
+ }
+ ALOGV("selectOutputForMusicEffects activeOnly %d outputs[%zu] flags 0x%08x",
+ activeOnly, i, desc->mFlags);
+ if ((desc->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+ outputOffloaded = outputs[i];
+ }
+ if ((desc->mFlags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) {
+ outputDeepBuffer = outputs[i];
+ }
+ if ((desc->mFlags & AUDIO_OUTPUT_FLAG_PRIMARY) != 0) {
+ outputPrimary = outputs[i];
+ }
+ }
+ if (outputOffloaded != AUDIO_IO_HANDLE_NONE) {
+ output = outputOffloaded;
+ } else if (outputDeepBuffer != AUDIO_IO_HANDLE_NONE) {
+ output = outputDeepBuffer;
+ } else if (outputPrimary != AUDIO_IO_HANDLE_NONE) {
+ output = outputPrimary;
+ } else {
+ output = outputs[0];
+ }
+ activeOnly = false;
+ }
+
+ if (output != mMusicEffectOutput) {
+ mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX, mMusicEffectOutput, output);
+ mMusicEffectOutput = output;
+ }
+
+ ALOGV("selectOutputForMusicEffects selected output %d", output);
return output;
}
+audio_io_handle_t AudioPolicyManager::getOutputForEffect(const effect_descriptor_t *desc __unused)
+{
+ return selectOutputForMusicEffects();
+}
+
status_t AudioPolicyManager::registerEffect(const effect_descriptor_t *desc,
audio_io_handle_t io,
uint32_t strategy,
@@ -2320,7 +2501,9 @@
snprintf(buffer, SIZE, " Primary Output: %d\n",
hasPrimaryOutput() ? mPrimaryOutput->mIoHandle : AUDIO_IO_HANDLE_NONE);
result.append(buffer);
- snprintf(buffer, SIZE, " Phone state: %d\n", mEngine->getPhoneState());
+ std::string stateLiteral;
+ AudioModeConverter::toString(mEngine->getPhoneState(), stateLiteral);
+ snprintf(buffer, SIZE, " Phone state: %s\n", stateLiteral.c_str());
result.append(buffer);
snprintf(buffer, SIZE, " Force use for communications %d\n",
mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION));
@@ -2354,6 +2537,7 @@
mVolumeCurves->dump(fd);
mEffects.dump(fd);
mAudioPatches.dump(fd);
+ mPolicyMixes.dump(fd);
return NO_ERROR;
}
@@ -2709,8 +2893,8 @@
// create a software bridge in PatchPanel if:
// - source and sink devices are on differnt HW modules OR
// - audio HAL version is < 3.0
- if ((srcDeviceDesc->getModuleHandle() != sinkDeviceDesc->getModuleHandle()) ||
- (srcDeviceDesc->mModule->getHalVersion() < AUDIO_DEVICE_API_VERSION_3_0)) {
+ if (!srcDeviceDesc->hasSameHwModuleAs(sinkDeviceDesc) ||
+ (srcDeviceDesc->mModule->getHalVersionMajor() < 3)) {
// support only one sink device for now to simplify output selection logic
if (patch->num_sinks > 1) {
return INVALID_OPERATION;
@@ -2809,7 +2993,7 @@
return BAD_VALUE;
}
setInputDevice(inputDesc->mIoHandle,
- getNewInputDevice(inputDesc->mIoHandle),
+ getNewInputDevice(inputDesc),
true,
NULL);
} else if (patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE) {
@@ -3014,7 +3198,7 @@
status_t AudioPolicyManager::startAudioSource(const struct audio_port_config *source,
const audio_attributes_t *attributes,
- audio_io_handle_t *handle,
+ audio_patch_handle_t *handle,
uid_t uid)
{
ALOGV("%s source %p attributes %p handle %p", __FUNCTION__, source, attributes, handle);
@@ -3022,7 +3206,7 @@
return BAD_VALUE;
}
- *handle = AUDIO_IO_HANDLE_NONE;
+ *handle = AUDIO_PATCH_HANDLE_NONE;
if (source->role != AUDIO_PORT_ROLE_SOURCE ||
source->type != AUDIO_PORT_TYPE_DEVICE) {
@@ -3072,7 +3256,7 @@
if (srcDeviceDesc->getAudioPort()->mModule->getHandle() ==
sinkDeviceDesc->getAudioPort()->mModule->getHandle() &&
- srcDeviceDesc->getAudioPort()->mModule->getHalVersion() >= AUDIO_DEVICE_API_VERSION_3_0 &&
+ srcDeviceDesc->getAudioPort()->mModule->getHalVersionMajor() >= 3 &&
srcDeviceDesc->getAudioPort()->mGains.size() > 0) {
ALOGV("%s AUDIO_DEVICE_API_VERSION_3_0", __FUNCTION__);
// create patch between src device and output device
@@ -3129,7 +3313,7 @@
return NO_ERROR;
}
-status_t AudioPolicyManager::stopAudioSource(audio_io_handle_t handle __unused)
+status_t AudioPolicyManager::stopAudioSource(audio_patch_handle_t handle __unused)
{
sp<AudioSourceDescriptor> sourceDesc = mAudioSources.valueFor(handle);
ALOGV("%s handle %d", __FUNCTION__, handle);
@@ -3235,6 +3419,33 @@
return android_atomic_inc(&mAudioPortGeneration);
}
+#ifdef USE_XML_AUDIO_POLICY_CONF
+// Treblized audio policy xml config will be located in /odm/etc or /vendor/etc.
+static const char *kConfigLocationList[] =
+ {"/odm/etc", "/vendor/etc", "/system/etc"};
+static const int kConfigLocationListSize =
+ (sizeof(kConfigLocationList) / sizeof(kConfigLocationList[0]));
+
+static status_t deserializeAudioPolicyXmlConfig(AudioPolicyConfig &config) {
+ char audioPolicyXmlConfigFile[AUDIO_POLICY_XML_CONFIG_FILE_PATH_MAX_LENGTH];
+ status_t ret;
+
+ for (int i = 0; i < kConfigLocationListSize; i++) {
+ PolicySerializer serializer;
+ snprintf(audioPolicyXmlConfigFile,
+ sizeof(audioPolicyXmlConfigFile),
+ "%s/%s",
+ kConfigLocationList[i],
+ AUDIO_POLICY_XML_CONFIG_FILE_NAME);
+ ret = serializer.deserialize(audioPolicyXmlConfigFile, config);
+ if (ret == NO_ERROR) {
+ break;
+ }
+ }
+ return ret;
+}
+#endif
+
AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterface)
:
#ifdef AUDIO_POLICY_TEST
@@ -3247,7 +3458,8 @@
mBeaconPlayingRefCount(0),
mBeaconMuted(false),
mTtsOutputAvailable(false),
- mMasterMono(false)
+ mMasterMono(false),
+ mMusicEffectOutput(AUDIO_IO_HANDLE_NONE)
{
mUidCached = getuid();
mpClientInterface = clientInterface;
@@ -3262,8 +3474,7 @@
AudioPolicyConfig config(mHwModules, mAvailableOutputDevices, mAvailableInputDevices,
mDefaultOutputDevice, speakerDrcEnabled,
static_cast<VolumeCurvesCollection *>(mVolumeCurves));
- PolicySerializer serializer;
- if (serializer.deserialize(AUDIO_POLICY_XML_CONFIG_FILE, config) != NO_ERROR) {
+ if (deserializeAudioPolicyXmlConfig(config) != NO_ERROR) {
#else
mVolumeCurves = new StreamDescriptorCollection();
AudioPolicyConfig config(mHwModules, mAvailableOutputDevices, mAvailableInputDevices,
@@ -3436,7 +3647,7 @@
sp<DeviceDescriptor> devDesc = mAvailableInputDevices[index];
if (!devDesc->isAttached()) {
devDesc->attach(mHwModules[i]);
- devDesc->importAudioPort(inProfile);
+ devDesc->importAudioPort(inProfile, true);
}
}
}
@@ -3583,7 +3794,7 @@
mTestFormat = format;
} else if (mTestOutputs[mCurOutput] != 0) {
AudioParameter outputParam = AudioParameter();
- outputParam.addInt(String8("format"), format);
+ outputParam.addInt(String8(AudioParameter::keyStreamSupportedFormats), format);
mpClientInterface->setParameters(mTestOutputs[mCurOutput], outputParam.toString());
}
}
@@ -3602,7 +3813,7 @@
mTestChannels = channels;
} else if (mTestOutputs[mCurOutput] != 0) {
AudioParameter outputParam = AudioParameter();
- outputParam.addInt(String8("channels"), channels);
+ outputParam.addInt(String8(AudioParameter::keyStreamSupportedChannels), channels);
mpClientInterface->setParameters(mTestOutputs[mCurOutput], outputParam.toString());
}
}
@@ -3615,7 +3826,7 @@
mTestSamplingRate = samplingRate;
} else if (mTestOutputs[mCurOutput] != 0) {
AudioParameter outputParam = AudioParameter();
- outputParam.addInt(String8("sampling_rate"), samplingRate);
+ outputParam.addInt(String8(AudioParameter::keyStreamSupportedSamplingRates), samplingRate);
mpClientInterface->setParameters(mTestOutputs[mCurOutput], outputParam.toString());
}
}
@@ -3693,12 +3904,14 @@
outputDesc->setIoHandle(output);
mOutputs.add(output, outputDesc);
updateMono(output); // update mono status when adding to output list
+ selectOutputForMusicEffects();
nextAudioPortGeneration();
}
void AudioPolicyManager::removeOutput(audio_io_handle_t output)
{
mOutputs.removeItem(output);
+ selectOutputForMusicEffects();
}
void AudioPolicyManager::addInput(audio_io_handle_t input, const sp<AudioInputDescriptor>& inputDesc)
@@ -3797,8 +4010,8 @@
continue;
}
- ALOGV("opening output for device %08x with params %s profile %p",
- device, address.string(), profile.get());
+ ALOGV("opening output for device %08x with params %s profile %p name %s",
+ device, address.string(), profile.get(), profile->getName().string());
desc = new SwAudioOutputDescriptor(profile, mpClientInterface);
desc->mDevice = device;
audio_config_t config = AUDIO_CONFIG_INITIALIZER;
@@ -4047,6 +4260,10 @@
config.channel_mask = desc->mChannelMask;
config.format = desc->mFormat;
audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
+
+ ALOGV("opening input for device %08x with params %s profile %p name %s",
+ desc->mDevice, address.string(), profile.get(), profile->getName().string());
+
status_t status = mpClientInterface->openInput(profile->getModuleHandle(),
&input,
&config,
@@ -4286,22 +4503,7 @@
// Move effects associated to this strategy from previous output to new output
if (strategy == STRATEGY_MEDIA) {
- audio_io_handle_t fxOutput = selectOutputForEffects(dstOutputs);
- SortedVector<audio_io_handle_t> moved;
- for (size_t i = 0; i < mEffects.size(); i++) {
- sp<EffectDescriptor> effectDesc = mEffects.valueAt(i);
- if (effectDesc->mSession == AUDIO_SESSION_OUTPUT_MIX &&
- effectDesc->mIo != fxOutput) {
- if (moved.indexOf(effectDesc->mIo) < 0) {
- ALOGV("checkOutputForStrategy() moving effect %d to output %d",
- mEffects.keyAt(i), fxOutput);
- mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX, effectDesc->mIo,
- fxOutput);
- moved.add(effectDesc->mIo);
- }
- effectDesc->mIo = fxOutput;
- }
- }
+ selectOutputForMusicEffects();
}
// Move tracks associated to this strategy from previous output to new output
for (int i = 0; i < AUDIO_STREAM_FOR_POLICY_CNT; i++) {
@@ -4339,33 +4541,36 @@
((mAvailableInputDevices.types() & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET &
~AUDIO_DEVICE_BIT_IN) != 0) ||
((mAvailableOutputDevices.types() & AUDIO_DEVICE_OUT_ALL_SCO) != 0);
- // suspend A2DP output if:
- // (NOT already suspended) &&
- // ((SCO device is connected &&
- // (forced usage for communication || for record is SCO))) ||
- // (phone state is ringing || in call)
+
+ // if suspended, restore A2DP output if:
+ // ((SCO device is NOT connected) ||
+ // ((forced usage communication is NOT SCO) && (forced usage for record is NOT SCO) &&
+ // (phone state is NOT in call) && (phone state is NOT ringing)))
//
- // restore A2DP output if:
- // (Already suspended) &&
- // ((SCO device is NOT connected ||
- // (forced usage NOT for communication && NOT for record is SCO))) &&
- // (phone state is NOT ringing && NOT in call)
+ // if not suspended, suspend A2DP output if:
+ // (SCO device is connected) &&
+ // ((forced usage for communication is SCO) || (forced usage for record is SCO) ||
+ // ((phone state is in call) || (phone state is ringing)))
//
if (mA2dpSuspended) {
- if ((!isScoConnected ||
- ((mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION) != AUDIO_POLICY_FORCE_BT_SCO) &&
- (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) != AUDIO_POLICY_FORCE_BT_SCO))) &&
- ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) &&
+ if (!isScoConnected ||
+ ((mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION) !=
+ AUDIO_POLICY_FORCE_BT_SCO) &&
+ (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) !=
+ AUDIO_POLICY_FORCE_BT_SCO) &&
+ (mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) &&
(mEngine->getPhoneState() != AUDIO_MODE_RINGTONE))) {
mpClientInterface->restoreOutput(a2dpOutput);
mA2dpSuspended = false;
}
} else {
- if ((isScoConnected &&
- ((mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION) == AUDIO_POLICY_FORCE_BT_SCO) ||
- (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) == AUDIO_POLICY_FORCE_BT_SCO))) ||
- ((mEngine->getPhoneState() == AUDIO_MODE_IN_CALL) ||
+ if (isScoConnected &&
+ ((mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION) ==
+ AUDIO_POLICY_FORCE_BT_SCO) ||
+ (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) ==
+ AUDIO_POLICY_FORCE_BT_SCO) ||
+ (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL) ||
(mEngine->getPhoneState() == AUDIO_MODE_RINGTONE))) {
mpClientInterface->suspendOutput(a2dpOutput);
@@ -4436,9 +4641,9 @@
return device;
}
-audio_devices_t AudioPolicyManager::getNewInputDevice(audio_io_handle_t input)
+audio_devices_t AudioPolicyManager::getNewInputDevice(const sp<AudioInputDescriptor>& inputDesc)
{
- sp<AudioInputDescriptor> inputDesc = mInputs.valueFor(input);
+ audio_devices_t device = AUDIO_DEVICE_NONE;
ssize_t index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
if (index >= 0) {
@@ -4450,16 +4655,19 @@
}
}
- audio_devices_t device = getDeviceAndMixForInputSource(inputDesc->inputSource());
+ audio_source_t source = inputDesc->getHighestPrioritySource(true /*activeOnly*/);
+ if (isInCall()) {
+ device = getDeviceAndMixForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
+ } else if (source != AUDIO_SOURCE_DEFAULT) {
+ device = getDeviceAndMixForInputSource(source);
+ }
return device;
}
bool AudioPolicyManager::streamsMatchForvolume(audio_stream_type_t stream1,
audio_stream_type_t stream2) {
- return ((stream1 == stream2) ||
- ((stream1 == AUDIO_STREAM_ACCESSIBILITY) && (stream2 == AUDIO_STREAM_MUSIC)) ||
- ((stream1 == AUDIO_STREAM_MUSIC) && (stream2 == AUDIO_STREAM_ACCESSIBILITY)));
+ return (stream1 == stream2);
}
uint32_t AudioPolicyManager::getStrategyForStream(audio_stream_type_t stream) {
@@ -5034,7 +5242,8 @@
if ((device & (AUDIO_DEVICE_OUT_BLUETOOTH_A2DP |
AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
AUDIO_DEVICE_OUT_WIRED_HEADSET |
- AUDIO_DEVICE_OUT_WIRED_HEADPHONE)) &&
+ AUDIO_DEVICE_OUT_WIRED_HEADPHONE |
+ AUDIO_DEVICE_OUT_USB_HEADSET)) &&
((stream_strategy == STRATEGY_SONIFICATION)
|| (stream_strategy == STRATEGY_SONIFICATION_RESPECTFUL)
|| (stream == AUDIO_STREAM_SYSTEM)
@@ -5268,6 +5477,7 @@
switch (attr->usage) {
case AUDIO_USAGE_MEDIA:
case AUDIO_USAGE_GAME:
+ case AUDIO_USAGE_ASSISTANT:
case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
return AUDIO_STREAM_MUSIC;
case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
@@ -5323,6 +5533,7 @@
case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
case AUDIO_USAGE_GAME:
case AUDIO_USAGE_VIRTUAL_SOURCE:
+ case AUDIO_USAGE_ASSISTANT:
break;
default:
return false;
@@ -5525,12 +5736,12 @@
// Format MUST be checked first to update the list of AudioProfile
if (profiles.hasDynamicFormat()) {
- reply = mpClientInterface->getParameters(ioHandle,
- String8(AUDIO_PARAMETER_STREAM_SUP_FORMATS));
+ reply = mpClientInterface->getParameters(
+ ioHandle, String8(AudioParameter::keyStreamSupportedFormats));
ALOGV("%s: supported formats %s", __FUNCTION__, reply.string());
AudioParameter repliedParameters(reply);
if (repliedParameters.get(
- String8(AUDIO_PARAMETER_STREAM_SUP_FORMATS), reply) != NO_ERROR) {
+ String8(AudioParameter::keyStreamSupportedFormats), reply) != NO_ERROR) {
ALOGE("%s: failed to retrieve format, bailing out", __FUNCTION__);
return;
}
@@ -5547,27 +5758,28 @@
ChannelsVector channelMasks;
SampleRateVector samplingRates;
AudioParameter requestedParameters;
- requestedParameters.addInt(String8(AUDIO_PARAMETER_STREAM_FORMAT), format);
+ requestedParameters.addInt(String8(AudioParameter::keyFormat), format);
if (profiles.hasDynamicRateFor(format)) {
- reply = mpClientInterface->getParameters(ioHandle,
- requestedParameters.toString() + ";" +
- AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES);
+ reply = mpClientInterface->getParameters(
+ ioHandle,
+ requestedParameters.toString() + ";" +
+ AudioParameter::keyStreamSupportedSamplingRates);
ALOGV("%s: supported sampling rates %s", __FUNCTION__, reply.string());
AudioParameter repliedParameters(reply);
if (repliedParameters.get(
- String8(AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES), reply) == NO_ERROR) {
+ String8(AudioParameter::keyStreamSupportedSamplingRates), reply) == NO_ERROR) {
samplingRates = samplingRatesFromString(reply.string());
}
}
if (profiles.hasDynamicChannelsFor(format)) {
reply = mpClientInterface->getParameters(ioHandle,
requestedParameters.toString() + ";" +
- AUDIO_PARAMETER_STREAM_SUP_CHANNELS);
+ AudioParameter::keyStreamSupportedChannels);
ALOGV("%s: supported channel masks %s", __FUNCTION__, reply.string());
AudioParameter repliedParameters(reply);
if (repliedParameters.get(
- String8(AUDIO_PARAMETER_STREAM_SUP_CHANNELS), reply) == NO_ERROR) {
+ String8(AudioParameter::keyStreamSupportedChannels), reply) == NO_ERROR) {
channelMasks = channelMasksFromString(reply.string());
if (device == AUDIO_DEVICE_OUT_HDMI) {
filterSurroundChannelMasks(&channelMasks);
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 3cfe508..c831d46 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -114,12 +114,10 @@
audio_session_t session,
audio_stream_type_t *stream,
uid_t uid,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
+ const audio_config_t *config,
audio_output_flags_t flags,
audio_port_handle_t selectedDeviceId,
- const audio_offload_info_t *offloadInfo);
+ audio_port_handle_t *portId);
virtual status_t startOutput(audio_io_handle_t output,
audio_stream_type_t stream,
audio_session_t session);
@@ -133,16 +131,16 @@
audio_io_handle_t *input,
audio_session_t session,
uid_t uid,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
+ const audio_config_base_t *config,
audio_input_flags_t flags,
audio_port_handle_t selectedDeviceId,
- input_type_t *inputType);
+ input_type_t *inputType,
+ audio_port_handle_t *portId);
// indicates to the audio policy manager that the input starts being used.
virtual status_t startInput(audio_io_handle_t input,
- audio_session_t session);
+ audio_session_t session,
+ concurrency_type__mask_t *concurrency);
// indicates to the audio policy manager that the input stops being used.
virtual status_t stopInput(audio_io_handle_t input,
@@ -230,9 +228,9 @@
virtual status_t startAudioSource(const struct audio_port_config *source,
const audio_attributes_t *attributes,
- audio_io_handle_t *handle,
+ audio_patch_handle_t *handle,
uid_t uid);
- virtual status_t stopAudioSource(audio_io_handle_t handle);
+ virtual status_t stopAudioSource(audio_patch_handle_t handle);
virtual status_t setMasterMono(bool mono);
virtual status_t getMasterMono(bool *mono);
@@ -408,7 +406,7 @@
void updateDevicesAndOutputs();
// selects the most appropriate device on input for current state
- audio_devices_t getNewInputDevice(audio_io_handle_t input);
+ audio_devices_t getNewInputDevice(const sp<AudioInputDescriptor>& inputDesc);
virtual uint32_t getMaxEffectsCpuLoad()
{
@@ -454,7 +452,7 @@
audio_channel_mask_t channelMask,
audio_output_flags_t flags);
- audio_io_handle_t selectOutputForEffects(const SortedVector<audio_io_handle_t>& outputs);
+ audio_io_handle_t selectOutputForMusicEffects();
virtual status_t addAudioPatch(audio_patch_handle_t handle, const sp<AudioPatch>& patch)
{
@@ -509,6 +507,10 @@
void clearAudioSources(uid_t uid);
+ static bool isConcurrentSource(audio_source_t source);
+
+ bool isConcurentCaptureAllowed(const sp<AudioInputDescriptor>& inputDesc,
+ const sp<AudioSession>& audioSession);
static bool streamsMatchForvolume(audio_stream_type_t stream1,
audio_stream_type_t stream2);
@@ -568,6 +570,8 @@
bool mMasterMono; // true if we wish to force all outputs to mono
AudioPolicyMixCollection mPolicyMixes; // list of registered mixes
+ audio_io_handle_t mMusicEffectOutput; // output selected for music effects
+
#ifdef AUDIO_POLICY_TEST
Mutex mLock;
@@ -662,7 +666,7 @@
const char *device_name);
void updateMono(audio_io_handle_t output) {
AudioParameter param;
- param.addInt(String8(AUDIO_PARAMETER_MONO_OUTPUT), (int)mMasterMono);
+ param.addInt(String8(AudioParameter::keyMonoOutput), (int)mMasterMono);
mpClientInterface->setParameters(output, param.toString());
}
};
diff --git a/services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp b/services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp
deleted file mode 100644
index dabffe6..0000000
--- a/services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp
+++ /dev/null
@@ -1,317 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "AudioPolicyService"
-//#define LOG_NDEBUG 0
-
-#include "Configuration.h"
-#undef __STRICT_ANSI__
-#define __STDINT_LIMITS
-#define __STDC_LIMIT_MACROS
-#include <stdint.h>
-
-#include <sys/time.h>
-#include <binder/IServiceManager.h>
-#include <utils/Log.h>
-#include <cutils/properties.h>
-#include <binder/IPCThreadState.h>
-#include <utils/String16.h>
-#include <utils/threads.h>
-#include "AudioPolicyService.h"
-#include "ServiceUtilities.h"
-#include <hardware_legacy/power.h>
-#include <media/AudioEffect.h>
-#include <media/EffectsFactoryApi.h>
-//#include <media/IAudioFlinger.h>
-
-#include <hardware/hardware.h>
-#include <system/audio.h>
-#include <system/audio_policy.h>
-#include <hardware/audio_policy.h>
-#include <audio_effects/audio_effects_conf.h>
-#include <media/AudioParameter.h>
-
-
-namespace android {
-
-/* implementation of the interface to the policy manager */
-extern "C" {
-
-audio_module_handle_t aps_load_hw_module(void *service __unused,
- const char *name)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) {
- ALOGW("%s: could not get AudioFlinger", __func__);
- return AUDIO_MODULE_HANDLE_NONE;
- }
-
- return af->loadHwModule(name);
-}
-
-static audio_io_handle_t open_output(audio_module_handle_t module,
- audio_devices_t *pDevices,
- uint32_t *pSamplingRate,
- audio_format_t *pFormat,
- audio_channel_mask_t *pChannelMask,
- uint32_t *pLatencyMs,
- audio_output_flags_t flags,
- const audio_offload_info_t *offloadInfo)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) {
- ALOGW("%s: could not get AudioFlinger", __func__);
- return AUDIO_IO_HANDLE_NONE;
- }
-
- if (pSamplingRate == NULL || pFormat == NULL || pChannelMask == NULL ||
- pDevices == NULL || pLatencyMs == NULL) {
- return AUDIO_IO_HANDLE_NONE;
- }
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- config.sample_rate = *pSamplingRate;
- config.format = *pFormat;
- config.channel_mask = *pChannelMask;
- if (offloadInfo != NULL) {
- config.offload_info = *offloadInfo;
- }
- audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
- status_t status = af->openOutput(module, &output, &config, pDevices,
- String8(""), pLatencyMs, flags);
- if (status == NO_ERROR) {
- *pSamplingRate = config.sample_rate;
- *pFormat = config.format;
- *pChannelMask = config.channel_mask;
- if (offloadInfo != NULL) {
- *((audio_offload_info_t *)offloadInfo) = config.offload_info;
- }
- }
- return output;
-}
-
-// deprecated: replaced by aps_open_output_on_module()
-audio_io_handle_t aps_open_output(void *service __unused,
- audio_devices_t *pDevices,
- uint32_t *pSamplingRate,
- audio_format_t *pFormat,
- audio_channel_mask_t *pChannelMask,
- uint32_t *pLatencyMs,
- audio_output_flags_t flags)
-{
- return open_output(AUDIO_MODULE_HANDLE_NONE, pDevices, pSamplingRate, pFormat, pChannelMask,
- pLatencyMs, flags, NULL);
-}
-
-audio_io_handle_t aps_open_output_on_module(void *service __unused,
- audio_module_handle_t module,
- audio_devices_t *pDevices,
- uint32_t *pSamplingRate,
- audio_format_t *pFormat,
- audio_channel_mask_t *pChannelMask,
- uint32_t *pLatencyMs,
- audio_output_flags_t flags,
- const audio_offload_info_t *offloadInfo)
-{
- return open_output(module, pDevices, pSamplingRate, pFormat, pChannelMask,
- pLatencyMs, flags, offloadInfo);
-}
-
-audio_io_handle_t aps_open_dup_output(void *service __unused,
- audio_io_handle_t output1,
- audio_io_handle_t output2)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) {
- ALOGW("%s: could not get AudioFlinger", __func__);
- return 0;
- }
- return af->openDuplicateOutput(output1, output2);
-}
-
-int aps_close_output(void *service __unused, audio_io_handle_t output)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) {
- return PERMISSION_DENIED;
- }
-
- return af->closeOutput(output);
-}
-
-int aps_suspend_output(void *service __unused, audio_io_handle_t output)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) {
- ALOGW("%s: could not get AudioFlinger", __func__);
- return PERMISSION_DENIED;
- }
-
- return af->suspendOutput(output);
-}
-
-int aps_restore_output(void *service __unused, audio_io_handle_t output)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) {
- ALOGW("%s: could not get AudioFlinger", __func__);
- return PERMISSION_DENIED;
- }
-
- return af->restoreOutput(output);
-}
-
-static audio_io_handle_t open_input(audio_module_handle_t module,
- audio_devices_t *pDevices,
- uint32_t *pSamplingRate,
- audio_format_t *pFormat,
- audio_channel_mask_t *pChannelMask)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) {
- ALOGW("%s: could not get AudioFlinger", __func__);
- return AUDIO_IO_HANDLE_NONE;
- }
-
- if (pSamplingRate == NULL || pFormat == NULL || pChannelMask == NULL || pDevices == NULL) {
- return AUDIO_IO_HANDLE_NONE;
- }
-
- if (((*pDevices & AUDIO_DEVICE_IN_REMOTE_SUBMIX) == AUDIO_DEVICE_IN_REMOTE_SUBMIX)
- && !captureAudioOutputAllowed(IPCThreadState::self()->getCallingPid(),
- IPCThreadState::self()->getCallingUid())) {
- ALOGE("open_input() permission denied: capture not allowed");
- return AUDIO_IO_HANDLE_NONE;
- }
-
- audio_config_t config = AUDIO_CONFIG_INITIALIZER;;
- config.sample_rate = *pSamplingRate;
- config.format = *pFormat;
- config.channel_mask = *pChannelMask;
- audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
- status_t status = af->openInput(module, &input, &config, pDevices,
- String8(""), AUDIO_SOURCE_MIC, AUDIO_INPUT_FLAG_FAST /*FIXME*/);
- if (status == NO_ERROR) {
- *pSamplingRate = config.sample_rate;
- *pFormat = config.format;
- *pChannelMask = config.channel_mask;
- }
- return input;
-}
-
-
-// deprecated: replaced by aps_open_input_on_module(), and acoustics parameter is ignored
-audio_io_handle_t aps_open_input(void *service __unused,
- audio_devices_t *pDevices,
- uint32_t *pSamplingRate,
- audio_format_t *pFormat,
- audio_channel_mask_t *pChannelMask,
- audio_in_acoustics_t acoustics __unused)
-{
- return open_input(AUDIO_MODULE_HANDLE_NONE, pDevices, pSamplingRate, pFormat, pChannelMask);
-}
-
-audio_io_handle_t aps_open_input_on_module(void *service __unused,
- audio_module_handle_t module,
- audio_devices_t *pDevices,
- uint32_t *pSamplingRate,
- audio_format_t *pFormat,
- audio_channel_mask_t *pChannelMask)
-{
- return open_input(module, pDevices, pSamplingRate, pFormat, pChannelMask);
-}
-
-int aps_close_input(void *service __unused, audio_io_handle_t input)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) {
- return PERMISSION_DENIED;
- }
-
- return af->closeInput(input);
-}
-
-int aps_invalidate_stream(void *service __unused, audio_stream_type_t stream)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) {
- return PERMISSION_DENIED;
- }
-
- return af->invalidateStream(stream);
-}
-
-int aps_move_effects(void *service __unused, audio_session_t session,
- audio_io_handle_t src_output,
- audio_io_handle_t dst_output)
-{
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
- if (af == 0) {
- return PERMISSION_DENIED;
- }
-
- return af->moveEffects(session, src_output, dst_output);
-}
-
-char * aps_get_parameters(void *service __unused, audio_io_handle_t io_handle,
- const char *keys)
-{
- String8 result = AudioSystem::getParameters(io_handle, String8(keys));
- return strdup(result.string());
-}
-
-void aps_set_parameters(void *service, audio_io_handle_t io_handle,
- const char *kv_pairs, int delay_ms)
-{
- AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
-
- audioPolicyService->setParameters(io_handle, kv_pairs, delay_ms);
-}
-
-int aps_set_stream_volume(void *service, audio_stream_type_t stream,
- float volume, audio_io_handle_t output,
- int delay_ms)
-{
- AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
-
- return audioPolicyService->setStreamVolume(stream, volume, output,
- delay_ms);
-}
-
-int aps_start_tone(void *service, audio_policy_tone_t tone,
- audio_stream_type_t stream)
-{
- AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
-
- return audioPolicyService->startTone(tone, stream);
-}
-
-int aps_stop_tone(void *service)
-{
- AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
-
- return audioPolicyService->stopTone();
-}
-
-int aps_set_voice_volume(void *service, float volume, int delay_ms)
-{
- AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
-
- return audioPolicyService->setVoiceVolume(volume, delay_ms);
-}
-
-}; // extern "C"
-
-}; // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp
index b732b20..654465d 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/service/AudioPolicyEffects.cpp
@@ -23,11 +23,11 @@
#include <cutils/misc.h>
#include <media/AudioEffect.h>
#include <system/audio.h>
-#include <hardware/audio_effect.h>
-#include <audio_effects/audio_effects_conf.h>
+#include <system/audio_effects/audio_effects_conf.h>
#include <utils/Vector.h>
#include <utils/SortedVector.h>
#include <cutils/config_utils.h>
+#include <binder/IPCThreadState.h>
#include "AudioPolicyEffects.h"
#include "ServiceUtilities.h"
@@ -57,11 +57,11 @@
}
mInputSources.clear();
- for (i = 0; i < mInputs.size(); i++) {
- mInputs.valueAt(i)->mEffects.clear();
- delete mInputs.valueAt(i);
+ for (i = 0; i < mInputSessions.size(); i++) {
+ mInputSessions.valueAt(i)->mEffects.clear();
+ delete mInputSessions.valueAt(i);
}
- mInputs.clear();
+ mInputSessions.clear();
// release audio output processing resources
for (i = 0; i < mOutputStreams.size(); i++) {
@@ -93,19 +93,20 @@
ALOGV("addInputEffects(): no processing needs to be attached to this source");
return status;
}
- ssize_t idx = mInputs.indexOfKey(input);
- EffectVector *inputDesc;
+ ssize_t idx = mInputSessions.indexOfKey(audioSession);
+ EffectVector *sessionDesc;
if (idx < 0) {
- inputDesc = new EffectVector(audioSession);
- mInputs.add(input, inputDesc);
+ sessionDesc = new EffectVector(audioSession);
+ mInputSessions.add(audioSession, sessionDesc);
} else {
// EffectVector is existing and we just need to increase ref count
- inputDesc = mInputs.valueAt(idx);
+ sessionDesc = mInputSessions.valueAt(idx);
}
- inputDesc->mRefCount++;
+ sessionDesc->mRefCount++;
- ALOGV("addInputEffects(): input: %d, refCount: %d", input, inputDesc->mRefCount);
- if (inputDesc->mRefCount == 1) {
+ ALOGV("addInputEffects(): input: %d, refCount: %d", input, sessionDesc->mRefCount);
+ if (sessionDesc->mRefCount == 1) {
+ int64_t token = IPCThreadState::self()->clearCallingIdentity();
Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects;
for (size_t i = 0; i < effects.size(); i++) {
EffectDesc *effect = effects[i];
@@ -123,30 +124,32 @@
}
ALOGV("addInputEffects(): added Fx %s on source: %d",
effect->mName, (int32_t)aliasSource);
- inputDesc->mEffects.add(fx);
+ sessionDesc->mEffects.add(fx);
}
- inputDesc->setProcessorEnabled(true);
+ sessionDesc->setProcessorEnabled(true);
+ IPCThreadState::self()->restoreCallingIdentity(token);
}
return status;
}
-status_t AudioPolicyEffects::releaseInputEffects(audio_io_handle_t input)
+status_t AudioPolicyEffects::releaseInputEffects(audio_io_handle_t input,
+ audio_session_t audioSession)
{
status_t status = NO_ERROR;
Mutex::Autolock _l(mLock);
- ssize_t index = mInputs.indexOfKey(input);
+ ssize_t index = mInputSessions.indexOfKey(audioSession);
if (index < 0) {
return status;
}
- EffectVector *inputDesc = mInputs.valueAt(index);
- inputDesc->mRefCount--;
- ALOGV("releaseInputEffects(): input: %d, refCount: %d", input, inputDesc->mRefCount);
- if (inputDesc->mRefCount == 0) {
- inputDesc->setProcessorEnabled(false);
- delete inputDesc;
- mInputs.removeItemsAt(index);
+ EffectVector *sessionDesc = mInputSessions.valueAt(index);
+ sessionDesc->mRefCount--;
+ ALOGV("releaseInputEffects(): input: %d, refCount: %d", input, sessionDesc->mRefCount);
+ if (sessionDesc->mRefCount == 0) {
+ sessionDesc->setProcessorEnabled(false);
+ delete sessionDesc;
+ mInputSessions.removeItemsAt(index);
ALOGV("releaseInputEffects(): all effects released");
}
return status;
@@ -160,16 +163,16 @@
Mutex::Autolock _l(mLock);
size_t index;
- for (index = 0; index < mInputs.size(); index++) {
- if (mInputs.valueAt(index)->mSessionId == audioSession) {
+ for (index = 0; index < mInputSessions.size(); index++) {
+ if (mInputSessions.valueAt(index)->mSessionId == audioSession) {
break;
}
}
- if (index == mInputs.size()) {
+ if (index == mInputSessions.size()) {
*count = 0;
return BAD_VALUE;
}
- Vector< sp<AudioEffect> > effects = mInputs.valueAt(index)->mEffects;
+ Vector< sp<AudioEffect> > effects = mInputSessions.valueAt(index)->mEffects;
for (size_t i = 0; i < effects.size(); i++) {
effect_descriptor_t desc = effects[i]->descriptor();
@@ -251,6 +254,8 @@
ALOGV("addOutputSessionEffects(): session: %d, refCount: %d",
audioSession, procDesc->mRefCount);
if (procDesc->mRefCount == 1) {
+ // make sure effects are associated to audio server even if we are executing a binder call
+ int64_t token = IPCThreadState::self()->clearCallingIdentity();
Vector <EffectDesc *> effects = mOutputStreams.valueAt(index)->mEffects;
for (size_t i = 0; i < effects.size(); i++) {
EffectDesc *effect = effects[i];
@@ -269,6 +274,7 @@
}
procDesc->setProcessorEnabled(true);
+ IPCThreadState::self()->restoreCallingIdentity(token);
}
return status;
}
diff --git a/services/audiopolicy/service/AudioPolicyEffects.h b/services/audiopolicy/service/AudioPolicyEffects.h
index afdaf98..0c74d87 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.h
+++ b/services/audiopolicy/service/AudioPolicyEffects.h
@@ -23,7 +23,6 @@
#include <cutils/misc.h>
#include <media/AudioEffect.h>
#include <system/audio.h>
-#include <hardware/audio_effect.h>
#include <utils/Vector.h>
#include <utils/SortedVector.h>
@@ -62,7 +61,8 @@
audio_session_t audioSession);
// Add all input effects associated to this input
- status_t releaseInputEffects(audio_io_handle_t input);
+ status_t releaseInputEffects(audio_io_handle_t input,
+ audio_session_t audioSession);
// Return a list of effect descriptors for default output effects
@@ -178,12 +178,12 @@
size_t *curSize,
size_t *totSize);
- // protects access to mInputSources, mInputs, mOutputStreams, mOutputSessions
+ // protects access to mInputSources, mInputSessions, mOutputStreams, mOutputSessions
Mutex mLock;
// Automatic input effects are configured per audio_source_t
KeyedVector< audio_source_t, EffectDescVector* > mInputSources;
// Automatic input effects are unique for audio_io_handle_t
- KeyedVector< audio_io_handle_t, EffectVector* > mInputs;
+ KeyedVector< audio_session_t, EffectVector* > mInputSessions;
// Automatic output effects are organized per audio_stream_type_t
KeyedVector< audio_stream_type_t, EffectDescVector* > mOutputStreams;
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 0387ee6..1e63a05 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -165,12 +165,10 @@
audio_session_t session,
audio_stream_type_t *stream,
uid_t uid,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
+ const audio_config_t *config,
audio_output_flags_t flags,
audio_port_handle_t selectedDeviceId,
- const audio_offload_info_t *offloadInfo)
+ audio_port_handle_t *portId)
{
if (mAudioPolicyManager == NULL) {
return NO_INIT;
@@ -184,8 +182,9 @@
"%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid, uid);
uid = callingUid;
}
- return mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid, samplingRate,
- format, channelMask, flags, selectedDeviceId, offloadInfo);
+ return mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid,
+ config,
+ flags, selectedDeviceId, portId);
}
status_t AudioPolicyService::startOutput(audio_io_handle_t output,
@@ -276,11 +275,10 @@
audio_session_t session,
pid_t pid,
uid_t uid,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
+ const audio_config_base_t *config,
audio_input_flags_t flags,
- audio_port_handle_t selectedDeviceId)
+ audio_port_handle_t selectedDeviceId,
+ audio_port_handle_t *portId)
{
if (mAudioPolicyManager == NULL) {
return NO_INIT;
@@ -319,9 +317,9 @@
Mutex::Autolock _l(mLock);
// the audio_in_acoustics_t parameter is ignored by get_input()
status = mAudioPolicyManager->getInputForAttr(attr, input, session, uid,
- samplingRate, format, channelMask,
+ config,
flags, selectedDeviceId,
- &inputType);
+ &inputType, portId);
audioPolicyEffects = mAudioPolicyEffects;
if (status == NO_ERROR) {
@@ -375,8 +373,23 @@
return NO_INIT;
}
Mutex::Autolock _l(mLock);
+ AudioPolicyInterface::concurrency_type__mask_t concurrency;
+ status_t status = mAudioPolicyManager->startInput(input, session, &concurrency);
- return mAudioPolicyManager->startInput(input, session);
+ if (status == NO_ERROR) {
+ LOG_ALWAYS_FATAL_IF(concurrency & ~AudioPolicyInterface::API_INPUT_CONCURRENCY_ALL,
+ "startInput(): invalid concurrency type %d", (int)concurrency);
+
+ // enforce permission (if any) required for each type of concurrency
+ if (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CALL) {
+ //TODO: check incall capture permission
+ }
+ if (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CAPTURE) {
+ //TODO: check concurrent capture permission
+ }
+ }
+
+ return status;
}
status_t AudioPolicyService::stopInput(audio_io_handle_t input,
@@ -403,7 +416,7 @@
}
if (audioPolicyEffects != 0) {
// release audio processors from the input
- status_t status = audioPolicyEffects->releaseInputEffects(input);
+ status_t status = audioPolicyEffects->releaseInputEffects(input, session);
if(status != NO_ERROR) {
ALOGW("Failed to release effects on input %d", input);
}
@@ -583,7 +596,8 @@
*count = 0;
return NO_INIT;
}
- return audioPolicyEffects->queryDefaultInputEffects(audioSession, descriptors, count);
+ return audioPolicyEffects->queryDefaultInputEffects(
+ (audio_session_t)audioSession, descriptors, count);
}
bool AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info)
@@ -713,7 +727,7 @@
status_t AudioPolicyService::startAudioSource(const struct audio_port_config *source,
const audio_attributes_t *attributes,
- audio_io_handle_t *handle)
+ audio_patch_handle_t *handle)
{
Mutex::Autolock _l(mLock);
if (mAudioPolicyManager == NULL) {
@@ -724,7 +738,7 @@
IPCThreadState::self()->getCallingUid());
}
-status_t AudioPolicyService::stopAudioSource(audio_io_handle_t handle)
+status_t AudioPolicyService::stopAudioSource(audio_patch_handle_t handle)
{
Mutex::Autolock _l(mLock);
if (mAudioPolicyManager == NULL) {
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
deleted file mode 100644
index 946c380..0000000
--- a/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
+++ /dev/null
@@ -1,633 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "AudioPolicyService"
-//#define LOG_NDEBUG 0
-
-#include <utils/Log.h>
-#include "AudioPolicyService.h"
-#include "ServiceUtilities.h"
-
-#include <system/audio.h>
-#include <system/audio_policy.h>
-#include <hardware/audio_policy.h>
-#include <media/AudioPolicyHelper.h>
-
-namespace android {
-
-
-// ----------------------------------------------------------------------------
-
-status_t AudioPolicyService::setDeviceConnectionState(audio_devices_t device,
- audio_policy_dev_state_t state,
- const char *device_address,
- const char *device_name __unused)
-{
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
- if (!settingsAllowed()) {
- return PERMISSION_DENIED;
- }
- if (!audio_is_output_device(device) && !audio_is_input_device(device)) {
- return BAD_VALUE;
- }
- if (state != AUDIO_POLICY_DEVICE_STATE_AVAILABLE &&
- state != AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) {
- return BAD_VALUE;
- }
-
- ALOGV("setDeviceConnectionState()");
- Mutex::Autolock _l(mLock);
- return mpAudioPolicy->set_device_connection_state(mpAudioPolicy, device,
- state, device_address);
-}
-
-audio_policy_dev_state_t AudioPolicyService::getDeviceConnectionState(
- audio_devices_t device,
- const char *device_address)
-{
- if (mpAudioPolicy == NULL) {
- return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
- }
- return mpAudioPolicy->get_device_connection_state(mpAudioPolicy, device,
- device_address);
-}
-
-status_t AudioPolicyService::setPhoneState(audio_mode_t state)
-{
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
- if (!settingsAllowed()) {
- return PERMISSION_DENIED;
- }
- if (uint32_t(state) >= AUDIO_MODE_CNT) {
- return BAD_VALUE;
- }
-
- ALOGV("setPhoneState()");
-
- // TODO: check if it is more appropriate to do it in platform specific policy manager
- AudioSystem::setMode(state);
-
- Mutex::Autolock _l(mLock);
- mpAudioPolicy->set_phone_state(mpAudioPolicy, state);
- mPhoneState = state;
- return NO_ERROR;
-}
-
-audio_mode_t AudioPolicyService::getPhoneState()
-{
- Mutex::Autolock _l(mLock);
- return mPhoneState;
-}
-
-status_t AudioPolicyService::setForceUse(audio_policy_force_use_t usage,
- audio_policy_forced_cfg_t config)
-{
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
- if (!settingsAllowed()) {
- return PERMISSION_DENIED;
- }
- if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) {
- return BAD_VALUE;
- }
- if (config < 0 || config >= AUDIO_POLICY_FORCE_CFG_CNT) {
- return BAD_VALUE;
- }
- ALOGV("setForceUse()");
- Mutex::Autolock _l(mLock);
- mpAudioPolicy->set_force_use(mpAudioPolicy, usage, config);
- return NO_ERROR;
-}
-
-audio_policy_forced_cfg_t AudioPolicyService::getForceUse(audio_policy_force_use_t usage)
-{
- if (mpAudioPolicy == NULL) {
- return AUDIO_POLICY_FORCE_NONE;
- }
- if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) {
- return AUDIO_POLICY_FORCE_NONE;
- }
- return mpAudioPolicy->get_force_use(mpAudioPolicy, usage);
-}
-
-audio_io_handle_t AudioPolicyService::getOutput(audio_stream_type_t stream,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- audio_output_flags_t flags,
- const audio_offload_info_t *offloadInfo)
-{
- if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
- return AUDIO_IO_HANDLE_NONE;
- }
- if (mpAudioPolicy == NULL) {
- return AUDIO_IO_HANDLE_NONE;
- }
- ALOGV("getOutput()");
- Mutex::Autolock _l(mLock);
- return mpAudioPolicy->get_output(mpAudioPolicy, stream, samplingRate,
- format, channelMask, flags, offloadInfo);
-}
-
-status_t AudioPolicyService::startOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
-{
- if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
- return BAD_VALUE;
- }
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
- ALOGV("startOutput()");
- // create audio processors according to stream
- sp<AudioPolicyEffects>audioPolicyEffects;
- {
- Mutex::Autolock _l(mLock);
- audioPolicyEffects = mAudioPolicyEffects;
- }
- if (audioPolicyEffects != 0) {
- status_t status = audioPolicyEffects->addOutputSessionEffects(output, stream, session);
- if (status != NO_ERROR && status != ALREADY_EXISTS) {
- ALOGW("Failed to add effects on session %d", session);
- }
- }
-
- Mutex::Autolock _l(mLock);
- return mpAudioPolicy->start_output(mpAudioPolicy, output, stream, session);
-}
-
-status_t AudioPolicyService::stopOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
-{
- if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
- return BAD_VALUE;
- }
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
- ALOGV("stopOutput()");
- mOutputCommandThread->stopOutputCommand(output, stream, session);
- return NO_ERROR;
-}
-
-status_t AudioPolicyService::doStopOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
-{
- ALOGV("doStopOutput from tid %d", gettid());
- // release audio processors from the stream
- sp<AudioPolicyEffects>audioPolicyEffects;
- {
- Mutex::Autolock _l(mLock);
- audioPolicyEffects = mAudioPolicyEffects;
- }
- if (audioPolicyEffects != 0) {
- status_t status = audioPolicyEffects->releaseOutputSessionEffects(output, stream, session);
- if (status != NO_ERROR && status != ALREADY_EXISTS) {
- ALOGW("Failed to release effects on session %d", session);
- }
- }
- Mutex::Autolock _l(mLock);
- return mpAudioPolicy->stop_output(mpAudioPolicy, output, stream, session);
-}
-
-void AudioPolicyService::releaseOutput(audio_io_handle_t output,
- audio_stream_type_t stream,
- audio_session_t session)
-{
- if (mpAudioPolicy == NULL) {
- return;
- }
- ALOGV("releaseOutput()");
- mOutputCommandThread->releaseOutputCommand(output, stream, session);
-}
-
-void AudioPolicyService::doReleaseOutput(audio_io_handle_t output,
- audio_stream_type_t stream __unused,
- audio_session_t session __unused)
-{
- ALOGV("doReleaseOutput from tid %d", gettid());
- Mutex::Autolock _l(mLock);
- mpAudioPolicy->release_output(mpAudioPolicy, output);
-}
-
-status_t AudioPolicyService::getInputForAttr(const audio_attributes_t *attr,
- audio_io_handle_t *input,
- audio_session_t session,
- pid_t pid __unused,
- uid_t uid __unused,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- audio_input_flags_t flags __unused,
- audio_port_handle_t selectedDeviceId __unused)
-{
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
-
- audio_source_t inputSource = attr->source;
-
- // already checked by client, but double-check in case the client wrapper is bypassed
- if (inputSource >= AUDIO_SOURCE_CNT && inputSource != AUDIO_SOURCE_HOTWORD &&
- inputSource != AUDIO_SOURCE_FM_TUNER) {
- return BAD_VALUE;
- }
-
- if (inputSource == AUDIO_SOURCE_DEFAULT) {
- inputSource = AUDIO_SOURCE_MIC;
- }
-
- if ((inputSource == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) {
- return BAD_VALUE;
- }
-
- sp<AudioPolicyEffects>audioPolicyEffects;
- {
- Mutex::Autolock _l(mLock);
- // the audio_in_acoustics_t parameter is ignored by get_input()
- *input = mpAudioPolicy->get_input(mpAudioPolicy, inputSource, samplingRate,
- format, channelMask, (audio_in_acoustics_t) 0);
- audioPolicyEffects = mAudioPolicyEffects;
- }
- if (*input == AUDIO_IO_HANDLE_NONE) {
- return INVALID_OPERATION;
- }
-
- if (audioPolicyEffects != 0) {
- // create audio pre processors according to input source
- status_t status = audioPolicyEffects->addInputEffects(*input, inputSource, session);
- if (status != NO_ERROR && status != ALREADY_EXISTS) {
- ALOGW("Failed to add effects on input %d", input);
- }
- }
- return NO_ERROR;
-}
-
-status_t AudioPolicyService::startInput(audio_io_handle_t input,
- audio_session_t session __unused)
-{
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
- Mutex::Autolock _l(mLock);
-
- return mpAudioPolicy->start_input(mpAudioPolicy, input);
-}
-
-status_t AudioPolicyService::stopInput(audio_io_handle_t input,
- audio_session_t session __unused)
-{
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
- Mutex::Autolock _l(mLock);
-
- return mpAudioPolicy->stop_input(mpAudioPolicy, input);
-}
-
-void AudioPolicyService::releaseInput(audio_io_handle_t input,
- audio_session_t session __unused)
-{
- if (mpAudioPolicy == NULL) {
- return;
- }
-
- sp<AudioPolicyEffects>audioPolicyEffects;
- {
- Mutex::Autolock _l(mLock);
- mpAudioPolicy->release_input(mpAudioPolicy, input);
- audioPolicyEffects = mAudioPolicyEffects;
- }
- if (audioPolicyEffects != 0) {
- // release audio processors from the input
- status_t status = audioPolicyEffects->releaseInputEffects(input);
- if(status != NO_ERROR) {
- ALOGW("Failed to release effects on input %d", input);
- }
- }
-}
-
-status_t AudioPolicyService::initStreamVolume(audio_stream_type_t stream,
- int indexMin,
- int indexMax)
-{
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
- if (!settingsAllowed()) {
- return PERMISSION_DENIED;
- }
- if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
- return BAD_VALUE;
- }
- Mutex::Autolock _l(mLock);
- mpAudioPolicy->init_stream_volume(mpAudioPolicy, stream, indexMin, indexMax);
- return NO_ERROR;
-}
-
-status_t AudioPolicyService::setStreamVolumeIndex(audio_stream_type_t stream,
- int index,
- audio_devices_t device)
-{
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
- if (!settingsAllowed()) {
- return PERMISSION_DENIED;
- }
- if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
- return BAD_VALUE;
- }
- Mutex::Autolock _l(mLock);
- if (mpAudioPolicy->set_stream_volume_index_for_device) {
- return mpAudioPolicy->set_stream_volume_index_for_device(mpAudioPolicy,
- stream,
- index,
- device);
- } else {
- return mpAudioPolicy->set_stream_volume_index(mpAudioPolicy, stream, index);
- }
-}
-
-status_t AudioPolicyService::getStreamVolumeIndex(audio_stream_type_t stream,
- int *index,
- audio_devices_t device)
-{
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
- if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
- return BAD_VALUE;
- }
- Mutex::Autolock _l(mLock);
- if (mpAudioPolicy->get_stream_volume_index_for_device) {
- return mpAudioPolicy->get_stream_volume_index_for_device(mpAudioPolicy,
- stream,
- index,
- device);
- } else {
- return mpAudioPolicy->get_stream_volume_index(mpAudioPolicy, stream, index);
- }
-}
-
-uint32_t AudioPolicyService::getStrategyForStream(audio_stream_type_t stream)
-{
- if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
- return 0;
- }
- if (mpAudioPolicy == NULL) {
- return 0;
- }
- return mpAudioPolicy->get_strategy_for_stream(mpAudioPolicy, stream);
-}
-
-//audio policy: use audio_device_t appropriately
-
-audio_devices_t AudioPolicyService::getDevicesForStream(audio_stream_type_t stream)
-{
- if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
- return AUDIO_DEVICE_NONE;
- }
- if (mpAudioPolicy == NULL) {
- return AUDIO_DEVICE_NONE;
- }
- return mpAudioPolicy->get_devices_for_stream(mpAudioPolicy, stream);
-}
-
-audio_io_handle_t AudioPolicyService::getOutputForEffect(const effect_descriptor_t *desc)
-{
- // FIXME change return type to status_t, and return NO_INIT here
- if (mpAudioPolicy == NULL) {
- return 0;
- }
- Mutex::Autolock _l(mLock);
- return mpAudioPolicy->get_output_for_effect(mpAudioPolicy, desc);
-}
-
-status_t AudioPolicyService::registerEffect(const effect_descriptor_t *desc,
- audio_io_handle_t io,
- uint32_t strategy,
- audio_session_t session,
- int id)
-{
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
- return mpAudioPolicy->register_effect(mpAudioPolicy, desc, io, strategy, session, id);
-}
-
-status_t AudioPolicyService::unregisterEffect(int id)
-{
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
- return mpAudioPolicy->unregister_effect(mpAudioPolicy, id);
-}
-
-status_t AudioPolicyService::setEffectEnabled(int id, bool enabled)
-{
- if (mpAudioPolicy == NULL) {
- return NO_INIT;
- }
- return mpAudioPolicy->set_effect_enabled(mpAudioPolicy, id, enabled);
-}
-
-bool AudioPolicyService::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
-{
- if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
- return false;
- }
- if (mpAudioPolicy == NULL) {
- return false;
- }
- Mutex::Autolock _l(mLock);
- return mpAudioPolicy->is_stream_active(mpAudioPolicy, stream, inPastMs);
-}
-
-bool AudioPolicyService::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const
-{
- if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
- return false;
- }
- if (mpAudioPolicy == NULL) {
- return false;
- }
- Mutex::Autolock _l(mLock);
- return mpAudioPolicy->is_stream_active_remotely(mpAudioPolicy, stream, inPastMs);
-}
-
-bool AudioPolicyService::isSourceActive(audio_source_t source) const
-{
- if (mpAudioPolicy == NULL) {
- return false;
- }
- if (mpAudioPolicy->is_source_active == 0) {
- return false;
- }
- Mutex::Autolock _l(mLock);
- return mpAudioPolicy->is_source_active(mpAudioPolicy, source);
-}
-
-status_t AudioPolicyService::queryDefaultPreProcessing(audio_session_t audioSession,
- effect_descriptor_t *descriptors,
- uint32_t *count)
-{
- if (mpAudioPolicy == NULL) {
- *count = 0;
- return NO_INIT;
- }
- sp<AudioPolicyEffects>audioPolicyEffects;
- {
- Mutex::Autolock _l(mLock);
- audioPolicyEffects = mAudioPolicyEffects;
- }
- if (audioPolicyEffects == 0) {
- *count = 0;
- return NO_INIT;
- }
- return audioPolicyEffects->queryDefaultInputEffects(audioSession, descriptors, count);
-}
-
-bool AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info)
-{
- if (mpAudioPolicy == NULL) {
- ALOGV("mpAudioPolicy == NULL");
- return false;
- }
-
- if (mpAudioPolicy->is_offload_supported == NULL) {
- ALOGV("HAL does not implement is_offload_supported");
- return false;
- }
-
- return mpAudioPolicy->is_offload_supported(mpAudioPolicy, &info);
-}
-
-status_t AudioPolicyService::listAudioPorts(audio_port_role_t role __unused,
- audio_port_type_t type __unused,
- unsigned int *num_ports,
- struct audio_port *ports __unused,
- unsigned int *generation __unused)
-{
- *num_ports = 0;
- return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::getAudioPort(struct audio_port *port __unused)
-{
- return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::createAudioPatch(const struct audio_patch *patch __unused,
- audio_patch_handle_t *handle __unused)
-{
- return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::releaseAudioPatch(audio_patch_handle_t handle __unused)
-{
- return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::listAudioPatches(unsigned int *num_patches,
- struct audio_patch *patches __unused,
- unsigned int *generation __unused)
-{
- *num_patches = 0;
- return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::setAudioPortConfig(const struct audio_port_config *config __unused)
-{
- return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::getOutputForAttr(const audio_attributes_t *attr,
- audio_io_handle_t *output,
- audio_session_t session __unused,
- audio_stream_type_t *stream,
- uid_t uid __unused,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- audio_output_flags_t flags,
- audio_port_handle_t selectedDeviceId __unused,
- const audio_offload_info_t *offloadInfo)
-{
- if (attr != NULL) {
- *stream = audio_attributes_to_stream_type(attr);
- } else {
- if (*stream == AUDIO_STREAM_DEFAULT) {
- return BAD_VALUE;
- }
- }
- *output = getOutput(*stream, samplingRate, format, channelMask,
- flags, offloadInfo);
- if (*output == AUDIO_IO_HANDLE_NONE) {
- return INVALID_OPERATION;
- }
- return NO_ERROR;
-}
-
-status_t AudioPolicyService::acquireSoundTriggerSession(audio_session_t *session __unused,
- audio_io_handle_t *ioHandle __unused,
- audio_devices_t *device __unused)
-{
- return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::releaseSoundTriggerSession(audio_session_t session __unused)
-{
- return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::registerPolicyMixes(const Vector<AudioMix>& mixes __unused,
- bool registration __unused)
-{
- return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::startAudioSource(const struct audio_port_config *source,
- const audio_attributes_t *attributes,
- audio_io_handle_t *handle)
-{
- return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::stopAudioSource(audio_io_handle_t handle)
-{
- return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::setMasterMono(bool mono)
-{
- return INVALID_OPERATION;
-}
-
-status_t AudioPolicyService::getMasterMono(bool *mono)
-{
- return INVALID_OPERATION;
-}
-
-}; // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 52ed73e..c4f6367 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -34,13 +34,10 @@
#include "ServiceUtilities.h"
#include <hardware_legacy/power.h>
#include <media/AudioEffect.h>
-#include <media/EffectsFactoryApi.h>
#include <media/AudioParameter.h>
-#include <hardware/hardware.h>
#include <system/audio.h>
#include <system/audio_policy.h>
-#include <hardware/audio_policy.h>
namespace android {
@@ -52,11 +49,6 @@
static const nsecs_t kAudioCommandTimeoutNs = seconds(3); // 3 seconds
-#ifdef USE_LEGACY_AUDIO_POLICY
-namespace {
- extern struct audio_policy_service_ops aps_ops;
-};
-#endif
// ----------------------------------------------------------------------------
@@ -78,40 +70,8 @@
// start output activity command thread
mOutputCommandThread = new AudioCommandThread(String8("ApmOutput"), this);
-#ifdef USE_LEGACY_AUDIO_POLICY
- ALOGI("AudioPolicyService CSTOR in legacy mode");
-
- /* instantiate the audio policy manager */
- const struct hw_module_t *module;
- int rc = hw_get_module(AUDIO_POLICY_HARDWARE_MODULE_ID, &module);
- if (rc) {
- return;
- }
- rc = audio_policy_dev_open(module, &mpAudioPolicyDev);
- ALOGE_IF(rc, "couldn't open audio policy device (%s)", strerror(-rc));
- if (rc) {
- return;
- }
-
- rc = mpAudioPolicyDev->create_audio_policy(mpAudioPolicyDev, &aps_ops, this,
- &mpAudioPolicy);
- ALOGE_IF(rc, "couldn't create audio policy (%s)", strerror(-rc));
- if (rc) {
- return;
- }
-
- rc = mpAudioPolicy->init_check(mpAudioPolicy);
- ALOGE_IF(rc, "couldn't init_check the audio policy (%s)", strerror(-rc));
- if (rc) {
- return;
- }
- ALOGI("Loaded audio policy from %s (%s)", module->name, module->id);
-#else
- ALOGI("AudioPolicyService CSTOR in new mode");
-
mAudioPolicyClient = new AudioPolicyClient(this);
mAudioPolicyManager = createAudioPolicyManager(mAudioPolicyClient);
-#endif
}
// load audio processing modules
sp<AudioPolicyEffects>audioPolicyEffects = new AudioPolicyEffects();
@@ -127,17 +87,8 @@
mAudioCommandThread->exit();
mOutputCommandThread->exit();
-#ifdef USE_LEGACY_AUDIO_POLICY
- if (mpAudioPolicy != NULL && mpAudioPolicyDev != NULL) {
- mpAudioPolicyDev->destroy_audio_policy(mpAudioPolicyDev, mpAudioPolicy);
- }
- if (mpAudioPolicyDev != NULL) {
- audio_policy_dev_close(mpAudioPolicyDev);
- }
-#else
destroyAudioPolicyManager(mAudioPolicyManager);
delete mAudioPolicyClient;
-#endif
mNotificationClients.clear();
mAudioPolicyEffects.clear();
@@ -185,14 +136,12 @@
Mutex::Autolock _l(mNotificationClientsLock);
mNotificationClients.removeItem(uid);
}
-#ifndef USE_LEGACY_AUDIO_POLICY
{
Mutex::Autolock _l(mLock);
if (mAudioPolicyManager) {
mAudioPolicyManager->releaseResourcesForUid(uid);
}
}
-#endif
}
void AudioPolicyService::onAudioPortListUpdate()
@@ -358,11 +307,7 @@
char buffer[SIZE];
String8 result;
-#ifdef USE_LEGACY_AUDIO_POLICY
- snprintf(buffer, SIZE, "PolicyManager Interface: %p\n", mpAudioPolicy);
-#else
snprintf(buffer, SIZE, "AudioPolicyManager: %p\n", mAudioPolicyManager);
-#endif
result.append(buffer);
snprintf(buffer, SIZE, "Command Thread: %p\n", mAudioCommandThread.get());
result.append(buffer);
@@ -392,15 +337,9 @@
mTonePlaybackThread->dump(fd);
}
-#ifdef USE_LEGACY_AUDIO_POLICY
- if (mpAudioPolicy) {
- mpAudioPolicy->dump(mpAudioPolicy, fd);
- }
-#else
if (mAudioPolicyManager) {
mAudioPolicyManager->dump(fd);
}
-#endif
if (locked) mLock.unlock();
}
@@ -1208,29 +1147,4 @@
int aps_set_voice_volume(void *service, float volume, int delay_ms);
};
-#ifdef USE_LEGACY_AUDIO_POLICY
-namespace {
- struct audio_policy_service_ops aps_ops = {
- .open_output = aps_open_output,
- .open_duplicate_output = aps_open_dup_output,
- .close_output = aps_close_output,
- .suspend_output = aps_suspend_output,
- .restore_output = aps_restore_output,
- .open_input = aps_open_input,
- .close_input = aps_close_input,
- .set_stream_volume = aps_set_stream_volume,
- .invalidate_stream = aps_invalidate_stream,
- .set_parameters = aps_set_parameters,
- .get_parameters = aps_get_parameters,
- .start_tone = aps_start_tone,
- .stop_tone = aps_stop_tone,
- .set_voice_volume = aps_set_voice_volume,
- .move_effects = aps_move_effects,
- .load_hw_module = aps_load_hw_module,
- .open_output_on_module = aps_open_output_on_module,
- .open_input_on_module = aps_open_input_on_module,
- };
-}; // namespace <unnamed>
-#endif
-
}; // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 8c9b23c..9a083f4 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -26,14 +26,10 @@
#include <binder/BinderService.h>
#include <system/audio.h>
#include <system/audio_policy.h>
-#include <hardware/audio_policy.h>
#include <media/IAudioPolicyService.h>
#include <media/ToneGenerator.h>
#include <media/AudioEffect.h>
#include <media/AudioPolicy.h>
-#ifdef USE_LEGACY_AUDIO_POLICY
-#include <hardware_legacy/AudioPolicyInterface.h>
-#endif
#include "AudioPolicyEffects.h"
#include "managerdefault/AudioPolicyManager.h"
@@ -84,12 +80,10 @@
audio_session_t session,
audio_stream_type_t *stream,
uid_t uid,
- uint32_t samplingRate = 0,
- audio_format_t format = AUDIO_FORMAT_DEFAULT,
- audio_channel_mask_t channelMask = 0,
- audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
- audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
- const audio_offload_info_t *offloadInfo = NULL);
+ const audio_config_t *config,
+ audio_output_flags_t flags,
+ audio_port_handle_t selectedDeviceId,
+ audio_port_handle_t *portId);
virtual status_t startOutput(audio_io_handle_t output,
audio_stream_type_t stream,
audio_session_t session);
@@ -104,11 +98,10 @@
audio_session_t session,
pid_t pid,
uid_t uid,
- uint32_t samplingRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
+ const audio_config_base_t *config,
audio_input_flags_t flags,
- audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
+ audio_port_handle_t *portId = NULL);
virtual status_t startInput(audio_io_handle_t input,
audio_session_t session);
virtual status_t stopInput(audio_io_handle_t input,
@@ -203,8 +196,8 @@
virtual status_t startAudioSource(const struct audio_port_config *source,
const audio_attributes_t *attributes,
- audio_io_handle_t *handle);
- virtual status_t stopAudioSource(audio_io_handle_t handle);
+ audio_patch_handle_t *handle);
+ virtual status_t stopAudioSource(audio_patch_handle_t handle);
virtual status_t setMasterMono(bool mono);
virtual status_t getMasterMono(bool *mono);
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index d8d3ff1..f1cdea3 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -27,7 +27,7 @@
CameraFlashlight.cpp \
common/Camera2ClientBase.cpp \
common/CameraDeviceBase.cpp \
- common/CameraModule.cpp \
+ common/CameraProviderManager.cpp \
common/FrameProcessorBase.cpp \
api1/CameraClient.cpp \
api1/Camera2Client.cpp \
@@ -40,19 +40,22 @@
api1/client2/CaptureSequencer.cpp \
api1/client2/ZslProcessor.cpp \
api2/CameraDeviceClient.cpp \
+ device1/CameraHardwareInterface.cpp \
device3/Camera3Device.cpp \
device3/Camera3Stream.cpp \
device3/Camera3IOStreamBase.cpp \
device3/Camera3InputStream.cpp \
device3/Camera3OutputStream.cpp \
- device3/Camera3ZslStream.cpp \
device3/Camera3DummyStream.cpp \
+ device3/Camera3SharedOutputStream.cpp \
device3/StatusTracker.cpp \
device3/Camera3BufferManager.cpp \
+ device3/Camera3StreamSplitter.cpp \
gui/RingBufferConsumer.cpp \
utils/CameraTraces.cpp \
utils/AutoConditionLock.cpp \
- utils/TagMonitor.cpp
+ utils/TagMonitor.cpp \
+ utils/LatencyHistogram.cpp
LOCAL_SHARED_LIBRARIES:= \
libui \
@@ -63,18 +66,24 @@
libmedia \
libmediautils \
libcamera_client \
+ libcamera_metadata \
+ libfmq \
libgui \
libhardware \
- libcamera_metadata \
+ libhidlbase \
+ libhidltransport \
libjpeg \
- libmemunreachable
+ libmemunreachable \
+ android.hardware.camera.common@1.0 \
+ android.hardware.camera.provider@2.4 \
+ android.hardware.camera.device@1.0 \
+ android.hardware.camera.device@3.2
-LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := libbinder libcamera_client
+LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := libbinder libcamera_client libfmq
LOCAL_C_INCLUDES += \
system/media/private/camera/include \
- frameworks/native/include/media/openmax \
- external/jpeg
+ frameworks/native/include/media/openmax
LOCAL_EXPORT_C_INCLUDE_DIRS := \
frameworks/av/services/camera/libcameraservice
diff --git a/services/camera/libcameraservice/CameraFlashlight.cpp b/services/camera/libcameraservice/CameraFlashlight.cpp
index 6314ba5..e06a81f 100644
--- a/services/camera/libcameraservice/CameraFlashlight.cpp
+++ b/services/camera/libcameraservice/CameraFlashlight.cpp
@@ -36,10 +36,11 @@
// CameraFlashlight implementation begins
// used by camera service to control flashflight.
/////////////////////////////////////////////////////////////////////
-CameraFlashlight::CameraFlashlight(CameraModule& cameraModule,
- const camera_module_callbacks_t& callbacks) :
- mCameraModule(&cameraModule),
- mCallbacks(&callbacks),
+
+CameraFlashlight::CameraFlashlight(sp<CameraProviderManager> providerManager,
+ camera_module_callbacks_t* callbacks) :
+ mProviderManager(providerManager),
+ mCallbacks(callbacks),
mFlashlightMapInitialized(false) {
}
@@ -53,45 +54,12 @@
return INVALID_OPERATION;
}
- status_t res = OK;
-
- if (mCameraModule->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_4) {
- mFlashControl = new ModuleFlashControl(*mCameraModule, *mCallbacks);
- if (mFlashControl == NULL) {
- ALOGV("%s: cannot create flash control for module api v2.4+",
- __FUNCTION__);
- return NO_MEMORY;
- }
+ if (mProviderManager->supportSetTorchMode(cameraId.string())) {
+ mFlashControl = new ProviderFlashControl(mProviderManager);
} else {
- uint32_t deviceVersion = CAMERA_DEVICE_API_VERSION_1_0;
-
- if (mCameraModule->getModuleApiVersion() >=
- CAMERA_MODULE_API_VERSION_2_0) {
- camera_info info;
- res = mCameraModule->getCameraInfo(
- atoi(String8(cameraId).string()), &info);
- if (res) {
- ALOGE("%s: failed to get camera info for camera %s",
- __FUNCTION__, cameraId.string());
- return res;
- }
- deviceVersion = info.device_version;
- }
-
- if (deviceVersion >= CAMERA_DEVICE_API_VERSION_3_0) {
- CameraDeviceClientFlashControl *flashControl =
- new CameraDeviceClientFlashControl(*mCameraModule,
- *mCallbacks);
- if (!flashControl) {
- return NO_MEMORY;
- }
-
- mFlashControl = flashControl;
- } else {
- mFlashControl =
- new CameraHardwareInterfaceFlashControl(*mCameraModule,
- *mCallbacks);
- }
+ // Only HAL1 devices do not support setTorchMode
+ mFlashControl =
+ new CameraHardwareInterfaceFlashControl(mProviderManager, *mCallbacks);
}
return OK;
@@ -125,10 +93,6 @@
}
if (mFlashControl == NULL) {
- if (enabled == false) {
- return OK;
- }
-
res = createFlashlightControl(cameraId);
if (res) {
return res;
@@ -154,18 +118,32 @@
return res;
}
+int CameraFlashlight::getNumberOfCameras() {
+ return mProviderManager->getAPI1CompatibleCameraCount();
+}
+
status_t CameraFlashlight::findFlashUnits() {
Mutex::Autolock l(mLock);
status_t res;
- int32_t numCameras = mCameraModule->getNumberOfCameras();
- mHasFlashlightMap.clear();
- mFlashlightMapInitialized = false;
+ std::vector<String8> cameraIds;
+ int numberOfCameras = getNumberOfCameras();
+ cameraIds.resize(numberOfCameras);
+ // No module, must be provider
+ std::vector<std::string> ids = mProviderManager->getAPI1CompatibleCameraDeviceIds();
+ for (size_t i = 0; i < cameraIds.size(); i++) {
+ cameraIds[i] = String8(ids[i].c_str());
+ }
- for (int32_t i = 0; i < numCameras; i++) {
+ mFlashControl.clear();
+
+ for (auto &id : cameraIds) {
+ ssize_t index = mHasFlashlightMap.indexOfKey(id);
+ if (0 <= index) {
+ continue;
+ }
+
bool hasFlash = false;
- String8 id = String8::format("%d", i);
-
res = createFlashlightControl(id);
if (res) {
ALOGE("%s: failed to create flash control for %s", __FUNCTION__,
@@ -214,6 +192,15 @@
return mHasFlashlightMap.valueAt(index);
}
+bool CameraFlashlight::isBackwardCompatibleMode(const String8& cameraId) {
+ bool backwardCompatibleMode = false;
+ if (mProviderManager != nullptr &&
+ !mProviderManager->supportSetTorchMode(cameraId.string())) {
+ backwardCompatibleMode = true;
+ }
+ return backwardCompatibleMode;
+}
+
status_t CameraFlashlight::prepareDeviceOpen(const String8& cameraId) {
ALOGV("%s: prepare for device open", __FUNCTION__);
@@ -224,14 +211,14 @@
return NO_INIT;
}
- if (mCameraModule->getModuleApiVersion() < CAMERA_MODULE_API_VERSION_2_4) {
+ if (isBackwardCompatibleMode(cameraId)) {
// framework is going to open a camera device, all flash light control
// should be closed for backward compatible support.
mFlashControl.clear();
if (mOpenedCameraIds.size() == 0) {
// notify torch unavailable for all cameras with a flash
- int numCameras = mCameraModule->getNumberOfCameras();
+ int numCameras = getNumberOfCameras();
for (int i = 0; i < numCameras; i++) {
if (hasFlashUnitLocked(String8::format("%d", i))) {
mCallbacks->torch_mode_status_change(mCallbacks,
@@ -274,9 +261,9 @@
if (mOpenedCameraIds.size() != 0)
return OK;
- if (mCameraModule->getModuleApiVersion() < CAMERA_MODULE_API_VERSION_2_4) {
+ if (isBackwardCompatibleMode(cameraId)) {
// notify torch available for all cameras with a flash
- int numCameras = mCameraModule->getNumberOfCameras();
+ int numCameras = getNumberOfCameras();
for (int i = 0; i < numCameras; i++) {
if (hasFlashUnitLocked(String8::format("%d", i))) {
mCallbacks->torch_mode_status_change(mCallbacks,
@@ -298,359 +285,40 @@
// ModuleFlashControl implementation begins
// Flash control for camera module v2.4 and above.
/////////////////////////////////////////////////////////////////////
-ModuleFlashControl::ModuleFlashControl(CameraModule& cameraModule,
- const camera_module_callbacks_t& callbacks) :
- mCameraModule(&cameraModule) {
- (void) callbacks;
+ProviderFlashControl::ProviderFlashControl(sp<CameraProviderManager> providerManager) :
+ mProviderManager(providerManager) {
}
-ModuleFlashControl::~ModuleFlashControl() {
+ProviderFlashControl::~ProviderFlashControl() {
}
-status_t ModuleFlashControl::hasFlashUnit(const String8& cameraId, bool *hasFlash) {
+status_t ProviderFlashControl::hasFlashUnit(const String8& cameraId, bool *hasFlash) {
if (!hasFlash) {
return BAD_VALUE;
}
-
- *hasFlash = false;
- Mutex::Autolock l(mLock);
-
- camera_info info;
- status_t res = mCameraModule->getCameraInfo(atoi(cameraId.string()),
- &info);
- if (res != 0) {
- return res;
- }
-
- CameraMetadata metadata;
- metadata = info.static_camera_characteristics;
- camera_metadata_entry flashAvailable =
- metadata.find(ANDROID_FLASH_INFO_AVAILABLE);
- if (flashAvailable.count == 1 && flashAvailable.data.u8[0] == 1) {
- *hasFlash = true;
- }
-
+ *hasFlash = mProviderManager->hasFlashUnit(cameraId.string());
return OK;
}
-status_t ModuleFlashControl::setTorchMode(const String8& cameraId, bool enabled) {
+status_t ProviderFlashControl::setTorchMode(const String8& cameraId, bool enabled) {
ALOGV("%s: set camera %s torch mode to %d", __FUNCTION__,
cameraId.string(), enabled);
- Mutex::Autolock l(mLock);
- return mCameraModule->setTorchMode(cameraId.string(), enabled);
+ return mProviderManager->setTorchMode(cameraId.string(), enabled);
}
-// ModuleFlashControl implementation ends
-
-/////////////////////////////////////////////////////////////////////
-// CameraDeviceClientFlashControl implementation begins
-// Flash control for camera module <= v2.3 and camera HAL v2-v3
-/////////////////////////////////////////////////////////////////////
-CameraDeviceClientFlashControl::CameraDeviceClientFlashControl(
- CameraModule& cameraModule,
- const camera_module_callbacks_t& callbacks) :
- mCameraModule(&cameraModule),
- mCallbacks(&callbacks),
- mTorchEnabled(false),
- mMetadata(NULL),
- mStreaming(false) {
-}
-
-CameraDeviceClientFlashControl::~CameraDeviceClientFlashControl() {
- disconnectCameraDevice();
- if (mMetadata) {
- delete mMetadata;
- }
-
- mSurface.clear();
- mSurfaceTexture.clear();
- mProducer.clear();
- mConsumer.clear();
-
- if (mTorchEnabled) {
- if (mCallbacks) {
- ALOGV("%s: notify the framework that torch was turned off",
- __FUNCTION__);
- mCallbacks->torch_mode_status_change(mCallbacks,
- mCameraId.string(), TORCH_MODE_STATUS_AVAILABLE_OFF);
- }
- }
-}
-
-status_t CameraDeviceClientFlashControl::initializeSurface(
- sp<CameraDeviceBase> &device, int32_t width, int32_t height) {
- status_t res;
- BufferQueue::createBufferQueue(&mProducer, &mConsumer);
-
- mSurfaceTexture = new GLConsumer(mConsumer, 0, GLConsumer::TEXTURE_EXTERNAL,
- true, true);
- if (mSurfaceTexture == NULL) {
- return NO_MEMORY;
- }
-
- int32_t format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
- res = mSurfaceTexture->setDefaultBufferSize(width, height);
- if (res) {
- return res;
- }
- res = mSurfaceTexture->setDefaultBufferFormat(format);
- if (res) {
- return res;
- }
-
- mSurface = new Surface(mProducer, /*useAsync*/ true);
- if (mSurface == NULL) {
- return NO_MEMORY;
- }
- res = device->createStream(mSurface, width, height, format,
- HAL_DATASPACE_UNKNOWN, CAMERA3_STREAM_ROTATION_0, &mStreamId);
- if (res) {
- return res;
- }
-
- res = device->configureStreams();
- if (res) {
- return res;
- }
-
- return res;
-}
-
-status_t CameraDeviceClientFlashControl::getSmallestSurfaceSize(
- const camera_info& info, int32_t *width, int32_t *height) {
- if (!width || !height) {
- return BAD_VALUE;
- }
-
- int32_t w = INT32_MAX;
- int32_t h = 1;
-
- CameraMetadata metadata;
- metadata = info.static_camera_characteristics;
- camera_metadata_entry streamConfigs =
- metadata.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
- for (size_t i = 0; i < streamConfigs.count; i += 4) {
- int32_t fmt = streamConfigs.data.i32[i];
- if (fmt == ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED) {
- int32_t ww = streamConfigs.data.i32[i + 1];
- int32_t hh = streamConfigs.data.i32[i + 2];
-
- if (w * h > ww * hh) {
- w = ww;
- h = hh;
- }
- }
- }
-
- // if stream configuration is not found, try available processed sizes.
- if (streamConfigs.count == 0) {
- camera_metadata_entry availableProcessedSizes =
- metadata.find(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES);
- for (size_t i = 0; i < availableProcessedSizes.count; i += 2) {
- int32_t ww = availableProcessedSizes.data.i32[i];
- int32_t hh = availableProcessedSizes.data.i32[i + 1];
- if (w * h > ww * hh) {
- w = ww;
- h = hh;
- }
- }
- }
-
- if (w == INT32_MAX) {
- return NAME_NOT_FOUND;
- }
-
- *width = w;
- *height = h;
-
- return OK;
-}
-
-status_t CameraDeviceClientFlashControl::connectCameraDevice(
- const String8& cameraId) {
- camera_info info;
- status_t res = mCameraModule->getCameraInfo(atoi(cameraId.string()), &info);
- if (res != 0) {
- ALOGE("%s: failed to get camera info for camera %s", __FUNCTION__,
- cameraId.string());
- return res;
- }
-
- sp<CameraDeviceBase> device =
- new Camera3Device(atoi(cameraId.string()));
- if (device == NULL) {
- return NO_MEMORY;
- }
-
- res = device->initialize(mCameraModule);
- if (res) {
- return res;
- }
-
- int32_t width, height;
- res = getSmallestSurfaceSize(info, &width, &height);
- if (res) {
- return res;
- }
- res = initializeSurface(device, width, height);
- if (res) {
- return res;
- }
-
- mCameraId = cameraId;
- mStreaming = (info.device_version <= CAMERA_DEVICE_API_VERSION_3_1);
- mDevice = device;
-
- return OK;
-}
-
-status_t CameraDeviceClientFlashControl::disconnectCameraDevice() {
- if (mDevice != NULL) {
- mDevice->disconnect();
- mDevice.clear();
- }
-
- return OK;
-}
-
-
-
-status_t CameraDeviceClientFlashControl::hasFlashUnit(const String8& cameraId,
- bool *hasFlash) {
- ALOGV("%s: checking if camera %s has a flash unit", __FUNCTION__,
- cameraId.string());
-
- Mutex::Autolock l(mLock);
- return hasFlashUnitLocked(cameraId, hasFlash);
-
-}
-
-status_t CameraDeviceClientFlashControl::hasFlashUnitLocked(
- const String8& cameraId, bool *hasFlash) {
- if (!hasFlash) {
- return BAD_VALUE;
- }
-
- camera_info info;
- status_t res = mCameraModule->getCameraInfo(
- atoi(cameraId.string()), &info);
- if (res != 0) {
- ALOGE("%s: failed to get camera info for camera %s", __FUNCTION__,
- cameraId.string());
- return res;
- }
-
- CameraMetadata metadata;
- metadata = info.static_camera_characteristics;
- camera_metadata_entry flashAvailable =
- metadata.find(ANDROID_FLASH_INFO_AVAILABLE);
- if (flashAvailable.count == 1 && flashAvailable.data.u8[0] == 1) {
- *hasFlash = true;
- }
-
- return OK;
-}
-
-status_t CameraDeviceClientFlashControl::submitTorchEnabledRequest() {
- status_t res;
-
- if (mMetadata == NULL) {
- mMetadata = new CameraMetadata();
- if (mMetadata == NULL) {
- return NO_MEMORY;
- }
- res = mDevice->createDefaultRequest(
- CAMERA3_TEMPLATE_PREVIEW, mMetadata);
- if (res) {
- return res;
- }
- }
-
- uint8_t torchOn = ANDROID_FLASH_MODE_TORCH;
- mMetadata->update(ANDROID_FLASH_MODE, &torchOn, 1);
- mMetadata->update(ANDROID_REQUEST_OUTPUT_STREAMS, &mStreamId, 1);
-
- uint8_t aeMode = ANDROID_CONTROL_AE_MODE_ON;
- mMetadata->update(ANDROID_CONTROL_AE_MODE, &aeMode, 1);
-
- int32_t requestId = 0;
- mMetadata->update(ANDROID_REQUEST_ID, &requestId, 1);
-
- if (mStreaming) {
- res = mDevice->setStreamingRequest(*mMetadata);
- } else {
- res = mDevice->capture(*mMetadata);
- }
- return res;
-}
-
-
-
-
-status_t CameraDeviceClientFlashControl::setTorchMode(
- const String8& cameraId, bool enabled) {
- bool hasFlash = false;
-
- Mutex::Autolock l(mLock);
- status_t res = hasFlashUnitLocked(cameraId, &hasFlash);
-
- // pre-check
- if (enabled) {
- // invalid camera?
- if (res) {
- return -EINVAL;
- }
- // no flash unit?
- if (!hasFlash) {
- return -ENOSYS;
- }
- // already opened for a different device?
- if (mDevice != NULL && cameraId != mCameraId) {
- return BAD_INDEX;
- }
- } else if (mDevice == NULL || cameraId != mCameraId) {
- // disabling the torch mode of an un-opened or different device.
- return OK;
- } else {
- // disabling the torch mode of currently opened device
- disconnectCameraDevice();
- mTorchEnabled = false;
- mCallbacks->torch_mode_status_change(mCallbacks,
- cameraId.string(), TORCH_MODE_STATUS_AVAILABLE_OFF);
- return OK;
- }
-
- if (mDevice == NULL) {
- res = connectCameraDevice(cameraId);
- if (res) {
- return res;
- }
- }
-
- res = submitTorchEnabledRequest();
- if (res) {
- return res;
- }
-
- mTorchEnabled = true;
- mCallbacks->torch_mode_status_change(mCallbacks,
- cameraId.string(), TORCH_MODE_STATUS_AVAILABLE_ON);
- return OK;
-}
-// CameraDeviceClientFlashControl implementation ends
-
+// ProviderFlashControl implementation ends
/////////////////////////////////////////////////////////////////////
// CameraHardwareInterfaceFlashControl implementation begins
// Flash control for camera module <= v2.3 and camera HAL v1
/////////////////////////////////////////////////////////////////////
+
CameraHardwareInterfaceFlashControl::CameraHardwareInterfaceFlashControl(
- CameraModule& cameraModule,
+ sp<CameraProviderManager> manager,
const camera_module_callbacks_t& callbacks) :
- mCameraModule(&cameraModule),
+ mProviderManager(manager),
mCallbacks(&callbacks),
mTorchEnabled(false) {
-
}
CameraHardwareInterfaceFlashControl::~CameraHardwareInterfaceFlashControl() {
@@ -851,7 +519,7 @@
sp<CameraHardwareInterface> device =
new CameraHardwareInterface(cameraId.string());
- status_t res = device->initialize(mCameraModule);
+ status_t res = device->initialize(mProviderManager);
if (res) {
ALOGE("%s: initializing camera %s failed", __FUNCTION__,
cameraId.string());
@@ -859,7 +527,7 @@
}
// need to set __get_memory in set_callbacks().
- device->setCallbacks(NULL, NULL, NULL, NULL);
+ device->setCallbacks(NULL, NULL, NULL, NULL, NULL);
mParameters = device->getParameters();
diff --git a/services/camera/libcameraservice/CameraFlashlight.h b/services/camera/libcameraservice/CameraFlashlight.h
index 59fc87d..c86ee85 100644
--- a/services/camera/libcameraservice/CameraFlashlight.h
+++ b/services/camera/libcameraservice/CameraFlashlight.h
@@ -17,14 +17,16 @@
#ifndef ANDROID_SERVERS_CAMERA_CAMERAFLASHLIGHT_H
#define ANDROID_SERVERS_CAMERA_CAMERAFLASHLIGHT_H
-#include "hardware/camera_common.h"
-#include "utils/KeyedVector.h"
-#include "utils/SortedVector.h"
-#include "gui/GLConsumer.h"
-#include "gui/Surface.h"
+#include <gui/GLConsumer.h>
+#include <gui/Surface.h>
+#include <hardware/camera_common.h>
+#include <utils/KeyedVector.h>
+#include <utils/SortedVector.h>
+#include "common/CameraProviderManager.h"
#include "common/CameraDeviceBase.h"
#include "device1/CameraHardwareInterface.h"
+
namespace android {
/**
@@ -52,8 +54,8 @@
*/
class CameraFlashlight : public virtual VirtualLightRefBase {
public:
- CameraFlashlight(CameraModule& cameraModule,
- const camera_module_callbacks_t& callbacks);
+ CameraFlashlight(sp<CameraProviderManager> providerManager,
+ camera_module_callbacks_t* callbacks);
virtual ~CameraFlashlight();
// Find all flash units. This must be called before other methods. All
@@ -87,8 +89,16 @@
// mLock should be locked.
bool hasFlashUnitLocked(const String8& cameraId);
+ // Check if flash control is in backward compatible mode (simulated torch API by
+ // opening cameras)
+ bool isBackwardCompatibleMode(const String8& cameraId);
+
+ int getNumberOfCameras();
+
sp<FlashControlBase> mFlashControl;
- CameraModule *mCameraModule;
+
+ sp<CameraProviderManager> mProviderManager;
+
const camera_module_callbacks_t *mCallbacks;
SortedVector<String8> mOpenedCameraIds;
@@ -100,74 +110,19 @@
};
/**
- * Flash control for camera module v2.4 and above.
+ * Flash control for camera provider v2.4 and above.
*/
-class ModuleFlashControl : public FlashControlBase {
+class ProviderFlashControl : public FlashControlBase {
public:
- ModuleFlashControl(CameraModule& cameraModule,
- const camera_module_callbacks_t& callbacks);
- virtual ~ModuleFlashControl();
+ ProviderFlashControl(sp<CameraProviderManager> providerManager);
+ virtual ~ProviderFlashControl();
// FlashControlBase
status_t hasFlashUnit(const String8& cameraId, bool *hasFlash);
status_t setTorchMode(const String8& cameraId, bool enabled);
private:
- CameraModule *mCameraModule;
-
- Mutex mLock;
-};
-
-/**
- * Flash control for camera module <= v2.3 and camera HAL v2-v3
- */
-class CameraDeviceClientFlashControl : public FlashControlBase {
- public:
- CameraDeviceClientFlashControl(CameraModule& cameraModule,
- const camera_module_callbacks_t& callbacks);
- virtual ~CameraDeviceClientFlashControl();
-
- // FlashControlBase
- status_t setTorchMode(const String8& cameraId, bool enabled);
- status_t hasFlashUnit(const String8& cameraId, bool *hasFlash);
-
- private:
- // connect to a camera device
- status_t connectCameraDevice(const String8& cameraId);
- // disconnect and free mDevice
- status_t disconnectCameraDevice();
-
- // initialize a surface
- status_t initializeSurface(sp<CameraDeviceBase>& device, int32_t width,
- int32_t height);
-
- // submit a request to enable the torch mode
- status_t submitTorchEnabledRequest();
-
- // get the smallest surface size of IMPLEMENTATION_DEFINED
- status_t getSmallestSurfaceSize(const camera_info& info, int32_t *width,
- int32_t *height);
-
- // protected by mLock
- status_t hasFlashUnitLocked(const String8& cameraId, bool *hasFlash);
-
- CameraModule *mCameraModule;
- const camera_module_callbacks_t *mCallbacks;
- String8 mCameraId;
- bool mTorchEnabled;
- CameraMetadata *mMetadata;
- // WORKAROUND: will be set to true for HAL v2 devices where
- // setStreamingRequest() needs to be call for torch mode settings to
- // take effect.
- bool mStreaming;
-
- sp<CameraDeviceBase> mDevice;
-
- sp<IGraphicBufferProducer> mProducer;
- sp<IGraphicBufferConsumer> mConsumer;
- sp<GLConsumer> mSurfaceTexture;
- sp<Surface> mSurface;
- int32_t mStreamId;
+ sp<CameraProviderManager> mProviderManager;
Mutex mLock;
};
@@ -177,7 +132,8 @@
*/
class CameraHardwareInterfaceFlashControl : public FlashControlBase {
public:
- CameraHardwareInterfaceFlashControl(CameraModule& cameraModule,
+ CameraHardwareInterfaceFlashControl(
+ sp<CameraProviderManager> manager,
const camera_module_callbacks_t& callbacks);
virtual ~CameraHardwareInterfaceFlashControl();
@@ -209,7 +165,7 @@
// function, keepDeviceOpen is ignored.
status_t hasFlashUnitLocked(const String8& cameraId, bool *hasFlash, bool keepDeviceOpen);
- CameraModule *mCameraModule;
+ sp<CameraProviderManager> mProviderManager;
const camera_module_callbacks_t *mCallbacks;
sp<CameraHardwareInterface> mDevice;
String8 mCameraId;
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 85faac6..c2b71a2 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -32,6 +32,7 @@
#include <android/hardware/ICameraClient.h>
#include <android-base/macros.h>
+#include <android-base/parseint.h>
#include <binder/AppOpsManager.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
@@ -54,6 +55,7 @@
#include <private/android_filesystem_config.h>
#include <system/camera_vendor_tags.h>
#include <system/camera_metadata.h>
+
#include <system/camera.h>
#include "CameraService.h"
@@ -69,7 +71,11 @@
namespace android {
using binder::Status;
-using namespace hardware;
+using hardware::ICamera;
+using hardware::ICameraClient;
+using hardware::ICameraServiceListener;
+using hardware::camera::common::V1_0::CameraDeviceStatus;
+using hardware::camera::common::V1_0::TorchModeStatus;
// ----------------------------------------------------------------------------
// Logging support -- this is for debugging only
@@ -103,9 +109,24 @@
int new_status) {
sp<CameraService> cs = const_cast<CameraService*>(
static_cast<const CameraService*>(callbacks));
+ String8 id = String8::format("%d", camera_id);
- cs->onDeviceStatusChanged(camera_id,
- static_cast<camera_device_status_t>(new_status));
+ CameraDeviceStatus newStatus{CameraDeviceStatus::NOT_PRESENT};
+ switch (new_status) {
+ case CAMERA_DEVICE_STATUS_NOT_PRESENT:
+ newStatus = CameraDeviceStatus::NOT_PRESENT;
+ break;
+ case CAMERA_DEVICE_STATUS_PRESENT:
+ newStatus = CameraDeviceStatus::PRESENT;
+ break;
+ case CAMERA_DEVICE_STATUS_ENUMERATING:
+ newStatus = CameraDeviceStatus::ENUMERATING;
+ break;
+ default:
+ ALOGW("Unknown device status change to %d", new_status);
+ break;
+ }
+ cs->onDeviceStatusChanged(id, newStatus);
}
static void torch_mode_status_change(
@@ -119,16 +140,16 @@
sp<CameraService> cs = const_cast<CameraService*>(
static_cast<const CameraService*>(callbacks));
- int32_t status;
+ TorchModeStatus status;
switch (new_status) {
case TORCH_MODE_STATUS_NOT_AVAILABLE:
- status = ICameraServiceListener::TORCH_STATUS_NOT_AVAILABLE;
+ status = TorchModeStatus::NOT_AVAILABLE;
break;
case TORCH_MODE_STATUS_AVAILABLE_OFF:
- status = ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF;
+ status = TorchModeStatus::AVAILABLE_OFF;
break;
case TORCH_MODE_STATUS_AVAILABLE_ON:
- status = ICameraServiceListener::TORCH_STATUS_AVAILABLE_ON;
+ status = TorchModeStatus::AVAILABLE_ON;
break;
default:
ALOGE("Unknown torch status %d", new_status);
@@ -143,16 +164,11 @@
// ----------------------------------------------------------------------------
-// This is ugly and only safe if we never re-create the CameraService, but
-// should be ok for now.
-static CameraService *gCameraService;
-
CameraService::CameraService() :
mEventLog(DEFAULT_EVENT_LOG_LENGTH),
mNumberOfCameras(0), mNumberOfNormalCameras(0),
- mSoundRef(0), mModule(nullptr) {
+ mSoundRef(0), mInitialized(false) {
ALOGI("CameraService started (pid=%d)", getpid());
- gCameraService = this;
this->camera_device_status_change = android::camera_device_status_change;
this->torch_mode_status_change = android::torch_mode_status_change;
@@ -171,112 +187,93 @@
notifier.noteResetCamera();
notifier.noteResetFlashlight();
- camera_module_t *rawModule;
- int err = hw_get_module(CAMERA_HARDWARE_MODULE_ID,
- (const hw_module_t **)&rawModule);
- if (err < 0) {
- ALOGE("Could not load camera HAL module: %d (%s)", err, strerror(-err));
- logServiceError("Could not load camera HAL module", err);
- return;
- }
+ status_t res = INVALID_OPERATION;
- mModule = new CameraModule(rawModule);
- err = mModule->init();
- if (err != OK) {
- ALOGE("Could not initialize camera HAL module: %d (%s)", err,
- strerror(-err));
- logServiceError("Could not initialize camera HAL module", err);
-
- delete mModule;
- mModule = nullptr;
- return;
- }
- ALOGI("Loaded \"%s\" camera module", mModule->getModuleName());
-
- mNumberOfCameras = mModule->getNumberOfCameras();
- mNumberOfNormalCameras = mNumberOfCameras;
-
- // Setup vendor tags before we call get_camera_info the first time
- // because HAL might need to setup static vendor keys in get_camera_info
- VendorTagDescriptor::clearGlobalVendorTagDescriptor();
- if (mModule->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_2) {
- setUpVendorTags();
- }
-
- mFlashlight = new CameraFlashlight(*mModule, *this);
- status_t res = mFlashlight->findFlashUnits();
- if (res) {
- // impossible because we haven't open any camera devices.
- ALOGE("Failed to find flash units.");
- }
-
- int latestStrangeCameraId = INT_MAX;
- for (int i = 0; i < mNumberOfCameras; i++) {
- String8 cameraId = String8::format("%d", i);
-
- // Get camera info
-
- struct camera_info info;
- bool haveInfo = true;
- status_t rc = mModule->getCameraInfo(i, &info);
- if (rc != NO_ERROR) {
- ALOGE("%s: Received error loading camera info for device %d, cost and"
- " conflicting devices fields set to defaults for this device.",
- __FUNCTION__, i);
- haveInfo = false;
- }
-
- // Check for backwards-compatibility support
- if (haveInfo) {
- if (checkCameraCapabilities(i, info, &latestStrangeCameraId) != OK) {
- delete mModule;
- mModule = nullptr;
- return;
- }
- }
-
- // Defaults to use for cost and conflicting devices
- int cost = 100;
- char** conflicting_devices = nullptr;
- size_t conflicting_devices_length = 0;
-
- // If using post-2.4 module version, query the cost + conflicting devices from the HAL
- if (mModule->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_4 && haveInfo) {
- cost = info.resource_cost;
- conflicting_devices = info.conflicting_devices;
- conflicting_devices_length = info.conflicting_devices_length;
- }
-
- std::set<String8> conflicting;
- for (size_t i = 0; i < conflicting_devices_length; i++) {
- conflicting.emplace(String8(conflicting_devices[i]));
- }
-
- // Initialize state for each camera device
- {
- Mutex::Autolock lock(mCameraStatesLock);
- mCameraStates.emplace(cameraId, std::make_shared<CameraState>(cameraId, cost,
- conflicting));
- }
-
- if (mFlashlight->hasFlashUnit(cameraId)) {
- mTorchStatusMap.add(cameraId,
- ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF);
- }
- }
-
- if (mModule->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_1) {
- mModule->setCallbacks(this);
+ res = enumerateProviders();
+ if (res == OK) {
+ mInitialized = true;
}
CameraService::pingCameraServiceProxy();
}
+status_t CameraService::enumerateProviders() {
+ status_t res;
+ Mutex::Autolock l(mServiceLock);
+
+ if (nullptr == mCameraProviderManager.get()) {
+ mCameraProviderManager = new CameraProviderManager();
+ res = mCameraProviderManager->initialize(this);
+ if (res != OK) {
+ ALOGE("%s: Unable to initialize camera provider manager: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+ }
+
+ mNumberOfCameras = mCameraProviderManager->getCameraCount();
+ mNumberOfNormalCameras =
+ mCameraProviderManager->getAPI1CompatibleCameraCount();
+
+ // Setup vendor tags before we call get_camera_info the first time
+ // because HAL might need to setup static vendor keys in get_camera_info
+ // TODO: maybe put this into CameraProviderManager::initialize()?
+ mCameraProviderManager->setUpVendorTags();
+
+ if (nullptr == mFlashlight.get()) {
+ mFlashlight = new CameraFlashlight(mCameraProviderManager, this);
+ }
+
+ res = mFlashlight->findFlashUnits();
+ if (res != OK) {
+ ALOGE("Failed to enumerate flash units: %s (%d)", strerror(-res), res);
+ }
+
+ for (auto& cameraId : mCameraProviderManager->getCameraDeviceIds()) {
+ String8 id8 = String8(cameraId.c_str());
+ {
+ Mutex::Autolock lock(mCameraStatesLock);
+ auto iter = mCameraStates.find(id8);
+ if (iter != mCameraStates.end()) {
+ continue;
+ }
+ }
+
+ hardware::camera::common::V1_0::CameraResourceCost cost;
+ res = mCameraProviderManager->getResourceCost(cameraId, &cost);
+ if (res != OK) {
+ ALOGE("Failed to query device resource cost: %s (%d)", strerror(-res), res);
+ continue;
+ }
+ std::set<String8> conflicting;
+ for (size_t i = 0; i < cost.conflictingDevices.size(); i++) {
+ conflicting.emplace(String8(cost.conflictingDevices[i].c_str()));
+ }
+
+ {
+ Mutex::Autolock lock(mCameraStatesLock);
+ mCameraStates.emplace(id8,
+ std::make_shared<CameraState>(id8, cost.resourceCost, conflicting));
+ }
+
+ onDeviceStatusChanged(id8, CameraDeviceStatus::PRESENT);
+
+ if (mFlashlight->hasFlashUnit(id8)) {
+ mTorchStatusMap.add(id8, TorchModeStatus::AVAILABLE_OFF);
+ }
+ }
+
+ return OK;
+}
+
sp<ICameraServiceProxy> CameraService::getCameraServiceProxy() {
sp<ICameraServiceProxy> proxyBinder = nullptr;
#ifndef __BRILLO__
sp<IServiceManager> sm = defaultServiceManager();
- sp<IBinder> binder = sm->getService(String16("media.camera.proxy"));
+ // Use checkService because cameraserver normally starts before the
+ // system server and the proxy service. So the long timeout that getService
+ // has before giving up is inappropriate.
+ sp<IBinder> binder = sm->checkService(String16("media.camera.proxy"));
if (binder != nullptr) {
proxyBinder = interface_cast<ICameraServiceProxy>(binder);
}
@@ -291,35 +288,40 @@
}
CameraService::~CameraService() {
- if (mModule) {
- delete mModule;
- mModule = nullptr;
- }
VendorTagDescriptor::clearGlobalVendorTagDescriptor();
- gCameraService = nullptr;
}
-void CameraService::onDeviceStatusChanged(int cameraId,
- camera_device_status_t newStatus) {
- ALOGI("%s: Status changed for cameraId=%d, newStatus=%d", __FUNCTION__,
- cameraId, newStatus);
+void CameraService::onNewProviderRegistered() {
+ enumerateProviders();
+}
- String8 id = String8::format("%d", cameraId);
+void CameraService::onDeviceStatusChanged(const String8& id,
+ CameraDeviceStatus newHalStatus) {
+ ALOGI("%s: Status changed for cameraId=%s, newStatus=%d", __FUNCTION__,
+ id.string(), newHalStatus);
+
+ StatusInternal newStatus = mapToInternal(newHalStatus);
+
std::shared_ptr<CameraState> state = getCameraState(id);
if (state == nullptr) {
- ALOGE("%s: Bad camera ID %d", __FUNCTION__, cameraId);
+ if (newStatus == StatusInternal::PRESENT) {
+ ALOGW("%s: Unknown camera ID %s, probably newly registered?",
+ __FUNCTION__, id.string());
+ } else {
+ ALOGE("%s: Bad camera ID %s", __FUNCTION__, id.string());
+ }
return;
}
- int32_t oldStatus = state->getStatus();
+ StatusInternal oldStatus = state->getStatus();
- if (oldStatus == static_cast<int32_t>(newStatus)) {
+ if (oldStatus == newStatus) {
ALOGE("%s: State transition to the same status %#x not allowed", __FUNCTION__, newStatus);
return;
}
- if (newStatus == CAMERA_DEVICE_STATUS_NOT_PRESENT) {
+ if (newStatus == StatusInternal::NOT_PRESENT) {
logDeviceRemoved(id, String8::format("Device status changed from %d to %d", oldStatus,
newStatus));
sp<BasicClient> clientToDisconnect;
@@ -329,25 +331,23 @@
// Set the device status to NOT_PRESENT, clients will no longer be able to connect
// to this device until the status changes
- updateStatus(ICameraServiceListener::STATUS_NOT_PRESENT, id);
+ updateStatus(StatusInternal::NOT_PRESENT, id);
// Remove cached shim parameters
state->setShimParams(CameraParameters());
- // Remove the client from the list of active clients
+ // Remove the client from the list of active clients, if there is one
clientToDisconnect = removeClientLocked(id);
+ }
+ // Disconnect client
+ if (clientToDisconnect.get() != nullptr) {
+ ALOGI("%s: Client for camera ID %s evicted due to device status change from HAL",
+ __FUNCTION__, id.string());
// Notify the client of disconnection
clientToDisconnect->notifyError(
hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
CaptureResultExtras{});
- }
-
- ALOGI("%s: Client for camera ID %s evicted due to device status change from HAL",
- __FUNCTION__, id.string());
-
- // Disconnect client
- if (clientToDisconnect.get() != nullptr) {
// Ensure not in binder RPC so client disconnect PID checks work correctly
LOG_ALWAYS_FATAL_IF(getCallingPid() != getpid(),
"onDeviceStatusChanged must be called from the camera service process!");
@@ -355,27 +355,27 @@
}
} else {
- if (oldStatus == ICameraServiceListener::STATUS_NOT_PRESENT) {
+ if (oldStatus == StatusInternal::NOT_PRESENT) {
logDeviceAdded(id, String8::format("Device status changed from %d to %d", oldStatus,
newStatus));
}
- updateStatus(static_cast<int32_t>(newStatus), id);
+ updateStatus(newStatus, id);
}
}
void CameraService::onTorchStatusChanged(const String8& cameraId,
- int32_t newStatus) {
+ TorchModeStatus newStatus) {
Mutex::Autolock al(mTorchStatusMutex);
onTorchStatusChangedLocked(cameraId, newStatus);
}
void CameraService::onTorchStatusChangedLocked(const String8& cameraId,
- int32_t newStatus) {
+ TorchModeStatus newStatus) {
ALOGI("%s: Torch status changed for cameraId=%s, newStatus=%d",
__FUNCTION__, cameraId.string(), newStatus);
- int32_t status;
+ TorchModeStatus status;
status_t res = getTorchStatusLocked(cameraId, &status);
if (res) {
ALOGE("%s: cannot get torch status of camera %s: %s (%d)",
@@ -403,16 +403,16 @@
BatteryNotifier& notifier(BatteryNotifier::getInstance());
if (oldUid != newUid) {
// If the UID has changed, log the status and update current UID in mTorchUidMap
- if (status == ICameraServiceListener::TORCH_STATUS_AVAILABLE_ON) {
+ if (status == TorchModeStatus::AVAILABLE_ON) {
notifier.noteFlashlightOff(cameraId, oldUid);
}
- if (newStatus == ICameraServiceListener::TORCH_STATUS_AVAILABLE_ON) {
+ if (newStatus == TorchModeStatus::AVAILABLE_ON) {
notifier.noteFlashlightOn(cameraId, newUid);
}
iter->second.second = newUid;
} else {
// If the UID has not changed, log the status
- if (newStatus == ICameraServiceListener::TORCH_STATUS_AVAILABLE_ON) {
+ if (newStatus == TorchModeStatus::AVAILABLE_ON) {
notifier.noteFlashlightOn(cameraId, oldUid);
} else {
notifier.noteFlashlightOff(cameraId, oldUid);
@@ -424,13 +424,14 @@
{
Mutex::Autolock lock(mStatusListenerLock);
for (auto& i : mListenerList) {
- i->onTorchStatusChanged(newStatus, String16{cameraId});
+ i->onTorchStatusChanged(mapToInterface(newStatus), String16{cameraId});
}
}
}
Status CameraService::getNumberOfCameras(int32_t type, int32_t* numCameras) {
ATRACE_CALL();
+ Mutex::Autolock l(mServiceLock);
switch (type) {
case CAMERA_TYPE_BACKWARD_COMPATIBLE:
*numCameras = mNumberOfNormalCameras;
@@ -450,7 +451,9 @@
Status CameraService::getCameraInfo(int cameraId,
CameraInfo* cameraInfo) {
ATRACE_CALL();
- if (!mModule) {
+ Mutex::Autolock l(mServiceLock);
+
+ if (!mInitialized) {
return STATUS_ERROR(ERROR_DISCONNECTED,
"Camera subsystem is not available");
}
@@ -460,170 +463,48 @@
"CameraId is not valid");
}
- struct camera_info info;
- Status rc = filterGetInfoErrorCode(
- mModule->getCameraInfo(cameraId, &info));
-
- if (rc.isOk()) {
- cameraInfo->facing = info.facing;
- cameraInfo->orientation = info.orientation;
- // CameraInfo is for android.hardware.Camera which does not
- // support external camera facing. The closest approximation would be
- // front camera.
- if (cameraInfo->facing == CAMERA_FACING_EXTERNAL) {
- cameraInfo->facing = CAMERA_FACING_FRONT;
- }
+ Status ret = Status::ok();
+ status_t err = mCameraProviderManager->getCameraInfo(std::to_string(cameraId), cameraInfo);
+ if (err != OK) {
+ ret = STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+ "Error retrieving camera info from device %d: %s (%d)", cameraId,
+ strerror(-err), err);
}
- return rc;
+
+ return ret;
}
int CameraService::cameraIdToInt(const String8& cameraId) {
- errno = 0;
- size_t pos = 0;
- int ret = stoi(std::string{cameraId.string()}, &pos);
- if (errno != 0 || pos != cameraId.size()) {
+ int id;
+ bool success = base::ParseInt(cameraId.string(), &id, 0);
+ if (!success) {
return -1;
}
- return ret;
+ return id;
}
-Status CameraService::generateShimMetadata(int cameraId, /*out*/CameraMetadata* cameraInfo) {
- ATRACE_CALL();
-
- Status ret = Status::ok();
-
- struct CameraInfo info;
- if (!(ret = getCameraInfo(cameraId, &info)).isOk()) {
- return ret;
- }
-
- CameraMetadata shimInfo;
- int32_t orientation = static_cast<int32_t>(info.orientation);
- status_t rc;
- if ((rc = shimInfo.update(ANDROID_SENSOR_ORIENTATION, &orientation, 1)) != OK) {
- return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
- "Error updating metadata: %d (%s)", rc, strerror(-rc));
- }
-
- uint8_t facing = (info.facing == CAMERA_FACING_FRONT) ?
- ANDROID_LENS_FACING_FRONT : ANDROID_LENS_FACING_BACK;
- if ((rc = shimInfo.update(ANDROID_LENS_FACING, &facing, 1)) != OK) {
- return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
- "Error updating metadata: %d (%s)", rc, strerror(-rc));
- }
-
- CameraParameters shimParams;
- if (!(ret = getLegacyParametersLazy(cameraId, /*out*/&shimParams)).isOk()) {
- // Error logged by callee
- return ret;
- }
-
- Vector<Size> sizes;
- Vector<Size> jpegSizes;
- Vector<int32_t> formats;
- {
- shimParams.getSupportedPreviewSizes(/*out*/sizes);
- shimParams.getSupportedPreviewFormats(/*out*/formats);
- shimParams.getSupportedPictureSizes(/*out*/jpegSizes);
- }
-
- // Always include IMPLEMENTATION_DEFINED
- formats.add(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED);
-
- const size_t INTS_PER_CONFIG = 4;
-
- // Build available stream configurations metadata
- size_t streamConfigSize = (sizes.size() * formats.size() + jpegSizes.size()) * INTS_PER_CONFIG;
-
- Vector<int32_t> streamConfigs;
- streamConfigs.setCapacity(streamConfigSize);
-
- for (size_t i = 0; i < formats.size(); ++i) {
- for (size_t j = 0; j < sizes.size(); ++j) {
- streamConfigs.add(formats[i]);
- streamConfigs.add(sizes[j].width);
- streamConfigs.add(sizes[j].height);
- streamConfigs.add(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT);
- }
- }
-
- for (size_t i = 0; i < jpegSizes.size(); ++i) {
- streamConfigs.add(HAL_PIXEL_FORMAT_BLOB);
- streamConfigs.add(jpegSizes[i].width);
- streamConfigs.add(jpegSizes[i].height);
- streamConfigs.add(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT);
- }
-
- if ((rc = shimInfo.update(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
- streamConfigs.array(), streamConfigSize)) != OK) {
- return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
- "Error updating metadata: %d (%s)", rc, strerror(-rc));
- }
-
- int64_t fakeMinFrames[0];
- // TODO: Fixme, don't fake min frame durations.
- if ((rc = shimInfo.update(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS,
- fakeMinFrames, 0)) != OK) {
- return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
- "Error updating metadata: %d (%s)", rc, strerror(-rc));
- }
-
- int64_t fakeStalls[0];
- // TODO: Fixme, don't fake stall durations.
- if ((rc = shimInfo.update(ANDROID_SCALER_AVAILABLE_STALL_DURATIONS,
- fakeStalls, 0)) != OK) {
- return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
- "Error updating metadata: %d (%s)", rc, strerror(-rc));
- }
-
- *cameraInfo = shimInfo;
- return ret;
-}
-
-Status CameraService::getCameraCharacteristics(int cameraId,
- CameraMetadata* cameraInfo) {
+Status CameraService::getCameraCharacteristics(const String16& cameraId,
+ CameraMetadata* cameraInfo) {
ATRACE_CALL();
if (!cameraInfo) {
ALOGE("%s: cameraInfo is NULL", __FUNCTION__);
return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, "cameraInfo is NULL");
}
- if (!mModule) {
- ALOGE("%s: camera hardware module doesn't exist", __FUNCTION__);
+ if (!mInitialized) {
+ ALOGE("%s: Camera HAL couldn't be initialized", __FUNCTION__);
return STATUS_ERROR(ERROR_DISCONNECTED,
"Camera subsystem is not available");;
}
- if (cameraId < 0 || cameraId >= mNumberOfCameras) {
- ALOGE("%s: Invalid camera id: %d", __FUNCTION__, cameraId);
- return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
- "Invalid camera id: %d", cameraId);
- }
+ Status ret{};
- int facing;
- Status ret;
- if (mModule->getModuleApiVersion() < CAMERA_MODULE_API_VERSION_2_0 ||
- getDeviceVersion(cameraId, &facing) < CAMERA_DEVICE_API_VERSION_3_0) {
- /**
- * Backwards compatibility mode for old HALs:
- * - Convert CameraInfo into static CameraMetadata properties.
- * - Retrieve cached CameraParameters for this camera. If none exist,
- * attempt to open CameraClient and retrieve the CameraParameters.
- * - Convert cached CameraParameters into static CameraMetadata
- * properties.
- */
- ALOGI("%s: Switching to HAL1 shim implementation...", __FUNCTION__);
-
- ret = generateShimMetadata(cameraId, cameraInfo);
- } else {
- /**
- * Normal HAL 2.1+ codepath.
- */
- struct camera_info info;
- ret = filterGetInfoErrorCode(mModule->getCameraInfo(cameraId, &info));
- if (ret.isOk()) {
- *cameraInfo = info.static_camera_characteristics;
- }
+ status_t res = mCameraProviderManager->getCameraCharacteristics(
+ String8(cameraId).string(), cameraInfo);
+ if (res != OK) {
+ return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION, "Unable to retrieve camera "
+ "characteristics for device %s: %s (%d)", String8(cameraId).string(),
+ strerror(-res), res);
}
return ret;
@@ -644,28 +525,12 @@
return String8(formattedTime);
}
-int CameraService::getCameraPriorityFromProcState(int procState) {
- // Find the priority for the camera usage based on the process state. Higher priority clients
- // win for evictions.
- if (procState < 0) {
- ALOGE("%s: Received invalid process state %d from ActivityManagerService!", __FUNCTION__,
- procState);
- return -1;
- }
- // Treat sleeping TOP processes the same as regular TOP processes, for
- // access priority. This is important for lock-screen camera launch scenarios
- if (procState == PROCESS_STATE_TOP_SLEEPING) {
- procState = PROCESS_STATE_TOP;
- }
- return INT_MAX - procState;
-}
-
Status CameraService::getCameraVendorTagDescriptor(
/*out*/
hardware::camera2::params::VendorTagDescriptor* desc) {
ATRACE_CALL();
- if (!mModule) {
- ALOGE("%s: camera hardware module doesn't exist", __FUNCTION__);
+ if (!mInitialized) {
+ ALOGE("%s: Camera HAL couldn't be initialized", __FUNCTION__);
return STATUS_ERROR(ERROR_DISCONNECTED, "Camera subsystem not available");
}
sp<VendorTagDescriptor> globalDescriptor = VendorTagDescriptor::getGlobalVendorTagDescriptor();
@@ -675,21 +540,38 @@
return Status::ok();
}
-int CameraService::getDeviceVersion(int cameraId, int* facing) {
+Status CameraService::getCameraVendorTagCache(
+ /*out*/ hardware::camera2::params::VendorTagDescriptorCache* cache) {
ATRACE_CALL();
- struct camera_info info;
- if (mModule->getCameraInfo(cameraId, &info) != OK) {
- return -1;
+ if (!mInitialized) {
+ ALOGE("%s: Camera HAL couldn't be initialized", __FUNCTION__);
+ return STATUS_ERROR(ERROR_DISCONNECTED,
+ "Camera subsystem not available");
}
-
- int deviceVersion;
- if (mModule->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_0) {
- deviceVersion = info.device_version;
- } else {
- deviceVersion = CAMERA_DEVICE_API_VERSION_1_0;
+ sp<VendorTagDescriptorCache> globalCache =
+ VendorTagDescriptorCache::getGlobalVendorTagCache();
+ if (globalCache != nullptr) {
+ *cache = *(globalCache.get());
}
+ return Status::ok();
+}
+int CameraService::getDeviceVersion(const String8& cameraId, int* facing) {
+ ATRACE_CALL();
+
+ int deviceVersion = 0;
+
+ status_t res;
+ hardware::hidl_version maxVersion{0,0};
+ res = mCameraProviderManager->getHighestSupportedVersion(cameraId.string(),
+ &maxVersion);
+ if (res != OK) return -1;
+ deviceVersion = HARDWARE_DEVICE_API_VERSION(maxVersion.get_major(), maxVersion.get_minor());
+
+ hardware::CameraInfo info;
if (facing) {
+ res = mCameraProviderManager->getCameraInfo(cameraId.string(), &info);
+ if (res != OK) return -1;
*facing = info.facing;
}
@@ -700,10 +582,10 @@
switch(err) {
case NO_ERROR:
return Status::ok();
- case -EINVAL:
+ case BAD_VALUE:
return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT,
"CameraId is not valid for HAL module");
- case -ENODEV:
+ case NO_INIT:
return STATUS_ERROR(ERROR_DISCONNECTED,
"Camera device not available");
default:
@@ -713,45 +595,8 @@
}
}
-bool CameraService::setUpVendorTags() {
- ATRACE_CALL();
- vendor_tag_ops_t vOps = vendor_tag_ops_t();
-
- // Check if vendor operations have been implemented
- if (!mModule->isVendorTagDefined()) {
- ALOGI("%s: No vendor tags defined for this device.", __FUNCTION__);
- return false;
- }
-
- mModule->getVendorTagOps(&vOps);
-
- // Ensure all vendor operations are present
- if (vOps.get_tag_count == NULL || vOps.get_all_tags == NULL ||
- vOps.get_section_name == NULL || vOps.get_tag_name == NULL ||
- vOps.get_tag_type == NULL) {
- ALOGE("%s: Vendor tag operations not fully defined. Ignoring definitions."
- , __FUNCTION__);
- return false;
- }
-
- // Read all vendor tag definitions into a descriptor
- sp<VendorTagDescriptor> desc;
- status_t res;
- if ((res = VendorTagDescriptor::createDescriptorFromOps(&vOps, /*out*/desc))
- != OK) {
- ALOGE("%s: Could not generate descriptor from vendor tag operations,"
- "received error %s (%d). Camera clients will not be able to use"
- "vendor tags", __FUNCTION__, strerror(res), res);
- return false;
- }
-
- // Set the global descriptor to use with camera metadata
- VendorTagDescriptor::setAsGlobalVendorTagDescriptor(desc);
- return true;
-}
-
Status CameraService::makeClient(const sp<CameraService>& cameraService,
- const sp<IInterface>& cameraCb, const String16& packageName, int cameraId,
+ const sp<IInterface>& cameraCb, const String16& packageName, const String8& cameraId,
int facing, int clientPid, uid_t clientUid, int servicePid, bool legacyMode,
int halVersion, int deviceVersion, apiLevel effectiveApiLevel,
/*out*/sp<BasicClient>* client) {
@@ -763,13 +608,13 @@
case CAMERA_DEVICE_API_VERSION_1_0:
if (effectiveApiLevel == API_1) { // Camera1 API route
sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
- *client = new CameraClient(cameraService, tmp, packageName, cameraId, facing,
- clientPid, clientUid, getpid(), legacyMode);
+ *client = new CameraClient(cameraService, tmp, packageName, cameraIdToInt(cameraId),
+ facing, clientPid, clientUid, getpid(), legacyMode);
} else { // Camera2 API route
ALOGW("Camera using old HAL version: %d", deviceVersion);
return STATUS_ERROR_FMT(ERROR_DEPRECATED_HAL,
- "Camera device \"%d\" HAL version %d does not support camera2 API",
- cameraId, deviceVersion);
+ "Camera device \"%s\" HAL version %d does not support camera2 API",
+ cameraId.string(), deviceVersion);
}
break;
case CAMERA_DEVICE_API_VERSION_3_0:
@@ -779,8 +624,8 @@
case CAMERA_DEVICE_API_VERSION_3_4:
if (effectiveApiLevel == API_1) { // Camera1 API route
sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
- *client = new Camera2Client(cameraService, tmp, packageName, cameraId, facing,
- clientPid, clientUid, servicePid, legacyMode);
+ *client = new Camera2Client(cameraService, tmp, packageName, cameraIdToInt(cameraId),
+ facing, clientPid, clientUid, servicePid, legacyMode);
} else { // Camera2 API route
sp<hardware::camera2::ICameraDeviceCallbacks> tmp =
static_cast<hardware::camera2::ICameraDeviceCallbacks*>(cameraCb.get());
@@ -792,8 +637,8 @@
// Should not be reachable
ALOGE("Unknown camera device HAL version: %d", deviceVersion);
return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
- "Camera device \"%d\" has unknown HAL version %d",
- cameraId, deviceVersion);
+ "Camera device \"%s\" has unknown HAL version %d",
+ cameraId.string(), deviceVersion);
}
} else {
// A particular HAL version is requested by caller. Create CameraClient
@@ -802,16 +647,16 @@
halVersion == CAMERA_DEVICE_API_VERSION_1_0) {
// Only support higher HAL version device opened as HAL1.0 device.
sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
- *client = new CameraClient(cameraService, tmp, packageName, cameraId, facing,
- clientPid, clientUid, servicePid, legacyMode);
+ *client = new CameraClient(cameraService, tmp, packageName, cameraIdToInt(cameraId),
+ facing, clientPid, clientUid, servicePid, legacyMode);
} else {
// Other combinations (e.g. HAL3.x open as HAL2.x) are not supported yet.
ALOGE("Invalid camera HAL version %x: HAL %x device can only be"
" opened as HAL %x device", halVersion, deviceVersion,
CAMERA_DEVICE_API_VERSION_1_0);
return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
- "Camera device \"%d\" (HAL version %d) cannot be opened as HAL version %d",
- cameraId, deviceVersion, halVersion);
+ "Camera device \"%s\" (HAL version %d) cannot be opened as HAL version %d",
+ cameraId.string(), deviceVersion, halVersion);
}
}
return Status::ok();
@@ -831,6 +676,66 @@
return s;
}
+int32_t CameraService::mapToInterface(TorchModeStatus status) {
+ int32_t serviceStatus = ICameraServiceListener::TORCH_STATUS_NOT_AVAILABLE;
+ switch (status) {
+ case TorchModeStatus::NOT_AVAILABLE:
+ serviceStatus = ICameraServiceListener::TORCH_STATUS_NOT_AVAILABLE;
+ break;
+ case TorchModeStatus::AVAILABLE_OFF:
+ serviceStatus = ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF;
+ break;
+ case TorchModeStatus::AVAILABLE_ON:
+ serviceStatus = ICameraServiceListener::TORCH_STATUS_AVAILABLE_ON;
+ break;
+ default:
+ ALOGW("Unknown new flash status: %d", status);
+ }
+ return serviceStatus;
+}
+
+CameraService::StatusInternal CameraService::mapToInternal(CameraDeviceStatus status) {
+ StatusInternal serviceStatus = StatusInternal::NOT_PRESENT;
+ switch (status) {
+ case CameraDeviceStatus::NOT_PRESENT:
+ serviceStatus = StatusInternal::NOT_PRESENT;
+ break;
+ case CameraDeviceStatus::PRESENT:
+ serviceStatus = StatusInternal::PRESENT;
+ break;
+ case CameraDeviceStatus::ENUMERATING:
+ serviceStatus = StatusInternal::ENUMERATING;
+ break;
+ default:
+ ALOGW("Unknown new HAL device status: %d", status);
+ }
+ return serviceStatus;
+}
+
+int32_t CameraService::mapToInterface(StatusInternal status) {
+ int32_t serviceStatus = ICameraServiceListener::STATUS_NOT_PRESENT;
+ switch (status) {
+ case StatusInternal::NOT_PRESENT:
+ serviceStatus = ICameraServiceListener::STATUS_NOT_PRESENT;
+ break;
+ case StatusInternal::PRESENT:
+ serviceStatus = ICameraServiceListener::STATUS_PRESENT;
+ break;
+ case StatusInternal::ENUMERATING:
+ serviceStatus = ICameraServiceListener::STATUS_ENUMERATING;
+ break;
+ case StatusInternal::NOT_AVAILABLE:
+ serviceStatus = ICameraServiceListener::STATUS_NOT_AVAILABLE;
+ break;
+ case StatusInternal::UNKNOWN:
+ serviceStatus = ICameraServiceListener::STATUS_UNKNOWN;
+ break;
+ default:
+ ALOGW("Unknown new internal device status: %d", status);
+ }
+ return serviceStatus;
+}
+
Status CameraService::initializeShimMetadata(int cameraId) {
int uid = getCallingUid();
@@ -942,7 +847,7 @@
int callingPid = getCallingPid();
- if (!mModule) {
+ if (!mInitialized) {
ALOGE("CameraService::connect X (PID %d) rejected (camera HAL module not loaded)",
callingPid);
return STATUS_ERROR_FMT(ERROR_DISCONNECTED,
@@ -1042,12 +947,12 @@
return -ENODEV;
}
- int32_t currentStatus = cameraState->getStatus();
- if (currentStatus == ICameraServiceListener::STATUS_NOT_PRESENT) {
+ StatusInternal currentStatus = cameraState->getStatus();
+ if (currentStatus == StatusInternal::NOT_PRESENT) {
ALOGE("CameraService::connect X (PID %d) rejected (camera %s is not connected)",
callingPid, cameraId.string());
return -ENODEV;
- } else if (currentStatus == ICameraServiceListener::STATUS_ENUMERATING) {
+ } else if (currentStatus == StatusInternal::ENUMERATING) {
ALOGE("CameraService::connect X (PID %d) rejected, (camera %s is initializing)",
callingPid, cameraId.string());
return -EBUSY;
@@ -1123,20 +1028,24 @@
std::vector<int> ownerPids(mActiveClientManager.getAllOwners());
ownerPids.push_back(clientPid);
- // Use the value +PROCESS_STATE_NONEXISTENT, to avoid taking
- // address of PROCESS_STATE_NONEXISTENT as a reference argument
- // for the vector constructor. PROCESS_STATE_NONEXISTENT does
- // not have an out-of-class definition.
- std::vector<int> priorities(ownerPids.size(), +PROCESS_STATE_NONEXISTENT);
+ std::vector<int> priorityScores(ownerPids.size());
+ std::vector<int> states(ownerPids.size());
- // Get priorites of all active PIDs
- ProcessInfoService::getProcessStatesFromPids(ownerPids.size(), &ownerPids[0],
- /*out*/&priorities[0]);
+ // Get priority scores of all active PIDs
+ status_t err = ProcessInfoService::getProcessStatesScoresFromPids(
+ ownerPids.size(), &ownerPids[0], /*out*/&states[0],
+ /*out*/&priorityScores[0]);
+ if (err != OK) {
+ ALOGE("%s: Priority score query failed: %d",
+ __FUNCTION__, err);
+ return err;
+ }
// Update all active clients' priorities
- std::map<int,int> pidToPriorityMap;
+ std::map<int,resource_policy::ClientPriority> pidToPriorityMap;
for (size_t i = 0; i < ownerPids.size() - 1; i++) {
- pidToPriorityMap.emplace(ownerPids[i], getCameraPriorityFromProcState(priorities[i]));
+ pidToPriorityMap.emplace(ownerPids[i],
+ resource_policy::ClientPriority(priorityScores[i], states[i]));
}
mActiveClientManager.updatePriorities(pidToPriorityMap);
@@ -1153,7 +1062,9 @@
clientDescriptor = CameraClientManager::makeClientDescriptor(cameraId,
sp<BasicClient>{nullptr}, static_cast<int32_t>(state->getCost()),
state->getConflicting(),
- getCameraPriorityFromProcState(priorities[priorities.size() - 1]), clientPid);
+ priorityScores[priorityScores.size() - 1],
+ clientPid,
+ states[states.size() - 1]);
// Find clients that would be evicted
auto evicted = mActiveClientManager.wouldEvict(clientDescriptor);
@@ -1170,19 +1081,22 @@
mActiveClientManager.getIncompatibleClients(clientDescriptor);
String8 msg = String8::format("%s : DENIED connect device %s client for package %s "
- "(PID %d, priority %d) due to eviction policy", curTime.string(),
+ "(PID %d, score %d state %d) due to eviction policy", curTime.string(),
cameraId.string(), packageName.string(), clientPid,
- getCameraPriorityFromProcState(priorities[priorities.size() - 1]));
+ priorityScores[priorityScores.size() - 1],
+ states[states.size() - 1]);
for (auto& i : incompatibleClients) {
msg.appendFormat("\n - Blocked by existing device %s client for package %s"
- "(PID %" PRId32 ", priority %" PRId32 ")", i->getKey().string(),
- String8{i->getValue()->getPackageName()}.string(), i->getOwnerId(),
- i->getPriority());
+ "(PID %" PRId32 ", score %" PRId32 ", state %" PRId32 ")",
+ i->getKey().string(),
+ String8{i->getValue()->getPackageName()}.string(),
+ i->getOwnerId(), i->getPriority().getScore(),
+ i->getPriority().getState());
ALOGE(" Conflicts with: Device %s, client package %s (PID %"
- PRId32 ", priority %" PRId32 ")", i->getKey().string(),
+ PRId32 ", score %" PRId32 ", state %" PRId32 ")", i->getKey().string(),
String8{i->getValue()->getPackageName()}.string(), i->getOwnerId(),
- i->getPriority());
+ i->getPriority().getScore(), i->getPriority().getState());
}
// Log the client's attempt
@@ -1210,12 +1124,14 @@
// Log the clients evicted
logEvent(String8::format("EVICT device %s client held by package %s (PID"
- " %" PRId32 ", priority %" PRId32 ")\n - Evicted by device %s client for"
- " package %s (PID %d, priority %" PRId32 ")",
+ " %" PRId32 ", score %" PRId32 ", state %" PRId32 ")\n - Evicted by device %s client for"
+ " package %s (PID %d, score %" PRId32 ", state %" PRId32 ")",
i->getKey().string(), String8{clientSp->getPackageName()}.string(),
- i->getOwnerId(), i->getPriority(), cameraId.string(),
+ i->getOwnerId(), i->getPriority().getScore(),
+ i->getPriority().getState(), cameraId.string(),
packageName.string(), clientPid,
- getCameraPriorityFromProcState(priorities[priorities.size() - 1])));
+ priorityScores[priorityScores.size() - 1],
+ states[states.size() - 1]));
// Notify the client of disconnection
clientSp->notifyError(hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
@@ -1308,23 +1224,6 @@
ATRACE_CALL();
String8 id = String8::format("%d", cameraId);
- int apiVersion = mModule->getModuleApiVersion();
- if (halVersion != CAMERA_HAL_API_VERSION_UNSPECIFIED &&
- apiVersion < CAMERA_MODULE_API_VERSION_2_3) {
- /*
- * Either the HAL version is unspecified in which case this just creates
- * a camera client selected by the latest device version, or
- * it's a particular version in which case the HAL must supported
- * the open_legacy call
- */
- String8 msg = String8::format("Camera HAL module version %x too old for connectLegacy!",
- apiVersion);
- ALOGE("%s: %s",
- __FUNCTION__, msg.string());
- logRejected(id, getCallingPid(), String8(clientPackageName),
- msg);
- return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, msg.string());
- }
Status ret = Status::ok();
sp<Client> client = nullptr;
@@ -1345,7 +1244,7 @@
Status CameraService::connectDevice(
const sp<hardware::camera2::ICameraDeviceCallbacks>& cameraCb,
- int cameraId,
+ const String16& cameraId,
const String16& clientPackageName,
int clientUid,
/*out*/
@@ -1353,7 +1252,7 @@
ATRACE_CALL();
Status ret = Status::ok();
- String8 id = String8::format("%d", cameraId);
+ String8 id = String8(cameraId);
sp<CameraDeviceClient> client = nullptr;
ret = connectHelper<hardware::camera2::ICameraDeviceCallbacks,CameraDeviceClient>(cameraCb, id,
CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageName,
@@ -1371,8 +1270,167 @@
return ret;
}
+template<class CALLBACK, class CLIENT>
+Status CameraService::connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
+ int halVersion, const String16& clientPackageName, int clientUid, int clientPid,
+ apiLevel effectiveApiLevel, bool legacyMode, bool shimUpdateOnly,
+ /*out*/sp<CLIENT>& device) {
+ binder::Status ret = binder::Status::ok();
+
+ String8 clientName8(clientPackageName);
+
+ int originalClientPid = 0;
+
+ ALOGI("CameraService::connect call (PID %d \"%s\", camera ID %s) for HAL version %s and "
+ "Camera API version %d", clientPid, clientName8.string(), cameraId.string(),
+ (halVersion == -1) ? "default" : std::to_string(halVersion).c_str(),
+ static_cast<int>(effectiveApiLevel));
+
+ sp<CLIENT> client = nullptr;
+ {
+ // Acquire mServiceLock and prevent other clients from connecting
+ std::unique_ptr<AutoConditionLock> lock =
+ AutoConditionLock::waitAndAcquire(mServiceLockWrapper, DEFAULT_CONNECT_TIMEOUT_NS);
+
+ if (lock == nullptr) {
+ ALOGE("CameraService::connect (PID %d) rejected (too many other clients connecting)."
+ , clientPid);
+ return STATUS_ERROR_FMT(ERROR_MAX_CAMERAS_IN_USE,
+ "Cannot open camera %s for \"%s\" (PID %d): Too many other clients connecting",
+ cameraId.string(), clientName8.string(), clientPid);
+ }
+
+ // Enforce client permissions and do basic sanity checks
+ if(!(ret = validateConnectLocked(cameraId, clientName8,
+ /*inout*/clientUid, /*inout*/clientPid, /*out*/originalClientPid)).isOk()) {
+ return ret;
+ }
+
+ // Check the shim parameters after acquiring lock, if they have already been updated and
+ // we were doing a shim update, return immediately
+ if (shimUpdateOnly) {
+ auto cameraState = getCameraState(cameraId);
+ if (cameraState != nullptr) {
+ if (!cameraState->getShimParams().isEmpty()) return ret;
+ }
+ }
+
+ status_t err;
+
+ sp<BasicClient> clientTmp = nullptr;
+ std::shared_ptr<resource_policy::ClientDescriptor<String8, sp<BasicClient>>> partial;
+ if ((err = handleEvictionsLocked(cameraId, originalClientPid, effectiveApiLevel,
+ IInterface::asBinder(cameraCb), clientName8, /*out*/&clientTmp,
+ /*out*/&partial)) != NO_ERROR) {
+ switch (err) {
+ case -ENODEV:
+ return STATUS_ERROR_FMT(ERROR_DISCONNECTED,
+ "No camera device with ID \"%s\" currently available",
+ cameraId.string());
+ case -EBUSY:
+ return STATUS_ERROR_FMT(ERROR_CAMERA_IN_USE,
+ "Higher-priority client using camera, ID \"%s\" currently unavailable",
+ cameraId.string());
+ default:
+ return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+ "Unexpected error %s (%d) opening camera \"%s\"",
+ strerror(-err), err, cameraId.string());
+ }
+ }
+
+ if (clientTmp.get() != nullptr) {
+ // Handle special case for API1 MediaRecorder where the existing client is returned
+ device = static_cast<CLIENT*>(clientTmp.get());
+ return ret;
+ }
+
+ // give flashlight a chance to close devices if necessary.
+ mFlashlight->prepareDeviceOpen(cameraId);
+
+ int facing = -1;
+ int deviceVersion = getDeviceVersion(cameraId, /*out*/&facing);
+ if (facing == -1) {
+ ALOGE("%s: Unable to get camera device \"%s\" facing", __FUNCTION__, cameraId.string());
+ return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+ "Unable to get camera device \"%s\" facing", cameraId.string());
+ }
+
+ sp<BasicClient> tmp = nullptr;
+ if(!(ret = makeClient(this, cameraCb, clientPackageName, cameraId, facing, clientPid,
+ clientUid, getpid(), legacyMode, halVersion, deviceVersion, effectiveApiLevel,
+ /*out*/&tmp)).isOk()) {
+ return ret;
+ }
+ client = static_cast<CLIENT*>(tmp.get());
+
+ LOG_ALWAYS_FATAL_IF(client.get() == nullptr, "%s: CameraService in invalid state",
+ __FUNCTION__);
+
+ err = client->initialize(mCameraProviderManager);
+ if (err != OK) {
+ ALOGE("%s: Could not initialize client from HAL.", __FUNCTION__);
+ // Errors could be from the HAL module open call or from AppOpsManager
+ switch(err) {
+ case BAD_VALUE:
+ return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+ "Illegal argument to HAL module for camera \"%s\"", cameraId.string());
+ case -EBUSY:
+ return STATUS_ERROR_FMT(ERROR_CAMERA_IN_USE,
+ "Camera \"%s\" is already open", cameraId.string());
+ case -EUSERS:
+ return STATUS_ERROR_FMT(ERROR_MAX_CAMERAS_IN_USE,
+ "Too many cameras already open, cannot open camera \"%s\"",
+ cameraId.string());
+ case PERMISSION_DENIED:
+ return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
+ "No permission to open camera \"%s\"", cameraId.string());
+ case -EACCES:
+ return STATUS_ERROR_FMT(ERROR_DISABLED,
+ "Camera \"%s\" disabled by policy", cameraId.string());
+ case -ENODEV:
+ default:
+ return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+ "Failed to initialize camera \"%s\": %s (%d)", cameraId.string(),
+ strerror(-err), err);
+ }
+ }
+
+ // Update shim paremeters for legacy clients
+ if (effectiveApiLevel == API_1) {
+ // Assume we have always received a Client subclass for API1
+ sp<Client> shimClient = reinterpret_cast<Client*>(client.get());
+ String8 rawParams = shimClient->getParameters();
+ CameraParameters params(rawParams);
+
+ auto cameraState = getCameraState(cameraId);
+ if (cameraState != nullptr) {
+ cameraState->setShimParams(params);
+ } else {
+ ALOGE("%s: Cannot update shim parameters for camera %s, no such device exists.",
+ __FUNCTION__, cameraId.string());
+ }
+ }
+
+ if (shimUpdateOnly) {
+ // If only updating legacy shim parameters, immediately disconnect client
+ mServiceLock.unlock();
+ client->disconnect();
+ mServiceLock.lock();
+ } else {
+ // Otherwise, add client to active clients list
+ finishConnectLocked(client, partial);
+ }
+ } // lock is destroyed, allow further connect calls
+
+ // Important: release the mutex here so the client can call back into the service from its
+ // destructor (can be at the end of the call)
+ device = client;
+ return ret;
+}
+
Status CameraService::setTorchMode(const String16& cameraId, bool enabled,
const sp<IBinder>& clientBinder) {
+ Mutex::Autolock lock(mServiceLock);
ATRACE_CALL();
if (enabled && clientBinder == nullptr) {
@@ -1392,17 +1450,17 @@
"Camera ID \"%s\" is a not valid camera ID", id.string());
}
- int32_t cameraStatus = state->getStatus();
- if (cameraStatus != ICameraServiceListener::STATUS_PRESENT &&
- cameraStatus != ICameraServiceListener::STATUS_NOT_AVAILABLE) {
- ALOGE("%s: camera id is invalid %s", __FUNCTION__, id.string());
+ StatusInternal cameraStatus = state->getStatus();
+ if (cameraStatus != StatusInternal::PRESENT &&
+ cameraStatus != StatusInternal::NOT_AVAILABLE) {
+ ALOGE("%s: camera id is invalid %s, status %d", __FUNCTION__, id.string(), (int)cameraStatus);
return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
"Camera ID \"%s\" is a not valid camera ID", id.string());
}
{
Mutex::Autolock al(mTorchStatusMutex);
- int32_t status;
+ TorchModeStatus status;
status_t err = getTorchStatusLocked(id, &status);
if (err != OK) {
if (err == NAME_NOT_FOUND) {
@@ -1416,8 +1474,8 @@
strerror(-err), err);
}
- if (status == ICameraServiceListener::TORCH_STATUS_NOT_AVAILABLE) {
- if (cameraStatus == ICameraServiceListener::STATUS_NOT_AVAILABLE) {
+ if (status == TorchModeStatus::NOT_AVAILABLE) {
+ if (cameraStatus == StatusInternal::NOT_AVAILABLE) {
ALOGE("%s: torch mode of camera %s is not available because "
"camera is in use", __FUNCTION__, id.string());
return STATUS_ERROR_FMT(ERROR_CAMERA_IN_USE,
@@ -1506,7 +1564,9 @@
return Status::ok();
}
-Status CameraService::addListener(const sp<ICameraServiceListener>& listener) {
+Status CameraService::addListener(const sp<ICameraServiceListener>& listener,
+ /*out*/
+ std::vector<hardware::CameraStatus> *cameraStatuses) {
ATRACE_CALL();
ALOGV("%s: Add listener %p", __FUNCTION__, listener.get());
@@ -1531,25 +1591,23 @@
mListenerList.push_back(listener);
}
-
- /* Immediately signal current status to this listener only */
+ /* Collect current devices and status */
{
Mutex::Autolock lock(mCameraStatesLock);
for (auto& i : mCameraStates) {
- // TODO: Update binder to use String16 for camera IDs and remove;
- int id = cameraIdToInt(i.first);
- if (id == -1) continue;
-
- listener->onStatusChanged(i.second->getStatus(), id);
+ cameraStatuses->emplace_back(i.first, mapToInterface(i.second->getStatus()));
}
}
- /* Immediately signal current torch status to this listener only */
+ /*
+ * Immediately signal current torch status to this listener only
+ * This may be a subset of all the devices, so don't include it in the response directly
+ */
{
Mutex::Autolock al(mTorchStatusMutex);
for (size_t i = 0; i < mTorchStatusMap.size(); i++ ) {
String16 id = String16(mTorchStatusMap.keyAt(i).string());
- listener->onTorchStatusChanged(mTorchStatusMap.valueAt(i), id);
+ listener->onTorchStatusChanged(mapToInterface(mTorchStatusMap.valueAt(i)), id);
}
}
@@ -1610,10 +1668,13 @@
return ret;
}
-Status CameraService::supportsCameraApi(int cameraId, int apiVersion, bool *isSupported) {
+Status CameraService::supportsCameraApi(const String16& cameraId, int apiVersion,
+ /*out*/ bool *isSupported) {
ATRACE_CALL();
- ALOGV("%s: for camera ID = %d", __FUNCTION__, cameraId);
+ const String8 id = String8(cameraId);
+
+ ALOGV("%s: for camera ID = %s", __FUNCTION__, id.string());
switch (apiVersion) {
case API_VERSION_1:
@@ -1625,38 +1686,36 @@
return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, msg.string());
}
- int facing = -1;
- int deviceVersion = getDeviceVersion(cameraId, &facing);
-
+ int deviceVersion = getDeviceVersion(id);
switch(deviceVersion) {
case CAMERA_DEVICE_API_VERSION_1_0:
case CAMERA_DEVICE_API_VERSION_3_0:
case CAMERA_DEVICE_API_VERSION_3_1:
if (apiVersion == API_VERSION_2) {
- ALOGV("%s: Camera id %d uses HAL version %d <3.2, doesn't support api2 without shim",
- __FUNCTION__, cameraId, deviceVersion);
+ ALOGV("%s: Camera id %s uses HAL version %d <3.2, doesn't support api2 without shim",
+ __FUNCTION__, id.string(), deviceVersion);
*isSupported = false;
} else { // if (apiVersion == API_VERSION_1) {
- ALOGV("%s: Camera id %d uses older HAL before 3.2, but api1 is always supported",
- __FUNCTION__, cameraId);
+ ALOGV("%s: Camera id %s uses older HAL before 3.2, but api1 is always supported",
+ __FUNCTION__, id.string());
*isSupported = true;
}
break;
case CAMERA_DEVICE_API_VERSION_3_2:
case CAMERA_DEVICE_API_VERSION_3_3:
case CAMERA_DEVICE_API_VERSION_3_4:
- ALOGV("%s: Camera id %d uses HAL3.2 or newer, supports api1/api2 directly",
- __FUNCTION__, cameraId);
+ ALOGV("%s: Camera id %s uses HAL3.2 or newer, supports api1/api2 directly",
+ __FUNCTION__, id.string());
*isSupported = true;
break;
case -1: {
- String8 msg = String8::format("Unknown camera ID %d", cameraId);
+ String8 msg = String8::format("Unknown camera ID %s", id.string());
ALOGE("%s: %s", __FUNCTION__, msg.string());
return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, msg.string());
}
default: {
- String8 msg = String8::format("Unknown device version %d for device %d",
- deviceVersion, cameraId);
+ String8 msg = String8::format("Unknown device version %x for device %s",
+ deviceVersion, id.string());
ALOGE("%s: %s", __FUNCTION__, msg.string());
return STATUS_ERROR(ERROR_INVALID_OPERATION, msg.string());
}
@@ -1726,81 +1785,6 @@
return ret;
}
-
-/**
- * Check camera capabilities, such as support for basic color operation
- * Also check that the device HAL version is still in support
- */
-int CameraService::checkCameraCapabilities(int id, camera_info info, int *latestStrangeCameraId) {
- // device_version undefined in CAMERA_MODULE_API_VERSION_1_0,
- // All CAMERA_MODULE_API_VERSION_1_0 devices are backward-compatible
- if (mModule->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_0) {
- // Verify the device version is in the supported range
- switch (info.device_version) {
- case CAMERA_DEVICE_API_VERSION_1_0:
- case CAMERA_DEVICE_API_VERSION_3_0:
- case CAMERA_DEVICE_API_VERSION_3_1:
- case CAMERA_DEVICE_API_VERSION_3_2:
- case CAMERA_DEVICE_API_VERSION_3_3:
- case CAMERA_DEVICE_API_VERSION_3_4:
- // in support
- break;
- case CAMERA_DEVICE_API_VERSION_2_0:
- case CAMERA_DEVICE_API_VERSION_2_1:
- // no longer supported
- default:
- ALOGE("%s: Device %d has HAL version %x, which is not supported",
- __FUNCTION__, id, info.device_version);
- String8 msg = String8::format(
- "Unsupported device HAL version %x for device %d",
- info.device_version, id);
- logServiceError(msg.string(), NO_INIT);
- return NO_INIT;
- }
- }
-
- // Assume all devices pre-v3.3 are backward-compatible
- bool isBackwardCompatible = true;
- if (mModule->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_0
- && info.device_version >= CAMERA_DEVICE_API_VERSION_3_3) {
- isBackwardCompatible = false;
- status_t res;
- camera_metadata_ro_entry_t caps;
- res = find_camera_metadata_ro_entry(
- info.static_camera_characteristics,
- ANDROID_REQUEST_AVAILABLE_CAPABILITIES,
- &caps);
- if (res != 0) {
- ALOGW("%s: Unable to find camera capabilities for camera device %d",
- __FUNCTION__, id);
- caps.count = 0;
- }
- for (size_t i = 0; i < caps.count; i++) {
- if (caps.data.u8[i] ==
- ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE) {
- isBackwardCompatible = true;
- break;
- }
- }
- }
-
- if (!isBackwardCompatible) {
- mNumberOfNormalCameras--;
- *latestStrangeCameraId = id;
- } else {
- if (id > *latestStrangeCameraId) {
- ALOGE("%s: Normal camera ID %d higher than strange camera ID %d. "
- "This is not allowed due backward-compatibility requirements",
- __FUNCTION__, id, *latestStrangeCameraId);
- logServiceError("Invalid order of camera devices", NO_INIT);
- mNumberOfCameras = 0;
- mNumberOfNormalCameras = 0;
- return NO_INIT;
- }
- }
- return OK;
-}
-
std::shared_ptr<CameraService::CameraState> CameraService::getCameraState(
const String8& cameraId) const {
std::shared_ptr<CameraState> state;
@@ -1877,9 +1861,11 @@
// Log the clients evicted
logEvent(String8::format("EVICT device %s client held by package %s (PID %"
- PRId32 ", priority %" PRId32 ")\n - Evicted due to user switch.",
- i->getKey().string(), String8{clientSp->getPackageName()}.string(),
- i->getOwnerId(), i->getPriority()));
+ PRId32 ", score %" PRId32 ", state %" PRId32 ")\n - Evicted due"
+ " to user switch.", i->getKey().string(),
+ String8{clientSp->getPackageName()}.string(),
+ i->getOwnerId(), i->getPriority().getScore(),
+ i->getPriority().getState()));
}
@@ -1931,8 +1917,11 @@
const std::set<userid_t>& newUserIds) {
String8 newUsers = toString(newUserIds);
String8 oldUsers = toString(oldUserIds);
+ if (oldUsers.size() == 0) {
+ oldUsers = "<None>";
+ }
// Log the new and old users
- logEvent(String8::format("USER_SWITCH previous allowed users: %s , current allowed users: %s",
+ logEvent(String8::format("USER_SWITCH previous allowed user IDs: %s, current allowed user IDs: %s",
oldUsers.string(), newUsers.string()));
}
@@ -2041,24 +2030,25 @@
CameraService::Client::Client(const sp<CameraService>& cameraService,
const sp<ICameraClient>& cameraClient,
const String16& clientPackageName,
- int cameraId, int cameraFacing,
+ const String8& cameraIdStr, int cameraFacing,
int clientPid, uid_t clientUid,
int servicePid) :
CameraService::BasicClient(cameraService,
IInterface::asBinder(cameraClient),
clientPackageName,
- cameraId, cameraFacing,
+ cameraIdStr, cameraFacing,
clientPid, clientUid,
- servicePid)
+ servicePid),
+ mCameraId(CameraService::cameraIdToInt(cameraIdStr))
{
int callingPid = getCallingPid();
- LOG1("Client::Client E (pid %d, id %d)", callingPid, cameraId);
+ LOG1("Client::Client E (pid %d, id %d)", callingPid, mCameraId);
mRemoteCallback = cameraClient;
cameraService->loadSound();
- LOG1("Client::Client X (pid %d, id %d)", callingPid, cameraId);
+ LOG1("Client::Client X (pid %d, id %d)", callingPid, mCameraId);
}
// tear down the client
@@ -2066,26 +2056,28 @@
ALOGV("~Client");
mDestructionStarted = true;
- mCameraService->releaseSound();
+ sCameraService->releaseSound();
// unconditionally disconnect. function is idempotent
Client::disconnect();
}
+sp<CameraService> CameraService::BasicClient::BasicClient::sCameraService;
+
CameraService::BasicClient::BasicClient(const sp<CameraService>& cameraService,
const sp<IBinder>& remoteCallback,
const String16& clientPackageName,
- int cameraId, int cameraFacing,
+ const String8& cameraIdStr, int cameraFacing,
int clientPid, uid_t clientUid,
int servicePid):
- mClientPackageName(clientPackageName), mDisconnected(false)
+ mCameraIdStr(cameraIdStr), mCameraFacing(cameraFacing),
+ mClientPackageName(clientPackageName), mClientPid(clientPid), mClientUid(clientUid),
+ mServicePid(servicePid),
+ mDisconnected(false),
+ mRemoteBinder(remoteCallback)
{
- mCameraService = cameraService;
- mRemoteBinder = remoteCallback;
- mCameraId = cameraId;
- mCameraFacing = cameraFacing;
- mClientPid = clientPid;
- mClientUid = clientUid;
- mServicePid = servicePid;
+ if (sCameraService == nullptr) {
+ sCameraService = cameraService;
+ }
mOpsActive = false;
mDestructionStarted = false;
@@ -2133,19 +2125,20 @@
}
mDisconnected = true;
- mCameraService->removeByClient(this);
- mCameraService->logDisconnected(String8::format("%d", mCameraId), mClientPid,
+ sCameraService->removeByClient(this);
+ sCameraService->logDisconnected(mCameraIdStr, mClientPid,
String8(mClientPackageName));
sp<IBinder> remote = getRemote();
if (remote != nullptr) {
- remote->unlinkToDeath(mCameraService);
+ remote->unlinkToDeath(sCameraService);
}
finishCameraOps();
// Notify flashlight that a camera device is closed.
- mCameraService->mFlashlight->deviceClosed(String8::format("%d", mCameraId));
- ALOGI("%s: Disconnected client for camera %d for PID %d", __FUNCTION__, mCameraId, mClientPid);
+ sCameraService->mFlashlight->deviceClosed(mCameraIdStr);
+ ALOGI("%s: Disconnected client for camera %s for PID %d", __FUNCTION__, mCameraIdStr.string(),
+ mClientPid);
// client shouldn't be able to call into us anymore
mClientPid = 0;
@@ -2197,14 +2190,14 @@
mClientUid, mClientPackageName);
if (res == AppOpsManager::MODE_ERRORED) {
- ALOGI("Camera %d: Access for \"%s\" has been revoked",
- mCameraId, String8(mClientPackageName).string());
+ ALOGI("Camera %s: Access for \"%s\" has been revoked",
+ mCameraIdStr.string(), String8(mClientPackageName).string());
return PERMISSION_DENIED;
}
if (res == AppOpsManager::MODE_IGNORED) {
- ALOGI("Camera %d: Access for \"%s\" has been restricted",
- mCameraId, String8(mClientPackageName).string());
+ ALOGI("Camera %s: Access for \"%s\" has been restricted",
+ mCameraIdStr.string(), String8(mClientPackageName).string());
// Return the same error as for device policy manager rejection
return -EACCES;
}
@@ -2212,12 +2205,11 @@
mOpsActive = true;
// Transition device availability listeners from PRESENT -> NOT_AVAILABLE
- mCameraService->updateStatus(ICameraServiceListener::STATUS_NOT_AVAILABLE,
- String8::format("%d", mCameraId));
+ sCameraService->updateStatus(StatusInternal::NOT_AVAILABLE, mCameraIdStr);
// Transition device state to OPEN
- mCameraService->updateProxyDeviceState(ICameraServiceProxy::CAMERA_STATE_OPEN,
- String8::format("%d", mCameraId));
+ sCameraService->updateProxyDeviceState(ICameraServiceProxy::CAMERA_STATE_OPEN,
+ mCameraIdStr);
return OK;
}
@@ -2232,16 +2224,16 @@
mClientPackageName);
mOpsActive = false;
- std::initializer_list<int32_t> rejected = {ICameraServiceListener::STATUS_NOT_PRESENT,
- ICameraServiceListener::STATUS_ENUMERATING};
+ std::initializer_list<StatusInternal> rejected = {StatusInternal::PRESENT,
+ StatusInternal::ENUMERATING};
// Transition to PRESENT if the camera is not in either of the rejected states
- mCameraService->updateStatus(ICameraServiceListener::STATUS_PRESENT,
- String8::format("%d", mCameraId), rejected);
+ sCameraService->updateStatus(StatusInternal::PRESENT,
+ mCameraIdStr, rejected);
// Transition device state to CLOSED
- mCameraService->updateProxyDeviceState(ICameraServiceProxy::CAMERA_STATE_CLOSED,
- String8::format("%d", mCameraId));
+ sCameraService->updateProxyDeviceState(ICameraServiceProxy::CAMERA_STATE_CLOSED,
+ mCameraIdStr);
}
// Always stop watching, even if no camera op is active
if (mOpsCallback != NULL) {
@@ -2273,7 +2265,7 @@
"UNKNOWN");
if (res != AppOpsManager::MODE_ALLOWED) {
- ALOGI("Camera %d: Access for \"%s\" revoked", mCameraId,
+ ALOGI("Camera %s: Access for \"%s\" revoked", mCameraIdStr.string(),
myName.string());
// Reset the client PID to allow server-initiated disconnect,
// and to prevent further calls by client.
@@ -2286,17 +2278,6 @@
// ----------------------------------------------------------------------------
-// Provide client strong pointer for callbacks.
-sp<CameraService::Client> CameraService::Client::getClientFromCookie(void* user) {
- String8 cameraId = String8::format("%d", (int)(intptr_t) user);
- auto clientDescriptor = gCameraService->mActiveClientManager.get(cameraId);
- if (clientDescriptor != nullptr) {
- return sp<Client>{
- static_cast<Client*>(clientDescriptor->getValue().get())};
- }
- return sp<Client>{nullptr};
-}
-
void CameraService::Client::notifyError(int32_t errorCode,
const CaptureResultExtras& resultExtras) {
(void) errorCode;
@@ -2336,11 +2317,11 @@
CameraService::CameraState::CameraState(const String8& id, int cost,
const std::set<String8>& conflicting) : mId(id),
- mStatus(ICameraServiceListener::STATUS_PRESENT), mCost(cost), mConflicting(conflicting) {}
+ mStatus(StatusInternal::PRESENT), mCost(cost), mConflicting(conflicting) {}
CameraService::CameraState::~CameraState() {}
-int32_t CameraService::CameraState::getStatus() const {
+CameraService::StatusInternal CameraService::CameraState::getStatus() const {
Mutex::Autolock lock(mStatusLock);
return mStatus;
}
@@ -2420,7 +2401,8 @@
String8 key = i->getKey();
int32_t cost = i->getCost();
int32_t pid = i->getOwnerId();
- int32_t priority = i->getPriority();
+ int32_t score = i->getPriority().getScore();
+ int32_t state = i->getPriority().getState();
auto conflicting = i->getConflicting();
auto clientSp = i->getValue();
String8 packageName;
@@ -2430,8 +2412,8 @@
uid_t clientUid = clientSp->getClientUid();
clientUserId = multiuser_get_user_id(clientUid);
}
- ret.appendFormat("\n(Camera ID: %s, Cost: %" PRId32 ", PID: %" PRId32 ", Priority: %"
- PRId32 ", ", key.string(), cost, pid, priority);
+ ret.appendFormat("\n(Camera ID: %s, Cost: %" PRId32 ", PID: %" PRId32 ", Score: %"
+ PRId32 ", State: %" PRId32, key.string(), cost, pid, score, state);
if (clientSp.get() != nullptr) {
ret.appendFormat("User Id: %d, ", clientUserId);
@@ -2453,16 +2435,18 @@
CameraService::DescriptorPtr CameraService::CameraClientManager::makeClientDescriptor(
const String8& key, const sp<BasicClient>& value, int32_t cost,
- const std::set<String8>& conflictingKeys, int32_t priority, int32_t ownerId) {
+ const std::set<String8>& conflictingKeys, int32_t score, int32_t ownerId,
+ int32_t state) {
return std::make_shared<resource_policy::ClientDescriptor<String8, sp<BasicClient>>>(
- key, value, cost, conflictingKeys, priority, ownerId);
+ key, value, cost, conflictingKeys, score, ownerId, state);
}
CameraService::DescriptorPtr CameraService::CameraClientManager::makeClientDescriptor(
const sp<BasicClient>& value, const CameraService::DescriptorPtr& partial) {
return makeClientDescriptor(partial->getKey(), value, partial->getCost(),
- partial->getConflicting(), partial->getPriority(), partial->getOwnerId());
+ partial->getConflicting(), partial->getPriority().getScore(),
+ partial->getOwnerId(), partial->getPriority().getState());
}
// ----------------------------------------------------------------------------
@@ -2486,175 +2470,122 @@
status_t CameraService::dump(int fd, const Vector<String16>& args) {
ATRACE_CALL();
- String8 result("Dump of the Camera Service:\n");
if (checkCallingPermission(String16("android.permission.DUMP")) == false) {
- result = result.format("Permission Denial: "
- "can't dump CameraService from pid=%d, uid=%d\n",
+ dprintf(fd, "Permission Denial: can't dump CameraService from pid=%d, uid=%d\n",
getCallingPid(),
getCallingUid());
- write(fd, result.string(), result.size());
- } else {
- bool locked = tryLock(mServiceLock);
- // failed to lock - CameraService is probably deadlocked
- if (!locked) {
- result.append("CameraService may be deadlocked\n");
- write(fd, result.string(), result.size());
- }
+ return NO_ERROR;
+ }
+ bool locked = tryLock(mServiceLock);
+ // failed to lock - CameraService is probably deadlocked
+ if (!locked) {
+ dprintf(fd, "!! CameraService may be deadlocked !!\n");
+ }
- bool hasClient = false;
- if (!mModule) {
- result = String8::format("No camera module available!\n");
- write(fd, result.string(), result.size());
+ if (!mInitialized) {
+ dprintf(fd, "!! No camera HAL available !!\n");
- // Dump event log for error information
- dumpEventLog(fd);
-
- if (locked) mServiceLock.unlock();
- return NO_ERROR;
- }
-
- result = String8::format("Camera module HAL API version: 0x%x\n", mModule->getHalApiVersion());
- result.appendFormat("Camera module API version: 0x%x\n", mModule->getModuleApiVersion());
- result.appendFormat("Camera module name: %s\n", mModule->getModuleName());
- result.appendFormat("Camera module author: %s\n", mModule->getModuleAuthor());
- result.appendFormat("Number of camera devices: %d\n", mNumberOfCameras);
- result.appendFormat("Number of normal camera devices: %d\n", mNumberOfNormalCameras);
- String8 activeClientString = mActiveClientManager.toString();
- result.appendFormat("Active Camera Clients:\n%s", activeClientString.string());
- result.appendFormat("Allowed users:\n%s\n", toString(mAllowedUsers).string());
-
- sp<VendorTagDescriptor> desc = VendorTagDescriptor::getGlobalVendorTagDescriptor();
- if (desc == NULL) {
- result.appendFormat("Vendor tags left unimplemented.\n");
- } else {
- result.appendFormat("Vendor tag definitions:\n");
- }
-
- write(fd, result.string(), result.size());
-
- if (desc != NULL) {
- desc->dump(fd, /*verbosity*/2, /*indentation*/4);
- }
-
+ // Dump event log for error information
dumpEventLog(fd);
- bool stateLocked = tryLock(mCameraStatesLock);
- if (!stateLocked) {
- result = String8::format("CameraStates in use, may be deadlocked\n");
- write(fd, result.string(), result.size());
+ if (locked) mServiceLock.unlock();
+ return NO_ERROR;
+ }
+ dprintf(fd, "\n== Service global info: ==\n\n");
+ dprintf(fd, "Number of camera devices: %d\n", mNumberOfCameras);
+ dprintf(fd, "Number of normal camera devices: %d\n", mNumberOfNormalCameras);
+ String8 activeClientString = mActiveClientManager.toString();
+ dprintf(fd, "Active Camera Clients:\n%s", activeClientString.string());
+ dprintf(fd, "Allowed user IDs: %s\n", toString(mAllowedUsers).string());
+
+ dumpEventLog(fd);
+
+ bool stateLocked = tryLock(mCameraStatesLock);
+ if (!stateLocked) {
+ dprintf(fd, "CameraStates in use, may be deadlocked\n");
+ }
+
+ for (auto& state : mCameraStates) {
+ String8 cameraId = state.first;
+
+ dprintf(fd, "== Camera device %s dynamic info: ==\n", cameraId.string());
+
+ CameraParameters p = state.second->getShimParams();
+ if (!p.isEmpty()) {
+ dprintf(fd, " Camera1 API shim is using parameters:\n ");
+ p.dump(fd, args);
}
- for (auto& state : mCameraStates) {
- String8 cameraId = state.first;
- result = String8::format("Camera %s information:\n", cameraId.string());
- camera_info info;
-
- // TODO: Change getCameraInfo + HAL to use String cameraIds
- status_t rc = mModule->getCameraInfo(cameraIdToInt(cameraId), &info);
- if (rc != OK) {
- result.appendFormat(" Error reading static information!\n");
- write(fd, result.string(), result.size());
- } else {
- result.appendFormat(" Facing: %s\n",
- info.facing == CAMERA_FACING_BACK ? "BACK" :
- info.facing == CAMERA_FACING_FRONT ? "FRONT" : "EXTERNAL");
- result.appendFormat(" Orientation: %d\n", info.orientation);
- int deviceVersion;
- if (mModule->getModuleApiVersion() < CAMERA_MODULE_API_VERSION_2_0) {
- deviceVersion = CAMERA_DEVICE_API_VERSION_1_0;
- } else {
- deviceVersion = info.device_version;
- }
-
- auto conflicting = state.second->getConflicting();
- result.appendFormat(" Resource Cost: %d\n", state.second->getCost());
- result.appendFormat(" Conflicting Devices:");
- for (auto& id : conflicting) {
- result.appendFormat(" %s", id.string());
- }
- if (conflicting.size() == 0) {
- result.appendFormat(" NONE");
- }
- result.appendFormat("\n");
-
- result.appendFormat(" Device version: %#x\n", deviceVersion);
- if (deviceVersion >= CAMERA_DEVICE_API_VERSION_3_0) {
- result.appendFormat(" Device static metadata:\n");
- write(fd, result.string(), result.size());
- dump_indented_camera_metadata(info.static_camera_characteristics,
- fd, /*verbosity*/2, /*indentation*/4);
- } else {
- write(fd, result.string(), result.size());
- }
-
- CameraParameters p = state.second->getShimParams();
- if (!p.isEmpty()) {
- result = String8::format(" Camera1 API shim is using parameters:\n ");
- write(fd, result.string(), result.size());
- p.dump(fd, args);
- }
- }
-
- auto clientDescriptor = mActiveClientManager.get(cameraId);
- if (clientDescriptor == nullptr) {
- result = String8::format(" Device %s is closed, no client instance\n",
- cameraId.string());
- write(fd, result.string(), result.size());
- continue;
- }
- hasClient = true;
- result = String8::format(" Device %s is open. Client instance dump:\n\n",
+ auto clientDescriptor = mActiveClientManager.get(cameraId);
+ if (clientDescriptor != nullptr) {
+ dprintf(fd, " Device %s is open. Client instance dump:\n",
cameraId.string());
- result.appendFormat("Client priority level: %d\n", clientDescriptor->getPriority());
- result.appendFormat("Client PID: %d\n", clientDescriptor->getOwnerId());
+ dprintf(fd, " Client priority score: %d state: %d\n",
+ clientDescriptor->getPriority().getScore(),
+ clientDescriptor->getPriority().getState());
+ dprintf(fd, " Client PID: %d\n", clientDescriptor->getOwnerId());
auto client = clientDescriptor->getValue();
- result.appendFormat("Client package: %s\n",
+ dprintf(fd, " Client package: %s\n",
String8(client->getPackageName()).string());
- write(fd, result.string(), result.size());
client->dumpClient(fd, args);
+ } else {
+ dprintf(fd, " Device %s is closed, no client instance\n",
+ cameraId.string());
}
- if (stateLocked) mCameraStatesLock.unlock();
+ }
- if (!hasClient) {
- result = String8::format("\nNo active camera clients yet.\n");
- write(fd, result.string(), result.size());
+ if (stateLocked) mCameraStatesLock.unlock();
+
+ if (locked) mServiceLock.unlock();
+
+ mCameraProviderManager->dump(fd, args);
+
+ dprintf(fd, "\n== Vendor tags: ==\n\n");
+
+ sp<VendorTagDescriptor> desc = VendorTagDescriptor::getGlobalVendorTagDescriptor();
+ if (desc == NULL) {
+ sp<VendorTagDescriptorCache> cache =
+ VendorTagDescriptorCache::getGlobalVendorTagCache();
+ if (cache == NULL) {
+ dprintf(fd, "No vendor tags.\n");
+ } else {
+ cache->dump(fd, /*verbosity*/2, /*indentation*/2);
}
+ } else {
+ desc->dump(fd, /*verbosity*/2, /*indentation*/2);
+ }
- if (locked) mServiceLock.unlock();
+ // Dump camera traces if there were any
+ dprintf(fd, "\n");
+ camera3::CameraTraces::dump(fd, args);
- // Dump camera traces if there were any
- write(fd, "\n", 1);
- camera3::CameraTraces::dump(fd, args);
-
- // Process dump arguments, if any
- int n = args.size();
- String16 verboseOption("-v");
- String16 unreachableOption("--unreachable");
- for (int i = 0; i < n; i++) {
- if (args[i] == verboseOption) {
- // change logging level
- if (i + 1 >= n) continue;
- String8 levelStr(args[i+1]);
- int level = atoi(levelStr.string());
- result = String8::format("\nSetting log level to %d.\n", level);
- setLogLevel(level);
- write(fd, result.string(), result.size());
- } else if (args[i] == unreachableOption) {
- // Dump memory analysis
- // TODO - should limit be an argument parameter?
- UnreachableMemoryInfo info;
- bool success = GetUnreachableMemory(info, /*limit*/ 10000);
- if (!success) {
- dprintf(fd, "\nUnable to dump unreachable memory. "
- "Try disabling SELinux enforcement.\n");
- } else {
- dprintf(fd, "\nDumping unreachable memory:\n");
- std::string s = info.ToString(/*log_contents*/ true);
- write(fd, s.c_str(), s.size());
- }
+ // Process dump arguments, if any
+ int n = args.size();
+ String16 verboseOption("-v");
+ String16 unreachableOption("--unreachable");
+ for (int i = 0; i < n; i++) {
+ if (args[i] == verboseOption) {
+ // change logging level
+ if (i + 1 >= n) continue;
+ String8 levelStr(args[i+1]);
+ int level = atoi(levelStr.string());
+ dprintf(fd, "\nSetting log level to %d.\n", level);
+ setLogLevel(level);
+ } else if (args[i] == unreachableOption) {
+ // Dump memory analysis
+ // TODO - should limit be an argument parameter?
+ UnreachableMemoryInfo info;
+ bool success = GetUnreachableMemory(info, /*limit*/ 10000);
+ if (!success) {
+ dprintf(fd, "\n== Unable to dump unreachable memory. "
+ "Try disabling SELinux enforcement. ==\n");
+ } else {
+ dprintf(fd, "\n== Dumping unreachable memory: ==\n");
+ std::string s = info.ToString(/*log_contents*/ true);
+ write(fd, s.c_str(), s.size());
}
}
}
@@ -2662,21 +2593,19 @@
}
void CameraService::dumpEventLog(int fd) {
- String8 result = String8("\nPrior client events (most recent at top):\n");
+ dprintf(fd, "\n== Camera service events log (most recent at top): ==\n");
Mutex::Autolock l(mLogLock);
for (const auto& msg : mEventLog) {
- result.appendFormat(" %s\n", msg.string());
+ dprintf(fd, " %s\n", msg.string());
}
if (mEventLog.size() == DEFAULT_EVENT_LOG_LENGTH) {
- result.append(" ...\n");
+ dprintf(fd, " ...\n");
} else if (mEventLog.size() == 0) {
- result.append(" [no events yet]\n");
+ dprintf(fd, " [no events yet]\n");
}
- result.append("\n");
-
- write(fd, result.string(), result.size());
+ dprintf(fd, "\n");
}
void CameraService::handleTorchClientBinderDied(const wp<IBinder> &who) {
@@ -2719,12 +2648,12 @@
__FUNCTION__);
}
-void CameraService::updateStatus(int32_t status, const String8& cameraId) {
+void CameraService::updateStatus(StatusInternal status, const String8& cameraId) {
updateStatus(status, cameraId, {});
}
-void CameraService::updateStatus(int32_t status, const String8& cameraId,
- std::initializer_list<int32_t> rejectSourceStates) {
+void CameraService::updateStatus(StatusInternal status, const String8& cameraId,
+ std::initializer_list<StatusInternal> rejectSourceStates) {
// Do not lock mServiceLock here or can get into a deadlock from
// connect() -> disconnect -> updateStatus
@@ -2739,18 +2668,18 @@
// Update the status for this camera state, then send the onStatusChangedCallbacks to each
// of the listeners with both the mStatusStatus and mStatusListenerLock held
state->updateStatus(status, cameraId, rejectSourceStates, [this]
- (const String8& cameraId, int32_t status) {
+ (const String8& cameraId, StatusInternal status) {
- if (status != ICameraServiceListener::STATUS_ENUMERATING) {
+ if (status != StatusInternal::ENUMERATING) {
// Update torch status if it has a flash unit.
Mutex::Autolock al(mTorchStatusMutex);
- int32_t torchStatus;
+ TorchModeStatus torchStatus;
if (getTorchStatusLocked(cameraId, &torchStatus) !=
NAME_NOT_FOUND) {
- int32_t newTorchStatus =
- status == ICameraServiceListener::STATUS_PRESENT ?
- ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF :
- ICameraServiceListener::TORCH_STATUS_NOT_AVAILABLE;
+ TorchModeStatus newTorchStatus =
+ status == StatusInternal::PRESENT ?
+ TorchModeStatus::AVAILABLE_OFF :
+ TorchModeStatus::NOT_AVAILABLE;
if (torchStatus != newTorchStatus) {
onTorchStatusChangedLocked(cameraId, newTorchStatus);
}
@@ -2760,13 +2689,54 @@
Mutex::Autolock lock(mStatusListenerLock);
for (auto& listener : mListenerList) {
- // TODO: Refactor status listeners to use strings for Camera IDs and remove this.
- int id = cameraIdToInt(cameraId);
- if (id != -1) listener->onStatusChanged(status, id);
+ listener->onStatusChanged(mapToInterface(status), String16(cameraId));
}
});
}
+template<class Func>
+void CameraService::CameraState::updateStatus(StatusInternal status,
+ const String8& cameraId,
+ std::initializer_list<StatusInternal> rejectSourceStates,
+ Func onStatusUpdatedLocked) {
+ Mutex::Autolock lock(mStatusLock);
+ StatusInternal oldStatus = mStatus;
+ mStatus = status;
+
+ if (oldStatus == status) {
+ return;
+ }
+
+ ALOGV("%s: Status has changed for camera ID %s from %#x to %#x", __FUNCTION__,
+ cameraId.string(), oldStatus, status);
+
+ if (oldStatus == StatusInternal::NOT_PRESENT &&
+ (status != StatusInternal::PRESENT &&
+ status != StatusInternal::ENUMERATING)) {
+
+ ALOGW("%s: From NOT_PRESENT can only transition into PRESENT or ENUMERATING",
+ __FUNCTION__);
+ mStatus = oldStatus;
+ return;
+ }
+
+ /**
+ * Sometimes we want to conditionally do a transition.
+ * For example if a client disconnects, we want to go to PRESENT
+ * only if we weren't already in NOT_PRESENT or ENUMERATING.
+ */
+ for (auto& rejectStatus : rejectSourceStates) {
+ if (oldStatus == rejectStatus) {
+ ALOGV("%s: Rejecting status transition for Camera ID %s, since the source "
+                    "state was in one of the bad states.", __FUNCTION__, cameraId.string());
+ mStatus = oldStatus;
+ return;
+ }
+ }
+
+ onStatusUpdatedLocked(cameraId, status);
+}
+
void CameraService::updateProxyDeviceState(ICameraServiceProxy::CameraState newState,
const String8& cameraId) {
sp<ICameraServiceProxy> proxyBinder = getCameraServiceProxy();
@@ -2777,7 +2747,7 @@
status_t CameraService::getTorchStatusLocked(
const String8& cameraId,
- int32_t *status) const {
+ TorchModeStatus *status) const {
if (!status) {
return BAD_VALUE;
}
@@ -2792,14 +2762,12 @@
}
status_t CameraService::setTorchStatusLocked(const String8& cameraId,
- int32_t status) {
+ TorchModeStatus status) {
ssize_t index = mTorchStatusMap.indexOfKey(cameraId);
if (index == NAME_NOT_FOUND) {
return BAD_VALUE;
}
- int32_t& item =
- mTorchStatusMap.editValueAt(index);
- item = status;
+ mTorchStatusMap.editValueAt(index) = status;
return OK;
}
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index b35f35c..87603a3 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -29,13 +29,15 @@
#include <camera/ICameraServiceProxy.h>
#include <hardware/camera.h>
+#include <android/hardware/camera/common/1.0/types.h>
+
#include <camera/VendorTagDescriptor.h>
#include <camera/CaptureResult.h>
#include <camera/CameraParameters.h>
#include "CameraFlashlight.h"
-#include "common/CameraModule.h"
+#include "common/CameraProviderManager.h"
#include "media/RingBuffer.h"
#include "utils/AutoConditionLock.h"
#include "utils/ClientManager.h"
@@ -55,11 +57,13 @@
class CameraService :
public BinderService<CameraService>,
- public ::android::hardware::BnCameraService,
- public IBinder::DeathRecipient,
- public camera_module_callbacks_t
+ public virtual ::android::hardware::BnCameraService,
+ public virtual IBinder::DeathRecipient,
+ public camera_module_callbacks_t,
+ public virtual CameraProviderManager::StatusListener
{
friend class BinderService<CameraService>;
+ friend class CameraClient;
public:
class Client;
class BasicClient;
@@ -70,11 +74,6 @@
API_2 = 2
};
- // Process state (mirrors frameworks/base/core/java/android/app/ActivityManager.java)
- static const int PROCESS_STATE_NONEXISTENT = -1;
- static const int PROCESS_STATE_TOP = 2;
- static const int PROCESS_STATE_TOP_SLEEPING = 5;
-
// 3 second busy timeout when other clients are connecting
static const nsecs_t DEFAULT_CONNECT_TIMEOUT_NS = 3000000000;
@@ -94,11 +93,13 @@
virtual ~CameraService();
/////////////////////////////////////////////////////////////////////
- // HAL Callbacks
- virtual void onDeviceStatusChanged(int cameraId,
- camera_device_status_t newStatus);
+ // HAL Callbacks - implements CameraProviderManager::StatusListener
+
+ virtual void onDeviceStatusChanged(const String8 &cameraId,
+ hardware::camera::common::V1_0::CameraDeviceStatus newHalStatus) override;
virtual void onTorchStatusChanged(const String8& cameraId,
- int32_t newStatus);
+ hardware::camera::common::V1_0::TorchModeStatus newStatus) override;
+ virtual void onNewProviderRegistered() override;
/////////////////////////////////////////////////////////////////////
// ICameraService
@@ -106,11 +107,14 @@
virtual binder::Status getCameraInfo(int cameraId,
hardware::CameraInfo* cameraInfo);
- virtual binder::Status getCameraCharacteristics(int cameraId,
+ virtual binder::Status getCameraCharacteristics(const String16& cameraId,
CameraMetadata* cameraInfo);
virtual binder::Status getCameraVendorTagDescriptor(
/*out*/
hardware::camera2::params::VendorTagDescriptor* desc);
+ virtual binder::Status getCameraVendorTagCache(
+ /*out*/
+ hardware::camera2::params::VendorTagDescriptorCache* cache);
virtual binder::Status connect(const sp<hardware::ICameraClient>& cameraClient,
int32_t cameraId, const String16& clientPackageName,
@@ -125,12 +129,14 @@
sp<hardware::ICamera>* device);
virtual binder::Status connectDevice(
- const sp<hardware::camera2::ICameraDeviceCallbacks>& cameraCb, int32_t cameraId,
+ const sp<hardware::camera2::ICameraDeviceCallbacks>& cameraCb, const String16& cameraId,
const String16& clientPackageName, int32_t clientUid,
/*out*/
sp<hardware::camera2::ICameraDeviceUser>* device);
- virtual binder::Status addListener(const sp<hardware::ICameraServiceListener>& listener);
+ virtual binder::Status addListener(const sp<hardware::ICameraServiceListener>& listener,
+ /*out*/
+ std::vector<hardware::CameraStatus>* cameraStatuses);
virtual binder::Status removeListener(
const sp<hardware::ICameraServiceListener>& listener);
@@ -147,7 +153,7 @@
// OK = supports api of that version, -EOPNOTSUPP = does not support
virtual binder::Status supportsCameraApi(
- int32_t cameraId, int32_t apiVersion,
+ const String16& cameraId, int32_t apiVersion,
/*out*/
bool *isSupported);
@@ -181,7 +187,7 @@
/////////////////////////////////////////////////////////////////////
// CameraDeviceFactory functionality
- int getDeviceVersion(int cameraId, int* facing = NULL);
+ int getDeviceVersion(const String8& cameraId, int* facing = NULL);
/////////////////////////////////////////////////////////////////////
// Shared utilities
@@ -192,7 +198,7 @@
class BasicClient : public virtual RefBase {
public:
- virtual status_t initialize(CameraModule *module) = 0;
+ virtual status_t initialize(sp<CameraProviderManager> manager) = 0;
virtual binder::Status disconnect();
// because we can't virtually inherit IInterface, which breaks
@@ -229,7 +235,7 @@
BasicClient(const sp<CameraService>& cameraService,
const sp<IBinder>& remoteCallback,
const String16& clientPackageName,
- int cameraId,
+ const String8& cameraIdStr,
int cameraFacing,
int clientPid,
uid_t clientUid,
@@ -244,13 +250,13 @@
bool mDestructionStarted;
// these are initialized in the constructor.
- sp<CameraService> mCameraService; // immutable after constructor
- int mCameraId; // immutable after constructor
- int mCameraFacing; // immutable after constructor
- String16 mClientPackageName; // immutable after constructor
+ static sp<CameraService> sCameraService;
+ const String8 mCameraIdStr;
+ const int mCameraFacing;
+ String16 mClientPackageName;
pid_t mClientPid;
- uid_t mClientUid; // immutable after constructor
- pid_t mServicePid; // immutable after constructor
+ const uid_t mClientUid;
+ const pid_t mServicePid;
bool mDisconnected;
// - The app-side Binder interface to receive callbacks from us
@@ -316,7 +322,7 @@
Client(const sp<CameraService>& cameraService,
const sp<hardware::ICameraClient>& cameraClient,
const String16& clientPackageName,
- int cameraId,
+ const String8& cameraIdStr,
int cameraFacing,
int clientPid,
uid_t clientUid,
@@ -339,14 +345,12 @@
// superclass this can be cast to.
virtual bool canCastToApiClient(apiLevel level) const;
protected:
- // Convert client from cookie.
- static sp<CameraService::Client> getClientFromCookie(void* user);
-
// Initialized in constructor
// - The app-side Binder interface to receive callbacks from us
sp<hardware::ICameraClient> mRemoteCallback;
+ int mCameraId; // All API1 clients use integer camera IDs
}; // class Client
/**
@@ -395,8 +399,8 @@
* Make a ClientDescriptor object wrapping the given BasicClient strong pointer.
*/
static DescriptorPtr makeClientDescriptor(const String8& key, const sp<BasicClient>& value,
- int32_t cost, const std::set<String8>& conflictingKeys, int32_t priority,
- int32_t ownerId);
+ int32_t cost, const std::set<String8>& conflictingKeys, int32_t score,
+ int32_t ownerId, int32_t state);
/**
* Make a ClientDescriptor object wrapping the given BasicClient strong pointer with
@@ -409,6 +413,20 @@
private:
+ typedef hardware::camera::common::V1_0::CameraDeviceStatus CameraDeviceStatus;
+
+ /**
+ * Typesafe version of device status, containing both the HAL-layer and the service interface-
+ * layer values.
+ */
+ enum class StatusInternal : int32_t {
+ NOT_PRESENT = static_cast<int32_t>(CameraDeviceStatus::NOT_PRESENT),
+ PRESENT = static_cast<int32_t>(CameraDeviceStatus::PRESENT),
+ ENUMERATING = static_cast<int32_t>(CameraDeviceStatus::ENUMERATING),
+ NOT_AVAILABLE = static_cast<int32_t>(hardware::ICameraServiceListener::STATUS_NOT_AVAILABLE),
+ UNKNOWN = static_cast<int32_t>(hardware::ICameraServiceListener::STATUS_UNKNOWN)
+ };
+
/**
* Container class for the state of each logical camera device, including: ID, status, and
* dependencies on other devices. The mapping of camera ID -> state saved in mCameraStates
@@ -420,6 +438,7 @@
*/
class CameraState {
public:
+
/**
* Make a new CameraState and set the ID, cost, and conflicting devices using the values
* returned in the HAL's camera_info struct for each device.
@@ -432,7 +451,7 @@
*
* This method acquires mStatusLock.
*/
- int32_t getStatus() const;
+ StatusInternal getStatus() const;
/**
* This function updates the status for this camera device, unless the given status
@@ -445,8 +464,9 @@
* This method aquires mStatusLock.
*/
template<class Func>
- void updateStatus(int32_t status, const String8& cameraId,
- std::initializer_list<int32_t> rejectSourceStates,
+ void updateStatus(StatusInternal status,
+ const String8& cameraId,
+ std::initializer_list<StatusInternal> rejectSourceStates,
Func onStatusUpdatedLocked);
/**
@@ -477,7 +497,7 @@
private:
const String8 mId;
- int32_t mStatus; // protected by mStatusLock
+ StatusInternal mStatus; // protected by mStatusLock
const int mCost;
std::set<String8> mConflicting;
mutable Mutex mStatusLock;
@@ -487,6 +507,9 @@
// Delay-load the Camera HAL module
virtual void onFirstRef();
+    // Enumerate all camera providers in the system
+ status_t enumerateProviders();
+
// Check if we can connect, before we acquire the service lock.
// The returned originalClientPid is the PID of the original process that wants to connect to
// camera.
@@ -541,11 +564,6 @@
std::set<userid_t> mAllowedUsers;
/**
- * Check camera capabilities, such as support for basic color operation
- */
- int checkCameraCapabilities(int id, camera_info info, int *latestStrangeCameraId);
-
- /**
* Get the camera state for a given camera id.
*
* This acquires mCameraStatesLock.
@@ -657,7 +675,10 @@
sp<MediaPlayer> mSoundPlayer[NUM_SOUNDS];
int mSoundRef; // reference count (release all MediaPlayer when 0)
- CameraModule* mModule;
+ // Basic flag on whether the camera subsystem is in a usable state
+ bool mInitialized;
+
+ sp<CameraProviderManager> mCameraProviderManager;
// Guarded by mStatusListenerMutex
std::vector<sp<hardware::ICameraServiceListener>> mListenerList;
@@ -671,9 +692,12 @@
* This method must be idempotent.
* This method acquires mStatusLock and mStatusListenerLock.
*/
- void updateStatus(int32_t status, const String8& cameraId,
- std::initializer_list<int32_t> rejectedSourceStates);
- void updateStatus(int32_t status, const String8& cameraId);
+ void updateStatus(StatusInternal status,
+ const String8& cameraId,
+ std::initializer_list<StatusInternal>
+ rejectedSourceStates);
+ void updateStatus(StatusInternal status,
+ const String8& cameraId);
// flashlight control
sp<CameraFlashlight> mFlashlight;
@@ -684,7 +708,8 @@
// guard mTorchUidMap
Mutex mTorchUidMapMutex;
// camera id -> torch status
- KeyedVector<String8, int32_t> mTorchStatusMap;
+ KeyedVector<String8, hardware::camera::common::V1_0::TorchModeStatus>
+ mTorchStatusMap;
// camera id -> torch client binder
// only store the last client that turns on each camera's torch mode
KeyedVector<String8, sp<IBinder>> mTorchClientMap;
@@ -697,23 +722,19 @@
// handle torch mode status change and invoke callbacks. mTorchStatusMutex
// should be locked.
void onTorchStatusChangedLocked(const String8& cameraId,
- int32_t newStatus);
+ hardware::camera::common::V1_0::TorchModeStatus newStatus);
// get a camera's torch status. mTorchStatusMutex should be locked.
status_t getTorchStatusLocked(const String8 &cameraId,
- int32_t *status) const;
+ hardware::camera::common::V1_0::TorchModeStatus *status) const;
// set a camera's torch status. mTorchStatusMutex should be locked.
status_t setTorchStatusLocked(const String8 &cameraId,
- int32_t status);
+ hardware::camera::common::V1_0::TorchModeStatus status);
// IBinder::DeathRecipient implementation
virtual void binderDied(const wp<IBinder> &who);
- // Helpers
-
- bool setUpVendorTags();
-
/**
* Initialize and cache the metadata used by the HAL1 shim for a given cameraId.
*
@@ -729,14 +750,6 @@
*/
binder::Status getLegacyParametersLazy(int cameraId, /*out*/CameraParameters* parameters);
- /**
- * Generate the CameraCharacteristics metadata required by the Camera2 API
- * from the available HAL1 CameraParameters and CameraInfo.
- *
- * Sets Status to a service-specific error on failure
- */
- binder::Status generateShimMetadata(int cameraId, /*out*/CameraMetadata* cameraInfo);
-
static int getCallingPid();
static int getCallingUid();
@@ -746,13 +759,8 @@
*/
static String8 getFormattedCurrentTime();
- /**
- * Get the camera eviction priority from the current process state given by ActivityManager.
- */
- static int getCameraPriorityFromProcState(int procState);
-
static binder::Status makeClient(const sp<CameraService>& cameraService,
- const sp<IInterface>& cameraCb, const String16& packageName, int cameraId,
+ const sp<IInterface>& cameraCb, const String16& packageName, const String8& cameraId,
int facing, int clientPid, uid_t clientUid, int servicePid, bool legacyMode,
int halVersion, int deviceVersion, apiLevel effectiveApiLevel,
/*out*/sp<BasicClient>* client);
@@ -760,227 +768,15 @@
status_t checkCameraAccess(const String16& opPackageName);
static String8 toString(std::set<userid_t> intSet);
+ static int32_t mapToInterface(hardware::camera::common::V1_0::TorchModeStatus status);
+ static StatusInternal mapToInternal(hardware::camera::common::V1_0::CameraDeviceStatus status);
+ static int32_t mapToInterface(StatusInternal status);
static sp<ICameraServiceProxy> getCameraServiceProxy();
static void pingCameraServiceProxy();
};
-template<class Func>
-void CameraService::CameraState::updateStatus(int32_t status,
- const String8& cameraId,
- std::initializer_list<int32_t> rejectSourceStates,
- Func onStatusUpdatedLocked) {
- Mutex::Autolock lock(mStatusLock);
- int32_t oldStatus = mStatus;
- mStatus = status;
-
- if (oldStatus == status) {
- return;
- }
-
- ALOGV("%s: Status has changed for camera ID %s from %#x to %#x", __FUNCTION__,
- cameraId.string(), oldStatus, status);
-
- if (oldStatus == hardware::ICameraServiceListener::STATUS_NOT_PRESENT &&
- (status != hardware::ICameraServiceListener::STATUS_PRESENT &&
- status != hardware::ICameraServiceListener::STATUS_ENUMERATING)) {
-
- ALOGW("%s: From NOT_PRESENT can only transition into PRESENT or ENUMERATING",
- __FUNCTION__);
- mStatus = oldStatus;
- return;
- }
-
- /**
- * Sometimes we want to conditionally do a transition.
- * For example if a client disconnects, we want to go to PRESENT
- * only if we weren't already in NOT_PRESENT or ENUMERATING.
- */
- for (auto& rejectStatus : rejectSourceStates) {
- if (oldStatus == rejectStatus) {
- ALOGV("%s: Rejecting status transition for Camera ID %s, since the source "
- "state was was in one of the bad states.", __FUNCTION__, cameraId.string());
- mStatus = oldStatus;
- return;
- }
- }
-
- onStatusUpdatedLocked(cameraId, status);
-}
-
-#define STATUS_ERROR(errorCode, errorString) \
- binder::Status::fromServiceSpecificError(errorCode, \
- String8::format("%s:%d: %s", __FUNCTION__, __LINE__, errorString))
-
-#define STATUS_ERROR_FMT(errorCode, errorString, ...) \
- binder::Status::fromServiceSpecificError(errorCode, \
- String8::format("%s:%d: " errorString, __FUNCTION__, __LINE__, __VA_ARGS__))
-
-
-template<class CALLBACK, class CLIENT>
-binder::Status CameraService::connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
- int halVersion, const String16& clientPackageName, int clientUid, int clientPid,
- apiLevel effectiveApiLevel, bool legacyMode, bool shimUpdateOnly,
- /*out*/sp<CLIENT>& device) {
- binder::Status ret = binder::Status::ok();
-
- String8 clientName8(clientPackageName);
-
- int originalClientPid = 0;
-
- ALOGI("CameraService::connect call (PID %d \"%s\", camera ID %s) for HAL version %s and "
- "Camera API version %d", clientPid, clientName8.string(), cameraId.string(),
- (halVersion == -1) ? "default" : std::to_string(halVersion).c_str(),
- static_cast<int>(effectiveApiLevel));
-
- sp<CLIENT> client = nullptr;
- {
- // Acquire mServiceLock and prevent other clients from connecting
- std::unique_ptr<AutoConditionLock> lock =
- AutoConditionLock::waitAndAcquire(mServiceLockWrapper, DEFAULT_CONNECT_TIMEOUT_NS);
-
- if (lock == nullptr) {
- ALOGE("CameraService::connect (PID %d) rejected (too many other clients connecting)."
- , clientPid);
- return STATUS_ERROR_FMT(ERROR_MAX_CAMERAS_IN_USE,
- "Cannot open camera %s for \"%s\" (PID %d): Too many other clients connecting",
- cameraId.string(), clientName8.string(), clientPid);
- }
-
- // Enforce client permissions and do basic sanity checks
- if(!(ret = validateConnectLocked(cameraId, clientName8,
- /*inout*/clientUid, /*inout*/clientPid, /*out*/originalClientPid)).isOk()) {
- return ret;
- }
-
- // Check the shim parameters after acquiring lock, if they have already been updated and
- // we were doing a shim update, return immediately
- if (shimUpdateOnly) {
- auto cameraState = getCameraState(cameraId);
- if (cameraState != nullptr) {
- if (!cameraState->getShimParams().isEmpty()) return ret;
- }
- }
-
- status_t err;
-
- sp<BasicClient> clientTmp = nullptr;
- std::shared_ptr<resource_policy::ClientDescriptor<String8, sp<BasicClient>>> partial;
- if ((err = handleEvictionsLocked(cameraId, originalClientPid, effectiveApiLevel,
- IInterface::asBinder(cameraCb), clientName8, /*out*/&clientTmp,
- /*out*/&partial)) != NO_ERROR) {
- switch (err) {
- case -ENODEV:
- return STATUS_ERROR_FMT(ERROR_DISCONNECTED,
- "No camera device with ID \"%s\" currently available",
- cameraId.string());
- case -EBUSY:
- return STATUS_ERROR_FMT(ERROR_CAMERA_IN_USE,
- "Higher-priority client using camera, ID \"%s\" currently unavailable",
- cameraId.string());
- default:
- return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
- "Unexpected error %s (%d) opening camera \"%s\"",
- strerror(-err), err, cameraId.string());
- }
- }
-
- if (clientTmp.get() != nullptr) {
- // Handle special case for API1 MediaRecorder where the existing client is returned
- device = static_cast<CLIENT*>(clientTmp.get());
- return ret;
- }
-
- // give flashlight a chance to close devices if necessary.
- mFlashlight->prepareDeviceOpen(cameraId);
-
- // TODO: Update getDeviceVersion + HAL interface to use strings for Camera IDs
- int id = cameraIdToInt(cameraId);
- if (id == -1) {
- ALOGE("%s: Invalid camera ID %s, cannot get device version from HAL.", __FUNCTION__,
- cameraId.string());
- return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
- "Bad camera ID \"%s\" passed to camera open", cameraId.string());
- }
-
- int facing = -1;
- int deviceVersion = getDeviceVersion(id, /*out*/&facing);
- sp<BasicClient> tmp = nullptr;
- if(!(ret = makeClient(this, cameraCb, clientPackageName, id, facing, clientPid,
- clientUid, getpid(), legacyMode, halVersion, deviceVersion, effectiveApiLevel,
- /*out*/&tmp)).isOk()) {
- return ret;
- }
- client = static_cast<CLIENT*>(tmp.get());
-
- LOG_ALWAYS_FATAL_IF(client.get() == nullptr, "%s: CameraService in invalid state",
- __FUNCTION__);
-
- if ((err = client->initialize(mModule)) != OK) {
- ALOGE("%s: Could not initialize client from HAL module.", __FUNCTION__);
- // Errors could be from the HAL module open call or from AppOpsManager
- switch(err) {
- case BAD_VALUE:
- return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
- "Illegal argument to HAL module for camera \"%s\"", cameraId.string());
- case -EBUSY:
- return STATUS_ERROR_FMT(ERROR_CAMERA_IN_USE,
- "Camera \"%s\" is already open", cameraId.string());
- case -EUSERS:
- return STATUS_ERROR_FMT(ERROR_MAX_CAMERAS_IN_USE,
- "Too many cameras already open, cannot open camera \"%s\"",
- cameraId.string());
- case PERMISSION_DENIED:
- return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
- "No permission to open camera \"%s\"", cameraId.string());
- case -EACCES:
- return STATUS_ERROR_FMT(ERROR_DISABLED,
- "Camera \"%s\" disabled by policy", cameraId.string());
- case -ENODEV:
- default:
- return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
- "Failed to initialize camera \"%s\": %s (%d)", cameraId.string(),
- strerror(-err), err);
- }
- }
-
- // Update shim paremeters for legacy clients
- if (effectiveApiLevel == API_1) {
- // Assume we have always received a Client subclass for API1
- sp<Client> shimClient = reinterpret_cast<Client*>(client.get());
- String8 rawParams = shimClient->getParameters();
- CameraParameters params(rawParams);
-
- auto cameraState = getCameraState(cameraId);
- if (cameraState != nullptr) {
- cameraState->setShimParams(params);
- } else {
- ALOGE("%s: Cannot update shim parameters for camera %s, no such device exists.",
- __FUNCTION__, cameraId.string());
- }
- }
-
- if (shimUpdateOnly) {
- // If only updating legacy shim parameters, immediately disconnect client
- mServiceLock.unlock();
- client->disconnect();
- mServiceLock.lock();
- } else {
- // Otherwise, add client to active clients list
- finishConnectLocked(client, partial);
- }
- } // lock is destroyed, allow further connect calls
-
- // Important: release the mutex here so the client can call back into the service from its
- // destructor (can be at the end of the call)
- device = client;
- return ret;
-}
-
-#undef STATUS_ERROR_FMT
-#undef STATUS_ERROR
-
} // namespace android
#endif
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index bcd62d6..a28518e 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -56,7 +56,8 @@
int servicePid,
bool legacyMode):
Camera2ClientBase(cameraService, cameraClient, clientPackageName,
- cameraId, cameraFacing, clientPid, clientUid, servicePid),
+ String8::format("%d", cameraId), cameraFacing,
+ clientPid, clientUid, servicePid),
mParameters(cameraId, cameraFacing)
{
ATRACE_CALL();
@@ -67,13 +68,18 @@
mLegacyMode = legacyMode;
}
-status_t Camera2Client::initialize(CameraModule *module)
+status_t Camera2Client::initialize(sp<CameraProviderManager> manager) {
+ return initializeImpl(manager);
+}
+
+template<typename TProviderPtr>
+status_t Camera2Client::initializeImpl(TProviderPtr providerPtr)
{
ATRACE_CALL();
ALOGV("%s: Initializing client for camera %d", __FUNCTION__, mCameraId);
status_t res;
- res = Camera2ClientBase::initialize(module);
+ res = Camera2ClientBase::initialize(providerPtr);
if (res != OK) {
return res;
}
@@ -993,7 +999,7 @@
}
status_t Camera2Client::startRecordingL(Parameters &params, bool restart) {
- status_t res;
+ status_t res = OK;
ALOGV("%s: state == %d, restart = %d", __FUNCTION__, params.state, restart);
@@ -1034,7 +1040,7 @@
}
if (!restart) {
- mCameraService->playSound(CameraService::SOUND_RECORDING_START);
+ sCameraService->playSound(CameraService::SOUND_RECORDING_START);
mStreamingProcessor->updateRecordingRequest(params);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to update recording request: %s (%d)",
@@ -1191,7 +1197,7 @@
return;
};
- mCameraService->playSound(CameraService::SOUND_RECORDING_STOP);
+ sCameraService->playSound(CameraService::SOUND_RECORDING_STOP);
// Remove recording stream because the video target may be abandoned soon.
res = stopStream();
@@ -1251,6 +1257,13 @@
ALOGW("%s: Not supported in buffer queue mode.", __FUNCTION__);
}
+void Camera2Client::releaseRecordingFrameHandleBatch(
+ const std::vector<native_handle_t*>& handles) {
+ (void)handles;
+ ATRACE_CALL();
+ ALOGW("%s: Not supported in buffer queue mode.", __FUNCTION__);
+}
+
status_t Camera2Client::autoFocus() {
ATRACE_CALL();
Mutex::Autolock icl(mBinderSerializationLock);
@@ -1621,7 +1634,7 @@
}
status_t Camera2Client::commandPlayRecordingSoundL() {
- mCameraService->playSound(CameraService::SOUND_RECORDING_START);
+ sCameraService->playSound(CameraService::SOUND_RECORDING_START);
return OK;
}
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index e2129f5..72315d4 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -72,6 +72,8 @@
virtual bool recordingEnabled();
virtual void releaseRecordingFrame(const sp<IMemory>& mem);
virtual void releaseRecordingFrameHandle(native_handle_t *handle);
+ virtual void releaseRecordingFrameHandleBatch(
+ const std::vector<native_handle_t*>& handles);
virtual status_t autoFocus();
virtual status_t cancelAutoFocus();
virtual status_t takePicture(int msgType);
@@ -98,7 +100,7 @@
virtual ~Camera2Client();
- status_t initialize(CameraModule *module);
+ virtual status_t initialize(sp<CameraProviderManager> manager) override;
virtual status_t dump(int fd, const Vector<String16>& args);
@@ -219,6 +221,9 @@
// Video snapshot jpeg size overriding helper function
status_t overrideVideoSnapshotSize(Parameters &params);
+
+ template<typename TProviderPtr>
+ status_t initializeImpl(TProviderPtr providerPtr);
};
}; // namespace android
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
index 266fb03..075c2e3 100644
--- a/services/camera/libcameraservice/api1/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "CameraClient"
//#define LOG_NDEBUG 0
+#include <cutils/atomic.h>
#include <cutils/properties.h>
#include <gui/Surface.h>
#include <media/hardware/HardwareAPI.h>
@@ -41,7 +42,8 @@
int clientPid, int clientUid,
int servicePid, bool legacyMode):
Client(cameraService, cameraClient, clientPackageName,
- cameraId, cameraFacing, clientPid, clientUid, servicePid)
+ String8::format("%d", cameraId), cameraFacing, clientPid,
+ clientUid, servicePid)
{
int callingPid = getCallingPid();
LOG1("CameraClient::CameraClient E (pid %d, id %d)", callingPid, cameraId);
@@ -60,7 +62,7 @@
LOG1("CameraClient::CameraClient X (pid %d, id %d)", callingPid, cameraId);
}
-status_t CameraClient::initialize(CameraModule *module) {
+status_t CameraClient::initialize(sp<CameraProviderManager> manager) {
int callingPid = getCallingPid();
status_t res;
@@ -76,7 +78,7 @@
snprintf(camera_device_name, sizeof(camera_device_name), "%d", mCameraId);
mHardware = new CameraHardwareInterface(camera_device_name);
- res = mHardware->initialize(module);
+ res = mHardware->initialize(manager);
if (res != OK) {
ALOGE("%s: Camera %d: unable to initialize device: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
@@ -87,6 +89,7 @@
mHardware->setCallbacks(notifyCallback,
dataCallback,
dataCallbackTimestamp,
+ handleCallbackTimestampBatch,
(void *)(uintptr_t)mCameraId);
// Enable zoom, error, focus, and metadata messages by default
@@ -252,7 +255,7 @@
// Turn off all messages.
disableMsgType(CAMERA_MSG_ALL_MSGS);
mHardware->stopPreview();
- mCameraService->updateProxyDeviceState(
+ sCameraService->updateProxyDeviceState(
ICameraServiceProxy::CAMERA_STATE_IDLE,
String8::format("%d", mCameraId));
mHardware->cancelPicture();
@@ -414,7 +417,7 @@
mHardware->setPreviewWindow(mPreviewWindow);
result = mHardware->startPreview();
if (result == NO_ERROR) {
- mCameraService->updateProxyDeviceState(
+ sCameraService->updateProxyDeviceState(
ICameraServiceProxy::CAMERA_STATE_ACTIVE,
String8::format("%d", mCameraId));
}
@@ -440,7 +443,7 @@
// start recording mode
enableMsgType(CAMERA_MSG_VIDEO_FRAME);
- mCameraService->playSound(CameraService::SOUND_RECORDING_START);
+ sCameraService->playSound(CameraService::SOUND_RECORDING_START);
result = mHardware->startRecording();
if (result != NO_ERROR) {
ALOGE("mHardware->startRecording() failed with status %d", result);
@@ -457,7 +460,7 @@
disableMsgType(CAMERA_MSG_PREVIEW_FRAME);
mHardware->stopPreview();
- mCameraService->updateProxyDeviceState(
+ sCameraService->updateProxyDeviceState(
ICameraServiceProxy::CAMERA_STATE_IDLE,
String8::format("%d", mCameraId));
mPreviewBuffer.clear();
@@ -466,14 +469,23 @@
// stop recording mode
void CameraClient::stopRecording() {
LOG1("stopRecording (pid %d)", getCallingPid());
- Mutex::Autolock lock(mLock);
- if (checkPidAndHardware() != NO_ERROR) return;
+ {
+ Mutex::Autolock lock(mLock);
+ if (checkPidAndHardware() != NO_ERROR) return;
- disableMsgType(CAMERA_MSG_VIDEO_FRAME);
- mHardware->stopRecording();
- mCameraService->playSound(CameraService::SOUND_RECORDING_STOP);
+ disableMsgType(CAMERA_MSG_VIDEO_FRAME);
+ mHardware->stopRecording();
+ sCameraService->playSound(CameraService::SOUND_RECORDING_STOP);
- mPreviewBuffer.clear();
+ mPreviewBuffer.clear();
+ }
+
+ {
+ Mutex::Autolock l(mAvailableCallbackBuffersLock);
+ if (!mAvailableCallbackBuffers.empty()) {
+ mAvailableCallbackBuffers.clear();
+ }
+ }
}
// release a recording frame
@@ -522,6 +534,50 @@
mHardware->releaseRecordingFrame(dataPtr);
}
+void CameraClient::releaseRecordingFrameHandleBatch(const std::vector<native_handle_t*>& handles) {
+ size_t n = handles.size();
+ std::vector<sp<IMemory>> frames;
+ frames.reserve(n);
+ bool error = false;
+ for (auto& handle : handles) {
+ sp<IMemory> dataPtr;
+ {
+ Mutex::Autolock l(mAvailableCallbackBuffersLock);
+ if (!mAvailableCallbackBuffers.empty()) {
+ dataPtr = mAvailableCallbackBuffers.back();
+ mAvailableCallbackBuffers.pop_back();
+ }
+ }
+
+ if (dataPtr == nullptr) {
+ ALOGE("%s: %d: No callback buffer available. Dropping frames.", __FUNCTION__,
+ __LINE__);
+ error = true;
+ break;
+ } else if (dataPtr->size() != sizeof(VideoNativeHandleMetadata)) {
+ ALOGE("%s: %d: Callback buffer must be VideoNativeHandleMetadata", __FUNCTION__,
+ __LINE__);
+ error = true;
+ break;
+ }
+
+ VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(dataPtr->pointer());
+ metadata->eType = kMetadataBufferTypeNativeHandleSource;
+ metadata->pHandle = handle;
+ frames.push_back(dataPtr);
+ }
+
+ if (error) {
+ for (auto& handle : handles) {
+ native_handle_close(handle);
+ native_handle_delete(handle);
+ }
+ } else {
+ mHardware->releaseRecordingFrameBatch(frames);
+ }
+ return;
+}
+
status_t CameraClient::setVideoBufferMode(int32_t videoBufferMode) {
LOG1("setVideoBufferMode: %d", videoBufferMode);
bool enableMetadataInBuffers = false;
@@ -697,7 +753,7 @@
}
return OK;
} else if (cmd == CAMERA_CMD_PLAY_RECORDING_SOUND) {
- mCameraService->playSound(CameraService::SOUND_RECORDING_START);
+ sCameraService->playSound(CameraService::SOUND_RECORDING_START);
} else if (cmd == CAMERA_CMD_SET_VIDEO_BUFFER_COUNT) {
// Silently ignore this command
return INVALID_OPERATION;
@@ -748,6 +804,16 @@
return false;
}
+sp<CameraClient> CameraClient::getClientFromCookie(void* user) {
+ String8 cameraId = String8::format("%d", (int)(intptr_t) user);
+ auto clientDescriptor = sCameraService->mActiveClientManager.get(cameraId);
+ if (clientDescriptor != nullptr) {
+ return sp<CameraClient>{
+ static_cast<CameraClient*>(clientDescriptor->getValue().get())};
+ }
+ return sp<CameraClient>{nullptr};
+}
+
// Callback messages can be dispatched to internal handlers or pass to our
// client's callback functions, depending on the message type.
//
@@ -767,7 +833,7 @@
int32_t ext2, void* user) {
LOG2("notifyCallback(%d)", msgType);
- sp<CameraClient> client = static_cast<CameraClient*>(getClientFromCookie(user).get());
+ sp<CameraClient> client = getClientFromCookie(user);
if (client.get() == nullptr) return;
if (!client->lockIfMessageWanted(msgType)) return;
@@ -787,7 +853,7 @@
const sp<IMemory>& dataPtr, camera_frame_metadata_t *metadata, void* user) {
LOG2("dataCallback(%d)", msgType);
- sp<CameraClient> client = static_cast<CameraClient*>(getClientFromCookie(user).get());
+ sp<CameraClient> client = getClientFromCookie(user);
if (client.get() == nullptr) return;
if (!client->lockIfMessageWanted(msgType)) return;
@@ -820,7 +886,7 @@
int32_t msgType, const sp<IMemory>& dataPtr, void* user) {
LOG2("dataCallbackTimestamp(%d)", msgType);
- sp<CameraClient> client = static_cast<CameraClient*>(getClientFromCookie(user).get());
+ sp<CameraClient> client = getClientFromCookie(user);
if (client.get() == nullptr) return;
if (!client->lockIfMessageWanted(msgType)) return;
@@ -834,10 +900,53 @@
client->handleGenericDataTimestamp(timestamp, msgType, dataPtr);
}
+void CameraClient::handleCallbackTimestampBatch(
+ int32_t msgType, const std::vector<HandleTimestampMessage>& msgs, void* user) {
+ LOG2("dataCallbackTimestampBatch");
+ sp<CameraClient> client = getClientFromCookie(user);
+ if (client.get() == nullptr) return;
+ if (!client->lockIfMessageWanted(msgType)) return;
+
+ sp<hardware::ICameraClient> c = client->mRemoteCallback;
+ client->mLock.unlock();
+ if (c != 0 && msgs.size() > 0) {
+ size_t n = msgs.size();
+ std::vector<nsecs_t> timestamps;
+ std::vector<native_handle_t*> handles;
+ timestamps.reserve(n);
+ handles.reserve(n);
+ for (auto& msg : msgs) {
+ native_handle_t* handle = nullptr;
+ if (msg.dataPtr->size() != sizeof(VideoNativeHandleMetadata)) {
+ ALOGE("%s: dataPtr does not contain VideoNativeHandleMetadata!", __FUNCTION__);
+ return;
+ }
+ VideoNativeHandleMetadata *metadata =
+ (VideoNativeHandleMetadata*)(msg.dataPtr->pointer());
+ if (metadata->eType == kMetadataBufferTypeNativeHandleSource) {
+ handle = metadata->pHandle;
+ }
+
+ if (handle == nullptr) {
+ ALOGE("%s: VideoNativeHandleMetadata type mismatch or null handle passed!",
+ __FUNCTION__);
+ return;
+ }
+ {
+ Mutex::Autolock l(client->mAvailableCallbackBuffersLock);
+ client->mAvailableCallbackBuffers.push_back(msg.dataPtr);
+ }
+ timestamps.push_back(msg.timestamp);
+ handles.push_back(handle);
+ }
+ c->recordingFrameHandleCallbackTimestampBatch(timestamps, handles);
+ }
+}
+
// snapshot taken callback
void CameraClient::handleShutter(void) {
if (mPlayShutterSound) {
- mCameraService->playSound(CameraService::SOUND_SHUTTER);
+ sCameraService->playSound(CameraService::SOUND_SHUTTER);
}
sp<hardware::ICameraClient> c = mRemoteCallback;
@@ -850,7 +959,7 @@
// Shutters only happen in response to takePicture, so mark device as
// idle now, until preview is restarted
- mCameraService->updateProxyDeviceState(
+ sCameraService->updateProxyDeviceState(
ICameraServiceProxy::CAMERA_STATE_IDLE,
String8::format("%d", mCameraId));
diff --git a/services/camera/libcameraservice/api1/CameraClient.h b/services/camera/libcameraservice/api1/CameraClient.h
index 4f46fc4..7f93fef 100644
--- a/services/camera/libcameraservice/api1/CameraClient.h
+++ b/services/camera/libcameraservice/api1/CameraClient.h
@@ -50,6 +50,8 @@
virtual bool recordingEnabled();
virtual void releaseRecordingFrame(const sp<IMemory>& mem);
virtual void releaseRecordingFrameHandle(native_handle_t *handle);
+ virtual void releaseRecordingFrameHandleBatch(
+ const std::vector<native_handle_t*>& handles);
virtual status_t autoFocus();
virtual status_t cancelAutoFocus();
virtual status_t takePicture(int msgType);
@@ -70,7 +72,7 @@
bool legacyMode = false);
~CameraClient();
- status_t initialize(CameraModule *module);
+ virtual status_t initialize(sp<CameraProviderManager> manager) override;
virtual status_t dump(int fd, const Vector<String16>& args);
@@ -98,11 +100,15 @@
// internal function used by sendCommand to enable/disable shutter sound.
status_t enableShutterSound(bool enable);
+ static sp<CameraClient> getClientFromCookie(void* user);
+
// these are static callback functions
static void notifyCallback(int32_t msgType, int32_t ext1, int32_t ext2, void* user);
static void dataCallback(int32_t msgType, const sp<IMemory>& dataPtr,
camera_frame_metadata_t *metadata, void* user);
static void dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr, void* user);
+ static void handleCallbackTimestampBatch(
+ int32_t msgType, const std::vector<HandleTimestampMessage>&, void* user);
// handlers for messages
void handleShutter(void);
void handlePreviewData(int32_t msgType, const sp<IMemory>& mem,
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
index 394eb4c..6e21126 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
@@ -41,11 +41,7 @@
{
SharedParameters::Lock l(client->getParameters());
- if (client->getCameraDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_2) {
- mUsePartialResult = (mNumPartialResults > 1);
- } else {
- mUsePartialResult = l.mParameters.quirks.partialResults;
- }
+ mUsePartialResult = (mNumPartialResults > 1);
// Initialize starting 3A state
m3aState.afTriggerId = l.mParameters.afTriggerCounter;
@@ -76,16 +72,7 @@
bool isPartialResult = false;
if (mUsePartialResult) {
- if (client->getCameraDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_2) {
- isPartialResult = frame.mResultExtras.partialResultCount < mNumPartialResults;
- } else {
- camera_metadata_entry_t entry;
- entry = frame.mMetadata.find(ANDROID_QUIRKS_PARTIAL_RESULT);
- if (entry.count > 0 &&
- entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
- isPartialResult = true;
- }
- }
+ isPartialResult = frame.mResultExtras.partialResultCount < mNumPartialResults;
}
if (!isPartialResult && processFaceDetect(frame.mMetadata, client) != OK) {
@@ -291,16 +278,8 @@
gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AWB_STATE,
&pendingState.awbState, frameNumber, cameraId);
- if (client->getCameraDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_2) {
- pendingState.afTriggerId = frame.mResultExtras.afTriggerId;
- pendingState.aeTriggerId = frame.mResultExtras.precaptureTriggerId;
- } else {
- gotAllStates &= updatePendingState<int32_t>(metadata,
- ANDROID_CONTROL_AF_TRIGGER_ID, &pendingState.afTriggerId, frameNumber, cameraId);
-
- gotAllStates &= updatePendingState<int32_t>(metadata,
- ANDROID_CONTROL_AE_PRECAPTURE_ID, &pendingState.aeTriggerId, frameNumber, cameraId);
- }
+ pendingState.afTriggerId = frame.mResultExtras.afTriggerId;
+ pendingState.aeTriggerId = frame.mResultExtras.precaptureTriggerId;
if (!gotAllStates) {
// If not all states are received, put the pending state to mPending3AStates.
@@ -367,9 +346,12 @@
entry = result.find(tag);
if (entry.count == 0) {
+ const camera_metadata *metaBuffer = result.getAndLock();
ALOGV("%s: Camera %d: No %s provided by HAL for frame %d in this result!",
__FUNCTION__, cameraId,
- get_camera_metadata_tag_name(tag), frameNumber);
+ get_local_camera_metadata_tag_name(tag, metaBuffer),
+ frameNumber);
+ result.unlock(metaBuffer);
return false;
} else {
switch(sizeof(Src)){
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index ae6abb7..1addcdd 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -798,16 +798,38 @@
exposureCompensationStep.data.r[0].denominator);
autoExposureLock = false;
- params.set(CameraParameters::KEY_AUTO_EXPOSURE_LOCK,
- CameraParameters::FALSE);
- params.set(CameraParameters::KEY_AUTO_EXPOSURE_LOCK_SUPPORTED,
- CameraParameters::TRUE);
+ autoExposureLockAvailable = false;
+ camera_metadata_ro_entry_t exposureLockAvailable =
+ staticInfo(ANDROID_CONTROL_AE_LOCK_AVAILABLE, 1, 1);
+ if ((0 < exposureLockAvailable.count) &&
+ (ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE ==
+ exposureLockAvailable.data.u8[0])) {
+ params.set(CameraParameters::KEY_AUTO_EXPOSURE_LOCK,
+ CameraParameters::FALSE);
+ params.set(CameraParameters::KEY_AUTO_EXPOSURE_LOCK_SUPPORTED,
+ CameraParameters::TRUE);
+ autoExposureLockAvailable = true;
+ } else {
+ params.set(CameraParameters::KEY_AUTO_EXPOSURE_LOCK_SUPPORTED,
+ CameraParameters::FALSE);
+ }
autoWhiteBalanceLock = false;
- params.set(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK,
- CameraParameters::FALSE);
- params.set(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK_SUPPORTED,
- CameraParameters::TRUE);
+ autoWhiteBalanceLockAvailable = false;
+ camera_metadata_ro_entry_t whitebalanceLockAvailable =
+ staticInfo(ANDROID_CONTROL_AWB_LOCK_AVAILABLE, 1, 1);
+ if ((0 < whitebalanceLockAvailable.count) &&
+ (ANDROID_CONTROL_AWB_LOCK_AVAILABLE_TRUE ==
+ whitebalanceLockAvailable.data.u8[0])) {
+ params.set(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK,
+ CameraParameters::FALSE);
+ params.set(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK_SUPPORTED,
+ CameraParameters::TRUE);
+ autoWhiteBalanceLockAvailable = true;
+ } else {
+ params.set(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK_SUPPORTED,
+ CameraParameters::FALSE);
+ }
meteringAreas.add(Parameters::Area(0, 0, 0, 0, 0));
params.set(CameraParameters::KEY_MAX_NUM_METERING_AREAS,
@@ -816,30 +838,37 @@
"(0,0,0,0,0)");
zoom = 0;
- params.set(CameraParameters::KEY_ZOOM, zoom);
- params.set(CameraParameters::KEY_MAX_ZOOM, NUM_ZOOM_STEPS - 1);
-
+ zoomAvailable = false;
camera_metadata_ro_entry_t maxDigitalZoom =
staticInfo(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM, /*minCount*/1, /*maxCount*/1);
if (!maxDigitalZoom.count) return NO_INIT;
- {
- String8 zoomRatios;
- float zoom = 1.f;
- float zoomIncrement = (maxDigitalZoom.data.f[0] - zoom) /
- (NUM_ZOOM_STEPS-1);
- bool addComma = false;
- for (size_t i=0; i < NUM_ZOOM_STEPS; i++) {
- if (addComma) zoomRatios += ",";
- addComma = true;
- zoomRatios += String8::format("%d", static_cast<int>(zoom * 100));
- zoom += zoomIncrement;
- }
- params.set(CameraParameters::KEY_ZOOM_RATIOS, zoomRatios);
- }
+ if (fabs(maxDigitalZoom.data.f[0] - 1.f) > 0.00001f) {
+ params.set(CameraParameters::KEY_ZOOM, zoom);
+ params.set(CameraParameters::KEY_MAX_ZOOM, NUM_ZOOM_STEPS - 1);
- params.set(CameraParameters::KEY_ZOOM_SUPPORTED,
- CameraParameters::TRUE);
+ {
+ String8 zoomRatios;
+ float zoom = 1.f;
+ float zoomIncrement = (maxDigitalZoom.data.f[0] - zoom) /
+ (NUM_ZOOM_STEPS-1);
+ bool addComma = false;
+ for (size_t i=0; i < NUM_ZOOM_STEPS; i++) {
+ if (addComma) zoomRatios += ",";
+ addComma = true;
+ zoomRatios += String8::format("%d", static_cast<int>(zoom * 100));
+ zoom += zoomIncrement;
+ }
+ params.set(CameraParameters::KEY_ZOOM_RATIOS, zoomRatios);
+ }
+
+ params.set(CameraParameters::KEY_ZOOM_SUPPORTED,
+ CameraParameters::TRUE);
+ zoomAvailable = true;
+ } else {
+ params.set(CameraParameters::KEY_ZOOM_SUPPORTED,
+ CameraParameters::FALSE);
+ }
params.set(CameraParameters::KEY_SMOOTH_ZOOM_SUPPORTED,
CameraParameters::FALSE);
@@ -911,13 +940,25 @@
CameraParameters::FALSE);
}
- char value[PROPERTY_VALUE_MAX];
- property_get("camera.disable_zsl_mode", value, "0");
- if (!strcmp(value,"1") || slowJpegMode) {
+ isZslReprocessPresent = false;
+ camera_metadata_ro_entry_t availableCapabilities =
+ staticInfo(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+ if (0 < availableCapabilities.count) {
+ const uint8_t *caps = availableCapabilities.data.u8;
+ for (size_t i = 0; i < availableCapabilities.count; i++) {
+ if (ANDROID_REQUEST_AVAILABLE_CAPABILITIES_PRIVATE_REPROCESSING ==
+ caps[i]) {
+ isZslReprocessPresent = true;
+ break;
+ }
+ }
+ }
+
+ if (slowJpegMode || property_get_bool("camera.disable_zsl_mode", false)) {
ALOGI("Camera %d: Disabling ZSL mode", cameraId);
allowZslMode = false;
} else {
- allowZslMode = true;
+ allowZslMode = isZslReprocessPresent;
}
ALOGI("%s: allowZslMode: %d slowJpegMode %d", __FUNCTION__, allowZslMode, slowJpegMode);
@@ -958,7 +999,7 @@
return NO_INIT;
}
- // Get supported preview fps ranges.
+ // Get supported preview fps ranges, up to default maximum.
Vector<Size> supportedPreviewSizes;
Vector<FpsRange> supportedPreviewFpsRanges;
const Size PREVIEW_SIZE_BOUND = { MAX_PREVIEW_WIDTH, MAX_PREVIEW_HEIGHT };
@@ -966,7 +1007,8 @@
if (res != OK) return res;
for (size_t i=0; i < availableFpsRanges.count; i += 2) {
if (!isFpsSupported(supportedPreviewSizes,
- HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, availableFpsRanges.data.i32[i+1])) {
+ HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, availableFpsRanges.data.i32[i+1]) ||
+ availableFpsRanges.data.i32[i+1] > MAX_DEFAULT_FPS) {
continue;
}
FpsRange fpsRange = {availableFpsRanges.data.i32[i], availableFpsRanges.data.i32[i+1]};
@@ -1186,11 +1228,14 @@
camera_metadata_ro_entry_t Parameters::staticInfo(uint32_t tag,
size_t minCount, size_t maxCount, bool required) const {
camera_metadata_ro_entry_t entry = info->find(tag);
+ const camera_metadata_t *metaBuffer = info->getAndLock();
if (CC_UNLIKELY( entry.count == 0 ) && required) {
- const char* tagSection = get_camera_metadata_section_name(tag);
+ const char* tagSection = get_local_camera_metadata_section_name(tag,
+ metaBuffer);
if (tagSection == NULL) tagSection = "<unknown>";
- const char* tagName = get_camera_metadata_tag_name(tag);
+ const char* tagName = get_local_camera_metadata_tag_name(tag,
+ metaBuffer);
if (tagName == NULL) tagName = "<unknown>";
ALOGE("Error finding static metadata entry '%s.%s' (%x)",
@@ -1198,14 +1243,17 @@
} else if (CC_UNLIKELY(
(minCount != 0 && entry.count < minCount) ||
(maxCount != 0 && entry.count > maxCount) ) ) {
- const char* tagSection = get_camera_metadata_section_name(tag);
+ const char* tagSection = get_local_camera_metadata_section_name(tag,
+ metaBuffer);
if (tagSection == NULL) tagSection = "<unknown>";
- const char* tagName = get_camera_metadata_tag_name(tag);
+ const char* tagName = get_local_camera_metadata_tag_name(tag,
+ metaBuffer);
if (tagName == NULL) tagName = "<unknown>";
ALOGE("Malformed static metadata entry '%s.%s' (%x):"
"Expected between %zu and %zu values, but got %zu values",
tagSection, tagName, tag, minCount, maxCount, entry.count);
}
+ info->unlock(metaBuffer);
return entry;
}
@@ -1389,30 +1437,43 @@
*
* Either way, in case of multiple ranges, break the tie by
* selecting the smaller range.
+ *
+ * Always select range within 30fps if one exists.
*/
// all ranges which have previewFps
Vector<Range> candidateRanges;
+ Vector<Range> candidateFastRanges;
for (i = 0; i < availableFrameRates.count; i+=2) {
Range r = {
availableFrameRates.data.i32[i],
availableFrameRates.data.i32[i+1]
};
+ if (!isFpsSupported(availablePreviewSizes,
+ HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, r.max)) {
+ continue;
+ }
if (r.min <= previewFps && previewFps <= r.max) {
- candidateRanges.push(r);
+ if (r.max <= MAX_DEFAULT_FPS) {
+ candidateRanges.push(r);
+ } else {
+ candidateFastRanges.push(r);
+ }
}
}
- if (candidateRanges.isEmpty()) {
+ if (candidateRanges.isEmpty() && candidateFastRanges.isEmpty()) {
ALOGE("%s: Requested preview frame rate %d is not supported",
__FUNCTION__, previewFps);
return BAD_VALUE;
}
- // most applicable range with targetFps
- Range bestRange = candidateRanges[0];
- for (i = 1; i < candidateRanges.size(); ++i) {
- Range r = candidateRanges[i];
+ // most applicable range with targetFps
+ Vector<Range>& ranges =
+ candidateRanges.size() > 0 ? candidateRanges : candidateFastRanges;
+ Range bestRange = ranges[0];
+ for (i = 1; i < ranges.size(); ++i) {
+ Range r = ranges[i];
// Find by largest minIndex in recording mode
if (validatedParams.recordingHint) {
if (r.min > bestRange.min) {
@@ -1818,13 +1879,25 @@
return BAD_VALUE;
}
- // AUTO_EXPOSURE_LOCK (always supported)
- validatedParams.autoExposureLock = boolFromString(
- newParams.get(CameraParameters::KEY_AUTO_EXPOSURE_LOCK));
+ if (autoExposureLockAvailable) {
+ validatedParams.autoExposureLock = boolFromString(
+ newParams.get(CameraParameters::KEY_AUTO_EXPOSURE_LOCK));
+ } else if (nullptr !=
+ newParams.get(CameraParameters::KEY_AUTO_EXPOSURE_LOCK)){
+ ALOGE("%s: Requested auto exposure lock is not supported",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
- // AUTO_WHITEBALANCE_LOCK (always supported)
- validatedParams.autoWhiteBalanceLock = boolFromString(
- newParams.get(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK));
+ if (autoWhiteBalanceLockAvailable) {
+ validatedParams.autoWhiteBalanceLock = boolFromString(
+ newParams.get(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK));
+ } else if (nullptr !=
+ newParams.get(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK)) {
+ ALOGE("%s: Requested auto whitebalance lock is not supported",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
// METERING_AREAS
size_t maxAeRegions = (size_t)staticInfo(ANDROID_CONTROL_MAX_REGIONS,
@@ -1844,12 +1917,14 @@
}
// ZOOM
- validatedParams.zoom = newParams.getInt(CameraParameters::KEY_ZOOM);
- if (validatedParams.zoom < 0
- || validatedParams.zoom >= (int)NUM_ZOOM_STEPS) {
- ALOGE("%s: Requested zoom level %d is not supported",
- __FUNCTION__, validatedParams.zoom);
- return BAD_VALUE;
+ if (zoomAvailable) {
+ validatedParams.zoom = newParams.getInt(CameraParameters::KEY_ZOOM);
+ if (validatedParams.zoom < 0
+ || validatedParams.zoom >= (int)NUM_ZOOM_STEPS) {
+ ALOGE("%s: Requested zoom level %d is not supported",
+ __FUNCTION__, validatedParams.zoom);
+ return BAD_VALUE;
+ }
}
// VIDEO_SIZE
@@ -1916,6 +1991,19 @@
paramsFlattened = newParams.flatten();
params = newParams;
+ slowJpegMode = false;
+ Size pictureSize = { pictureWidth, pictureHeight };
+ int64_t minFrameDurationNs = getJpegStreamMinFrameDurationNs(pictureSize);
+ if (previewFpsRange[1] > 1e9/minFrameDurationNs + FPS_MARGIN) {
+ slowJpegMode = true;
+ }
+ if (slowJpegMode || property_get_bool("camera.disable_zsl_mode", false)) {
+ allowZslMode = false;
+ } else {
+ allowZslMode = isZslReprocessPresent;
+ }
+ ALOGV("%s: allowZslMode: %d slowJpegMode %d", __FUNCTION__, allowZslMode, slowJpegMode);
+
return OK;
}
@@ -1970,10 +2058,12 @@
}
if (res != OK) return res;
- uint8_t reqWbLock = autoWhiteBalanceLock ?
- ANDROID_CONTROL_AWB_LOCK_ON : ANDROID_CONTROL_AWB_LOCK_OFF;
- res = request->update(ANDROID_CONTROL_AWB_LOCK,
- &reqWbLock, 1);
+ if (autoWhiteBalanceLockAvailable) {
+ uint8_t reqWbLock = autoWhiteBalanceLock ?
+ ANDROID_CONTROL_AWB_LOCK_ON : ANDROID_CONTROL_AWB_LOCK_OFF;
+ res = request->update(ANDROID_CONTROL_AWB_LOCK,
+ &reqWbLock, 1);
+ }
res = request->update(ANDROID_CONTROL_EFFECT_MODE,
&effectMode, 1);
@@ -2031,11 +2121,13 @@
&reqAeMode, 1);
if (res != OK) return res;
- uint8_t reqAeLock = autoExposureLock ?
- ANDROID_CONTROL_AE_LOCK_ON : ANDROID_CONTROL_AE_LOCK_OFF;
- res = request->update(ANDROID_CONTROL_AE_LOCK,
- &reqAeLock, 1);
- if (res != OK) return res;
+ if (autoExposureLockAvailable) {
+ uint8_t reqAeLock = autoExposureLock ?
+ ANDROID_CONTROL_AE_LOCK_ON : ANDROID_CONTROL_AE_LOCK_OFF;
+ res = request->update(ANDROID_CONTROL_AE_LOCK,
+ &reqAeLock, 1);
+ if (res != OK) return res;
+ }
res = request->update(ANDROID_CONTROL_AWB_MODE,
&wbMode, 1);
@@ -2770,32 +2862,14 @@
}
sizes->clear();
- if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
- Vector<StreamConfiguration> scs = getStreamConfigurations();
- for (size_t i=0; i < scs.size(); i++) {
- const StreamConfiguration &sc = scs[i];
- if (sc.isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT &&
- sc.format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED &&
- sc.width <= limit.width && sc.height <= limit.height) {
- Size sz = {sc.width, sc.height};
- sizes->push(sz);
- }
- }
- } else {
- const size_t SIZE_COUNT = sizeof(Size) / sizeof(int);
- camera_metadata_ro_entry_t availableProcessedSizes =
- staticInfo(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES, SIZE_COUNT);
- if (availableProcessedSizes.count < SIZE_COUNT) return BAD_VALUE;
-
- Size filteredSize;
- for (size_t i = 0; i < availableProcessedSizes.count; i += SIZE_COUNT) {
- filteredSize.width = availableProcessedSizes.data.i32[i];
- filteredSize.height = availableProcessedSizes.data.i32[i+1];
- // Need skip the preview sizes that are too large.
- if (filteredSize.width <= limit.width &&
- filteredSize.height <= limit.height) {
- sizes->push(filteredSize);
- }
+ Vector<StreamConfiguration> scs = getStreamConfigurations();
+ for (size_t i=0; i < scs.size(); i++) {
+ const StreamConfiguration &sc = scs[i];
+ if (sc.isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT &&
+ sc.format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED &&
+ sc.width <= limit.width && sc.height <= limit.height) {
+ Size sz = {sc.width, sc.height};
+ sizes->push(sz);
}
}
@@ -2850,10 +2924,6 @@
const int STREAM_HEIGHT_OFFSET = 2;
const int STREAM_IS_INPUT_OFFSET = 3;
Vector<StreamConfiguration> scs;
- if (mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) {
- ALOGE("StreamConfiguration is only valid after device HAL 3.2!");
- return scs;
- }
camera_metadata_ro_entry_t availableStreamConfigs =
staticInfo(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
@@ -2869,37 +2939,10 @@
}
int64_t Parameters::getJpegStreamMinFrameDurationNs(Parameters::Size size) {
- if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
- return getMinFrameDurationNs(size, HAL_PIXEL_FORMAT_BLOB);
- } else {
- Vector<Size> availableJpegSizes = getAvailableJpegSizes();
- size_t streamIdx = availableJpegSizes.size();
- for (size_t i = 0; i < availableJpegSizes.size(); i++) {
- if (availableJpegSizes[i].width == size.width &&
- availableJpegSizes[i].height == size.height) {
- streamIdx = i;
- break;
- }
- }
- if (streamIdx != availableJpegSizes.size()) {
- camera_metadata_ro_entry_t jpegMinDurations =
- staticInfo(ANDROID_SCALER_AVAILABLE_JPEG_MIN_DURATIONS);
- if (streamIdx < jpegMinDurations.count) {
- return jpegMinDurations.data.i64[streamIdx];
- }
- }
- }
- ALOGE("%s: cannot find min frame duration for jpeg size %dx%d",
- __FUNCTION__, size.width, size.height);
- return -1;
+ return getMinFrameDurationNs(size, HAL_PIXEL_FORMAT_BLOB);
}
int64_t Parameters::getMinFrameDurationNs(Parameters::Size size, int fmt) {
- if (mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) {
- ALOGE("Min frame duration for HAL 3.1 or lower is not supported");
- return -1;
- }
-
const int STREAM_DURATION_SIZE = 4;
const int STREAM_FORMAT_OFFSET = 0;
const int STREAM_WIDTH_OFFSET = 1;
@@ -2921,13 +2964,7 @@
}
bool Parameters::isFpsSupported(const Vector<Size> &sizes, int format, int32_t fps) {
- // Skip the check for older HAL version, as the min duration is not supported.
- if (mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) {
- return true;
- }
-
// Get min frame duration for each size and check if the given fps range can be supported.
- const int32_t FPS_MARGIN = 1;
for (size_t i = 0 ; i < sizes.size(); i++) {
int64_t minFrameDuration = getMinFrameDurationNs(sizes[i], format);
if (minFrameDuration <= 0) {
@@ -2947,48 +2984,29 @@
SortedVector<int32_t> Parameters::getAvailableOutputFormats() {
SortedVector<int32_t> outputFormats; // Non-duplicated output formats
- if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
- Vector<StreamConfiguration> scs = getStreamConfigurations();
- for (size_t i = 0; i < scs.size(); i++) {
- const StreamConfiguration &sc = scs[i];
- if (sc.isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT) {
- outputFormats.add(sc.format);
- }
- }
- } else {
- camera_metadata_ro_entry_t availableFormats = staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS);
- for (size_t i = 0; i < availableFormats.count; i++) {
- outputFormats.add(availableFormats.data.i32[i]);
+ Vector<StreamConfiguration> scs = getStreamConfigurations();
+ for (size_t i = 0; i < scs.size(); i++) {
+ const StreamConfiguration &sc = scs[i];
+ if (sc.isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT) {
+ outputFormats.add(sc.format);
}
}
+
return outputFormats;
}
Vector<Parameters::Size> Parameters::getAvailableJpegSizes() {
Vector<Parameters::Size> jpegSizes;
- if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
- Vector<StreamConfiguration> scs = getStreamConfigurations();
- for (size_t i = 0; i < scs.size(); i++) {
- const StreamConfiguration &sc = scs[i];
- if (sc.isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT &&
- sc.format == HAL_PIXEL_FORMAT_BLOB) {
- Size sz = {sc.width, sc.height};
- jpegSizes.add(sz);
- }
- }
- } else {
- const int JPEG_SIZE_ENTRY_COUNT = 2;
- const int WIDTH_OFFSET = 0;
- const int HEIGHT_OFFSET = 1;
- camera_metadata_ro_entry_t availableJpegSizes =
- staticInfo(ANDROID_SCALER_AVAILABLE_JPEG_SIZES);
- for (size_t i = 0; i < availableJpegSizes.count; i+= JPEG_SIZE_ENTRY_COUNT) {
- int width = availableJpegSizes.data.i32[i + WIDTH_OFFSET];
- int height = availableJpegSizes.data.i32[i + HEIGHT_OFFSET];
- Size sz = {width, height};
+ Vector<StreamConfiguration> scs = getStreamConfigurations();
+ for (size_t i = 0; i < scs.size(); i++) {
+ const StreamConfiguration &sc = scs[i];
+ if (sc.isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT &&
+ sc.format == HAL_PIXEL_FORMAT_BLOB) {
+ Size sz = {sc.width, sc.height};
jpegSizes.add(sz);
}
}
+
return jpegSizes;
}
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index c8ecbba..bea867a 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -122,7 +122,9 @@
int32_t exposureCompensation;
bool autoExposureLock;
+ bool autoExposureLockAvailable;
bool autoWhiteBalanceLock;
+ bool autoWhiteBalanceLockAvailable;
// 3A region types, for use with ANDROID_CONTROL_MAX_REGIONS
enum region_t {
@@ -135,6 +137,7 @@
Vector<Area> meteringAreas;
int zoom;
+ bool zoomAvailable;
int videoWidth, videoHeight, videoFormat;
android_dataspace videoDataSpace;
@@ -170,6 +173,8 @@
// Whether the jpeg stream is slower than 30FPS and can slow down preview.
// When slowJpegMode is true, allowZslMode must be false to avoid slowing down preview.
bool slowJpegMode;
+ // Whether ZSL reprocess is supported by the device.
+ bool isZslReprocessPresent;
// Overall camera state
enum State {
@@ -196,6 +201,10 @@
static const CONSTEXPR float ASPECT_RATIO_TOLERANCE = 0.001;
// Threshold for slow jpeg mode
static const int64_t kSlowJpegModeThreshold = 33400000LL; // 33.4 ms
+ // Margin for checking FPS
+ static const int32_t FPS_MARGIN = 1;
+ // Max FPS for default parameters
+ static const int32_t MAX_DEFAULT_FPS = 30;
// Full static camera info, object owned by someone else, such as
// Camera2Device.
diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
index bf92a2b..d79e430 100644
--- a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
@@ -114,16 +114,11 @@
}
// Use CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG for ZSL streaming case.
- if (client->getCameraDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_0) {
- if (params.useZeroShutterLag() && !params.recordingHint) {
- res = device->createDefaultRequest(CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG,
- &mPreviewRequest);
- } else {
- res = device->createDefaultRequest(CAMERA3_TEMPLATE_PREVIEW,
- &mPreviewRequest);
- }
+ if (params.useZeroShutterLag() && !params.recordingHint) {
+ res = device->createDefaultRequest(
+ CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG, &mPreviewRequest);
} else {
- res = device->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW,
+ res = device->createDefaultRequest(CAMERA3_TEMPLATE_PREVIEW,
&mPreviewRequest);
}
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
index b127472..9bc31b9 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
@@ -37,9 +37,91 @@
#include "api1/client2/ZslProcessor.h"
#include "device3/Camera3Device.h"
+typedef android::RingBufferConsumer::PinnedBufferItem PinnedBufferItem;
+
namespace android {
namespace camera2 {
+namespace {
+struct TimestampFinder : public RingBufferConsumer::RingBufferComparator {
+ typedef RingBufferConsumer::BufferInfo BufferInfo;
+
+ enum {
+ SELECT_I1 = -1,
+ SELECT_I2 = 1,
+ SELECT_NEITHER = 0,
+ };
+
+ explicit TimestampFinder(nsecs_t timestamp) : mTimestamp(timestamp) {}
+ ~TimestampFinder() {}
+
+ template <typename T>
+ static void swap(T& a, T& b) {
+ T tmp = a;
+ a = b;
+ b = tmp;
+ }
+
+ /**
+ * Try to find the best candidate for a ZSL buffer.
+ * Match priority from best to worst:
+ * 1) Timestamps match.
+ * 2) Timestamp is closest to the needle (and lower).
+ * 3) Timestamp is closest to the needle (and higher).
+ *
+ */
+ virtual int compare(const BufferInfo *i1,
+ const BufferInfo *i2) const {
+ // Try to select non-null object first.
+ if (i1 == NULL) {
+ return SELECT_I2;
+ } else if (i2 == NULL) {
+ return SELECT_I1;
+ }
+
+ // Best result: timestamp is identical
+ if (i1->mTimestamp == mTimestamp) {
+ return SELECT_I1;
+ } else if (i2->mTimestamp == mTimestamp) {
+ return SELECT_I2;
+ }
+
+ const BufferInfo* infoPtrs[2] = {
+ i1,
+ i2
+ };
+ int infoSelectors[2] = {
+ SELECT_I1,
+ SELECT_I2
+ };
+
+ // Order i1,i2 so that always i1.timestamp < i2.timestamp
+ if (i1->mTimestamp > i2->mTimestamp) {
+ swap(infoPtrs[0], infoPtrs[1]);
+ swap(infoSelectors[0], infoSelectors[1]);
+ }
+
+ // Second best: closest (lower) timestamp
+ if (infoPtrs[1]->mTimestamp < mTimestamp) {
+ return infoSelectors[1];
+ } else if (infoPtrs[0]->mTimestamp < mTimestamp) {
+ return infoSelectors[0];
+ }
+
+ // Worst: closest (higher) timestamp
+ return infoSelectors[0];
+
+ /**
+ * The above cases should cover all the possibilities,
+ * and we get an 'empty' result only if the ring buffer
+ * was empty itself
+ */
+ }
+
+ const nsecs_t mTimestamp;
+}; // struct TimestampFinder
+} // namespace anonymous
+
ZslProcessor::ZslProcessor(
sp<Camera2Client> client,
wp<CaptureSequencer> sequencer):
@@ -50,8 +132,14 @@
mSequencer(sequencer),
mId(client->getCameraId()),
mZslStreamId(NO_STREAM),
+ mInputStreamId(NO_STREAM),
mFrameListHead(0),
- mHasFocuser(false) {
+ mHasFocuser(false),
+ mInputBuffer(nullptr),
+ mProducer(nullptr),
+ mInputProducer(nullptr),
+ mInputProducerSlot(-1),
+ mBuffersToDetach(0) {
// Initialize buffer queue and frame list based on pipeline max depth.
size_t pipelineMaxDepth = kDefaultMaxPipelineDepth;
if (client != 0) {
@@ -83,7 +171,6 @@
mFrameListDepth = pipelineMaxDepth;
mBufferQueueDepth = mFrameListDepth + 1;
-
mZslQueue.insertAt(0, mBufferQueueDepth);
mFrameList.insertAt(0, mFrameListDepth);
sp<CaptureSequencer> captureSequencer = mSequencer.promote();
@@ -144,7 +231,7 @@
return INVALID_OPERATION;
}
- if (mZslStreamId != NO_STREAM) {
+ if ((mZslStreamId != NO_STREAM) || (mInputStreamId != NO_STREAM)) {
// Check if stream parameters have to change
uint32_t currentWidth, currentHeight;
res = device->getStreamInfo(mZslStreamId,
@@ -157,21 +244,57 @@
}
if (currentWidth != (uint32_t)params.fastInfo.arrayWidth ||
currentHeight != (uint32_t)params.fastInfo.arrayHeight) {
- ALOGV("%s: Camera %d: Deleting stream %d since the buffer "
- "dimensions changed",
- __FUNCTION__, client->getCameraId(), mZslStreamId);
- res = device->deleteStream(mZslStreamId);
- if (res == -EBUSY) {
- ALOGV("%s: Camera %d: Device is busy, call updateStream again "
- " after it becomes idle", __FUNCTION__, mId);
- return res;
- } else if(res != OK) {
- ALOGE("%s: Camera %d: Unable to delete old output stream "
- "for ZSL: %s (%d)", __FUNCTION__,
- client->getCameraId(), strerror(-res), res);
- return res;
+ if (mZslStreamId != NO_STREAM) {
+ ALOGV("%s: Camera %d: Deleting stream %d since the buffer "
+ "dimensions changed",
+ __FUNCTION__, client->getCameraId(), mZslStreamId);
+ res = device->deleteStream(mZslStreamId);
+ if (res == -EBUSY) {
+ ALOGV("%s: Camera %d: Device is busy, call updateStream again "
+ " after it becomes idle", __FUNCTION__, mId);
+ return res;
+ } else if(res != OK) {
+ ALOGE("%s: Camera %d: Unable to delete old output stream "
+ "for ZSL: %s (%d)", __FUNCTION__,
+ client->getCameraId(), strerror(-res), res);
+ return res;
+ }
+ mZslStreamId = NO_STREAM;
}
- mZslStreamId = NO_STREAM;
+
+ if (mInputStreamId != NO_STREAM) {
+ ALOGV("%s: Camera %d: Deleting stream %d since the buffer "
+ "dimensions changed",
+ __FUNCTION__, client->getCameraId(), mInputStreamId);
+ res = device->deleteStream(mInputStreamId);
+ if (res == -EBUSY) {
+ ALOGV("%s: Camera %d: Device is busy, call updateStream again "
+ " after it becomes idle", __FUNCTION__, mId);
+ return res;
+ } else if(res != OK) {
+ ALOGE("%s: Camera %d: Unable to delete old output stream "
+ "for ZSL: %s (%d)", __FUNCTION__,
+ client->getCameraId(), strerror(-res), res);
+ return res;
+ }
+ mInputStreamId = NO_STREAM;
+ }
+ if (nullptr != mInputProducer.get()) {
+ mInputProducer->disconnect(NATIVE_WINDOW_API_CPU);
+ mInputProducer.clear();
+ }
+ }
+ }
+
+ if (mInputStreamId == NO_STREAM) {
+ res = device->createInputStream(params.fastInfo.arrayWidth,
+ params.fastInfo.arrayHeight, HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
+ &mInputStreamId);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't create input stream: "
+ "%s (%d)", __FUNCTION__, client->getCameraId(),
+ strerror(-res), res);
+ return res;
}
}
@@ -179,21 +302,23 @@
// Create stream for HAL production
// TODO: Sort out better way to select resolution for ZSL
- // Note that format specified internally in Camera3ZslStream
- res = device->createZslStream(
- params.fastInfo.arrayWidth, params.fastInfo.arrayHeight,
- mBufferQueueDepth,
- &mZslStreamId,
- &mZslStream);
+ sp<IGraphicBufferProducer> producer;
+ sp<IGraphicBufferConsumer> consumer;
+ BufferQueue::createBufferQueue(&producer, &consumer);
+ mProducer = new RingBufferConsumer(consumer, GRALLOC_USAGE_HW_CAMERA_ZSL,
+ mBufferQueueDepth);
+ mProducer->setName(String8("Camera2-ZslRingBufferConsumer"));
+ sp<Surface> outSurface = new Surface(producer);
+
+ res = device->createStream(outSurface, params.fastInfo.arrayWidth,
+ params.fastInfo.arrayHeight, HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
+ HAL_DATASPACE_UNKNOWN, CAMERA3_STREAM_ROTATION_0, &mZslStreamId);
if (res != OK) {
ALOGE("%s: Camera %d: Can't create ZSL stream: "
"%s (%d)", __FUNCTION__, client->getCameraId(),
strerror(-res), res);
return res;
}
-
- // Only add the camera3 buffer listener when the stream is created.
- mZslStream->addBufferListener(this);
}
client->registerFrameListener(Camera2Client::kPreviewRequestIdStart,
@@ -207,23 +332,27 @@
status_t ZslProcessor::deleteStream() {
ATRACE_CALL();
status_t res;
+ sp<Camera3Device> device = nullptr;
+ sp<Camera2Client> client = nullptr;
Mutex::Autolock l(mInputMutex);
- if (mZslStreamId != NO_STREAM) {
- sp<Camera2Client> client = mClient.promote();
+ if ((mZslStreamId != NO_STREAM) || (mInputStreamId != NO_STREAM)) {
+ client = mClient.promote();
if (client == 0) {
ALOGE("%s: Camera %d: Client does not exist", __FUNCTION__, mId);
return INVALID_OPERATION;
}
- sp<Camera3Device> device =
+ device =
reinterpret_cast<Camera3Device*>(client->getCameraDevice().get());
if (device == 0) {
ALOGE("%s: Camera %d: Device does not exist", __FUNCTION__, mId);
return INVALID_OPERATION;
}
+ }
+ if (mZslStreamId != NO_STREAM) {
res = device->deleteStream(mZslStreamId);
if (res != OK) {
ALOGE("%s: Camera %d: Cannot delete ZSL output stream %d: "
@@ -234,6 +363,23 @@
mZslStreamId = NO_STREAM;
}
+ if (mInputStreamId != NO_STREAM) {
+ res = device->deleteStream(mInputStreamId);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Cannot delete input stream %d: "
+ "%s (%d)", __FUNCTION__, client->getCameraId(),
+ mInputStreamId, strerror(-res), res);
+ return res;
+ }
+
+ mInputStreamId = NO_STREAM;
+ }
+
+ if (nullptr != mInputProducer.get()) {
+ mInputProducer->disconnect(NATIVE_WINDOW_API_CPU);
+ mInputProducer.clear();
+ }
+
return OK;
}
@@ -282,6 +428,50 @@
return OK;
}
+void ZslProcessor::notifyInputReleased() {
+ Mutex::Autolock l(mInputMutex);
+
+ mBuffersToDetach++;
+ mBuffersToDetachSignal.signal();
+}
+
+void ZslProcessor::doNotifyInputReleasedLocked() {
+ assert(nullptr != mInputBuffer.get());
+ assert(nullptr != mInputProducer.get());
+
+ sp<GraphicBuffer> gb;
+ sp<Fence> fence;
+ auto rc = mInputProducer->detachNextBuffer(&gb, &fence);
+ if (NO_ERROR != rc) {
+ ALOGE("%s: Failed to detach buffer from input producer: %d",
+ __FUNCTION__, rc);
+ return;
+ }
+
+ BufferItem &item = mInputBuffer->getBufferItem();
+ sp<GraphicBuffer> inputBuffer = item.mGraphicBuffer;
+ if (gb->handle != inputBuffer->handle) {
+ ALOGE("%s: Input mismatch, expected buffer %p received %p", __FUNCTION__,
+ inputBuffer->handle, gb->handle);
+ return;
+ }
+
+ mInputBuffer.clear();
+ ALOGV("%s: Memory optimization, clearing ZSL queue",
+ __FUNCTION__);
+ clearZslResultQueueLocked();
+
+ // Required so we accept more ZSL requests
+ mState = RUNNING;
+}
+
+void ZslProcessor::InputProducerListener::onBufferReleased() {
+ sp<ZslProcessor> parent = mParent.promote();
+ if (nullptr != parent.get()) {
+ parent->notifyInputReleased();
+ }
+}
+
status_t ZslProcessor::pushToReprocess(int32_t requestId) {
ALOGV("%s: Send in reprocess request with id %d",
__FUNCTION__, requestId);
@@ -302,15 +492,38 @@
nsecs_t candidateTimestamp = getCandidateTimestampLocked(&metadataIdx);
if (candidateTimestamp == -1) {
- ALOGE("%s: Could not find good candidate for ZSL reprocessing",
+ ALOGV("%s: Could not find good candidate for ZSL reprocessing",
__FUNCTION__);
return NOT_ENOUGH_DATA;
+ } else {
+ ALOGV("%s: Found good ZSL candidate idx: %u",
+ __FUNCTION__, (unsigned int) metadataIdx);
}
- res = mZslStream->enqueueInputBufferByTimestamp(candidateTimestamp,
- /*actualTimestamp*/NULL);
+ if (nullptr == mInputProducer.get()) {
+ res = client->getCameraDevice()->getInputBufferProducer(
+ &mInputProducer);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to retrieve input producer: "
+ "%s (%d)", __FUNCTION__, client->getCameraId(),
+ strerror(-res), res);
+ return res;
+ }
- if (res == mZslStream->NO_BUFFER_AVAILABLE) {
+ IGraphicBufferProducer::QueueBufferOutput output;
+ res = mInputProducer->connect(new InputProducerListener(this),
+ NATIVE_WINDOW_API_CPU, false, &output);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to connect to input producer: "
+ "%s (%d)", __FUNCTION__, client->getCameraId(),
+ strerror(-res), res);
+ return res;
+ }
+ }
+
+ res = enqueueInputBufferByTimestamp(candidateTimestamp,
+ /*actualTimestamp*/NULL);
+ if (res == NO_BUFFER_AVAILABLE) {
ALOGV("%s: No ZSL buffers yet", __FUNCTION__);
return NOT_ENOUGH_DATA;
} else if (res != OK) {
@@ -348,7 +561,7 @@
}
int32_t inputStreams[1] =
- { mZslStreamId };
+ { mInputStreamId };
res = request.update(ANDROID_REQUEST_INPUT_STREAMS,
inputStreams, 1);
if (res != OK) {
@@ -428,6 +641,70 @@
return OK;
}
+status_t ZslProcessor::enqueueInputBufferByTimestamp(
+ nsecs_t timestamp,
+ nsecs_t* actualTimestamp) {
+
+ TimestampFinder timestampFinder = TimestampFinder(timestamp);
+
+ mInputBuffer = mProducer->pinSelectedBuffer(timestampFinder,
+ /*waitForFence*/false);
+
+ if (nullptr == mInputBuffer.get()) {
+ ALOGE("%s: No ZSL buffers were available yet", __FUNCTION__);
+ return NO_BUFFER_AVAILABLE;
+ }
+
+ nsecs_t actual = mInputBuffer->getBufferItem().mTimestamp;
+
+ if (actual != timestamp) {
+ // TODO: This is problematic, the metadata queue timestamp should
+ // usually have a corresponding ZSL buffer with the same timestamp.
+ // If this is not the case, then it is possible that we will use
+ // a ZSL buffer from a different request, which can result in
+ // side effects during the reprocess pass.
+ ALOGW("%s: ZSL buffer candidate search didn't find an exact match --"
+ " requested timestamp = %" PRId64 ", actual timestamp = %" PRId64,
+ __FUNCTION__, timestamp, actual);
+ }
+
+ if (nullptr != actualTimestamp) {
+ *actualTimestamp = actual;
+ }
+
+ BufferItem &item = mInputBuffer->getBufferItem();
+ auto rc = mInputProducer->attachBuffer(&mInputProducerSlot,
+ item.mGraphicBuffer);
+ if (OK != rc) {
+ ALOGE("%s: Failed to attach input ZSL buffer to producer: %d",
+ __FUNCTION__, rc);
+ return rc;
+ }
+
+ IGraphicBufferProducer::QueueBufferOutput output;
+ IGraphicBufferProducer::QueueBufferInput input(item.mTimestamp,
+ item.mIsAutoTimestamp, item.mDataSpace, item.mCrop,
+ item.mScalingMode, item.mTransform, item.mFence);
+ rc = mInputProducer->queueBuffer(mInputProducerSlot, input, &output);
+ if (OK != rc) {
+ ALOGE("%s: Failed to queue ZSL buffer to producer: %d",
+ __FUNCTION__, rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+status_t ZslProcessor::clearInputRingBufferLocked(nsecs_t* latestTimestamp) {
+
+ if (nullptr != latestTimestamp) {
+ *latestTimestamp = mProducer->getLatestTimestamp();
+ }
+ mInputBuffer.clear();
+
+ return mProducer->clear();
+}
+
status_t ZslProcessor::clearZslQueue() {
Mutex::Autolock l(mInputMutex);
// If in middle of capture, can't clear out queue
@@ -437,10 +714,10 @@
}
status_t ZslProcessor::clearZslQueueLocked() {
- if (mZslStream != 0) {
+ if (NO_STREAM != mZslStreamId) {
// clear result metadata list first.
clearZslResultQueueLocked();
- return mZslStream->clearInputRingBuffer(&mLatestClearedBufferTimestamp);
+ return clearInputRingBufferLocked(&mLatestClearedBufferTimestamp);
}
return OK;
}
@@ -465,9 +742,18 @@
}
bool ZslProcessor::threadLoop() {
- // TODO: remove dependency on thread. For now, shut thread down right
- // away.
- return false;
+ Mutex::Autolock l(mInputMutex);
+
+ if (mBuffersToDetach == 0) {
+ status_t res = mBuffersToDetachSignal.waitRelative(mInputMutex, kWaitDuration);
+ if (res == TIMED_OUT) return true;
+ }
+ while (mBuffersToDetach > 0) {
+ doNotifyInputReleasedLocked();
+ mBuffersToDetach--;
+ }
+
+ return true;
}
void ZslProcessor::dumpZslQueue(int fd) const {
@@ -630,46 +916,5 @@
return minTimestamp;
}
-void ZslProcessor::onBufferAcquired(const BufferInfo& /*bufferInfo*/) {
- // Intentionally left empty
- // Although theoretically we could use this to get better dump info
-}
-
-void ZslProcessor::onBufferReleased(const BufferInfo& bufferInfo) {
-
- // ignore output buffers
- if (bufferInfo.mOutput) {
- return;
- }
-
- // Lock mutex only once we know this is an input buffer returned to avoid
- // potential deadlock
- Mutex::Autolock l(mInputMutex);
- // TODO: Verify that the buffer is in our queue by looking at timestamp
- // theoretically unnecessary unless we change the following assumptions:
- // -- only 1 buffer reprocessed at a time (which is the case now)
-
- // Erase entire ZSL queue since we've now completed the capture and preview
- // is stopped.
- //
- // We need to guarantee that if we do two back-to-back captures,
- // the second won't use a buffer that's older/the same as the first, which
- // is theoretically possible if we don't clear out the queue and the
- // selection criteria is something like 'newest'. Clearing out the result
- // metadata queue on a completed capture ensures we'll only use new timestamp.
- // Calling clearZslQueueLocked is a guaranteed deadlock because this callback
- // holds the Camera3Stream internal lock (mLock), and clearZslQueueLocked requires
- // to hold the same lock.
- // TODO: need figure out a way to clear the Zsl buffer queue properly. Right now
- // it is safe not to do so, as back to back ZSL capture requires stop and start
- // preview, which will flush ZSL queue automatically.
- ALOGV("%s: Memory optimization, clearing ZSL queue",
- __FUNCTION__);
- clearZslResultQueueLocked();
-
- // Required so we accept more ZSL requests
- mState = RUNNING;
-}
-
}; // namespace camera2
}; // namespace android
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.h b/services/camera/libcameraservice/api1/client2/ZslProcessor.h
index 86c06c6..1db2403 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.h
@@ -24,10 +24,11 @@
#include <utils/Condition.h>
#include <gui/BufferItem.h>
#include <gui/BufferItemConsumer.h>
+#include <gui/RingBufferConsumer.h>
+#include <gui/IProducerListener.h>
#include <camera/CameraMetadata.h>
#include "api1/client2/FrameProcessor.h"
-#include "device3/Camera3ZslStream.h"
namespace android {
@@ -42,7 +43,6 @@
* ZSL queue processing for HALv3.0 or newer
*/
class ZslProcessor :
- public camera3::Camera3StreamBufferListener,
virtual public Thread,
virtual public FrameProcessor::FilteredListener {
public:
@@ -81,19 +81,18 @@
void dump(int fd, const Vector<String16>& args) const;
- protected:
- /**
- **********************************************
- * Camera3StreamBufferListener implementation *
- **********************************************
- */
- typedef camera3::Camera3StreamBufferListener::BufferInfo BufferInfo;
- // Buffer was acquired by the HAL
- virtual void onBufferAcquired(const BufferInfo& bufferInfo);
- // Buffer was released by the HAL
- virtual void onBufferReleased(const BufferInfo& bufferInfo);
-
private:
+
+ class InputProducerListener : public BnProducerListener {
+ public:
+ InputProducerListener(wp<ZslProcessor> parent) : mParent(parent) {}
+ virtual void onBufferReleased();
+ virtual bool needsReleaseNotify() { return true; }
+
+ private:
+ wp<ZslProcessor> mParent;
+ };
+
static const nsecs_t kWaitDuration = 10000000; // 10 ms
nsecs_t mLatestClearedBufferTimestamp;
@@ -102,6 +101,8 @@
LOCKED
} mState;
+ enum { NO_BUFFER_AVAILABLE = BufferQueue::NO_BUFFER_AVAILABLE };
+
wp<Camera2Client> mClient;
wp<CaptureSequencer> mSequencer;
@@ -114,7 +115,7 @@
};
int mZslStreamId;
- sp<camera3::Camera3ZslStream> mZslStream;
+ int mInputStreamId;
struct ZslPair {
BufferItem buffer;
@@ -135,6 +136,15 @@
bool mHasFocuser;
+ // Input buffer queued into HAL
+ sp<RingBufferConsumer::PinnedBufferItem> mInputBuffer;
+ sp<RingBufferConsumer> mProducer;
+ sp<IGraphicBufferProducer> mInputProducer;
+ int mInputProducerSlot;
+
+ Condition mBuffersToDetachSignal;
+ int mBuffersToDetach;
+
virtual bool threadLoop();
status_t clearZslQueueLocked();
@@ -145,6 +155,12 @@
nsecs_t getCandidateTimestampLocked(size_t* metadataIdx) const;
+ status_t enqueueInputBufferByTimestamp( nsecs_t timestamp,
+ nsecs_t* actualTimestamp);
+ status_t clearInputRingBufferLocked(nsecs_t* latestTimestamp);
+ void notifyInputReleased();
+ void doNotifyInputReleasedLocked();
+
bool isFixedFocusMode(uint8_t afMode) const;
// Update the post-processing metadata with the default still capture request template
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index b4f8e21..0429e7f 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -46,7 +46,7 @@
const sp<CameraService>& cameraService,
const sp<hardware::camera2::ICameraDeviceCallbacks>& remoteCallback,
const String16& clientPackageName,
- int cameraId,
+ const String8& cameraId,
int cameraFacing,
int clientPid,
uid_t clientUid,
@@ -67,7 +67,7 @@
CameraDeviceClient::CameraDeviceClient(const sp<CameraService>& cameraService,
const sp<hardware::camera2::ICameraDeviceCallbacks>& remoteCallback,
const String16& clientPackageName,
- int cameraId,
+ const String8& cameraId,
int cameraFacing,
int clientPid,
uid_t clientUid,
@@ -79,22 +79,26 @@
mRequestIdCounter(0) {
ATRACE_CALL();
- ALOGI("CameraDeviceClient %d: Opened", cameraId);
+ ALOGI("CameraDeviceClient %s: Opened", cameraId.string());
}
-status_t CameraDeviceClient::initialize(CameraModule *module)
-{
+status_t CameraDeviceClient::initialize(sp<CameraProviderManager> manager) {
+ return initializeImpl(manager);
+}
+
+template<typename TProviderPtr>
+status_t CameraDeviceClient::initializeImpl(TProviderPtr providerPtr) {
ATRACE_CALL();
status_t res;
- res = Camera2ClientBase::initialize(module);
+ res = Camera2ClientBase::initialize(providerPtr);
if (res != OK) {
return res;
}
String8 threadName;
mFrameProcessor = new FrameProcessorBase(mDevice);
- threadName = String8::format("CDU-%d-FrameProc", mCameraId);
+ threadName = String8::format("CDU-%s-FrameProc", mCameraIdStr.string());
mFrameProcessor->run(threadName.string());
mFrameProcessor->registerListener(FRAME_PROCESSOR_LISTENER_MIN_ID,
@@ -138,25 +142,27 @@
}
if (requests.empty()) {
- ALOGE("%s: Camera %d: Sent null request. Rejecting request.",
- __FUNCTION__, mCameraId);
+ ALOGE("%s: Camera %s: Sent null request. Rejecting request.",
+ __FUNCTION__, mCameraIdStr.string());
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Empty request list");
}
List<const CameraMetadata> metadataRequestList;
+ std::list<const SurfaceMap> surfaceMapList;
submitInfo->mRequestId = mRequestIdCounter;
uint32_t loopCounter = 0;
for (auto&& request: requests) {
if (request.mIsReprocess) {
if (!mInputStream.configured) {
- ALOGE("%s: Camera %d: no input stream is configured.", __FUNCTION__, mCameraId);
+ ALOGE("%s: Camera %s: no input stream is configured.", __FUNCTION__,
+ mCameraIdStr.string());
return STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
- "No input configured for camera %d but request is for reprocessing",
- mCameraId);
+ "No input configured for camera %s but request is for reprocessing",
+ mCameraIdStr.string());
} else if (streaming) {
- ALOGE("%s: Camera %d: streaming reprocess requests not supported.", __FUNCTION__,
- mCameraId);
+ ALOGE("%s: Camera %s: streaming reprocess requests not supported.", __FUNCTION__,
+ mCameraIdStr.string());
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
"Repeating reprocess requests not supported");
}
@@ -164,13 +170,13 @@
CameraMetadata metadata(request.mMetadata);
if (metadata.isEmpty()) {
- ALOGE("%s: Camera %d: Sent empty metadata packet. Rejecting request.",
- __FUNCTION__, mCameraId);
+ ALOGE("%s: Camera %s: Sent empty metadata packet. Rejecting request.",
+ __FUNCTION__, mCameraIdStr.string());
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
"Request settings are empty");
} else if (request.mSurfaceList.isEmpty()) {
- ALOGE("%s: Camera %d: Requests must have at least one surface target. "
- "Rejecting request.", __FUNCTION__, mCameraId);
+ ALOGE("%s: Camera %s: Requests must have at least one surface target. "
+ "Rejecting request.", __FUNCTION__, mCameraIdStr.string());
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
"Request has no output targets");
}
@@ -182,11 +188,11 @@
}
/**
- * Write in the output stream IDs which we calculate from
- * the capture request's list of surface targets
+ * Write in the output stream IDs and map from stream ID to surface ID
+ * which we calculate from the capture request's list of surface target
*/
+ SurfaceMap surfaceMap;
Vector<int32_t> outputStreamIds;
- outputStreamIds.setCapacity(request.mSurfaceList.size());
for (sp<Surface> surface : request.mSurfaceList) {
if (surface == 0) continue;
@@ -195,17 +201,23 @@
// Trying to submit request with surface that wasn't created
if (idx == NAME_NOT_FOUND) {
- ALOGE("%s: Camera %d: Tried to submit a request with a surface that"
+ ALOGE("%s: Camera %s: Tried to submit a request with a surface that"
" we have not called createStream on",
- __FUNCTION__, mCameraId);
+ __FUNCTION__, mCameraIdStr.string());
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
"Request targets Surface that is not part of current capture session");
}
- int streamId = mStreamMap.valueAt(idx);
- outputStreamIds.push_back(streamId);
- ALOGV("%s: Camera %d: Appending output stream %d to request",
- __FUNCTION__, mCameraId, streamId);
+ const StreamSurfaceId& streamSurfaceId = mStreamMap.valueAt(idx);
+ if (surfaceMap.find(streamSurfaceId.streamId()) == surfaceMap.end()) {
+ surfaceMap[streamSurfaceId.streamId()] = std::vector<size_t>();
+ outputStreamIds.push_back(streamSurfaceId.streamId());
+ }
+ surfaceMap[streamSurfaceId.streamId()].push_back(streamSurfaceId.surfaceId());
+
+ ALOGV("%s: Camera %s: Appending output stream %d surface %d to request",
+ __FUNCTION__, mCameraIdStr.string(), streamSurfaceId.streamId(),
+ streamSurfaceId.surfaceId());
}
metadata.update(ANDROID_REQUEST_OUTPUT_STREAMS, &outputStreamIds[0],
@@ -217,19 +229,22 @@
metadata.update(ANDROID_REQUEST_ID, &(submitInfo->mRequestId), /*size*/1);
loopCounter++; // loopCounter starts from 1
- ALOGV("%s: Camera %d: Creating request with ID %d (%d of %zu)",
- __FUNCTION__, mCameraId, submitInfo->mRequestId, loopCounter, requests.size());
+ ALOGV("%s: Camera %s: Creating request with ID %d (%d of %zu)",
+ __FUNCTION__, mCameraIdStr.string(), submitInfo->mRequestId,
+ loopCounter, requests.size());
metadataRequestList.push_back(metadata);
+ surfaceMapList.push_back(surfaceMap);
}
mRequestIdCounter++;
if (streaming) {
- err = mDevice->setStreamingRequestList(metadataRequestList, &(submitInfo->mLastFrameNumber));
+ err = mDevice->setStreamingRequestList(metadataRequestList, surfaceMapList,
+ &(submitInfo->mLastFrameNumber));
if (err != OK) {
String8 msg = String8::format(
- "Camera %d: Got error %s (%d) after trying to set streaming request",
- mCameraId, strerror(-err), err);
+ "Camera %s: Got error %s (%d) after trying to set streaming request",
+ mCameraIdStr.string(), strerror(-err), err);
ALOGE("%s: %s", __FUNCTION__, msg.string());
res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION,
msg.string());
@@ -238,11 +253,12 @@
mStreamingRequestId = submitInfo->mRequestId;
}
} else {
- err = mDevice->captureList(metadataRequestList, &(submitInfo->mLastFrameNumber));
+ err = mDevice->captureList(metadataRequestList, surfaceMapList,
+ &(submitInfo->mLastFrameNumber));
if (err != OK) {
String8 msg = String8::format(
- "Camera %d: Got error %s (%d) after trying to submit capture request",
- mCameraId, strerror(-err), err);
+ "Camera %s: Got error %s (%d) after trying to submit capture request",
+ mCameraIdStr.string(), strerror(-err), err);
ALOGE("%s: %s", __FUNCTION__, msg.string());
res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION,
msg.string());
@@ -250,7 +266,7 @@
ALOGV("%s: requestId = %d ", __FUNCTION__, submitInfo->mRequestId);
}
- ALOGV("%s: Camera %d: End of function", __FUNCTION__, mCameraId);
+ ALOGV("%s: Camera %s: End of function", __FUNCTION__, mCameraIdStr.string());
return res;
}
@@ -274,8 +290,8 @@
Mutex::Autolock idLock(mStreamingRequestIdLock);
if (mStreamingRequestId != requestId) {
- String8 msg = String8::format("Camera %d: Canceling request ID %d doesn't match "
- "current request ID %d", mCameraId, requestId, mStreamingRequestId);
+ String8 msg = String8::format("Camera %s: Canceling request ID %d doesn't match "
+ "current request ID %d", mCameraIdStr.string(), requestId, mStreamingRequestId);
ALOGE("%s: %s", __FUNCTION__, msg.string());
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
}
@@ -283,13 +299,13 @@
err = mDevice->clearStreamingRequest(lastFrameNumber);
if (err == OK) {
- ALOGV("%s: Camera %d: Successfully cleared streaming request",
- __FUNCTION__, mCameraId);
+ ALOGV("%s: Camera %s: Successfully cleared streaming request",
+ __FUNCTION__, mCameraIdStr.string());
mStreamingRequestId = REQUEST_ID_NONE;
} else {
res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
- "Camera %d: Error clearing streaming request: %s (%d)",
- mCameraId, strerror(-err), err);
+ "Camera %s: Error clearing streaming request: %s (%d)",
+ mCameraIdStr.string(), strerror(-err), err);
}
return res;
@@ -301,9 +317,10 @@
return binder::Status::ok();
}
-binder::Status CameraDeviceClient::endConfigure(bool isConstrainedHighSpeed) {
- ALOGV("%s: ending configure (%d input stream, %zu output streams)",
- __FUNCTION__, mInputStream.configured ? 1 : 0, mStreamMap.size());
+binder::Status CameraDeviceClient::endConfigure(int operatingMode) {
+ ALOGV("%s: ending configure (%d input stream, %zu output surfaces)",
+ __FUNCTION__, mInputStream.configured ? 1 : 0,
+ mStreamMap.size());
binder::Status res;
if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
@@ -314,7 +331,16 @@
return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
}
+ if (operatingMode < 0) {
+ String8 msg = String8::format(
+ "Camera %s: Invalid operating mode %d requested", mCameraIdStr.string(), operatingMode);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ msg.string());
+ }
+
// Sanitize the high speed session against necessary capability bit.
+ bool isConstrainedHighSpeed = (operatingMode == ICameraDeviceUser::CONSTRAINED_HIGH_SPEED_MODE);
if (isConstrainedHighSpeed) {
CameraMetadata staticInfo = mDevice->info();
camera_metadata_entry_t entry = staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
@@ -328,23 +354,23 @@
}
if (!isConstrainedHighSpeedSupported) {
String8 msg = String8::format(
- "Camera %d: Try to create a constrained high speed configuration on a device"
- " that doesn't support it.", mCameraId);
+ "Camera %s: Try to create a constrained high speed configuration on a device"
+ " that doesn't support it.", mCameraIdStr.string());
ALOGE("%s: %s", __FUNCTION__, msg.string());
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
msg.string());
}
}
- status_t err = mDevice->configureStreams(isConstrainedHighSpeed);
+ status_t err = mDevice->configureStreams(operatingMode);
if (err == BAD_VALUE) {
- String8 msg = String8::format("Camera %d: Unsupported set of inputs/outputs provided",
- mCameraId);
+ String8 msg = String8::format("Camera %s: Unsupported set of inputs/outputs provided",
+ mCameraIdStr.string());
ALOGE("%s: %s", __FUNCTION__, msg.string());
res = STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
} else if (err != OK) {
- String8 msg = String8::format("Camera %d: Error configuring streams: %s (%d)",
- mCameraId, strerror(-err), err);
+ String8 msg = String8::format("Camera %s: Error configuring streams: %s (%d)",
+ mCameraIdStr.string(), strerror(-err), err);
ALOGE("%s: %s", __FUNCTION__, msg.string());
res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
}
@@ -366,7 +392,7 @@
}
bool isInput = false;
- ssize_t index = NAME_NOT_FOUND;
+ std::vector<sp<IBinder>> surfaces;
ssize_t dIndex = NAME_NOT_FOUND;
if (mInputStream.configured && mInputStream.id == streamId) {
@@ -374,26 +400,24 @@
} else {
// Guard against trying to delete non-created streams
for (size_t i = 0; i < mStreamMap.size(); ++i) {
- if (streamId == mStreamMap.valueAt(i)) {
- index = i;
+ if (streamId == mStreamMap.valueAt(i).streamId()) {
+ surfaces.push_back(mStreamMap.keyAt(i));
+ }
+ }
+
+ // See if this stream is one of the deferred streams.
+ for (size_t i = 0; i < mDeferredStreams.size(); ++i) {
+ if (streamId == mDeferredStreams[i]) {
+ dIndex = i;
break;
}
}
- if (index == NAME_NOT_FOUND) {
- // See if this stream is one of the deferred streams.
- for (size_t i = 0; i < mDeferredStreams.size(); ++i) {
- if (streamId == mDeferredStreams[i]) {
- dIndex = i;
- break;
- }
- }
- if (dIndex == NAME_NOT_FOUND) {
- String8 msg = String8::format("Camera %d: Invalid stream ID (%d) specified, no such"
- " stream created yet", mCameraId, streamId);
- ALOGW("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
- }
+ if (surfaces.empty() && dIndex == NAME_NOT_FOUND) {
+ String8 msg = String8::format("Camera %s: Invalid stream ID (%d) specified, no such"
+ " stream created yet", mCameraIdStr.string(), streamId);
+ ALOGW("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
}
}
@@ -401,17 +425,21 @@
status_t err = mDevice->deleteStream(streamId);
if (err != OK) {
- String8 msg = String8::format("Camera %d: Unexpected error %s (%d) when deleting stream %d",
- mCameraId, strerror(-err), err, streamId);
+ String8 msg = String8::format("Camera %s: Unexpected error %s (%d) when deleting stream %d",
+ mCameraIdStr.string(), strerror(-err), err, streamId);
ALOGE("%s: %s", __FUNCTION__, msg.string());
res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
} else {
if (isInput) {
mInputStream.configured = false;
- } else if (index != NAME_NOT_FOUND) {
- mStreamMap.removeItemsAt(index);
} else {
- mDeferredStreams.removeItemsAt(dIndex);
+ for (auto& surface : surfaces) {
+ mStreamMap.removeItem(surface);
+ }
+
+ if (dIndex != NAME_NOT_FOUND) {
+ mDeferredStreams.removeItemsAt(dIndex);
+ }
}
}
@@ -429,14 +457,25 @@
Mutex::Autolock icl(mBinderSerializationLock);
- sp<IGraphicBufferProducer> bufferProducer = outputConfiguration.getGraphicBufferProducer();
- bool deferredConsumer = bufferProducer == NULL;
+ const std::vector<sp<IGraphicBufferProducer>>& bufferProducers =
+ outputConfiguration.getGraphicBufferProducers();
+ size_t numBufferProducers = bufferProducers.size();
+ bool deferredConsumer = outputConfiguration.isDeferred();
+ bool isShared = outputConfiguration.isShared();
+
+ if (numBufferProducers > MAX_SURFACES_PER_STREAM) {
+ ALOGE("%s: GraphicBufferProducer count %zu for stream exceeds limit of %d",
+ __FUNCTION__, bufferProducers.size(), MAX_SURFACES_PER_STREAM);
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Surface count is too high");
+ }
+ bool deferredConsumerOnly = deferredConsumer && numBufferProducers == 0;
int surfaceType = outputConfiguration.getSurfaceType();
bool validSurfaceType = ((surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_VIEW) ||
(surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_TEXTURE));
+
if (deferredConsumer && !validSurfaceType) {
ALOGE("%s: Target surface is invalid: bufferProducer = %p, surfaceType = %d.",
- __FUNCTION__, bufferProducer.get(), surfaceType);
+ __FUNCTION__, bufferProducers[0].get(), surfaceType);
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Target Surface is invalid");
}
@@ -444,116 +483,75 @@
return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
}
- int width, height, format;
- int32_t consumerUsage;
- android_dataspace dataSpace;
+ std::vector<sp<Surface>> surfaces;
+ std::vector<sp<IBinder>> binders;
status_t err;
// Create stream for deferred surface case.
- if (deferredConsumer) {
- return createDeferredSurfaceStreamLocked(outputConfiguration, newStreamId);
+ if (deferredConsumerOnly) {
+ return createDeferredSurfaceStreamLocked(outputConfiguration, isShared, newStreamId);
}
- // Don't create multiple streams for the same target surface
- {
- ssize_t index = mStreamMap.indexOfKey(IInterface::asBinder(bufferProducer));
+ OutputStreamInfo streamInfo;
+ bool isStreamInfoValid = false;
+ for (auto& bufferProducer : bufferProducers) {
+ // Don't create multiple streams for the same target surface
+ sp<IBinder> binder = IInterface::asBinder(bufferProducer);
+ ssize_t index = mStreamMap.indexOfKey(binder);
if (index != NAME_NOT_FOUND) {
- String8 msg = String8::format("Camera %d: Surface already has a stream created for it "
- "(ID %zd)", mCameraId, index);
+ String8 msg = String8::format("Camera %s: Surface already has a stream created for it "
+ "(ID %zd)", mCameraIdStr.string(), index);
ALOGW("%s: %s", __FUNCTION__, msg.string());
return STATUS_ERROR(CameraService::ERROR_ALREADY_EXISTS, msg.string());
}
- }
- // HACK b/10949105
- // Query consumer usage bits to set async operation mode for
- // GLConsumer using controlledByApp parameter.
- bool useAsync = false;
- if ((err = bufferProducer->query(NATIVE_WINDOW_CONSUMER_USAGE_BITS,
- &consumerUsage)) != OK) {
- String8 msg = String8::format("Camera %d: Failed to query Surface consumer usage: %s (%d)",
- mCameraId, strerror(-err), err);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
- }
- if (consumerUsage & GraphicBuffer::USAGE_HW_TEXTURE) {
- ALOGW("%s: Camera %d with consumer usage flag: 0x%x: Forcing asynchronous mode for stream",
- __FUNCTION__, mCameraId, consumerUsage);
- useAsync = true;
- }
+ sp<Surface> surface;
+ res = createSurfaceFromGbp(streamInfo, isStreamInfoValid, surface, bufferProducer);
- int32_t disallowedFlags = GraphicBuffer::USAGE_HW_VIDEO_ENCODER |
- GRALLOC_USAGE_RENDERSCRIPT;
- int32_t allowedFlags = GraphicBuffer::USAGE_SW_READ_MASK |
- GraphicBuffer::USAGE_HW_TEXTURE |
- GraphicBuffer::USAGE_HW_COMPOSER;
- bool flexibleConsumer = (consumerUsage & disallowedFlags) == 0 &&
- (consumerUsage & allowedFlags) != 0;
+ if (!res.isOk())
+ return res;
- sp<IBinder> binder = IInterface::asBinder(bufferProducer);
- sp<Surface> surface = new Surface(bufferProducer, useAsync);
- ANativeWindow *anw = surface.get();
+ if (!isStreamInfoValid) {
+ // Stream sharing is only supported for IMPLEMENTATION_DEFINED
+ // formats.
+ if (isShared && streamInfo.format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+ String8 msg = String8::format("Camera %s: Stream sharing is only supported for "
+ "IMPLEMENTATION_DEFINED format", mCameraIdStr.string());
+ ALOGW("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ isStreamInfoValid = true;
+ }
- if ((err = anw->query(anw, NATIVE_WINDOW_WIDTH, &width)) != OK) {
- String8 msg = String8::format("Camera %d: Failed to query Surface width: %s (%d)",
- mCameraId, strerror(-err), err);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
- }
- if ((err = anw->query(anw, NATIVE_WINDOW_HEIGHT, &height)) != OK) {
- String8 msg = String8::format("Camera %d: Failed to query Surface height: %s (%d)",
- mCameraId, strerror(-err), err);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
- }
- if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
- String8 msg = String8::format("Camera %d: Failed to query Surface format: %s (%d)",
- mCameraId, strerror(-err), err);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
- }
- if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE,
- reinterpret_cast<int*>(&dataSpace))) != OK) {
- String8 msg = String8::format("Camera %d: Failed to query Surface dataspace: %s (%d)",
- mCameraId, strerror(-err), err);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
- }
-
- // FIXME: remove this override since the default format should be
- // IMPLEMENTATION_DEFINED. b/9487482
- if (format >= HAL_PIXEL_FORMAT_RGBA_8888 &&
- format <= HAL_PIXEL_FORMAT_BGRA_8888) {
- ALOGW("%s: Camera %d: Overriding format %#x to IMPLEMENTATION_DEFINED",
- __FUNCTION__, mCameraId, format);
- format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
- }
-
- // Round dimensions to the nearest dimensions available for this format
- if (flexibleConsumer && isPublicFormat(format) &&
- !CameraDeviceClient::roundBufferDimensionNearest(width, height,
- format, dataSpace, mDevice->info(), /*out*/&width, /*out*/&height)) {
- String8 msg = String8::format("Camera %d: No supported stream configurations with "
- "format %#x defined, failed to create output stream", mCameraId, format);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ binders.push_back(IInterface::asBinder(bufferProducer));
+ surfaces.push_back(surface);
}
int streamId = camera3::CAMERA3_STREAM_ID_INVALID;
- err = mDevice->createStream(surface, width, height, format, dataSpace,
+ err = mDevice->createStream(surfaces, deferredConsumer, streamInfo.width,
+ streamInfo.height, streamInfo.format, streamInfo.dataSpace,
static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
- &streamId, outputConfiguration.getSurfaceSetID());
+ &streamId, outputConfiguration.getSurfaceSetID(), isShared);
if (err != OK) {
res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
- "Camera %d: Error creating output stream (%d x %d, fmt %x, dataSpace %x): %s (%d)",
- mCameraId, width, height, format, dataSpace, strerror(-err), err);
+ "Camera %s: Error creating output stream (%d x %d, fmt %x, dataSpace %x): %s (%d)",
+ mCameraIdStr.string(), streamInfo.width, streamInfo.height, streamInfo.format,
+ streamInfo.dataSpace, strerror(-err), err);
} else {
- mStreamMap.add(binder, streamId);
+ int i = 0;
+ for (auto& binder : binders) {
+ ALOGV("%s: mStreamMap add binder %p streamId %d, surfaceId %d",
+ __FUNCTION__, binder.get(), streamId, i);
+ mStreamMap.add(binder, StreamSurfaceId(streamId, i++));
+ }
- ALOGV("%s: Camera %d: Successfully created a new stream ID %d for output surface"
- " (%d x %d) with format 0x%x.",
- __FUNCTION__, mCameraId, streamId, width, height, format);
+ mStreamInfoMap[streamId] = streamInfo;
+
+ ALOGV("%s: Camera %s: Successfully created a new stream ID %d for output surface"
+ " (%d x %d) with format 0x%x.",
+ __FUNCTION__, mCameraIdStr.string(), streamId, streamInfo.width,
+ streamInfo.height, streamInfo.format);
// Set transform flags to ensure preview to be rotated correctly.
res = setStreamTransformLocked(streamId);
@@ -566,6 +564,7 @@
binder::Status CameraDeviceClient::createDeferredSurfaceStreamLocked(
const hardware::camera2::params::OutputConfiguration &outputConfiguration,
+ bool isShared,
/*out*/
int* newStreamId) {
int width, height, format, surfaceType;
@@ -590,23 +589,28 @@
consumerUsage |= GraphicBuffer::USAGE_HW_COMPOSER;
}
int streamId = camera3::CAMERA3_STREAM_ID_INVALID;
- err = mDevice->createStream(/*surface*/nullptr, width, height, format, dataSpace,
+ std::vector<sp<Surface>> noSurface;
+ err = mDevice->createStream(noSurface, /*hasDeferredConsumer*/true, width,
+ height, format, dataSpace,
static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
- &streamId, outputConfiguration.getSurfaceSetID(), consumerUsage);
+ &streamId, outputConfiguration.getSurfaceSetID(), isShared, consumerUsage);
if (err != OK) {
res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
- "Camera %d: Error creating output stream (%d x %d, fmt %x, dataSpace %x): %s (%d)",
- mCameraId, width, height, format, dataSpace, strerror(-err), err);
+ "Camera %s: Error creating output stream (%d x %d, fmt %x, dataSpace %x): %s (%d)",
+ mCameraIdStr.string(), width, height, format, dataSpace, strerror(-err), err);
} else {
// Can not add streamId to mStreamMap here, as the surface is deferred. Add it to
// a separate list to track. Once the deferred surface is set, this id will be
// relocated to mStreamMap.
mDeferredStreams.push_back(streamId);
- ALOGV("%s: Camera %d: Successfully created a new stream ID %d for a deferred surface"
+ mStreamInfoMap.emplace(std::piecewise_construct, std::forward_as_tuple(streamId),
+ std::forward_as_tuple(width, height, format, dataSpace, consumerUsage));
+
+ ALOGV("%s: Camera %s: Successfully created a new stream ID %d for a deferred surface"
" (%d x %d) stream with format 0x%x.",
- __FUNCTION__, mCameraId, streamId, width, height, format);
+ __FUNCTION__, mCameraIdStr.string(), streamId, width, height, format);
// Set transform flags to ensure preview to be rotated correctly.
res = setStreamTransformLocked(streamId);
@@ -661,8 +665,8 @@
}
if (mInputStream.configured) {
- String8 msg = String8::format("Camera %d: Already has an input stream "
- "configured (ID %zd)", mCameraId, mInputStream.id);
+ String8 msg = String8::format("Camera %s: Already has an input stream "
+ "configured (ID %zd)", mCameraIdStr.string(), mInputStream.id);
ALOGE("%s: %s", __FUNCTION__, msg.string() );
return STATUS_ERROR(CameraService::ERROR_ALREADY_EXISTS, msg.string());
}
@@ -676,13 +680,13 @@
mInputStream.format = format;
mInputStream.id = streamId;
- ALOGV("%s: Camera %d: Successfully created a new input stream ID %d",
- __FUNCTION__, mCameraId, streamId);
+ ALOGV("%s: Camera %s: Successfully created a new input stream ID %d",
+ __FUNCTION__, mCameraIdStr.string(), streamId);
*newStreamId = streamId;
} else {
res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
- "Camera %d: Error creating new input stream: %s (%d)", mCameraId,
+ "Camera %s: Error creating new input stream: %s (%d)", mCameraIdStr.string(),
strerror(-err), err);
}
@@ -706,8 +710,8 @@
status_t err = mDevice->getInputBufferProducer(&producer);
if (err != OK) {
res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
- "Camera %d: Error getting input Surface: %s (%d)",
- mCameraId, strerror(-err), err);
+ "Camera %s: Error getting input Surface: %s (%d)",
+ mCameraIdStr.string(), strerror(-err), err);
} else {
inputSurface->name = String16("CameraInput");
inputSurface->graphicBufferProducer = producer;
@@ -746,6 +750,139 @@
}
}
+binder::Status CameraDeviceClient::createSurfaceFromGbp(
+ OutputStreamInfo& streamInfo, bool isStreamInfoValid,
+ sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp) {
+
+ // bufferProducer must be non-null
+ if (gbp == nullptr) {
+ String8 msg = String8::format("Camera %s: Surface is NULL", mCameraIdStr.string());
+ ALOGW("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ // HACK b/10949105
+ // Query consumer usage bits to set async operation mode for
+ // GLConsumer using controlledByApp parameter.
+ bool useAsync = false;
+ int32_t consumerUsage;
+ status_t err;
+ if ((err = gbp->query(NATIVE_WINDOW_CONSUMER_USAGE_BITS,
+ &consumerUsage)) != OK) {
+ String8 msg = String8::format("Camera %s: Failed to query Surface consumer usage: %s (%d)",
+ mCameraIdStr.string(), strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ }
+ if (consumerUsage & GraphicBuffer::USAGE_HW_TEXTURE) {
+ ALOGW("%s: Camera %s with consumer usage flag: 0x%x: Forcing asynchronous mode for stream",
+ __FUNCTION__, mCameraIdStr.string(), consumerUsage);
+ useAsync = true;
+ }
+
+ int32_t disallowedFlags = GraphicBuffer::USAGE_HW_VIDEO_ENCODER |
+ GRALLOC_USAGE_RENDERSCRIPT;
+ int32_t allowedFlags = GraphicBuffer::USAGE_SW_READ_MASK |
+ GraphicBuffer::USAGE_HW_TEXTURE |
+ GraphicBuffer::USAGE_HW_COMPOSER;
+ bool flexibleConsumer = (consumerUsage & disallowedFlags) == 0 &&
+ (consumerUsage & allowedFlags) != 0;
+
+ surface = new Surface(gbp, useAsync);
+ ANativeWindow *anw = surface.get();
+
+ int width, height, format;
+ android_dataspace dataSpace;
+ if ((err = anw->query(anw, NATIVE_WINDOW_WIDTH, &width)) != OK) {
+ String8 msg = String8::format("Camera %s: Failed to query Surface width: %s (%d)",
+ mCameraIdStr.string(), strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ }
+ if ((err = anw->query(anw, NATIVE_WINDOW_HEIGHT, &height)) != OK) {
+ String8 msg = String8::format("Camera %s: Failed to query Surface height: %s (%d)",
+ mCameraIdStr.string(), strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ }
+ if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
+ String8 msg = String8::format("Camera %s: Failed to query Surface format: %s (%d)",
+ mCameraIdStr.string(), strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ }
+ if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE,
+ reinterpret_cast<int*>(&dataSpace))) != OK) {
+ String8 msg = String8::format("Camera %s: Failed to query Surface dataspace: %s (%d)",
+ mCameraIdStr.string(), strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ }
+
+ // FIXME: remove this override since the default format should be
+ // IMPLEMENTATION_DEFINED. b/9487482
+ if (format >= HAL_PIXEL_FORMAT_RGBA_8888 &&
+ format <= HAL_PIXEL_FORMAT_BGRA_8888) {
+ ALOGW("%s: Camera %s: Overriding format %#x to IMPLEMENTATION_DEFINED",
+ __FUNCTION__, mCameraIdStr.string(), format);
+ format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+ }
+ // Round dimensions to the nearest dimensions available for this format
+ if (flexibleConsumer && isPublicFormat(format) &&
+ !CameraDeviceClient::roundBufferDimensionNearest(width, height,
+ format, dataSpace, mDevice->info(), /*out*/&width, /*out*/&height)) {
+ String8 msg = String8::format("Camera %s: No supported stream configurations with "
+ "format %#x defined, failed to create output stream",
+ mCameraIdStr.string(), format);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+
+ if (!isStreamInfoValid) {
+ streamInfo.width = width;
+ streamInfo.height = height;
+ streamInfo.format = format;
+ streamInfo.dataSpace = dataSpace;
+ streamInfo.consumerUsage = consumerUsage;
+ return binder::Status::ok();
+ }
+ if (width != streamInfo.width) {
+ String8 msg = String8::format("Camera %s:Surface width doesn't match: %d vs %d",
+ mCameraIdStr.string(), width, streamInfo.width);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ if (height != streamInfo.height) {
+ String8 msg = String8::format("Camera %s:Surface height doesn't match: %d vs %d",
+ mCameraIdStr.string(), height, streamInfo.height);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ if (format != streamInfo.format) {
+ String8 msg = String8::format("Camera %s:Surface format doesn't match: %d vs %d",
+ mCameraIdStr.string(), format, streamInfo.format);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ if (format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+ if (dataSpace != streamInfo.dataSpace) {
+ String8 msg = String8::format("Camera %s:Surface dataSpace doesn't match: %d vs %d",
+ mCameraIdStr.string(), dataSpace, streamInfo.dataSpace);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ // On the native side, there isn't a way to check whether two surfaces come from the same
+ // surface class type. Use the usage flag to approximate the comparison.
+ if (consumerUsage != streamInfo.consumerUsage) {
+ String8 msg = String8::format(
+ "Camera %s:Surface usage flag doesn't match 0x%x vs 0x%x",
+ mCameraIdStr.string(), consumerUsage, streamInfo.consumerUsage);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ }
+ return binder::Status::ok();
+}
+
bool CameraDeviceClient::roundBufferDimensionNearest(int32_t width, int32_t height,
int32_t format, android_dataspace dataSpace, const CameraMetadata& info,
/*out*/int32_t* outWidth, /*out*/int32_t* outHeight) {
@@ -828,13 +965,13 @@
request->swap(metadata);
} else if (err == BAD_VALUE) {
res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
- "Camera %d: Template ID %d is invalid or not supported: %s (%d)",
- mCameraId, templateId, strerror(-err), err);
+ "Camera %s: Template ID %d is invalid or not supported: %s (%d)",
+ mCameraIdStr.string(), templateId, strerror(-err), err);
} else {
res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
- "Camera %d: Error creating default request for template %d: %s (%d)",
- mCameraId, templateId, strerror(-err), err);
+ "Camera %s: Error creating default request for template %d: %s (%d)",
+ mCameraIdStr.string(), templateId, strerror(-err), err);
}
return res;
}
@@ -882,16 +1019,16 @@
Mutex::Autolock idLock(mStreamingRequestIdLock);
if (mStreamingRequestId != REQUEST_ID_NONE) {
String8 msg = String8::format(
- "Camera %d: Try to waitUntilIdle when there are active streaming requests",
- mCameraId);
+ "Camera %s: Try to waitUntilIdle when there are active streaming requests",
+ mCameraIdStr.string());
ALOGE("%s: %s", __FUNCTION__, msg.string());
return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
}
status_t err = mDevice->waitUntilDrained();
if (err != OK) {
res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
- "Camera %d: Error waiting to drain: %s (%d)",
- mCameraId, strerror(-err), err);
+ "Camera %s: Error waiting to drain: %s (%d)",
+ mCameraIdStr.string(), strerror(-err), err);
}
ALOGV("%s Done", __FUNCTION__);
return res;
@@ -917,7 +1054,7 @@
status_t err = mDevice->flush(lastFrameNumber);
if (err != OK) {
res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
- "Camera %d: Error flushing device: %s (%d)", mCameraId, strerror(-err), err);
+ "Camera %s: Error flushing device: %s (%d)", mCameraIdStr.string(), strerror(-err), err);
}
return res;
}
@@ -934,15 +1071,15 @@
// Guard against trying to prepare non-created streams
ssize_t index = NAME_NOT_FOUND;
for (size_t i = 0; i < mStreamMap.size(); ++i) {
- if (streamId == mStreamMap.valueAt(i)) {
+ if (streamId == mStreamMap.valueAt(i).streamId()) {
index = i;
break;
}
}
if (index == NAME_NOT_FOUND) {
- String8 msg = String8::format("Camera %d: Invalid stream ID (%d) specified, no stream "
- "with that ID exists", mCameraId, streamId);
+ String8 msg = String8::format("Camera %s: Invalid stream ID (%d) specified, no stream "
+ "with that ID exists", mCameraIdStr.string(), streamId);
ALOGW("%s: %s", __FUNCTION__, msg.string());
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
}
@@ -952,11 +1089,11 @@
status_t err = mDevice->prepare(streamId);
if (err == BAD_VALUE) {
res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
- "Camera %d: Stream %d has already been used, and cannot be prepared",
- mCameraId, streamId);
+ "Camera %s: Stream %d has already been used, and cannot be prepared",
+ mCameraIdStr.string(), streamId);
} else if (err != OK) {
res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
- "Camera %d: Error preparing stream %d: %s (%d)", mCameraId, streamId,
+ "Camera %s: Error preparing stream %d: %s (%d)", mCameraIdStr.string(), streamId,
strerror(-err), err);
}
return res;
@@ -974,22 +1111,22 @@
// Guard against trying to prepare non-created streams
ssize_t index = NAME_NOT_FOUND;
for (size_t i = 0; i < mStreamMap.size(); ++i) {
- if (streamId == mStreamMap.valueAt(i)) {
+ if (streamId == mStreamMap.valueAt(i).streamId()) {
index = i;
break;
}
}
if (index == NAME_NOT_FOUND) {
- String8 msg = String8::format("Camera %d: Invalid stream ID (%d) specified, no stream "
- "with that ID exists", mCameraId, streamId);
+ String8 msg = String8::format("Camera %s: Invalid stream ID (%d) specified, no stream "
+ "with that ID exists", mCameraIdStr.string(), streamId);
ALOGW("%s: %s", __FUNCTION__, msg.string());
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
}
if (maxCount <= 0) {
- String8 msg = String8::format("Camera %d: maxCount (%d) must be greater than 0",
- mCameraId, maxCount);
+ String8 msg = String8::format("Camera %s: maxCount (%d) must be greater than 0",
+ mCameraIdStr.string(), maxCount);
ALOGE("%s: %s", __FUNCTION__, msg.string());
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
}
@@ -999,11 +1136,11 @@
status_t err = mDevice->prepare(maxCount, streamId);
if (err == BAD_VALUE) {
res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
- "Camera %d: Stream %d has already been used, and cannot be prepared",
- mCameraId, streamId);
+ "Camera %s: Stream %d has already been used, and cannot be prepared",
+ mCameraIdStr.string(), streamId);
} else if (err != OK) {
res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
- "Camera %d: Error preparing stream %d: %s (%d)", mCameraId, streamId,
+ "Camera %s: Error preparing stream %d: %s (%d)", mCameraIdStr.string(), streamId,
strerror(-err), err);
}
@@ -1022,15 +1159,15 @@
// Guard against trying to prepare non-created streams
ssize_t index = NAME_NOT_FOUND;
for (size_t i = 0; i < mStreamMap.size(); ++i) {
- if (streamId == mStreamMap.valueAt(i)) {
+ if (streamId == mStreamMap.valueAt(i).streamId()) {
index = i;
break;
}
}
if (index == NAME_NOT_FOUND) {
- String8 msg = String8::format("Camera %d: Invalid stream ID (%d) specified, no stream "
- "with that ID exists", mCameraId, streamId);
+ String8 msg = String8::format("Camera %s: Invalid stream ID (%d) specified, no stream "
+ "with that ID exists", mCameraIdStr.string(), streamId);
ALOGW("%s: %s", __FUNCTION__, msg.string());
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
}
@@ -1040,18 +1177,18 @@
status_t err = mDevice->tearDown(streamId);
if (err == BAD_VALUE) {
res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
- "Camera %d: Stream %d is still in use, cannot be torn down",
- mCameraId, streamId);
+ "Camera %s: Stream %d is still in use, cannot be torn down",
+ mCameraIdStr.string(), streamId);
} else if (err != OK) {
res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
- "Camera %d: Error tearing down stream %d: %s (%d)", mCameraId, streamId,
+ "Camera %s: Error tearing down stream %d: %s (%d)", mCameraIdStr.string(), streamId,
strerror(-err), err);
}
return res;
}
-binder::Status CameraDeviceClient::setDeferredConfiguration(int32_t streamId,
+binder::Status CameraDeviceClient::finalizeOutputConfigurations(int32_t streamId,
const hardware::camera2::params::OutputConfiguration &outputConfiguration) {
ATRACE_CALL();
@@ -1060,24 +1197,41 @@
Mutex::Autolock icl(mBinderSerializationLock);
- sp<IGraphicBufferProducer> bufferProducer = outputConfiguration.getGraphicBufferProducer();
+ const std::vector<sp<IGraphicBufferProducer> >& bufferProducers =
+ outputConfiguration.getGraphicBufferProducers();
- // Client code should guarantee that the surface is from SurfaceView or SurfaceTexture.
- if (bufferProducer == NULL) {
- ALOGE("%s: bufferProducer must not be null", __FUNCTION__);
+ if (bufferProducers.size() == 0) {
+ ALOGE("%s: bufferProducers must not be empty", __FUNCTION__);
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Target Surface is invalid");
}
- // Check if this stram id is one of the deferred streams
- ssize_t index = NAME_NOT_FOUND;
- for (size_t i = 0; i < mDeferredStreams.size(); i++) {
- if (streamId == mDeferredStreams[i]) {
- index = i;
+
+ // streamId should be in mStreamMap if this stream already has a surface attached
+ // to it. Otherwise, it should be in mDeferredStreams.
+ bool streamIdConfigured = false;
+ ssize_t deferredStreamIndex = NAME_NOT_FOUND;
+ for (size_t i = 0; i < mStreamMap.size(); i++) {
+ if (mStreamMap.valueAt(i).streamId() == streamId) {
+ streamIdConfigured = true;
break;
}
}
- if (index == NAME_NOT_FOUND) {
- String8 msg = String8::format("Camera %d: deferred surface is set to a unknown stream"
- "(ID %d)", mCameraId, streamId);
+ for (size_t i = 0; i < mDeferredStreams.size(); i++) {
+ if (streamId == mDeferredStreams[i]) {
+ deferredStreamIndex = i;
+ break;
+ }
+
+ }
+ if (deferredStreamIndex == NAME_NOT_FOUND && !streamIdConfigured) {
+ String8 msg = String8::format("Camera %s: deferred surface is set to a unknown stream"
+ "(ID %d)", mCameraIdStr.string(), streamId);
+ ALOGW("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+
+ if (mStreamInfoMap[streamId].finalized) {
+ String8 msg = String8::format("Camera %s: finalizeOutputConfigurations has been called"
+ " on stream ID %d", mCameraIdStr.string(), streamId);
ALOGW("%s: %s", __FUNCTION__, msg.string());
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
}
@@ -1086,36 +1240,61 @@
return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
}
- // Don't create multiple streams for the same target surface
- {
+ std::vector<sp<Surface>> consumerSurfaces;
+ std::vector<size_t> consumerSurfaceIds;
+ size_t surfaceId = 0;
+ for (auto& bufferProducer : bufferProducers) {
+ // Don't create multiple streams for the same target surface
ssize_t index = mStreamMap.indexOfKey(IInterface::asBinder(bufferProducer));
if (index != NAME_NOT_FOUND) {
- String8 msg = String8::format("Camera %d: Surface already has a stream created "
- " for it (ID %zd)", mCameraId, index);
- ALOGW("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ALREADY_EXISTS, msg.string());
+ ALOGV("Camera %s: Surface already has a stream created "
+ " for it (ID %zd)", mCameraIdStr.string(), index);
+ surfaceId++;
+ continue;
}
+
+ sp<Surface> surface;
+ res = createSurfaceFromGbp(mStreamInfoMap[streamId], true /*isStreamInfoValid*/,
+ surface, bufferProducer);
+
+ if (!res.isOk())
+ return res;
+
+ consumerSurfaces.push_back(surface);
+ consumerSurfaceIds.push_back(surfaceId);
+ surfaceId++;
}
- status_t err;
-
- // Always set to async, as we know the deferred surface is for preview streaming.
- sp<Surface> consumerSurface = new Surface(bufferProducer, /*useAsync*/true);
+ // Gracefully handle case where finalizeOutputConfigurations is called
+ // without any new surface.
+ if (consumerSurfaces.size() == 0) {
+ mStreamInfoMap[streamId].finalized = true;
+ return res;
+ }
// Finish the deferred stream configuration with the surface.
- err = mDevice->setConsumerSurface(streamId, consumerSurface);
+ status_t err;
+ err = mDevice->setConsumerSurfaces(streamId, consumerSurfaces);
if (err == OK) {
- sp<IBinder> binder = IInterface::asBinder(bufferProducer);
- mStreamMap.add(binder, streamId);
- mDeferredStreams.removeItemsAt(index);
+ for (size_t i = 0; i < consumerSurfaces.size(); i++) {
+ sp<IBinder> binder = IInterface::asBinder(
+ consumerSurfaces[i]->getIGraphicBufferProducer());
+ ALOGV("%s: mStreamMap add binder %p streamId %d, surfaceId %zu", __FUNCTION__,
+ binder.get(), streamId, consumerSurfaceIds[i]);
+ mStreamMap.add(binder, StreamSurfaceId(streamId, consumerSurfaceIds[i]));
+ }
+ if (deferredStreamIndex != NAME_NOT_FOUND) {
+ mDeferredStreams.removeItemsAt(deferredStreamIndex);
+ }
+ mStreamInfoMap[streamId].finalized = true;
} else if (err == NO_INIT) {
res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
- "Camera %d: Deferred surface is invalid: %s (%d)",
- mCameraId, strerror(-err), err);
+ "Camera %s: Deferred surface is invalid: %s (%d)",
+ mCameraIdStr.string(), strerror(-err), err);
} else {
res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
- "Camera %d: Error setting output stream deferred surface: %s (%d)",
- mCameraId, strerror(-err), err);
+ "Camera %s: Error setting output stream deferred surface: %s (%d)",
+ mCameraIdStr.string(), strerror(-err), err);
}
return res;
@@ -1126,35 +1305,34 @@
}
status_t CameraDeviceClient::dumpClient(int fd, const Vector<String16>& args) {
- String8 result;
- result.appendFormat("CameraDeviceClient[%d] (%p) dump:\n",
- mCameraId,
+ dprintf(fd, " CameraDeviceClient[%s] (%p) dump:\n",
+ mCameraIdStr.string(),
(getRemoteCallback() != NULL ?
IInterface::asBinder(getRemoteCallback()).get() : NULL) );
- result.appendFormat(" Current client UID %u\n", mClientUid);
+ dprintf(fd, " Current client UID %u\n", mClientUid);
- result.append(" State:\n");
- result.appendFormat(" Request ID counter: %d\n", mRequestIdCounter);
+ dprintf(fd, " State:\n");
+ dprintf(fd, " Request ID counter: %d\n", mRequestIdCounter);
if (mInputStream.configured) {
- result.appendFormat(" Current input stream ID: %d\n",
- mInputStream.id);
+ dprintf(fd, " Current input stream ID: %d\n", mInputStream.id);
} else {
- result.append(" No input stream configured.\n");
+ dprintf(fd, " No input stream configured.\n");
}
if (!mStreamMap.isEmpty()) {
- result.append(" Current output stream IDs:\n");
+ dprintf(fd, " Current output stream/surface IDs:\n");
for (size_t i = 0; i < mStreamMap.size(); i++) {
- result.appendFormat(" Stream %d\n", mStreamMap.valueAt(i));
+ dprintf(fd, " Stream %d Surface %d\n",
+ mStreamMap.valueAt(i).streamId(),
+ mStreamMap.valueAt(i).surfaceId());
}
} else if (!mDeferredStreams.isEmpty()) {
- result.append(" Current deferred surface output stream IDs:\n");
+ dprintf(fd, " Current deferred surface output stream IDs:\n");
for (auto& streamId : mDeferredStreams) {
- result.appendFormat(" Stream %d\n", streamId);
+ dprintf(fd, " Stream %d\n", streamId);
}
} else {
- result.append(" No output streams configured.\n");
+ dprintf(fd, " No output streams configured.\n");
}
- write(fd, result.string(), result.size());
// TODO: print dynamic/request section from most recent requests
mFrameProcessor->dump(fd, args);
@@ -1210,18 +1388,26 @@
}
}
+void CameraDeviceClient::notifyRequestQueueEmpty() {
+ // Thread safe. Don't bother locking.
+ sp<hardware::camera2::ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
+ if (remoteCb != 0) {
+ remoteCb->onRequestQueueEmpty();
+ }
+}
+
void CameraDeviceClient::detachDevice() {
if (mDevice == 0) return;
- ALOGV("Camera %d: Stopping processors", mCameraId);
+ ALOGV("Camera %s: Stopping processors", mCameraIdStr.string());
mFrameProcessor->removeListener(FRAME_PROCESSOR_LISTENER_MIN_ID,
FRAME_PROCESSOR_LISTENER_MAX_ID,
/*listener*/this);
mFrameProcessor->requestExit();
- ALOGV("Camera %d: Waiting for threads", mCameraId);
+ ALOGV("Camera %s: Waiting for threads", mCameraIdStr.string());
mFrameProcessor->join();
- ALOGV("Camera %d: Disconnecting device", mCameraId);
+ ALOGV("Camera %s: Disconnecting device", mCameraIdStr.string());
// WORKAROUND: HAL refuses to disconnect while there's streams in flight
{
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index de283ea..2bf73a0 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -42,7 +42,7 @@
CameraDeviceClientBase(const sp<CameraService>& cameraService,
const sp<hardware::camera2::ICameraDeviceCallbacks>& remoteCallback,
const String16& clientPackageName,
- int cameraId,
+ const String8& cameraId,
int cameraFacing,
int clientPid,
uid_t clientUid,
@@ -70,70 +70,70 @@
const hardware::camera2::CaptureRequest& request,
bool streaming = false,
/*out*/
- hardware::camera2::utils::SubmitInfo *submitInfo = nullptr);
+ hardware::camera2::utils::SubmitInfo *submitInfo = nullptr) override;
// List of requests are copied.
virtual binder::Status submitRequestList(
const std::vector<hardware::camera2::CaptureRequest>& requests,
bool streaming = false,
/*out*/
- hardware::camera2::utils::SubmitInfo *submitInfo = nullptr);
+ hardware::camera2::utils::SubmitInfo *submitInfo = nullptr) override;
virtual binder::Status cancelRequest(int requestId,
/*out*/
- int64_t* lastFrameNumber = NULL);
+ int64_t* lastFrameNumber = NULL) override;
- virtual binder::Status beginConfigure();
+ virtual binder::Status beginConfigure() override;
- virtual binder::Status endConfigure(bool isConstrainedHighSpeed = false);
+ virtual binder::Status endConfigure(int operatingMode) override;
// Returns -EBUSY if device is not idle
- virtual binder::Status deleteStream(int streamId);
+ virtual binder::Status deleteStream(int streamId) override;
virtual binder::Status createStream(
const hardware::camera2::params::OutputConfiguration &outputConfiguration,
/*out*/
- int32_t* newStreamId = NULL);
+ int32_t* newStreamId = NULL) override;
// Create an input stream of width, height, and format.
virtual binder::Status createInputStream(int width, int height, int format,
/*out*/
- int32_t* newStreamId = NULL);
+ int32_t* newStreamId = NULL) override;
// Get the buffer producer of the input stream
virtual binder::Status getInputSurface(
/*out*/
- view::Surface *inputSurface);
+ view::Surface *inputSurface) override;
// Create a request object from a template.
virtual binder::Status createDefaultRequest(int templateId,
/*out*/
- hardware::camera2::impl::CameraMetadataNative* request);
+ hardware::camera2::impl::CameraMetadataNative* request) override;
// Get the static metadata for the camera
// -- Caller owns the newly allocated metadata
virtual binder::Status getCameraInfo(
/*out*/
- hardware::camera2::impl::CameraMetadataNative* cameraCharacteristics);
+ hardware::camera2::impl::CameraMetadataNative* cameraCharacteristics) override;
// Wait until all the submitted requests have finished processing
- virtual binder::Status waitUntilIdle();
+ virtual binder::Status waitUntilIdle() override;
// Flush all active and pending requests as fast as possible
virtual binder::Status flush(
/*out*/
- int64_t* lastFrameNumber = NULL);
+ int64_t* lastFrameNumber = NULL) override;
// Prepare stream by preallocating its buffers
- virtual binder::Status prepare(int32_t streamId);
+ virtual binder::Status prepare(int32_t streamId) override;
// Tear down stream resources by freeing its unused buffers
- virtual binder::Status tearDown(int32_t streamId);
+ virtual binder::Status tearDown(int32_t streamId) override;
// Prepare stream by preallocating up to maxCount of its buffers
- virtual binder::Status prepare2(int32_t maxCount, int32_t streamId);
+ virtual binder::Status prepare2(int32_t maxCount, int32_t streamId) override;
- // Set the deferred surface for a stream.
- virtual binder::Status setDeferredConfiguration(int32_t streamId,
- const hardware::camera2::params::OutputConfiguration &outputConfiguration);
+ // Finalize the output configurations with surfaces not added before.
+ virtual binder::Status finalizeOutputConfigurations(int32_t streamId,
+ const hardware::camera2::params::OutputConfiguration &outputConfiguration) override;
/**
* Interface used by CameraService
@@ -142,14 +142,14 @@
CameraDeviceClient(const sp<CameraService>& cameraService,
const sp<hardware::camera2::ICameraDeviceCallbacks>& remoteCallback,
const String16& clientPackageName,
- int cameraId,
+ const String8& cameraId,
int cameraFacing,
int clientPid,
uid_t clientUid,
int servicePid);
virtual ~CameraDeviceClient();
- virtual status_t initialize(CameraModule *module);
+ virtual status_t initialize(sp<CameraProviderManager> manager) override;
virtual status_t dump(int fd, const Vector<String16>& args);
@@ -164,6 +164,7 @@
const CaptureResultExtras& resultExtras);
virtual void notifyShutter(const CaptureResultExtras& resultExtras, nsecs_t timestamp);
virtual void notifyPrepared(int streamId);
+ virtual void notifyRequestQueueEmpty();
virtual void notifyRepeatingRequestError(long lastFrameNumber);
/**
@@ -178,6 +179,52 @@
status_t getRotationTransformLocked(/*out*/int32_t* transform);
private:
+ // StreamSurfaceId encapsulates streamId + surfaceId for a particular surface.
+ // streamId specifies the index of the stream the surface belongs to, and the
+ // surfaceId specifies the index of the surface within the stream. (one stream
+ // could contain multiple surfaces.)
+ class StreamSurfaceId final {
+ public:
+ StreamSurfaceId() {
+ mStreamId = -1;
+ mSurfaceId = -1;
+ }
+ StreamSurfaceId(int32_t streamId, int32_t surfaceId) {
+ mStreamId = streamId;
+ mSurfaceId = surfaceId;
+ }
+ int32_t streamId() const {
+ return mStreamId;
+ }
+ int32_t surfaceId() const {
+ return mSurfaceId;
+ }
+
+ private:
+ int32_t mStreamId;
+ int32_t mSurfaceId;
+
+ }; // class StreamSurfaceId
+
+ // OutputStreamInfo describes the property of a camera stream.
+ class OutputStreamInfo {
+ public:
+ int width;
+ int height;
+ int format;
+ android_dataspace dataSpace;
+ int32_t consumerUsage;
+ bool finalized = false;
+ OutputStreamInfo() :
+ width(-1), height(-1), format(-1), dataSpace(HAL_DATASPACE_UNKNOWN),
+ consumerUsage(0) {}
+ OutputStreamInfo(int _width, int _height, int _format, android_dataspace _dataSpace,
+ int32_t _consumerUsage) :
+ width(_width), height(_height), format(_format),
+ dataSpace(_dataSpace), consumerUsage(_consumerUsage) {}
+ };
+
+private:
/** ICameraDeviceUser interface-related private members */
/** Preview callback related members */
@@ -185,6 +232,9 @@
static const int32_t FRAME_PROCESSOR_LISTENER_MIN_ID = 0;
static const int32_t FRAME_PROCESSOR_LISTENER_MAX_ID = 0x7fffffffL;
+ template<typename TProviderPtr>
+ status_t initializeImpl(TProviderPtr providerPtr);
+
/** Utility members */
binder::Status checkPidStatus(const char* checkLocation);
bool enforceRequestPermissions(CameraMetadata& metadata);
@@ -195,6 +245,7 @@
// Create an output stream with surface deferred for future.
binder::Status createDeferredSurfaceStreamLocked(
const hardware::camera2::params::OutputConfiguration &outputConfiguration,
+ bool isShared,
int* newStreamId = NULL);
// Set the stream transform flags to automatically rotate the camera stream for preview use
@@ -211,8 +262,13 @@
//check if format is not custom format
static bool isPublicFormat(int32_t format);
- // IGraphicsBufferProducer binder -> Stream ID for output streams
- KeyedVector<sp<IBinder>, int> mStreamMap;
+ // Create a Surface from an IGraphicBufferProducer. Returns error if
+ // IGraphicBufferProducer's property doesn't match with streamInfo
+ binder::Status createSurfaceFromGbp(OutputStreamInfo& streamInfo, bool isStreamInfoValid,
+ sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp);
+
+ // IGraphicsBufferProducer binder -> Stream ID + Surface ID for output streams
+ KeyedVector<sp<IBinder>, StreamSurfaceId> mStreamMap;
struct InputStreamConfiguration {
bool configured;
@@ -233,6 +289,11 @@
// as there are no surfaces available and can not be put into mStreamMap. Once the deferred
// Surface is configured, the stream id will be moved to mStreamMap.
Vector<int32_t> mDeferredStreams;
+
+ // stream ID -> outputStreamInfo mapping
+ std::unordered_map<int32_t, OutputStreamInfo> mStreamInfoMap;
+
+ static const int32_t MAX_SURFACES_PER_STREAM = 2;
};
}; // namespace android
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index ccd1e4d..32ee273 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -47,7 +47,7 @@
const sp<CameraService>& cameraService,
const sp<TCamCallbacks>& remoteCallback,
const String16& clientPackageName,
- int cameraId,
+ const String8& cameraId,
int cameraFacing,
int clientPid,
uid_t clientUid,
@@ -55,10 +55,10 @@
TClientBase(cameraService, remoteCallback, clientPackageName,
cameraId, cameraFacing, clientPid, clientUid, servicePid),
mSharedCameraCallbacks(remoteCallback),
- mDeviceVersion(cameraService->getDeviceVersion(cameraId)),
+ mDeviceVersion(cameraService->getDeviceVersion(TClientBase::mCameraIdStr)),
mDeviceActive(false)
{
- ALOGI("Camera %d: Opened. Client: %s (PID %d, UID %d)", cameraId,
+ ALOGI("Camera %s: Opened. Client: %s (PID %d, UID %d)", cameraId.string(),
String8(clientPackageName).string(), clientPid, clientUid);
mInitialClientPid = clientPid;
@@ -79,10 +79,16 @@
}
template <typename TClientBase>
-status_t Camera2ClientBase<TClientBase>::initialize(CameraModule *module) {
+status_t Camera2ClientBase<TClientBase>::initialize(sp<CameraProviderManager> manager) {
+ return initializeImpl(manager);
+}
+
+template <typename TClientBase>
+template <typename TProviderPtr>
+status_t Camera2ClientBase<TClientBase>::initializeImpl(TProviderPtr providerPtr) {
ATRACE_CALL();
- ALOGV("%s: Initializing client for camera %d", __FUNCTION__,
- TClientBase::mCameraId);
+ ALOGV("%s: Initializing client for camera %s", __FUNCTION__,
+ TClientBase::mCameraIdStr.string());
status_t res;
// Verify ops permissions
@@ -92,15 +98,15 @@
}
if (mDevice == NULL) {
- ALOGE("%s: Camera %d: No device connected",
- __FUNCTION__, TClientBase::mCameraId);
+ ALOGE("%s: Camera %s: No device connected",
+ __FUNCTION__, TClientBase::mCameraIdStr.string());
return NO_INIT;
}
- res = mDevice->initialize(module);
+ res = mDevice->initialize(providerPtr);
if (res != OK) {
- ALOGE("%s: Camera %d: unable to initialize device: %s (%d)",
- __FUNCTION__, TClientBase::mCameraId, strerror(-res), res);
+ ALOGE("%s: Camera %s: unable to initialize device: %s (%d)",
+ __FUNCTION__, TClientBase::mCameraIdStr.string(), strerror(-res), res);
return res;
}
@@ -118,8 +124,8 @@
disconnect();
- ALOGI("Closed Camera %d. Client was: %s (PID %d, UID %u)",
- TClientBase::mCameraId,
+ ALOGI("Closed Camera %s. Client was: %s (PID %d, UID %u)",
+ TClientBase::mCameraIdStr.string(),
String8(TClientBase::mClientPackageName).string(),
mInitialClientPid, TClientBase::mClientUid);
}
@@ -128,8 +134,8 @@
status_t Camera2ClientBase<TClientBase>::dumpClient(int fd,
const Vector<String16>& args) {
String8 result;
- result.appendFormat("Camera2ClientBase[%d] (%p) PID: %d, dump:\n",
- TClientBase::mCameraId,
+ result.appendFormat("Camera2ClientBase[%s] (%p) PID: %d, dump:\n",
+ TClientBase::mCameraIdStr.string(),
(TClientBase::getRemoteCallback() != NULL ?
IInterface::asBinder(TClientBase::getRemoteCallback()).get() : NULL),
TClientBase::mClientPid);
@@ -180,13 +186,13 @@
if (callingPid != TClientBase::mClientPid &&
callingPid != TClientBase::mServicePid) return res;
- ALOGV("Camera %d: Shutting down", TClientBase::mCameraId);
+ ALOGV("Camera %s: Shutting down", TClientBase::mCameraIdStr.string());
detachDevice();
CameraService::BasicClient::disconnect();
- ALOGV("Camera %d: Shut down complete complete", TClientBase::mCameraId);
+ ALOGV("Camera %s: Shut down complete complete", TClientBase::mCameraIdStr.string());
return res;
}
@@ -198,7 +204,7 @@
mDevice.clear();
- ALOGV("Camera %d: Detach complete", TClientBase::mCameraId);
+ ALOGV("Camera %s: Detach complete", TClientBase::mCameraIdStr.string());
}
template <typename TClientBase>
@@ -211,10 +217,10 @@
if (TClientBase::mClientPid != 0 &&
getCallingPid() != TClientBase::mClientPid) {
- ALOGE("%s: Camera %d: Connection attempt from pid %d; "
+ ALOGE("%s: Camera %s: Connection attempt from pid %d; "
"current locked to pid %d",
__FUNCTION__,
- TClientBase::mCameraId,
+ TClientBase::mCameraIdStr.string(),
getCallingPid(),
TClientBase::mClientPid);
return BAD_VALUE;
@@ -242,8 +248,7 @@
void Camera2ClientBase<TClientBase>::notifyIdle() {
if (mDeviceActive) {
getCameraService()->updateProxyDeviceState(
- ICameraServiceProxy::CAMERA_STATE_IDLE,
- String8::format("%d", TClientBase::mCameraId));
+ ICameraServiceProxy::CAMERA_STATE_IDLE, TClientBase::mCameraIdStr);
}
mDeviceActive = false;
@@ -258,8 +263,7 @@
if (!mDeviceActive) {
getCameraService()->updateProxyDeviceState(
- ICameraServiceProxy::CAMERA_STATE_ACTIVE,
- String8::format("%d", TClientBase::mCameraId));
+ ICameraServiceProxy::CAMERA_STATE_ACTIVE, TClientBase::mCameraIdStr);
}
mDeviceActive = true;
@@ -307,6 +311,12 @@
}
template <typename TClientBase>
+void Camera2ClientBase<TClientBase>::notifyRequestQueueEmpty() {
+
+ ALOGV("%s: Request queue now empty", __FUNCTION__);
+}
+
+template <typename TClientBase>
void Camera2ClientBase<TClientBase>::notifyRepeatingRequestError(long lastFrameNumber) {
(void)lastFrameNumber;
@@ -316,7 +326,7 @@
template <typename TClientBase>
int Camera2ClientBase<TClientBase>::getCameraId() const {
- return TClientBase::mCameraId;
+ return std::stoi(TClientBase::mCameraIdStr.string());
}
template <typename TClientBase>
@@ -331,7 +341,7 @@
template <typename TClientBase>
const sp<CameraService>& Camera2ClientBase<TClientBase>::getCameraService() {
- return TClientBase::mCameraService;
+ return TClientBase::sCameraService;
}
template <typename TClientBase>
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index dbbf638..e898d5d 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -18,7 +18,6 @@
#define ANDROID_SERVERS_CAMERA_CAMERA2CLIENT_BASE_H
#include "common/CameraDeviceBase.h"
-#include "common/CameraModule.h"
#include "camera/CaptureResult.h"
namespace android {
@@ -49,14 +48,14 @@
Camera2ClientBase(const sp<CameraService>& cameraService,
const sp<TCamCallbacks>& remoteCallback,
const String16& clientPackageName,
- int cameraId,
+ const String8& cameraId,
int cameraFacing,
int clientPid,
uid_t clientUid,
int servicePid);
virtual ~Camera2ClientBase();
- virtual status_t initialize(CameraModule *module);
+ virtual status_t initialize(sp<CameraProviderManager> manager);
virtual status_t dumpClient(int fd, const Vector<String16>& args);
/**
@@ -73,6 +72,7 @@
virtual void notifyAutoWhitebalance(uint8_t newState,
int triggerId);
virtual void notifyPrepared(int streamId);
+ virtual void notifyRequestQueueEmpty();
virtual void notifyRepeatingRequestError(long lastFrameNumber);
int getCameraId() const;
@@ -139,6 +139,10 @@
virtual void detachDevice();
bool mDeviceActive;
+
+private:
+ template<typename TProviderPtr>
+ status_t initializeImpl(TProviderPtr providerPtr);
};
}; // namespace android
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 984d84b..d9059f3 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -17,6 +17,8 @@
#ifndef ANDROID_SERVERS_CAMERA_CAMERADEVICEBASE_H
#define ANDROID_SERVERS_CAMERA_CAMERADEVICEBASE_H
+#include <list>
+
#include <utils/RefBase.h>
#include <utils/String8.h>
#include <utils/String16.h>
@@ -28,13 +30,17 @@
#include "hardware/camera3.h"
#include "camera/CameraMetadata.h"
#include "camera/CaptureResult.h"
-#include "common/CameraModule.h"
#include "gui/IGraphicBufferProducer.h"
#include "device3/Camera3StreamInterface.h"
#include "binder/Status.h"
namespace android {
+class CameraProviderManager;
+
+// Mapping of output stream index to surface ids
+typedef std::unordered_map<int, std::vector<size_t> > SurfaceMap;
+
/**
* Base interface for version >= 2 camera device classes, which interface to
* camera HAL device versions >= 2.
@@ -46,9 +52,9 @@
/**
* The device's camera ID
*/
- virtual int getId() const = 0;
+ virtual const String8& getId() const = 0;
- virtual status_t initialize(CameraModule *module) = 0;
+ virtual status_t initialize(sp<CameraProviderManager> manager) = 0;
virtual status_t disconnect() = 0;
virtual status_t dump(int fd, const Vector<String16> &args) = 0;
@@ -70,6 +76,7 @@
* Output lastFrameNumber is the expected last frame number of the list of requests.
*/
virtual status_t captureList(const List<const CameraMetadata> &requests,
+ const std::list<const SurfaceMap> &surfaceMaps,
int64_t *lastFrameNumber = NULL) = 0;
/**
@@ -85,6 +92,7 @@
* Output lastFrameNumber is the last frame number of the previous streaming request.
*/
virtual status_t setStreamingRequestList(const List<const CameraMetadata> &requests,
+ const std::list<const SurfaceMap> &surfaceMaps,
int64_t *lastFrameNumber = NULL) = 0;
/**
@@ -111,7 +119,20 @@
uint32_t width, uint32_t height, int format,
android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
- uint32_t consumerUsage = 0) = 0;
+ bool isShared = false, uint32_t consumerUsage = 0) = 0;
+
+ /**
+ * Create an output stream of the requested size, format, rotation and
+ * dataspace with a number of consumers.
+ *
+ * For HAL_PIXEL_FORMAT_BLOB formats, the width and height should be the
+ * logical dimensions of the buffer, not the number of bytes.
+ */
+ virtual status_t createStream(const std::vector<sp<Surface>>& consumers,
+ bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+ android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+ int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
+ bool isShared = false, uint32_t consumerUsage = 0) = 0;
/**
* Create an input stream of width, height, and format.
@@ -122,12 +143,6 @@
int32_t format, /*out*/ int32_t *id) = 0;
/**
- * Create an input reprocess stream that uses buffers from an existing
- * output stream.
- */
- virtual status_t createReprocessStreamFromStream(int outputId, int *id) = 0;
-
- /**
* Get information about a given stream.
*/
virtual status_t getStreamInfo(int id,
@@ -146,12 +161,6 @@
virtual status_t deleteStream(int id) = 0;
/**
- * Delete reprocess stream. Must not be called if there are requests in
- * flight which reference that stream.
- */
- virtual status_t deleteReprocessStream(int id) = 0;
-
- /**
* Take the currently-defined set of streams and configure the HAL to use
* them. This is a long-running operation (may be several hundered ms).
*
@@ -161,7 +170,7 @@
* - BAD_VALUE if the set of streams was invalid (e.g. fmts or sizes)
* - INVALID_OPERATION if the device was in the wrong state
*/
- virtual status_t configureStreams(bool isConstrainedHighSpeed = false) = 0;
+ virtual status_t configureStreams(int operatingMode = 0) = 0;
// get the buffer producer of the input stream
virtual status_t getInputBufferProducer(
@@ -204,6 +213,7 @@
virtual void notifyShutter(const CaptureResultExtras &resultExtras,
nsecs_t timestamp) = 0;
virtual void notifyPrepared(int streamId) = 0;
+ virtual void notifyRequestQueueEmpty() = 0;
// Required only for API1
virtual void notifyAutoFocus(uint8_t newState, int triggerId) = 0;
@@ -265,21 +275,6 @@
virtual status_t triggerPrecaptureMetering(uint32_t id) = 0;
/**
- * Abstract interface for clients that want to listen to reprocess buffer
- * release events
- */
- struct BufferReleasedListener : public virtual RefBase {
- virtual void onBufferReleased(buffer_handle_t *handle) = 0;
- };
-
- /**
- * Push a buffer to be reprocessed into a reprocessing stream, and
- * provide a listener to call once the buffer is returned by the HAL
- */
- virtual status_t pushReprocessBuffer(int reprocessStreamId,
- buffer_handle_t *buffer, wp<BufferReleasedListener> listener) = 0;
-
- /**
* Flush all pending and in-flight requests. Blocks until flush is
* complete.
* Output lastFrameNumber is the last frame number of the previous streaming request.
@@ -310,14 +305,10 @@
virtual status_t prepare(int maxCount, int streamId) = 0;
/**
- * Get the HAL device version.
- */
- virtual uint32_t getDeviceVersion() = 0;
-
- /**
* Set the deferred consumer surface and finish the rest of the stream configuration.
*/
- virtual status_t setConsumerSurface(int streamId, sp<Surface> consumer) = 0;
+ virtual status_t setConsumerSurfaces(int streamId,
+ const std::vector<sp<Surface>>& consumers) = 0;
};
diff --git a/services/camera/libcameraservice/common/CameraModule.cpp b/services/camera/libcameraservice/common/CameraModule.cpp
deleted file mode 100644
index 073144c..0000000
--- a/services/camera/libcameraservice/common/CameraModule.cpp
+++ /dev/null
@@ -1,403 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "CameraModule"
-#define ATRACE_TAG ATRACE_TAG_CAMERA
-//#define LOG_NDEBUG 0
-
-#include <utils/Trace.h>
-
-#include "CameraModule.h"
-
-namespace android {
-
-void CameraModule::deriveCameraCharacteristicsKeys(
- uint32_t deviceVersion, CameraMetadata &chars) {
- ATRACE_CALL();
-
- Vector<int32_t> derivedCharKeys;
- Vector<int32_t> derivedRequestKeys;
- Vector<int32_t> derivedResultKeys;
- // Keys added in HAL3.3
- if (deviceVersion < CAMERA_DEVICE_API_VERSION_3_3) {
- Vector<uint8_t> controlModes;
- uint8_t data = ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE;
- chars.update(ANDROID_CONTROL_AE_LOCK_AVAILABLE, &data, /*count*/1);
- data = ANDROID_CONTROL_AWB_LOCK_AVAILABLE_TRUE;
- chars.update(ANDROID_CONTROL_AWB_LOCK_AVAILABLE, &data, /*count*/1);
- controlModes.push(ANDROID_CONTROL_MODE_AUTO);
- camera_metadata_entry entry = chars.find(ANDROID_CONTROL_AVAILABLE_SCENE_MODES);
- if (entry.count > 1 || entry.data.u8[0] != ANDROID_CONTROL_SCENE_MODE_DISABLED) {
- controlModes.push(ANDROID_CONTROL_MODE_USE_SCENE_MODE);
- }
-
- // Only advertise CONTROL_OFF mode if 3A manual controls are supported.
- bool isManualAeSupported = false;
- bool isManualAfSupported = false;
- bool isManualAwbSupported = false;
- entry = chars.find(ANDROID_CONTROL_AE_AVAILABLE_MODES);
- if (entry.count > 0) {
- for (size_t i = 0; i < entry.count; i++) {
- if (entry.data.u8[i] == ANDROID_CONTROL_AE_MODE_OFF) {
- isManualAeSupported = true;
- break;
- }
- }
- }
- entry = chars.find(ANDROID_CONTROL_AF_AVAILABLE_MODES);
- if (entry.count > 0) {
- for (size_t i = 0; i < entry.count; i++) {
- if (entry.data.u8[i] == ANDROID_CONTROL_AF_MODE_OFF) {
- isManualAfSupported = true;
- break;
- }
- }
- }
- entry = chars.find(ANDROID_CONTROL_AWB_AVAILABLE_MODES);
- if (entry.count > 0) {
- for (size_t i = 0; i < entry.count; i++) {
- if (entry.data.u8[i] == ANDROID_CONTROL_AWB_MODE_OFF) {
- isManualAwbSupported = true;
- break;
- }
- }
- }
- if (isManualAeSupported && isManualAfSupported && isManualAwbSupported) {
- controlModes.push(ANDROID_CONTROL_MODE_OFF);
- }
-
- chars.update(ANDROID_CONTROL_AVAILABLE_MODES, controlModes);
-
- entry = chars.find(ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS);
- // HAL3.2 devices passing existing CTS test should all support all LSC modes and LSC map
- bool lensShadingModeSupported = false;
- if (entry.count > 0) {
- for (size_t i = 0; i < entry.count; i++) {
- if (entry.data.i32[i] == ANDROID_SHADING_MODE) {
- lensShadingModeSupported = true;
- break;
- }
- }
- }
- Vector<uint8_t> lscModes;
- Vector<uint8_t> lscMapModes;
- lscModes.push(ANDROID_SHADING_MODE_FAST);
- lscModes.push(ANDROID_SHADING_MODE_HIGH_QUALITY);
- lscMapModes.push(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF);
- if (lensShadingModeSupported) {
- lscModes.push(ANDROID_SHADING_MODE_OFF);
- lscMapModes.push(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_ON);
- }
- chars.update(ANDROID_SHADING_AVAILABLE_MODES, lscModes);
- chars.update(ANDROID_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES, lscMapModes);
-
- derivedCharKeys.push(ANDROID_CONTROL_AE_LOCK_AVAILABLE);
- derivedCharKeys.push(ANDROID_CONTROL_AWB_LOCK_AVAILABLE);
- derivedCharKeys.push(ANDROID_CONTROL_AVAILABLE_MODES);
- derivedCharKeys.push(ANDROID_SHADING_AVAILABLE_MODES);
- derivedCharKeys.push(ANDROID_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES);
-
- // Need update android.control.availableHighSpeedVideoConfigurations since HAL3.3
- // adds batch size to this array.
- entry = chars.find(ANDROID_CONTROL_AVAILABLE_HIGH_SPEED_VIDEO_CONFIGURATIONS);
- if (entry.count > 0) {
- Vector<int32_t> highSpeedConfig;
- for (size_t i = 0; i < entry.count; i += 4) {
- highSpeedConfig.add(entry.data.i32[i]); // width
- highSpeedConfig.add(entry.data.i32[i + 1]); // height
- highSpeedConfig.add(entry.data.i32[i + 2]); // fps_min
- highSpeedConfig.add(entry.data.i32[i + 3]); // fps_max
- highSpeedConfig.add(1); // batchSize_max. default to 1 for HAL3.2
- }
- chars.update(ANDROID_CONTROL_AVAILABLE_HIGH_SPEED_VIDEO_CONFIGURATIONS,
- highSpeedConfig);
- }
- }
-
- // Keys added in HAL3.4
- if (deviceVersion < CAMERA_DEVICE_API_VERSION_3_4) {
- // Check if HAL supports RAW_OPAQUE output
- camera_metadata_entry entry = chars.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
- bool supportRawOpaque = false;
- bool supportAnyRaw = false;
- const int STREAM_CONFIGURATION_SIZE = 4;
- const int STREAM_FORMAT_OFFSET = 0;
- const int STREAM_WIDTH_OFFSET = 1;
- const int STREAM_HEIGHT_OFFSET = 2;
- const int STREAM_IS_INPUT_OFFSET = 3;
- Vector<int32_t> rawOpaqueSizes;
-
- for (size_t i=0; i < entry.count; i += STREAM_CONFIGURATION_SIZE) {
- int32_t format = entry.data.i32[i + STREAM_FORMAT_OFFSET];
- int32_t width = entry.data.i32[i + STREAM_WIDTH_OFFSET];
- int32_t height = entry.data.i32[i + STREAM_HEIGHT_OFFSET];
- int32_t isInput = entry.data.i32[i + STREAM_IS_INPUT_OFFSET];
- if (isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT &&
- format == HAL_PIXEL_FORMAT_RAW_OPAQUE) {
- supportRawOpaque = true;
- rawOpaqueSizes.push(width);
- rawOpaqueSizes.push(height);
- // 2 bytes per pixel. This rough estimation is only used when
- // HAL does not fill in the opaque raw size
- rawOpaqueSizes.push(width * height *2);
- }
- if (isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT &&
- (format == HAL_PIXEL_FORMAT_RAW16 ||
- format == HAL_PIXEL_FORMAT_RAW10 ||
- format == HAL_PIXEL_FORMAT_RAW12 ||
- format == HAL_PIXEL_FORMAT_RAW_OPAQUE)) {
- supportAnyRaw = true;
- }
- }
-
- if (supportRawOpaque) {
- entry = chars.find(ANDROID_SENSOR_OPAQUE_RAW_SIZE);
- if (entry.count == 0) {
- // Fill in estimated value if HAL does not list it
- chars.update(ANDROID_SENSOR_OPAQUE_RAW_SIZE, rawOpaqueSizes);
- derivedCharKeys.push(ANDROID_SENSOR_OPAQUE_RAW_SIZE);
- }
- }
-
- // Check if HAL supports any RAW output, if so, fill in postRawSensitivityBoost range
- if (supportAnyRaw) {
- int32_t defaultRange[2] = {100, 100};
- entry = chars.find(ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST_RANGE);
- if (entry.count == 0) {
- // Fill in default value (100, 100)
- chars.update(
- ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST_RANGE,
- defaultRange, 2);
- derivedCharKeys.push(ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST_RANGE);
- // Actual request/results will be derived by camera device.
- derivedRequestKeys.push(ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST);
- derivedResultKeys.push(ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST);
- }
- }
- }
-
- // Always add a default for the pre-correction active array if the vendor chooses to omit this
- camera_metadata_entry entry = chars.find(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE);
- if (entry.count == 0) {
- Vector<int32_t> preCorrectionArray;
- entry = chars.find(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE);
- preCorrectionArray.appendArray(entry.data.i32, entry.count);
- chars.update(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE, preCorrectionArray);
- derivedCharKeys.push(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE);
- }
-
- // Add those newly added keys to AVAILABLE_CHARACTERISTICS_KEYS
- // This has to be done at this end of this function.
- if (derivedCharKeys.size() > 0) {
- appendAvailableKeys(
- chars, ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS, derivedCharKeys);
- }
- if (derivedRequestKeys.size() > 0) {
- appendAvailableKeys(
- chars, ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS, derivedRequestKeys);
- }
- if (derivedResultKeys.size() > 0) {
- appendAvailableKeys(
- chars, ANDROID_REQUEST_AVAILABLE_RESULT_KEYS, derivedResultKeys);
- }
- return;
-}
-
-void CameraModule::appendAvailableKeys(CameraMetadata &chars,
- int32_t keyTag, const Vector<int32_t>& appendKeys) {
- camera_metadata_entry entry = chars.find(keyTag);
- Vector<int32_t> availableKeys;
- availableKeys.setCapacity(entry.count + appendKeys.size());
- for (size_t i = 0; i < entry.count; i++) {
- availableKeys.push(entry.data.i32[i]);
- }
- for (size_t i = 0; i < appendKeys.size(); i++) {
- availableKeys.push(appendKeys[i]);
- }
- chars.update(keyTag, availableKeys);
-}
-
-CameraModule::CameraModule(camera_module_t *module) {
- if (module == NULL) {
- ALOGE("%s: camera hardware module must not be null", __FUNCTION__);
- assert(0);
- }
- mModule = module;
-}
-
-CameraModule::~CameraModule()
-{
- while (mCameraInfoMap.size() > 0) {
- camera_info cameraInfo = mCameraInfoMap.editValueAt(0);
- if (cameraInfo.static_camera_characteristics != NULL) {
- free_camera_metadata(
- const_cast<camera_metadata_t*>(cameraInfo.static_camera_characteristics));
- }
- mCameraInfoMap.removeItemsAt(0);
- }
-}
-
-int CameraModule::init() {
- ATRACE_CALL();
- int res = OK;
- if (getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_4 &&
- mModule->init != NULL) {
- ATRACE_BEGIN("camera_module->init");
- res = mModule->init();
- ATRACE_END();
- }
- mCameraInfoMap.setCapacity(getNumberOfCameras());
- return res;
-}
-
-int CameraModule::getCameraInfo(int cameraId, struct camera_info *info) {
- ATRACE_CALL();
- Mutex::Autolock lock(mCameraInfoLock);
- if (cameraId < 0) {
- ALOGE("%s: Invalid camera ID %d", __FUNCTION__, cameraId);
- return -EINVAL;
- }
-
- // Only override static_camera_characteristics for API2 devices
- int apiVersion = mModule->common.module_api_version;
- if (apiVersion < CAMERA_MODULE_API_VERSION_2_0) {
- int ret;
- ATRACE_BEGIN("camera_module->get_camera_info");
- ret = mModule->get_camera_info(cameraId, info);
- // Fill in this so CameraService won't be confused by
- // possibly 0 device_version
- info->device_version = CAMERA_DEVICE_API_VERSION_1_0;
- ATRACE_END();
- return ret;
- }
-
- ssize_t index = mCameraInfoMap.indexOfKey(cameraId);
- if (index == NAME_NOT_FOUND) {
- // Get camera info from raw module and cache it
- camera_info rawInfo, cameraInfo;
- ATRACE_BEGIN("camera_module->get_camera_info");
- int ret = mModule->get_camera_info(cameraId, &rawInfo);
- ATRACE_END();
- if (ret != 0) {
- return ret;
- }
- int deviceVersion = rawInfo.device_version;
- if (deviceVersion < CAMERA_DEVICE_API_VERSION_3_0) {
- // static_camera_characteristics is invalid
- *info = rawInfo;
- return ret;
- }
- CameraMetadata m;
- m = rawInfo.static_camera_characteristics;
- deriveCameraCharacteristicsKeys(rawInfo.device_version, m);
- cameraInfo = rawInfo;
- cameraInfo.static_camera_characteristics = m.release();
- index = mCameraInfoMap.add(cameraId, cameraInfo);
- }
-
- assert(index != NAME_NOT_FOUND);
- // return the cached camera info
- *info = mCameraInfoMap[index];
- return OK;
-}
-
-int CameraModule::open(const char* id, struct hw_device_t** device) {
- int res;
- ATRACE_BEGIN("camera_module->open");
- res = filterOpenErrorCode(mModule->common.methods->open(&mModule->common, id, device));
- ATRACE_END();
- return res;
-}
-
-int CameraModule::openLegacy(
- const char* id, uint32_t halVersion, struct hw_device_t** device) {
- int res;
- ATRACE_BEGIN("camera_module->open_legacy");
- res = mModule->open_legacy(&mModule->common, id, halVersion, device);
- ATRACE_END();
- return res;
-}
-
-int CameraModule::getNumberOfCameras() {
- int numCameras;
- ATRACE_BEGIN("camera_module->get_number_of_cameras");
- numCameras = mModule->get_number_of_cameras();
- ATRACE_END();
- return numCameras;
-}
-
-int CameraModule::setCallbacks(const camera_module_callbacks_t *callbacks) {
- int res;
- ATRACE_BEGIN("camera_module->set_callbacks");
- res = mModule->set_callbacks(callbacks);
- ATRACE_END();
- return res;
-}
-
-bool CameraModule::isVendorTagDefined() {
- return mModule->get_vendor_tag_ops != NULL;
-}
-
-void CameraModule::getVendorTagOps(vendor_tag_ops_t* ops) {
- if (mModule->get_vendor_tag_ops) {
- ATRACE_BEGIN("camera_module->get_vendor_tag_ops");
- mModule->get_vendor_tag_ops(ops);
- ATRACE_END();
- }
-}
-
-int CameraModule::setTorchMode(const char* camera_id, bool enable) {
- int res;
- ATRACE_BEGIN("camera_module->set_torch_mode");
- res = mModule->set_torch_mode(camera_id, enable);
- ATRACE_END();
- return res;
-}
-
-status_t CameraModule::filterOpenErrorCode(status_t err) {
- switch(err) {
- case NO_ERROR:
- case -EBUSY:
- case -EINVAL:
- case -EUSERS:
- return err;
- default:
- break;
- }
- return -ENODEV;
-}
-
-uint16_t CameraModule::getModuleApiVersion() {
- return mModule->common.module_api_version;
-}
-
-const char* CameraModule::getModuleName() {
- return mModule->common.name;
-}
-
-uint16_t CameraModule::getHalApiVersion() {
- return mModule->common.hal_api_version;
-}
-
-const char* CameraModule::getModuleAuthor() {
- return mModule->common.author;
-}
-
-void* CameraModule::getDso() {
- return mModule->common.dso;
-}
-
-}; // namespace android
diff --git a/services/camera/libcameraservice/common/CameraModule.h b/services/camera/libcameraservice/common/CameraModule.h
deleted file mode 100644
index d131a26..0000000
--- a/services/camera/libcameraservice/common/CameraModule.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_SERVERS_CAMERA_CAMERAMODULE_H
-#define ANDROID_SERVERS_CAMERA_CAMERAMODULE_H
-
-#include <hardware/camera.h>
-#include <camera/CameraMetadata.h>
-#include <utils/Mutex.h>
-#include <utils/KeyedVector.h>
-
-namespace android {
-/**
- * A wrapper class for HAL camera module.
- *
- * This class wraps camera_module_t returned from HAL to provide a wrapped
- * get_camera_info implementation which CameraService generates some
- * camera characteristics keys defined in newer HAL version on an older HAL.
- */
-class CameraModule {
-public:
- explicit CameraModule(camera_module_t *module);
- virtual ~CameraModule();
-
- // Must be called after construction
- // Returns OK on success, NO_INIT on failure
- int init();
-
- int getCameraInfo(int cameraId, struct camera_info *info);
- int getNumberOfCameras(void);
- int open(const char* id, struct hw_device_t** device);
- int openLegacy(const char* id, uint32_t halVersion, struct hw_device_t** device);
- int setCallbacks(const camera_module_callbacks_t *callbacks);
- bool isVendorTagDefined();
- void getVendorTagOps(vendor_tag_ops_t* ops);
- int setTorchMode(const char* camera_id, bool enable);
- uint16_t getModuleApiVersion();
- const char* getModuleName();
- uint16_t getHalApiVersion();
- const char* getModuleAuthor();
- // Only used by CameraModuleFixture native test. Do NOT use elsewhere.
- void *getDso();
-
-private:
- // Derive camera characteristics keys defined after HAL device version
- static void deriveCameraCharacteristicsKeys(uint32_t deviceVersion, CameraMetadata &chars);
- // Helper function to append available[request|result|chars]Keys
- static void appendAvailableKeys(CameraMetadata &chars,
- int32_t keyTag, const Vector<int32_t>& appendKeys);
- status_t filterOpenErrorCode(status_t err);
- camera_module_t *mModule;
- KeyedVector<int, camera_info> mCameraInfoMap;
- Mutex mCameraInfoLock;
-};
-
-} // namespace android
-
-#endif
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
new file mode 100644
index 0000000..a02090b
--- /dev/null
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -0,0 +1,1347 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "CameraProviderManager"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include "CameraProviderManager.h"
+
+#include <chrono>
+#include <inttypes.h>
+#include <hidl/ServiceManagement.h>
+#include <functional>
+#include <camera_metadata_hidden.h>
+
+namespace android {
+
+using namespace ::android::hardware::camera;
+using namespace ::android::hardware::camera::common::V1_0;
+
+namespace {
+// Hardcoded name for the passthrough HAL implementation, since it can't be discovered via the
+// service manager
+const std::string kLegacyProviderName("legacy/0");
+
+// Slash-separated list of provider types to consider for use via the old camera API
+const std::string kStandardProviderTypes("internal/legacy");
+
+} // anonymous namespace
+
+CameraProviderManager::HardwareServiceInteractionProxy
+CameraProviderManager::sHardwareServiceInteractionProxy{};
+
+CameraProviderManager::~CameraProviderManager() {
+}
+
+status_t CameraProviderManager::initialize(wp<CameraProviderManager::StatusListener> listener,
+ ServiceInteractionProxy* proxy) {
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
+ if (proxy == nullptr) {
+ ALOGE("%s: No valid service interaction proxy provided", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ mListener = listener;
+ mServiceProxy = proxy;
+
+ // Registering will trigger notifications for all already-known providers
+ bool success = mServiceProxy->registerForNotifications(
+ /* instance name, empty means no filter */ "",
+ this);
+ if (!success) {
+ ALOGE("%s: Unable to register with hardware service manager for notifications "
+ "about camera providers", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
+ // See if there's a passthrough HAL, but let's not complain if there's not
+ addProviderLocked(kLegacyProviderName, /*expected*/ false);
+
+ return OK;
+}
+
+int CameraProviderManager::getCameraCount() const {
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
+ int count = 0;
+ for (auto& provider : mProviders) {
+ count += provider->mUniqueDeviceCount;
+ }
+ return count;
+}
+
+int CameraProviderManager::getAPI1CompatibleCameraCount() const {
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
+ int count = 0;
+ for (auto& provider : mProviders) {
+ if (kStandardProviderTypes.find(provider->getType()) != std::string::npos) {
+ count += provider->mUniqueAPI1CompatibleCameraIds.size();
+ }
+ }
+ return count;
+}
+
+std::vector<std::string> CameraProviderManager::getCameraDeviceIds() const {
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
+ std::vector<std::string> deviceIds;
+ for (auto& provider : mProviders) {
+ for (auto& id : provider->mUniqueCameraIds) {
+ deviceIds.push_back(id);
+ }
+ }
+ return deviceIds;
+}
+
+std::vector<std::string> CameraProviderManager::getAPI1CompatibleCameraDeviceIds() const {
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
+ std::vector<std::string> deviceIds;
+ for (auto& provider : mProviders) {
+ if (kStandardProviderTypes.find(provider->getType()) != std::string::npos) {
+ for (auto& id : provider->mUniqueAPI1CompatibleCameraIds) {
+ deviceIds.push_back(id);
+ }
+ }
+ }
+ return deviceIds;
+}
+
+bool CameraProviderManager::isValidDevice(const std::string &id, uint16_t majorVersion) const {
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
+ return isValidDeviceLocked(id, majorVersion);
+}
+
+bool CameraProviderManager::isValidDeviceLocked(const std::string &id, uint16_t majorVersion) const {
+ for (auto& provider : mProviders) {
+ for (auto& deviceInfo : provider->mDevices) {
+ if (deviceInfo->mId == id && deviceInfo->mVersion.get_major() == majorVersion) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+bool CameraProviderManager::hasFlashUnit(const std::string &id) const {
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+ auto deviceInfo = findDeviceInfoLocked(id);
+ if (deviceInfo == nullptr) return false;
+
+ return deviceInfo->hasFlashUnit();
+}
+
+status_t CameraProviderManager::getResourceCost(const std::string &id,
+ CameraResourceCost* cost) const {
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+ auto deviceInfo = findDeviceInfoLocked(id);
+ if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+ *cost = deviceInfo->mResourceCost;
+ return OK;
+}
+
+status_t CameraProviderManager::getCameraInfo(const std::string &id,
+ hardware::CameraInfo* info) const {
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+ auto deviceInfo = findDeviceInfoLocked(id);
+ if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+ return deviceInfo->getCameraInfo(info);
+}
+
+status_t CameraProviderManager::getCameraCharacteristics(const std::string &id,
+ CameraMetadata* characteristics) const {
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+ auto deviceInfo = findDeviceInfoLocked(id, /*minVersion*/ {3,0}, /*maxVersion*/ {4,0});
+ if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+ return deviceInfo->getCameraCharacteristics(characteristics);
+}
+
+status_t CameraProviderManager::getHighestSupportedVersion(const std::string &id,
+ hardware::hidl_version *v) {
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+ hardware::hidl_version maxVersion{0,0};
+ bool found = false;
+ for (auto& provider : mProviders) {
+ for (auto& deviceInfo : provider->mDevices) {
+ if (deviceInfo->mId == id) {
+ if (deviceInfo->mVersion > maxVersion) {
+ maxVersion = deviceInfo->mVersion;
+ found = true;
+ }
+ }
+ }
+ }
+ if (!found) {
+ return NAME_NOT_FOUND;
+ }
+ *v = maxVersion;
+ return OK;
+}
+
+bool CameraProviderManager::supportSetTorchMode(const std::string &id) {
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
+ bool support = false;
+ for (auto& provider : mProviders) {
+ auto deviceInfo = findDeviceInfoLocked(id);
+ if (deviceInfo != nullptr) {
+ provider->mInterface->isSetTorchModeSupported(
+ [&support](auto status, bool supported) {
+ if (status == Status::OK) {
+ support = supported;
+ }
+ });
+ }
+ }
+ return support;
+}
+
+status_t CameraProviderManager::setTorchMode(const std::string &id, bool enabled) {
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+ auto deviceInfo = findDeviceInfoLocked(id);
+ if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+ return deviceInfo->setTorchMode(enabled);
+}
+
+status_t CameraProviderManager::setUpVendorTags() {
+ sp<VendorTagDescriptorCache> tagCache = new VendorTagDescriptorCache();
+
+ for (auto& provider : mProviders) {
+ hardware::hidl_vec<VendorTagSection> vts;
+ Status status;
+ hardware::Return<void> ret;
+ ret = provider->mInterface->getVendorTags(
+ [&](auto s, const auto& vendorTagSecs) {
+ status = s;
+ if (s == Status::OK) {
+ vts = vendorTagSecs;
+ }
+ });
+ if (!ret.isOk()) {
+ ALOGE("%s: Transaction error getting vendor tags from provider '%s': %s",
+ __FUNCTION__, provider->mProviderName.c_str(), ret.description().c_str());
+ return DEAD_OBJECT;
+ }
+ if (status != Status::OK) {
+ return mapToStatusT(status);
+ }
+
+ // Read all vendor tag definitions into a descriptor
+ sp<VendorTagDescriptor> desc;
+ status_t res;
+ if ((res = HidlVendorTagDescriptor::createDescriptorFromHidl(vts, /*out*/desc))
+ != OK) {
+ ALOGE("%s: Could not generate descriptor from vendor tag operations,"
+ "received error %s (%d). Camera clients will not be able to use"
+ "vendor tags", __FUNCTION__, strerror(res), res);
+ return res;
+ }
+
+ tagCache->addVendorDescriptor(provider->mProviderTagid, desc);
+ }
+
+ VendorTagDescriptorCache::setAsGlobalVendorTagCache(tagCache);
+
+ return OK;
+}
+
+status_t CameraProviderManager::openSession(const std::string &id,
+ const sp<hardware::camera::device::V3_2::ICameraDeviceCallback>& callback,
+ /*out*/
+ sp<hardware::camera::device::V3_2::ICameraDeviceSession> *session) {
+
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+ auto deviceInfo = findDeviceInfoLocked(id,
+ /*minVersion*/ {3,0}, /*maxVersion*/ {4,0});
+ if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+ auto *deviceInfo3 = static_cast<ProviderInfo::DeviceInfo3*>(deviceInfo);
+
+ Status status;
+ hardware::Return<void> ret;
+ ret = deviceInfo3->mInterface->open(callback, [&status, &session]
+ (Status s, const sp<device::V3_2::ICameraDeviceSession>& cameraSession) {
+ status = s;
+ if (status == Status::OK) {
+ *session = cameraSession;
+ }
+ });
+ if (!ret.isOk()) {
+ ALOGE("%s: Transaction error opening a session for camera device %s: %s",
+ __FUNCTION__, id.c_str(), ret.description().c_str());
+ return DEAD_OBJECT;
+ }
+ return mapToStatusT(status);
+}
+
+status_t CameraProviderManager::openSession(const std::string &id,
+ const sp<hardware::camera::device::V1_0::ICameraDeviceCallback>& callback,
+ /*out*/
+ sp<hardware::camera::device::V1_0::ICameraDevice> *session) {
+
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+ auto deviceInfo = findDeviceInfoLocked(id,
+ /*minVersion*/ {1,0}, /*maxVersion*/ {2,0});
+ if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+ auto *deviceInfo1 = static_cast<ProviderInfo::DeviceInfo1*>(deviceInfo);
+
+ hardware::Return<Status> status = deviceInfo1->mInterface->open(callback);
+ if (!status.isOk()) {
+ ALOGE("%s: Transaction error opening a session for camera device %s: %s",
+ __FUNCTION__, id.c_str(), status.description().c_str());
+ return DEAD_OBJECT;
+ }
+ if (status == Status::OK) {
+ *session = deviceInfo1->mInterface;
+ }
+ return mapToStatusT(status);
+}
+
+
+hardware::Return<void> CameraProviderManager::onRegistration(
+ const hardware::hidl_string& /*fqName*/,
+ const hardware::hidl_string& name,
+ bool /*preexisting*/) {
+ {
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+ addProviderLocked(name);
+ }
+
+ sp<StatusListener> listener = getStatusListener();
+ if (nullptr != listener.get()) {
+ listener->onNewProviderRegistered();
+ }
+
+ return hardware::Return<void>();
+}
+
+status_t CameraProviderManager::dump(int fd, const Vector<String16>& args) {
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+ for (auto& provider : mProviders) {
+ provider->dump(fd, args);
+ }
+ return OK;
+}
+
+CameraProviderManager::ProviderInfo::DeviceInfo* CameraProviderManager::findDeviceInfoLocked(
+ const std::string& id,
+ hardware::hidl_version minVersion, hardware::hidl_version maxVersion) const {
+ for (auto& provider : mProviders) {
+ for (auto& deviceInfo : provider->mDevices) {
+ if (deviceInfo->mId == id &&
+ minVersion <= deviceInfo->mVersion && maxVersion >= deviceInfo->mVersion) {
+ return deviceInfo.get();
+ }
+ }
+ }
+ return nullptr;
+}
+
+metadata_vendor_id_t CameraProviderManager::getProviderTagIdLocked(
+ const std::string& id, hardware::hidl_version minVersion,
+ hardware::hidl_version maxVersion) const {
+ metadata_vendor_id_t ret = CAMERA_METADATA_INVALID_VENDOR_ID;
+
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
+ for (auto& provider : mProviders) {
+ for (auto& deviceInfo : provider->mDevices) {
+ if (deviceInfo->mId == id &&
+ minVersion <= deviceInfo->mVersion &&
+ maxVersion >= deviceInfo->mVersion) {
+ return provider->mProviderTagid;
+ }
+ }
+ }
+
+ return ret;
+}
+
+status_t CameraProviderManager::addProviderLocked(const std::string& newProvider, bool expected) {
+ for (const auto& providerInfo : mProviders) {
+ if (providerInfo->mProviderName == newProvider) {
+ ALOGW("%s: Camera provider HAL with name '%s' already registered", __FUNCTION__,
+ newProvider.c_str());
+ return ALREADY_EXISTS;
+ }
+ }
+
+ sp<provider::V2_4::ICameraProvider> interface;
+ interface = mServiceProxy->getService(newProvider);
+
+ if (interface == nullptr) {
+ if (expected) {
+ ALOGE("%s: Camera provider HAL '%s' is not actually available", __FUNCTION__,
+ newProvider.c_str());
+ return BAD_VALUE;
+ } else {
+ return OK;
+ }
+ }
+
+ sp<ProviderInfo> providerInfo =
+ new ProviderInfo(newProvider, interface, this);
+ status_t res = providerInfo->initialize();
+ if (res != OK) {
+ return res;
+ }
+
+ mProviders.push_back(providerInfo);
+
+ return OK;
+}
+
+status_t CameraProviderManager::removeProvider(const std::string& provider) {
+ std::unique_lock<std::mutex> lock(mInterfaceMutex);
+ std::vector<String8> removedDeviceIds;
+ status_t res = NAME_NOT_FOUND;
+ for (auto it = mProviders.begin(); it != mProviders.end(); it++) {
+ if ((*it)->mProviderName == provider) {
+ removedDeviceIds.reserve((*it)->mDevices.size());
+ for (auto& deviceInfo : (*it)->mDevices) {
+ removedDeviceIds.push_back(String8(deviceInfo->mId.c_str()));
+ }
+ mProviders.erase(it);
+ res = OK;
+ break;
+ }
+ }
+ if (res != OK) {
+ ALOGW("%s: Camera provider HAL with name '%s' is not registered", __FUNCTION__,
+ provider.c_str());
+ } else {
+ // Inform camera service of loss of presence for all the devices from this provider,
+ // without lock held for reentrancy
+ sp<StatusListener> listener = getStatusListener();
+ if (listener != nullptr) {
+ lock.unlock();
+ for (auto& id : removedDeviceIds) {
+ listener->onDeviceStatusChanged(id, CameraDeviceStatus::NOT_PRESENT);
+ }
+ }
+ }
+ return res;
+}
+
+sp<CameraProviderManager::StatusListener> CameraProviderManager::getStatusListener() const {
+ return mListener.promote();
+}
+
+/**** Methods for ProviderInfo ****/
+
+
+CameraProviderManager::ProviderInfo::ProviderInfo(
+ const std::string &providerName,
+ sp<provider::V2_4::ICameraProvider>& interface,
+ CameraProviderManager *manager) :
+ mProviderName(providerName),
+ mInterface(interface),
+ mProviderTagid(generateVendorTagId(providerName)),
+ mUniqueDeviceCount(0),
+ mManager(manager) {
+ (void) mManager;
+}
+
+/**
+ * Set up callbacks and death notifications with the provider HAL, then
+ * enumerate its initial set of camera devices and record the unique (and
+ * API1-compatible) camera IDs.
+ *
+ * Returns OK on success, DEAD_OBJECT on transport failure, or a mapped HAL
+ * status on a HAL-level error.
+ */
+status_t CameraProviderManager::ProviderInfo::initialize() {
+    status_t res = parseProviderName(mProviderName, &mType, &mId);
+    if (res != OK) {
+        ALOGE("%s: Invalid provider name, ignoring", __FUNCTION__);
+        return BAD_VALUE;
+    }
+    ALOGI("Connecting to new camera provider: %s, isRemote? %d",
+            mProviderName.c_str(), mInterface->isRemote());
+    hardware::Return<Status> status = mInterface->setCallback(this);
+    if (!status.isOk()) {
+        ALOGE("%s: Transaction error setting up callbacks with camera provider '%s': %s",
+                __FUNCTION__, mProviderName.c_str(), status.description().c_str());
+        return DEAD_OBJECT;
+    }
+    if (status != Status::OK) {
+        ALOGE("%s: Unable to register callbacks with camera provider '%s'",
+                __FUNCTION__, mProviderName.c_str());
+        return mapToStatusT(status);
+    }
+
+    // Death notifications let the manager drop this provider if its process dies;
+    // a failure to link is non-fatal.
+    hardware::Return<bool> linked = mInterface->linkToDeath(this, /*cookie*/ mId);
+    if (!linked.isOk()) {
+        ALOGE("%s: Transaction error in linking to camera provider '%s' death: %s",
+                __FUNCTION__, mProviderName.c_str(), linked.description().c_str());
+        return DEAD_OBJECT;
+    } else if (!linked) {
+        ALOGW("%s: Unable to link to provider '%s' death notifications",
+                __FUNCTION__, mProviderName.c_str());
+    }
+
+    // Get initial list of camera devices, if any
+    std::vector<std::string> devices;
+    hardware::Return<void> ret = mInterface->getCameraIdList([&status, &devices](
+            Status idStatus,
+            const hardware::hidl_vec<hardware::hidl_string>& cameraDeviceNames) {
+        status = idStatus;
+        if (status == Status::OK) {
+            for (size_t i = 0; i < cameraDeviceNames.size(); i++) {
+                devices.push_back(cameraDeviceNames[i]);
+            }
+        } });
+    if (!ret.isOk()) {
+        // Log this transaction's own error description (previously the stale
+        // linkToDeath description was printed here by mistake)
+        ALOGE("%s: Transaction error in getting camera ID list from provider '%s': %s",
+                __FUNCTION__, mProviderName.c_str(), ret.description().c_str());
+        return DEAD_OBJECT;
+    }
+    if (status != Status::OK) {
+        ALOGE("%s: Unable to query for camera devices from provider '%s'",
+                __FUNCTION__, mProviderName.c_str());
+        return mapToStatusT(status);
+    }
+
+    for (auto& device : devices) {
+        std::string id;
+        // Reuse the outer 'res' (the old inner declaration shadowed it);
+        // enumeration failures skip the device but do not abort initialization
+        res = addDevice(device,
+                hardware::camera::common::V1_0::CameraDeviceStatus::PRESENT, &id);
+        if (res != OK) {
+            ALOGE("%s: Unable to enumerate camera device '%s': %s (%d)",
+                    __FUNCTION__, device.c_str(), strerror(-res), res);
+            continue;
+        }
+    }
+
+    // Multiple HAL versions may expose the same camera ID; track the unique
+    // set and the subset reachable through the legacy API1 path
+    for (auto& device : mDevices) {
+        mUniqueCameraIds.insert(device->mId);
+        if (device->isAPI1Compatible()) {
+            mUniqueAPI1CompatibleCameraIds.insert(device->mId);
+        }
+    }
+    mUniqueDeviceCount = mUniqueCameraIds.size();
+
+    ALOGI("Camera provider %s ready with %zu camera devices",
+            mProviderName.c_str(), mDevices.size());
+
+    return OK;
+}
+
+// Accessor for the provider type string parsed from the provider name.
+const std::string& CameraProviderManager::ProviderInfo::getType() const {
+    return this->mType;
+}
+
+// Parses and validates a HAL device name of the form
+// "device@<major>.<minor>/<type>/<id>", instantiates the DeviceInfo subclass
+// matching the major HAL version (1.x or 3.x), and appends it to mDevices.
+// On success, optionally reports the parsed camera ID through parsedId.
+// Returns BAD_VALUE for malformed, mismatched, duplicate, or unsupported names.
+status_t CameraProviderManager::ProviderInfo::addDevice(const std::string& name,
+        CameraDeviceStatus initialStatus, /*out*/ std::string* parsedId) {
+
+    ALOGI("Enumerating new camera device: %s", name.c_str());
+
+    uint16_t major, minor;
+    std::string type, id;
+
+    status_t res = parseDeviceName(name, &major, &minor, &type, &id);
+    if (res != OK) {
+        return res;
+    }
+    // The device's type segment must match this provider's own type
+    if (type != mType) {
+        ALOGE("%s: Device type %s does not match provider type %s", __FUNCTION__,
+                type.c_str(), mType.c_str());
+        return BAD_VALUE;
+    }
+    // Reject IDs already registered for the same major HAL version
+    if (mManager->isValidDeviceLocked(id, major)) {
+        ALOGE("%s: Device %s: ID %s is already in use for device major version %d", __FUNCTION__,
+                name.c_str(), id.c_str(), major);
+        return BAD_VALUE;
+    }
+
+    // Dispatch on major HAL version; only 1.x and 3.x device HALs are supported
+    std::unique_ptr<DeviceInfo> deviceInfo;
+    switch (major) {
+        case 1:
+            deviceInfo = initializeDeviceInfo<DeviceInfo1>(name, mProviderTagid,
+                    id, minor);
+            break;
+        case 3:
+            deviceInfo = initializeDeviceInfo<DeviceInfo3>(name, mProviderTagid,
+                    id, minor);
+            break;
+        default:
+            ALOGE("%s: Device %s: Unknown HIDL device HAL major version %d:", __FUNCTION__,
+                    name.c_str(), major);
+            return BAD_VALUE;
+    }
+    if (deviceInfo == nullptr) return BAD_VALUE;
+    deviceInfo->mStatus = initialStatus;
+
+    mDevices.push_back(std::move(deviceInfo));
+
+    if (parsedId != nullptr) {
+        *parsedId = id;
+    }
+    return OK;
+}
+
+// Dumps human-readable static information for this provider and each of its
+// devices to fd: resource cost, conflicting devices, API1 info (flash,
+// facing, orientation), and full API2 characteristics where available.
+// The Vector<String16> argument (dump args) is unused.
+status_t CameraProviderManager::ProviderInfo::dump(int fd, const Vector<String16>&) const {
+    dprintf(fd, "== Camera Provider HAL %s (v2.4, %s) static info: %zu devices: ==\n",
+            mProviderName.c_str(), mInterface->isRemote() ? "remote" : "passthrough",
+            mDevices.size());
+
+    for (auto& device : mDevices) {
+        dprintf(fd, "== Camera HAL device %s (v%d.%d) static information: ==\n", device->mName.c_str(),
+                device->mVersion.get_major(), device->mVersion.get_minor());
+        dprintf(fd, " Resource cost: %d\n", device->mResourceCost.resourceCost);
+        if (device->mResourceCost.conflictingDevices.size() == 0) {
+            dprintf(fd, " Conflicting devices: None\n");
+        } else {
+            dprintf(fd, " Conflicting devices:\n");
+            for (size_t i = 0; i < device->mResourceCost.conflictingDevices.size(); i++) {
+                dprintf(fd, " %s\n",
+                        device->mResourceCost.conflictingDevices[i].c_str());
+            }
+        }
+        dprintf(fd, " API1 info:\n");
+        dprintf(fd, " Has a flash unit: %s\n",
+                device->hasFlashUnit() ? "true" : "false");
+        hardware::CameraInfo info;
+        status_t res = device->getCameraInfo(&info);
+        if (res != OK) {
+            dprintf(fd, " <Error reading camera info: %s (%d)>\n",
+                    strerror(-res), res);
+        } else {
+            dprintf(fd, " Facing: %s\n",
+                    info.facing == hardware::CAMERA_FACING_BACK ? "Back" : "Front");
+            dprintf(fd, " Orientation: %d\n", info.orientation);
+        }
+        CameraMetadata info2;
+        res = device->getCameraCharacteristics(&info2);
+        // INVALID_OPERATION specifically means this device has no direct API2 path
+        if (res == INVALID_OPERATION) {
+            dprintf(fd, " API2 not directly supported\n");
+        } else if (res != OK) {
+            dprintf(fd, " <Error reading camera characteristics: %s (%d)>\n",
+                    strerror(-res), res);
+        } else {
+            dprintf(fd, " API2 camera characteristics:\n");
+            info2.dump(fd, /*verbosity*/ 2, /*indentation*/ 4);
+        }
+    }
+    return OK;
+}
+
+// HAL callback: a camera device's availability changed. Updates the cached
+// status for a known device, or registers a previously-unseen device (unless
+// the new status is NOT_PRESENT, which is logged and ignored for unknown
+// devices). The status listener is invoked outside the lock to allow
+// reentrancy into the provider manager.
+// NOTE(review): this callback locks mLock while torchModeStatusChange locks
+// mManager->mStatusListenerMutex — confirm the intended locking scheme.
+hardware::Return<void> CameraProviderManager::ProviderInfo::cameraDeviceStatusChange(
+        const hardware::hidl_string& cameraDeviceName,
+        CameraDeviceStatus newStatus) {
+    sp<StatusListener> listener;
+    std::string id;
+    {
+        std::lock_guard<std::mutex> lock(mLock);
+        bool known = false;
+        for (auto& deviceInfo : mDevices) {
+            if (deviceInfo->mName == cameraDeviceName) {
+                ALOGI("Camera device %s status is now %s, was %s", cameraDeviceName.c_str(),
+                        deviceStatusToString(newStatus), deviceStatusToString(deviceInfo->mStatus));
+                deviceInfo->mStatus = newStatus;
+                // TODO: Handle device removal (NOT_PRESENT)
+                id = deviceInfo->mId;
+                known = true;
+                break;
+            }
+        }
+        // Previously unseen device; status must not be NOT_PRESENT
+        if (!known) {
+            if (newStatus == CameraDeviceStatus::NOT_PRESENT) {
+                ALOGW("Camera provider %s says an unknown camera device %s is not present. Curious.",
+                    mProviderName.c_str(), cameraDeviceName.c_str());
+                return hardware::Void();
+            }
+            addDevice(cameraDeviceName, newStatus, &id);
+        }
+        listener = mManager->getStatusListener();
+    }
+    // Call without lock held to allow reentrancy into provider manager
+    if (listener != nullptr) {
+        listener->onDeviceStatusChanged(String8(id.c_str()), newStatus);
+    }
+    return hardware::Void();
+}
+
+// HAL callback: a camera device's torch (flashlight) availability changed.
+// Unknown devices are logged and ignored; for known devices the status
+// listener is notified outside the lock to allow reentrancy into the
+// provider manager.
+hardware::Return<void> CameraProviderManager::ProviderInfo::torchModeStatusChange(
+        const hardware::hidl_string& cameraDeviceName,
+        TorchModeStatus newStatus) {
+    sp<StatusListener> listener;
+    std::string id;
+    {
+        std::lock_guard<std::mutex> lock(mManager->mStatusListenerMutex);
+        bool known = false;
+        for (auto& deviceInfo : mDevices) {
+            if (deviceInfo->mName == cameraDeviceName) {
+                ALOGI("Camera device %s torch status is now %s", cameraDeviceName.c_str(),
+                        torchStatusToString(newStatus));
+                id = deviceInfo->mId;
+                known = true;
+                break;
+            }
+        }
+        if (!known) {
+            ALOGW("Camera provider %s says an unknown camera %s now has torch status %d. Curious.",
+                    mProviderName.c_str(), cameraDeviceName.c_str(), newStatus);
+            return hardware::Void();
+        }
+        listener = mManager->getStatusListener();
+    }
+    // Call without lock held to allow reentrancy into provider manager
+    if (listener != nullptr) {
+        listener->onTorchStatusChanged(String8(id.c_str()), newStatus);
+    }
+    return hardware::Void();
+}
+
+// hidl_death_recipient hook: the provider HAL process died. The cookie was
+// set to mId by linkToDeath in initialize(); a mismatch is logged but the
+// provider is removed from the manager regardless.
+void CameraProviderManager::ProviderInfo::serviceDied(uint64_t cookie,
+        const wp<hidl::base::V1_0::IBase>& who) {
+    (void) who; // identity of the dead interface is not needed here
+    ALOGI("Camera provider '%s' has died; removing it", mProviderName.c_str());
+    if (cookie != mId) {
+        ALOGW("%s: Unexpected serviceDied cookie %" PRIu64 ", expected %" PRIu32,
+                __FUNCTION__, cookie, mId);
+    }
+    mManager->removeProvider(mProviderName);
+}
+
+/**
+ * Create a DeviceInfo subclass instance for a newly discovered device:
+ * obtain the version-specific device interface from the provider, query the
+ * device's resource cost, and construct the DeviceInfo. Returns nullptr on
+ * any failure.
+ */
+template<class DeviceInfoT>
+std::unique_ptr<CameraProviderManager::ProviderInfo::DeviceInfo>
+    CameraProviderManager::ProviderInfo::initializeDeviceInfo(
+        const std::string &name, const metadata_vendor_id_t tagId,
+        const std::string &id, uint16_t minorVersion) const {
+    Status status;
+
+    auto cameraInterface =
+            getDeviceInterface<typename DeviceInfoT::InterfaceT>(name);
+    if (cameraInterface == nullptr) return nullptr;
+
+    CameraResourceCost resourceCost;
+    hardware::Return<void> ret = cameraInterface->getResourceCost([&status, &resourceCost](
+            Status s, CameraResourceCost cost) {
+        status = s;
+        resourceCost = cost;
+    });
+    if (!ret.isOk()) {
+        // Previously the transaction result was ignored, so 'status' was read
+        // uninitialized when the binder call failed
+        ALOGE("%s: Transaction error reading resource cost for camera device %s: %s",
+                __FUNCTION__, name.c_str(), ret.description().c_str());
+        return nullptr;
+    }
+    if (status != Status::OK) {
+        ALOGE("%s: Unable to obtain resource costs for camera device %s: %s", __FUNCTION__,
+                name.c_str(), statusToString(status));
+        return nullptr;
+    }
+    return std::unique_ptr<DeviceInfo>(
+        new DeviceInfoT(name, tagId, id, minorVersion, resourceCost,
+                cameraInterface));
+}
+
+// Generic fallback for unsupported device-interface types: logs an error and
+// returns nullptr. The explicit specializations below handle the supported
+// V1_0 and V3_2 ICameraDevice interfaces.
+template<class InterfaceT>
+sp<InterfaceT>
+CameraProviderManager::ProviderInfo::getDeviceInterface(const std::string &name) const {
+    ALOGE("%s: Device %s: Unknown HIDL device HAL major version %d:", __FUNCTION__,
+            name.c_str(), InterfaceT::version.get_major());
+    return nullptr;
+}
+
+// Specialization for camera HAL 1.x: fetches the V1_0 ICameraDevice
+// interface for 'name' from the provider. Returns nullptr on transport or
+// HAL-level failure.
+template<>
+sp<device::V1_0::ICameraDevice>
+CameraProviderManager::ProviderInfo::getDeviceInterface
+        <device::V1_0::ICameraDevice>(const std::string &name) const {
+    Status status;
+    sp<device::V1_0::ICameraDevice> cameraInterface;
+    hardware::Return<void> ret;
+    ret = mInterface->getCameraDeviceInterface_V1_x(name, [&status, &cameraInterface](
+            Status s, sp<device::V1_0::ICameraDevice> interface) {
+        status = s;
+        cameraInterface = interface;
+    });
+    // 'status' is only read after ret.isOk() confirms the callback ran
+    if (!ret.isOk()) {
+        ALOGE("%s: Transaction error trying to obtain interface for camera device %s: %s",
+                __FUNCTION__, name.c_str(), ret.description().c_str());
+        return nullptr;
+    }
+    if (status != Status::OK) {
+        ALOGE("%s: Unable to obtain interface for camera device %s: %s", __FUNCTION__,
+                name.c_str(), statusToString(status));
+        return nullptr;
+    }
+    return cameraInterface;
+}
+
+// Specialization for camera HAL 3.x: fetches the V3_2 ICameraDevice
+// interface for 'name' from the provider. Returns nullptr on transport or
+// HAL-level failure.
+template<>
+sp<device::V3_2::ICameraDevice>
+CameraProviderManager::ProviderInfo::getDeviceInterface
+        <device::V3_2::ICameraDevice>(const std::string &name) const {
+    Status status;
+    sp<device::V3_2::ICameraDevice> cameraInterface;
+    hardware::Return<void> ret;
+    ret = mInterface->getCameraDeviceInterface_V3_x(name, [&status, &cameraInterface](
+            Status s, sp<device::V3_2::ICameraDevice> interface) {
+        status = s;
+        cameraInterface = interface;
+    });
+    // 'status' is only read after ret.isOk() confirms the callback ran
+    if (!ret.isOk()) {
+        ALOGE("%s: Transaction error trying to obtain interface for camera device %s: %s",
+                __FUNCTION__, name.c_str(), ret.description().c_str());
+        return nullptr;
+    }
+    if (status != Status::OK) {
+        ALOGE("%s: Unable to obtain interface for camera device %s: %s", __FUNCTION__,
+                name.c_str(), statusToString(status));
+        return nullptr;
+    }
+    return cameraInterface;
+}
+
+CameraProviderManager::ProviderInfo::DeviceInfo::~DeviceInfo() {}
+
+// Version-independent torch toggle used by DeviceInfo1/DeviceInfo3. Checks
+// the transport result explicitly: converting a failed, unchecked
+// hardware::Return<> to its value lets the Return destructor abort the
+// process. Returns DEAD_OBJECT on transport failure, else the mapped status.
+template<class InterfaceT>
+status_t CameraProviderManager::ProviderInfo::DeviceInfo::setTorchMode(InterfaceT& interface,
+        bool enabled) {
+    hardware::Return<Status> s = interface->setTorchMode(enabled ? TorchMode::ON : TorchMode::OFF);
+    if (!s.isOk()) {
+        ALOGE("%s: Transaction error setting torch mode: %s",
+                __FUNCTION__, s.description().c_str());
+        return DEAD_OBJECT;
+    }
+    return mapToStatusT(s);
+}
+
+// Constructs state for a camera HAL 1.x device. Determines flash-unit
+// availability by briefly opening the device, reading its default
+// parameters, and checking KEY_SUPPORTED_FLASH_MODES for FLASH_MODE_TORCH.
+CameraProviderManager::ProviderInfo::DeviceInfo1::DeviceInfo1(const std::string& name,
+        const metadata_vendor_id_t tagId, const std::string &id,
+        uint16_t minorVersion,
+        const CameraResourceCost& resourceCost,
+        sp<InterfaceT> interface) :
+        DeviceInfo(name, tagId, id, hardware::hidl_version{1, minorVersion},
+                   resourceCost),
+        mInterface(interface) {
+    // Get default parameters and initialize flash unit availability
+    // Requires powering on the camera device
+    hardware::Return<Status> status = mInterface->open(nullptr);
+    if (!status.isOk()) {
+        ALOGE("%s: Transaction error opening camera device %s to check for a flash unit: %s",
+                __FUNCTION__, mId.c_str(), status.description().c_str());
+        return;
+    }
+    if (status != Status::OK) {
+        ALOGE("%s: Unable to open camera device %s to check for a flash unit: %s", __FUNCTION__,
+                mId.c_str(), CameraProviderManager::statusToString(status));
+        return;
+    }
+    hardware::Return<void> ret;
+    ret = mInterface->getParameters([this](const hardware::hidl_string& parms) {
+        mDefaultParameters.unflatten(String8(parms.c_str()));
+    });
+    if (!ret.isOk()) {
+        // Log this transaction's error, not the stale open() description
+        ALOGE("%s: Transaction error reading camera device %s params to check for a flash unit: %s",
+                __FUNCTION__, mId.c_str(), ret.description().c_str());
+        return;
+    }
+    const char *flashMode =
+            mDefaultParameters.get(CameraParameters::KEY_SUPPORTED_FLASH_MODES);
+    if (flashMode && strstr(flashMode, CameraParameters::FLASH_MODE_TORCH)) {
+        mHasFlashUnit = true;
+    }
+
+    ret = mInterface->close();
+    if (!ret.isOk()) {
+        // Same fix as above: report the close() transaction's own description
+        ALOGE("%s: Transaction error closing camera device %s after check for a flash unit: %s",
+                __FUNCTION__, mId.c_str(), ret.description().c_str());
+    }
+}
+
+CameraProviderManager::ProviderInfo::DeviceInfo1::~DeviceInfo1() {}
+
+// Delegates to the version-independent DeviceInfo::setTorchMode helper using
+// this device's 1.x interface.
+status_t CameraProviderManager::ProviderInfo::DeviceInfo1::setTorchMode(bool enabled) {
+    return DeviceInfo::setTorchMode(mInterface, enabled);
+}
+
+// Fills the legacy CameraInfo struct (facing + orientation) from the 1.x
+// HAL's getCameraInfo call. EXTERNAL facing is mapped to FRONT for the
+// legacy API; unknown facing values fall back to BACK with a warning.
+// Returns DEAD_OBJECT on transport failure or the mapped HAL status.
+status_t CameraProviderManager::ProviderInfo::DeviceInfo1::getCameraInfo(
+        hardware::CameraInfo *info) const {
+    if (info == nullptr) return BAD_VALUE;
+
+    Status status;
+    device::V1_0::CameraInfo cInfo;
+    hardware::Return<void> ret;
+    ret = mInterface->getCameraInfo([&status, &cInfo](Status s, device::V1_0::CameraInfo camInfo) {
+        status = s;
+        cInfo = camInfo;
+    });
+    if (!ret.isOk()) {
+        ALOGE("%s: Transaction error reading camera info from device %s: %s",
+                __FUNCTION__, mId.c_str(), ret.description().c_str());
+        return DEAD_OBJECT;
+    }
+    if (status != Status::OK) {
+        return mapToStatusT(status);
+    }
+
+    switch(cInfo.facing) {
+        case device::V1_0::CameraFacing::BACK:
+            info->facing = hardware::CAMERA_FACING_BACK;
+            break;
+        case device::V1_0::CameraFacing::EXTERNAL:
+            // Map external to front for legacy API
+        case device::V1_0::CameraFacing::FRONT:
+            info->facing = hardware::CAMERA_FACING_FRONT;
+            break;
+        default:
+            ALOGW("%s: Device %s: Unknown camera facing: %d",
+                    __FUNCTION__, mId.c_str(), cInfo.facing);
+            info->facing = hardware::CAMERA_FACING_BACK;
+    }
+    info->orientation = cInfo.orientation;
+
+    return OK;
+}
+
+// Constructs state for a camera HAL 3.x device. Fetches and validates the
+// static camera characteristics, tags them with this provider's vendor tag
+// ID, and derives flash-unit availability from ANDROID_FLASH_INFO_AVAILABLE.
+CameraProviderManager::ProviderInfo::DeviceInfo3::DeviceInfo3(const std::string& name,
+        const metadata_vendor_id_t tagId, const std::string &id,
+        uint16_t minorVersion,
+        const CameraResourceCost& resourceCost,
+        sp<InterfaceT> interface) :
+        DeviceInfo(name, tagId, id, hardware::hidl_version{3, minorVersion},
+                   resourceCost),
+        mInterface(interface) {
+    // Get camera characteristics and initialize flash unit availability
+    Status status;
+    hardware::Return<void> ret;
+    ret = mInterface->getCameraCharacteristics([&status, this](Status s,
+                    device::V3_2::CameraMetadata metadata) {
+        status = s;
+        if (s == Status::OK) {
+            camera_metadata_t *buffer =
+                    reinterpret_cast<camera_metadata_t*>(metadata.data());
+            size_t expectedSize = metadata.size();
+            // Reject structurally invalid metadata; a shifted-but-valid
+            // layout is accepted
+            int res = validate_camera_metadata_structure(buffer, &expectedSize);
+            if (res == OK || res == CAMERA_METADATA_VALIDATION_SHIFTED) {
+                set_camera_metadata_vendor_id(buffer, mProviderTagid);
+                mCameraCharacteristics = buffer;
+            } else {
+                ALOGE("%s: Malformed camera metadata received from HAL", __FUNCTION__);
+                status = Status::INTERNAL_ERROR;
+            }
+        }
+    });
+    if (!ret.isOk()) {
+        ALOGE("%s: Transaction error getting camera characteristics for device %s"
+                " to check for a flash unit: %s", __FUNCTION__, mId.c_str(),
+                ret.description().c_str());
+        return;
+    }
+    if (status != Status::OK) {
+        ALOGE("%s: Unable to get camera characteristics for device %s: %s (%d)",
+                __FUNCTION__, mId.c_str(), CameraProviderManager::statusToString(status), status);
+        return;
+    }
+    camera_metadata_entry flashAvailable =
+            mCameraCharacteristics.find(ANDROID_FLASH_INFO_AVAILABLE);
+    if (flashAvailable.count == 1 &&
+            flashAvailable.data.u8[0] == ANDROID_FLASH_INFO_AVAILABLE_TRUE) {
+        mHasFlashUnit = true;
+    } else {
+        mHasFlashUnit = false;
+    }
+}
+
+CameraProviderManager::ProviderInfo::DeviceInfo3::~DeviceInfo3() {}
+
+// Delegates to the version-independent DeviceInfo::setTorchMode helper using
+// this device's 3.x interface.
+status_t CameraProviderManager::ProviderInfo::DeviceInfo3::setTorchMode(bool enabled) {
+    return DeviceInfo::setTorchMode(mInterface, enabled);
+}
+
+// Fills the legacy CameraInfo struct (facing + orientation) from this
+// device's cached static characteristics. EXTERNAL lens facing is mapped to
+// FRONT for the legacy API. Returns NAME_NOT_FOUND if a required static
+// metadata entry is missing.
+status_t CameraProviderManager::ProviderInfo::DeviceInfo3::getCameraInfo(
+        hardware::CameraInfo *info) const {
+    if (info == nullptr) return BAD_VALUE;
+
+    camera_metadata_ro_entry facing =
+            mCameraCharacteristics.find(ANDROID_LENS_FACING);
+    if (facing.count == 1) {
+        switch (facing.data.u8[0]) {
+            case ANDROID_LENS_FACING_BACK:
+                info->facing = hardware::CAMERA_FACING_BACK;
+                break;
+            case ANDROID_LENS_FACING_EXTERNAL:
+                // Map external to front for legacy API
+            case ANDROID_LENS_FACING_FRONT:
+                info->facing = hardware::CAMERA_FACING_FRONT;
+                break;
+            default:
+                // Previously an unrecognized facing value left info->facing
+                // uninitialized; fall back to BACK, matching DeviceInfo1
+                ALOGW("%s: Device %s: Unknown camera facing: %d",
+                        __FUNCTION__, mId.c_str(), facing.data.u8[0]);
+                info->facing = hardware::CAMERA_FACING_BACK;
+                break;
+        }
+    } else {
+        ALOGE("%s: Unable to find android.lens.facing static metadata", __FUNCTION__);
+        return NAME_NOT_FOUND;
+    }
+
+    camera_metadata_ro_entry orientation =
+            mCameraCharacteristics.find(ANDROID_SENSOR_ORIENTATION);
+    if (orientation.count == 1) {
+        info->orientation = orientation.data.i32[0];
+    } else {
+        ALOGE("%s: Unable to find android.sensor.orientation static metadata", __FUNCTION__);
+        return NAME_NOT_FOUND;
+    }
+
+    return OK;
+}
+// A 3.x device is usable through the legacy API1 path iff its capability
+// list advertises BACKWARD_COMPATIBLE.
+bool CameraProviderManager::ProviderInfo::DeviceInfo3::isAPI1Compatible() const {
+    camera_metadata_ro_entry_t capabilities = mCameraCharacteristics.find(
+            ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+    size_t idx = 0;
+    while (idx < capabilities.count) {
+        if (capabilities.data.u8[idx] ==
+                ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE) {
+            return true;
+        }
+        ++idx;
+    }
+    return false;
+}
+
+// Copies this device's cached static characteristics into *characteristics.
+status_t CameraProviderManager::ProviderInfo::DeviceInfo3::getCameraCharacteristics(
+        CameraMetadata *characteristics) const {
+    if (characteristics == nullptr) {
+        return BAD_VALUE;
+    }
+    *characteristics = mCameraCharacteristics;
+    return OK;
+}
+
+// Parses a provider instance name of the form "<type>/<id>" into its type
+// string and numeric id. Returns INVALID_OPERATION for null out-params and
+// BAD_VALUE for any malformed name; out-params are only written on success.
+status_t CameraProviderManager::ProviderInfo::parseProviderName(const std::string& name,
+        std::string *type, uint32_t *id) {
+    // Format must be "<type>/<id>"
+#define ERROR_MSG_PREFIX "%s: Invalid provider name '%s'. " \
+        "Should match '<type>/<id>' - "
+
+    if (!type || !id) return INVALID_OPERATION;
+
+    std::string::size_type slashIdx = name.find('/');
+    // The separator must exist and must not be the last character
+    if (slashIdx == std::string::npos || slashIdx == name.size() - 1) {
+        ALOGE(ERROR_MSG_PREFIX
+                "does not have / separator between type and id",
+                __FUNCTION__, name.c_str());
+        return BAD_VALUE;
+    }
+
+    std::string typeVal = name.substr(0, slashIdx);
+
+    char *endPtr;
+    errno = 0; // strtol reports range errors via errno
+    long idVal = strtol(name.c_str() + slashIdx + 1, &endPtr, 10);
+    if (errno != 0) {
+        ALOGE(ERROR_MSG_PREFIX
+                "cannot parse provider id as an integer: %s (%d)",
+                __FUNCTION__, name.c_str(), strerror(errno), errno);
+        return BAD_VALUE;
+    }
+    // The id must consume the remainder of the string exactly
+    if (endPtr != name.c_str() + name.size()) {
+        ALOGE(ERROR_MSG_PREFIX
+                "provider id has unexpected length",
+                __FUNCTION__, name.c_str());
+        return BAD_VALUE;
+    }
+    if (idVal < 0) {
+        ALOGE(ERROR_MSG_PREFIX
+                "id is negative: %ld",
+                __FUNCTION__, name.c_str(), idVal);
+        return BAD_VALUE;
+    }
+
+#undef ERROR_MSG_PREFIX
+
+    *type = typeVal;
+    *id = static_cast<uint32_t>(idVal);
+
+    return OK;
+}
+
+// Derives a per-provider vendor tag ID by hashing the provider name. The
+// reserved CAMERA_METADATA_INVALID_VENDOR_ID hash value is remapped to 0.
+metadata_vendor_id_t CameraProviderManager::ProviderInfo::generateVendorTagId(
+        const std::string &name) {
+    std::hash<std::string> hasher;
+    metadata_vendor_id_t tagId = hasher(name);
+    if (tagId == CAMERA_METADATA_INVALID_VENDOR_ID) {
+        // CAMERA_METADATA_INVALID_VENDOR_ID is not a valid hash value
+        tagId = 0;
+    }
+    return tagId;
+}
+
+// Parses a device instance name of the form "device@<major>.<minor>/<type>/<id>"
+// into its version numbers, type, and id strings. Returns INVALID_OPERATION
+// for null out-params and BAD_VALUE for any malformed name; out-params are
+// only written on success.
+status_t CameraProviderManager::ProviderInfo::parseDeviceName(const std::string& name,
+        uint16_t *major, uint16_t *minor, std::string *type, std::string *id) {
+
+    // Format must be "device@<major>.<minor>/<type>/<id>"
+
+#define ERROR_MSG_PREFIX "%s: Invalid device name '%s'. " \
+        "Should match 'device@<major>.<minor>/<type>/<id>' - "
+
+    if (!major || !minor || !type || !id) return INVALID_OPERATION;
+
+    // Verify starting prefix
+    const char expectedPrefix[] = "device@";
+
+    if (name.find(expectedPrefix) != 0) {
+        ALOGE(ERROR_MSG_PREFIX
+                "does not start with '%s'",
+                __FUNCTION__, name.c_str(), expectedPrefix);
+        return BAD_VALUE;
+    }
+
+    // Extract major/minor versions
+    // atIdx is the index of '@' (sizeof includes the NUL terminator)
+    constexpr std::string::size_type atIdx = sizeof(expectedPrefix) - 2;
+    std::string::size_type dotIdx = name.find('.', atIdx);
+    if (dotIdx == std::string::npos) {
+        ALOGE(ERROR_MSG_PREFIX
+                "does not have @<major>. version section",
+                __FUNCTION__, name.c_str());
+        return BAD_VALUE;
+    }
+    std::string::size_type typeSlashIdx = name.find('/', dotIdx);
+    if (typeSlashIdx == std::string::npos) {
+        ALOGE(ERROR_MSG_PREFIX
+                "does not have .<minor>/ version section",
+                __FUNCTION__, name.c_str());
+        return BAD_VALUE;
+    }
+
+    char *endPtr;
+    errno = 0; // strtol reports range errors via errno
+    long majorVal = strtol(name.c_str() + atIdx + 1, &endPtr, 10);
+    if (errno != 0) {
+        ALOGE(ERROR_MSG_PREFIX
+                "cannot parse major version: %s (%d)",
+                __FUNCTION__, name.c_str(), strerror(errno), errno);
+        return BAD_VALUE;
+    }
+    // The major version must end exactly at the '.'
+    if (endPtr != name.c_str() + dotIdx) {
+        ALOGE(ERROR_MSG_PREFIX
+                "major version has unexpected length",
+                __FUNCTION__, name.c_str());
+        return BAD_VALUE;
+    }
+    // errno is still 0 here: the major parse above bailed out on any error
+    long minorVal = strtol(name.c_str() + dotIdx + 1, &endPtr, 10);
+    if (errno != 0) {
+        ALOGE(ERROR_MSG_PREFIX
+                "cannot parse minor version: %s (%d)",
+                __FUNCTION__, name.c_str(), strerror(errno), errno);
+        return BAD_VALUE;
+    }
+    // The minor version must end exactly at the first '/'
+    if (endPtr != name.c_str() + typeSlashIdx) {
+        ALOGE(ERROR_MSG_PREFIX
+                "minor version has unexpected length",
+                __FUNCTION__, name.c_str());
+        return BAD_VALUE;
+    }
+    if (majorVal < 0 || majorVal > UINT16_MAX || minorVal < 0 || minorVal > UINT16_MAX) {
+        ALOGE(ERROR_MSG_PREFIX
+                "major/minor version is out of range of uint16_t: %ld.%ld",
+                __FUNCTION__, name.c_str(), majorVal, minorVal);
+        return BAD_VALUE;
+    }
+
+    // Extract type and id
+
+    std::string::size_type instanceSlashIdx = name.find('/', typeSlashIdx + 1);
+    if (instanceSlashIdx == std::string::npos) {
+        ALOGE(ERROR_MSG_PREFIX
+                "does not have /<type>/ component",
+                __FUNCTION__, name.c_str());
+        return BAD_VALUE;
+    }
+    std::string typeVal = name.substr(typeSlashIdx + 1, instanceSlashIdx - typeSlashIdx - 1);
+
+    // The id component must be non-empty
+    if (instanceSlashIdx == name.size() - 1) {
+        ALOGE(ERROR_MSG_PREFIX
+                "does not have an /<id> component",
+                __FUNCTION__, name.c_str());
+        return BAD_VALUE;
+    }
+    std::string idVal = name.substr(instanceSlashIdx + 1);
+
+#undef ERROR_MSG_PREFIX
+
+    *major = static_cast<uint16_t>(majorVal);
+    *minor = static_cast<uint16_t>(minorVal);
+    *type = typeVal;
+    *id = idVal;
+
+    return OK;
+}
+
+
+
+CameraProviderManager::ProviderInfo::~ProviderInfo() {
+    // Destruction of ProviderInfo is only supposed to happen when the respective
+    // CameraProvider interface dies, so do not unregister callbacks.
+    // Member destructors release the remaining per-provider state.
+}
+
+status_t CameraProviderManager::mapToStatusT(const Status& s) {
+ switch(s) {
+ case Status::OK:
+ return OK;
+ case Status::ILLEGAL_ARGUMENT:
+ return BAD_VALUE;
+ case Status::CAMERA_IN_USE:
+ return -EBUSY;
+ case Status::MAX_CAMERAS_IN_USE:
+ return -EUSERS;
+ case Status::METHOD_NOT_SUPPORTED:
+ return UNKNOWN_TRANSACTION;
+ case Status::OPERATION_NOT_SUPPORTED:
+ return INVALID_OPERATION;
+ case Status::CAMERA_DISCONNECTED:
+ return DEAD_OBJECT;
+ case Status::INTERNAL_ERROR:
+ return INVALID_OPERATION;
+ }
+ ALOGW("Unexpected HAL status code %d", s);
+ return INVALID_OPERATION;
+}
+
+// Returns a printable name for a camera HAL Status value; unexpected values
+// are logged and reported as "UNKNOWN_ERROR".
+const char* CameraProviderManager::statusToString(const Status& s) {
+    const char* label = nullptr;
+    switch(s) {
+        case Status::OK:                      label = "OK"; break;
+        case Status::ILLEGAL_ARGUMENT:        label = "ILLEGAL_ARGUMENT"; break;
+        case Status::CAMERA_IN_USE:           label = "CAMERA_IN_USE"; break;
+        case Status::MAX_CAMERAS_IN_USE:      label = "MAX_CAMERAS_IN_USE"; break;
+        case Status::METHOD_NOT_SUPPORTED:    label = "METHOD_NOT_SUPPORTED"; break;
+        case Status::OPERATION_NOT_SUPPORTED: label = "OPERATION_NOT_SUPPORTED"; break;
+        case Status::CAMERA_DISCONNECTED:     label = "CAMERA_DISCONNECTED"; break;
+        case Status::INTERNAL_ERROR:          label = "INTERNAL_ERROR"; break;
+    }
+    if (label != nullptr) {
+        return label;
+    }
+    ALOGW("Unexpected HAL status code %d", s);
+    return "UNKNOWN_ERROR";
+}
+
+// Returns a printable name for a CameraDeviceStatus value; unexpected values
+// are logged and reported as "UNKNOWN_STATUS".
+const char* CameraProviderManager::deviceStatusToString(const CameraDeviceStatus& s) {
+    const char* label = nullptr;
+    switch(s) {
+        case CameraDeviceStatus::NOT_PRESENT: label = "NOT_PRESENT"; break;
+        case CameraDeviceStatus::PRESENT:     label = "PRESENT"; break;
+        case CameraDeviceStatus::ENUMERATING: label = "ENUMERATING"; break;
+    }
+    if (label != nullptr) {
+        return label;
+    }
+    ALOGW("Unexpected HAL device status code %d", s);
+    return "UNKNOWN_STATUS";
+}
+
+// Returns a printable name for a TorchModeStatus value; unexpected values
+// are logged and reported as "UNKNOWN_STATUS".
+const char* CameraProviderManager::torchStatusToString(const TorchModeStatus& s) {
+    const char* label = nullptr;
+    switch(s) {
+        case TorchModeStatus::NOT_AVAILABLE: label = "NOT_AVAILABLE"; break;
+        case TorchModeStatus::AVAILABLE_OFF: label = "AVAILABLE_OFF"; break;
+        case TorchModeStatus::AVAILABLE_ON:  label = "AVAILABLE_ON"; break;
+    }
+    if (label != nullptr) {
+        return label;
+    }
+    ALOGW("Unexpected HAL torch mode status code %d", s);
+    return "UNKNOWN_STATUS";
+}
+
+
+/**
+ * Build a VendorTagDescriptor from the HIDL vendor tag sections reported by
+ * a provider. Validates tag IDs, names, and types, then constructs the
+ * forward and reverse tag<->section/name mappings.
+ *
+ * Returns OK on success (writing the out-param), or BAD_VALUE on any
+ * malformed input.
+ */
+status_t HidlVendorTagDescriptor::createDescriptorFromHidl(
+        const hardware::hidl_vec<hardware::camera::common::V1_0::VendorTagSection>& vts,
+        /*out*/
+        sp<VendorTagDescriptor>& descriptor) {
+
+    // Count tags in an unsigned accumulator: summing size_t sizes into an int
+    // could overflow (signed overflow is UB), and the old 'int > INT32_MAX'
+    // guard was a tautology that could never fire.
+    size_t totalTags = 0;
+
+    for (size_t s = 0; s < vts.size(); s++) {
+        totalTags += vts[s].tags.size();
+    }
+
+    if (totalTags > INT32_MAX) {
+        ALOGE("%s: tag count %zu from vendor tag sections is invalid.", __FUNCTION__, totalTags);
+        return BAD_VALUE;
+    }
+    int tagCount = static_cast<int>(totalTags);
+
+    Vector<uint32_t> tagArray;
+    LOG_ALWAYS_FATAL_IF(tagArray.resize(tagCount) != tagCount,
+            "%s: too many (%u) vendor tags defined.", __FUNCTION__, tagCount);
+
+
+    sp<HidlVendorTagDescriptor> desc = new HidlVendorTagDescriptor();
+    desc->mTagCount = tagCount;
+
+    SortedVector<String8> sections;
+    KeyedVector<uint32_t, String8> tagToSectionMap;
+
+    int idx = 0;
+    for (size_t s = 0; s < vts.size(); s++) {
+        const hardware::camera::common::V1_0::VendorTagSection& section = vts[s];
+        const char *sectionName = section.sectionName.c_str();
+        if (sectionName == NULL) {
+            ALOGE("%s: no section name defined for vendor tag section %zu.", __FUNCTION__, s);
+            return BAD_VALUE;
+        }
+        String8 sectionString(sectionName);
+        sections.add(sectionString);
+
+        for (size_t j = 0; j < section.tags.size(); j++) {
+            uint32_t tag = section.tags[j].tagId;
+            // Vendor tags must live above the reserved metadata tag range
+            if (tag < CAMERA_METADATA_VENDOR_TAG_BOUNDARY) {
+                ALOGE("%s: vendor tag %d not in vendor tag section.", __FUNCTION__, tag);
+                return BAD_VALUE;
+            }
+
+            tagArray.editItemAt(idx++) = section.tags[j].tagId;
+
+            const char *tagName = section.tags[j].tagName.c_str();
+            if (tagName == NULL) {
+                ALOGE("%s: no tag name defined for vendor tag %d.", __FUNCTION__, tag);
+                return BAD_VALUE;
+            }
+            desc->mTagToNameMap.add(tag, String8(tagName));
+            tagToSectionMap.add(tag, sectionString);
+
+            int tagType = (int) section.tags[j].tagType;
+            if (tagType < 0 || tagType >= NUM_TYPES) {
+                ALOGE("%s: tag type %d from vendor ops does not exist.", __FUNCTION__, tagType);
+                return BAD_VALUE;
+            }
+            desc->mTagToTypeMap.add(tag, tagType);
+        }
+    }
+
+    desc->mSections = sections;
+
+    // Build tag -> section-index and section -> (name -> tag) reverse mappings
+    for (size_t i = 0; i < tagArray.size(); ++i) {
+        uint32_t tag = tagArray[i];
+        String8 sectionString = tagToSectionMap.valueFor(tag);
+
+        // Set up tag to section index map
+        ssize_t index = sections.indexOf(sectionString);
+        LOG_ALWAYS_FATAL_IF(index < 0, "index %zd must be non-negative", index);
+        desc->mTagToSectionMap.add(tag, static_cast<uint32_t>(index));
+
+        // Set up reverse mapping
+        ssize_t reverseIndex = -1;
+        if ((reverseIndex = desc->mReverseMapping.indexOfKey(sectionString)) < 0) {
+            KeyedVector<String8, uint32_t>* nameMapper = new KeyedVector<String8, uint32_t>();
+            reverseIndex = desc->mReverseMapping.add(sectionString, nameMapper);
+        }
+        desc->mReverseMapping[reverseIndex]->add(desc->mTagToNameMap.valueFor(tag), tag);
+    }
+
+    descriptor = std::move(desc);
+    return OK;
+}
+
+
+} // namespace android
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
new file mode 100644
index 0000000..e82282f
--- /dev/null
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -0,0 +1,416 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERAPROVIDER_H
+#define ANDROID_SERVERS_CAMERA_CAMERAPROVIDER_H
+
+#include <vector>
+#include <set>
+#include <string>
+#include <mutex>
+
+#include <camera/CameraParameters2.h>
+#include <camera/CameraMetadata.h>
+#include <camera/CameraBase.h>
+#include <utils/Errors.h>
+#include <android/hardware/camera/common/1.0/types.h>
+#include <android/hardware/camera/provider/2.4/ICameraProvider.h>
+//#include <android/hardware/camera/provider/2.4/ICameraProviderCallbacks.h>
+#include <android/hidl/manager/1.0/IServiceNotification.h>
+#include <camera/VendorTagDescriptor.h>
+
+namespace android {
+
+/**
+ * The vendor tag descriptor class that takes HIDL vendor tag information as
+ * input. Not part of VendorTagDescriptor class because that class is used
+ * in AIDL generated sources which don't have access to HIDL headers.
+ */
/**
 * The vendor tag descriptor class that takes HIDL vendor tag information as
 * input. Not part of VendorTagDescriptor class because that class is used
 * in AIDL generated sources which don't have access to HIDL headers.
 */
class HidlVendorTagDescriptor : public VendorTagDescriptor {
public:
    /**
     * Create a VendorTagDescriptor object from the HIDL VendorTagSection
     * vector.
     *
     * @param vts         vendor tag sections as reported by a camera provider
     * @param descriptor  out: populated descriptor on success
     *
     * Returns OK on success, or a negative error code (e.g. BAD_VALUE for
     * tags outside the vendor range or missing names/types).
     */
    static status_t createDescriptorFromHidl(
            const hardware::hidl_vec<hardware::camera::common::V1_0::VendorTagSection>& vts,
            /*out*/
            sp<VendorTagDescriptor>& descriptor);
};
+
+/**
+ * A manager for all camera providers available on an Android device.
+ *
+ * Responsible for enumerating providers and the individual camera devices
+ * they export, both at startup and as providers and devices are added/removed.
+ *
+ * Provides methods for requesting information about individual devices and for
+ * opening them for active use.
+ *
+ */
+class CameraProviderManager : virtual public hidl::manager::V1_0::IServiceNotification {
+public:
+
+ ~CameraProviderManager();
+
+ // Tiny proxy for the static methods in a HIDL interface that communicate with the hardware
+ // service manager, to be replacable in unit tests with a fake.
+ struct ServiceInteractionProxy {
+ virtual bool registerForNotifications(
+ const std::string &serviceName,
+ const sp<hidl::manager::V1_0::IServiceNotification>
+ ¬ification) = 0;
+ virtual sp<hardware::camera::provider::V2_4::ICameraProvider> getService(
+ const std::string &serviceName) = 0;
+ virtual ~ServiceInteractionProxy() {}
+ };
+
+ // Standard use case - call into the normal generated static methods which invoke
+ // the real hardware service manager
+ struct HardwareServiceInteractionProxy : public ServiceInteractionProxy {
+ virtual bool registerForNotifications(
+ const std::string &serviceName,
+ const sp<hidl::manager::V1_0::IServiceNotification>
+ ¬ification) override {
+ return hardware::camera::provider::V2_4::ICameraProvider::registerForNotifications(
+ serviceName, notification);
+ }
+ virtual sp<hardware::camera::provider::V2_4::ICameraProvider> getService(
+ const std::string &serviceName) override {
+ return hardware::camera::provider::V2_4::ICameraProvider::getService(serviceName);
+ }
+ };
+
+ /**
+ * Listener interface for device/torch status changes
+ */
+ struct StatusListener : virtual public RefBase {
+ ~StatusListener() {}
+
+ virtual void onDeviceStatusChanged(const String8 &cameraId,
+ hardware::camera::common::V1_0::CameraDeviceStatus newStatus) = 0;
+ virtual void onTorchStatusChanged(const String8 &cameraId,
+ hardware::camera::common::V1_0::TorchModeStatus newStatus) = 0;
+ virtual void onNewProviderRegistered() = 0;
+ };
+
+ /**
+ * Initialize the manager and give it a status listener; optionally accepts a service
+ * interaction proxy.
+ *
+ * The default proxy communicates via the hardware service manager; alternate proxies can be
+ * used for testing. The lifetime of the proxy must exceed the lifetime of the manager.
+ */
+ status_t initialize(wp<StatusListener> listener,
+ ServiceInteractionProxy *proxy = &sHardwareServiceInteractionProxy);
+
+ /**
+ * Retrieve the total number of available cameras. This value may change dynamically as cameras
+ * are added or removed.
+ */
+ int getCameraCount() const;
+
+ /**
+ * Retrieve the number of API1 compatible cameras; these are internal and
+ * backwards-compatible. This is the set of cameras that will be
+ * accessible via the old camera API, with IDs in range of
+ * [0, getAPI1CompatibleCameraCount()-1]. This value is not expected to change dynamically.
+ */
+ int getAPI1CompatibleCameraCount() const;
+
+ std::vector<std::string> getCameraDeviceIds() const;
+
+ std::vector<std::string> getAPI1CompatibleCameraDeviceIds() const;
+
+ /**
+ * Return true if a device with a given ID and major version exists
+ */
+ bool isValidDevice(const std::string &id, uint16_t majorVersion) const;
+
+ /**
+ * Return true if a device with a given ID has a flash unit. Returns false
+ * for devices that are unknown.
+ */
+ bool hasFlashUnit(const std::string &id) const;
+
+ /**
+ * Return the resource cost of this camera device
+ */
+ status_t getResourceCost(const std::string &id,
+ hardware::camera::common::V1_0::CameraResourceCost* cost) const;
+
+ /**
+ * Return the old camera API camera info
+ */
+ status_t getCameraInfo(const std::string &id,
+ hardware::CameraInfo* info) const;
+
+ /**
+ * Return API2 camera characteristics - returns NAME_NOT_FOUND if a device ID does
+ * not have a v3 or newer HAL version.
+ */
+ status_t getCameraCharacteristics(const std::string &id,
+ CameraMetadata* characteristics) const;
+
+ /**
+ * Return the highest supported device interface version for this ID
+ */
+ status_t getHighestSupportedVersion(const std::string &id,
+ hardware::hidl_version *v);
+
+ /**
+ * Check if a given camera device support setTorchMode API.
+ */
+ bool supportSetTorchMode(const std::string &id);
+
+ /**
+ * Turn on or off the flashlight on a given camera device.
+ * May fail if the device does not support this API, is in active use, or if the device
+ * doesn't exist, etc.
+ */
+ status_t setTorchMode(const std::string &id, bool enabled);
+
+ /**
+ * Setup vendor tags for all registered providers
+ */
+ status_t setUpVendorTags();
+
+ /**
+ * Open an active session to a camera device.
+ *
+ * This fully powers on the camera device hardware, and returns a handle to a
+ * session to be used for hardware configuration and operation.
+ */
+ status_t openSession(const std::string &id,
+ const sp<hardware::camera::device::V3_2::ICameraDeviceCallback>& callback,
+ /*out*/
+ sp<hardware::camera::device::V3_2::ICameraDeviceSession> *session);
+
+ status_t openSession(const std::string &id,
+ const sp<hardware::camera::device::V1_0::ICameraDeviceCallback>& callback,
+ /*out*/
+ sp<hardware::camera::device::V1_0::ICameraDevice> *session);
+
+ /**
+ * IServiceNotification::onRegistration
+ * Invoked by the hardware service manager when a new camera provider is registered
+ */
+ virtual hardware::Return<void> onRegistration(const hardware::hidl_string& fqName,
+ const hardware::hidl_string& name,
+ bool preexisting) override;
+
+ /**
+ * Dump out information about available providers and devices
+ */
+ status_t dump(int fd, const Vector<String16>& args);
+
+ /**
+ * Conversion methods between HAL Status and status_t and strings
+ */
+ static status_t mapToStatusT(const hardware::camera::common::V1_0::Status& s);
+ static const char* statusToString(const hardware::camera::common::V1_0::Status& s);
+
+ /*
+ * Return provider type for a specific device.
+ */
+ metadata_vendor_id_t getProviderTagIdLocked(const std::string& id,
+ hardware::hidl_version minVersion = hardware::hidl_version{0,0},
+ hardware::hidl_version maxVersion = hardware::hidl_version{1000,0}) const;
+
+private:
+ // All private members, unless otherwise noted, expect mInterfaceMutex to be locked before use
+ mutable std::mutex mInterfaceMutex;
+
+ // the status listener update callbacks will lock mStatusMutex
+ mutable std::mutex mStatusListenerMutex;
+ wp<StatusListener> mListener;
+ ServiceInteractionProxy* mServiceProxy;
+
+ static HardwareServiceInteractionProxy sHardwareServiceInteractionProxy;
+
+ struct ProviderInfo :
+ virtual public hardware::camera::provider::V2_4::ICameraProviderCallback,
+ virtual public hardware::hidl_death_recipient
+ {
+ const std::string mProviderName;
+ const sp<hardware::camera::provider::V2_4::ICameraProvider> mInterface;
+ const metadata_vendor_id_t mProviderTagid;
+
+ ProviderInfo(const std::string &providerName,
+ sp<hardware::camera::provider::V2_4::ICameraProvider>& interface,
+ CameraProviderManager *manager);
+ ~ProviderInfo();
+
+ status_t initialize();
+
+ const std::string& getType() const;
+
+ status_t addDevice(const std::string& name,
+ hardware::camera::common::V1_0::CameraDeviceStatus initialStatus =
+ hardware::camera::common::V1_0::CameraDeviceStatus::PRESENT,
+ /*out*/ std::string *parsedId = nullptr);
+
+ status_t dump(int fd, const Vector<String16>& args) const;
+
+ // ICameraProviderCallbacks interface - these lock the parent mInterfaceMutex
+ virtual hardware::Return<void> cameraDeviceStatusChange(
+ const hardware::hidl_string& cameraDeviceName,
+ hardware::camera::common::V1_0::CameraDeviceStatus newStatus) override;
+ virtual hardware::Return<void> torchModeStatusChange(
+ const hardware::hidl_string& cameraDeviceName,
+ hardware::camera::common::V1_0::TorchModeStatus newStatus) override;
+
+ // hidl_death_recipient interface - this locks the parent mInterfaceMutex
+ virtual void serviceDied(uint64_t cookie, const wp<hidl::base::V1_0::IBase>& who) override;
+
+ // Basic device information, common to all camera devices
+ struct DeviceInfo {
+ const std::string mName; // Full instance name
+ const std::string mId; // ID section of full name
+ const hardware::hidl_version mVersion;
+ const metadata_vendor_id_t mProviderTagid;
+
+ const hardware::camera::common::V1_0::CameraResourceCost mResourceCost;
+
+ hardware::camera::common::V1_0::CameraDeviceStatus mStatus;
+
+ bool hasFlashUnit() const { return mHasFlashUnit; }
+ virtual status_t setTorchMode(bool enabled) = 0;
+ virtual status_t getCameraInfo(hardware::CameraInfo *info) const = 0;
+ virtual bool isAPI1Compatible() const = 0;
+ virtual status_t getCameraCharacteristics(CameraMetadata *characteristics) const {
+ (void) characteristics;
+ return INVALID_OPERATION;
+ }
+
+ DeviceInfo(const std::string& name, const metadata_vendor_id_t tagId,
+ const std::string &id, const hardware::hidl_version& version,
+ const hardware::camera::common::V1_0::CameraResourceCost& resourceCost) :
+ mName(name), mId(id), mVersion(version), mProviderTagid(tagId),
+ mResourceCost(resourceCost),
+ mStatus(hardware::camera::common::V1_0::CameraDeviceStatus::PRESENT),
+ mHasFlashUnit(false) {}
+ virtual ~DeviceInfo();
+ protected:
+ bool mHasFlashUnit;
+
+ template<class InterfaceT>
+ static status_t setTorchMode(InterfaceT& interface, bool enabled);
+ };
+ std::vector<std::unique_ptr<DeviceInfo>> mDevices;
+ std::set<std::string> mUniqueCameraIds;
+ int mUniqueDeviceCount;
+ std::set<std::string> mUniqueAPI1CompatibleCameraIds;
+
+ // HALv1-specific camera fields, including the actual device interface
+ struct DeviceInfo1 : public DeviceInfo {
+ typedef hardware::camera::device::V1_0::ICameraDevice InterfaceT;
+ const sp<InterfaceT> mInterface;
+
+ virtual status_t setTorchMode(bool enabled) override;
+ virtual status_t getCameraInfo(hardware::CameraInfo *info) const override;
+ //In case of Device1Info assume that we are always API1 compatible
+ virtual bool isAPI1Compatible() const override { return true; }
+ DeviceInfo1(const std::string& name, const metadata_vendor_id_t tagId,
+ const std::string &id, uint16_t minorVersion,
+ const hardware::camera::common::V1_0::CameraResourceCost& resourceCost,
+ sp<InterfaceT> interface);
+ virtual ~DeviceInfo1();
+ private:
+ CameraParameters2 mDefaultParameters;
+ };
+
+ // HALv3-specific camera fields, including the actual device interface
+ struct DeviceInfo3 : public DeviceInfo {
+ typedef hardware::camera::device::V3_2::ICameraDevice InterfaceT;
+ const sp<InterfaceT> mInterface;
+
+ virtual status_t setTorchMode(bool enabled) override;
+ virtual status_t getCameraInfo(hardware::CameraInfo *info) const override;
+ virtual bool isAPI1Compatible() const override;
+ virtual status_t getCameraCharacteristics(
+ CameraMetadata *characteristics) const override;
+
+ DeviceInfo3(const std::string& name, const metadata_vendor_id_t tagId,
+ const std::string &id, uint16_t minorVersion,
+ const hardware::camera::common::V1_0::CameraResourceCost& resourceCost,
+ sp<InterfaceT> interface);
+ virtual ~DeviceInfo3();
+ private:
+ CameraMetadata mCameraCharacteristics;
+ };
+
+ private:
+ std::string mType;
+ uint32_t mId;
+
+ std::mutex mLock;
+
+ CameraProviderManager *mManager;
+
+ // Templated method to instantiate the right kind of DeviceInfo and call the
+ // right CameraProvider getCameraDeviceInterface_* method.
+ template<class DeviceInfoT>
+ std::unique_ptr<DeviceInfo> initializeDeviceInfo(const std::string &name,
+ const metadata_vendor_id_t tagId, const std::string &id,
+ uint16_t minorVersion) const;
+
+ // Helper for initializeDeviceInfo to use the right CameraProvider get method.
+ template<class InterfaceT>
+ sp<InterfaceT> getDeviceInterface(const std::string &name) const;
+
+ // Parse provider instance name for type and id
+ static status_t parseProviderName(const std::string& name,
+ std::string *type, uint32_t *id);
+
+ // Parse device instance name for device version, type, and id.
+ static status_t parseDeviceName(const std::string& name,
+ uint16_t *major, uint16_t *minor, std::string *type, std::string *id);
+
+ // Generate vendor tag id
+ static metadata_vendor_id_t generateVendorTagId(const std::string &name);
+ };
+
+ // Utility to find a DeviceInfo by ID; pointer is only valid while mInterfaceMutex is held
+ // and the calling code doesn't mutate the list of providers or their lists of devices.
+ // Finds the first device of the given ID that falls within the requested version range
+ // minVersion <= deviceVersion < maxVersion
+ // No guarantees on the order of traversal
+ ProviderInfo::DeviceInfo* findDeviceInfoLocked(const std::string& id,
+ hardware::hidl_version minVersion = hardware::hidl_version{0,0},
+ hardware::hidl_version maxVersion = hardware::hidl_version{1000,0}) const;
+
+ status_t addProviderLocked(const std::string& newProvider, bool expected = true);
+
+ status_t removeProvider(const std::string& provider);
+ sp<StatusListener> getStatusListener() const;
+
+ bool isValidDeviceLocked(const std::string &id, uint16_t majorVersion) const;
+
+ std::vector<sp<ProviderInfo>> mProviders;
+
+ static const char* deviceStatusToString(
+ const hardware::camera::common::V1_0::CameraDeviceStatus&);
+ static const char* torchStatusToString(
+ const hardware::camera::common::V1_0::TorchModeStatus&);
+
+};
+
+} // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/common/FrameProcessorBase.cpp b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
index 2ef3057..41c953b 100644
--- a/services/camera/libcameraservice/common/FrameProcessorBase.cpp
+++ b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
@@ -32,8 +32,7 @@
mDevice(device),
mNumPartialResults(1) {
sp<CameraDeviceBase> cameraDevice = device.promote();
- if (cameraDevice != 0 &&
- cameraDevice->getDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_2) {
+ if (cameraDevice != 0) {
CameraMetadata staticInfo = cameraDevice->info();
camera_metadata_entry_t entry = staticInfo.find(ANDROID_REQUEST_PARTIAL_RESULT_COUNT);
if (entry.count > 0) {
@@ -123,7 +122,7 @@
ATRACE_CALL();
CaptureResult result;
- ALOGV("%s: Camera %d: Process new frames", __FUNCTION__, device->getId());
+ ALOGV("%s: Camera %s: Process new frames", __FUNCTION__, device->getId().string());
while ( (res = device->getNextResult(&result)) == OK) {
@@ -133,8 +132,8 @@
entry = result.mMetadata.find(ANDROID_REQUEST_FRAME_COUNT);
if (entry.count == 0) {
- ALOGE("%s: Camera %d: Error reading frame number",
- __FUNCTION__, device->getId());
+ ALOGE("%s: Camera %s: Error reading frame number",
+ __FUNCTION__, device->getId().string());
break;
}
ATRACE_INT("cam2_frame", entry.data.i32[0]);
@@ -149,8 +148,8 @@
}
}
if (res != NOT_ENOUGH_DATA) {
- ALOGE("%s: Camera %d: Error getting next frame: %s (%d)",
- __FUNCTION__, device->getId(), strerror(-res), res);
+ ALOGE("%s: Camera %s: Error getting next frame: %s (%d)",
+ __FUNCTION__, device->getId().string(), strerror(-res), res);
return;
}
@@ -159,8 +158,8 @@
bool FrameProcessorBase::processSingleFrame(CaptureResult &result,
const sp<CameraDeviceBase> &device) {
- ALOGV("%s: Camera %d: Process single frame (is empty? %d)",
- __FUNCTION__, device->getId(), result.mMetadata.isEmpty());
+ ALOGV("%s: Camera %s: Process single frame (is empty? %d)",
+ __FUNCTION__, device->getId().string(), result.mMetadata.isEmpty());
return processListeners(result, device) == OK;
}
@@ -171,18 +170,8 @@
camera_metadata_ro_entry_t entry;
// Check if this result is partial.
- bool isPartialResult = false;
- if (device->getDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_2) {
- isPartialResult = result.mResultExtras.partialResultCount < mNumPartialResults;
- } else {
- entry = result.mMetadata.find(ANDROID_QUIRKS_PARTIAL_RESULT);
- if (entry.count != 0 &&
- entry.data.u8[0] == ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
- ALOGV("%s: Camera %d: This is a partial result",
- __FUNCTION__, device->getId());
- isPartialResult = true;
- }
- }
+ bool isPartialResult =
+ result.mResultExtras.partialResultCount < mNumPartialResults;
// TODO: instead of getting requestID from CameraMetadata, we should get it
// from CaptureResultExtras. This will require changing Camera2Device.
@@ -190,7 +179,7 @@
// include CaptureResultExtras.
entry = result.mMetadata.find(ANDROID_REQUEST_ID);
if (entry.count == 0) {
- ALOGE("%s: Camera %d: Error reading frame id", __FUNCTION__, device->getId());
+ ALOGE("%s: Camera %s: Error reading frame id", __FUNCTION__, device->getId().string());
return BAD_VALUE;
}
int32_t requestId = entry.data.i32[0];
@@ -215,8 +204,8 @@
item++;
}
}
- ALOGV("%s: Camera %d: Got %zu range listeners out of %zu", __FUNCTION__,
- device->getId(), listeners.size(), mRangeListeners.size());
+ ALOGV("%s: Camera %s: Got %zu range listeners out of %zu", __FUNCTION__,
+ device->getId().string(), listeners.size(), mRangeListeners.size());
List<sp<FilteredListener> >::iterator item = listeners.begin();
for (; item != listeners.end(); item++) {
diff --git a/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp b/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp
new file mode 100644
index 0000000..469c86c
--- /dev/null
+++ b/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp
@@ -0,0 +1,1044 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_TAG "CameraHardwareInterface"
+//#define LOG_NDEBUG 0
+
+#include <inttypes.h>
+#include <media/hardware/HardwareAPI.h> // For VideoNativeHandleMetadata
+#include "CameraHardwareInterface.h"
+
+namespace android {
+
+using namespace hardware::camera::device::V1_0;
+using namespace hardware::camera::common::V1_0;
+using hardware::hidl_handle;
+
CameraHardwareInterface::~CameraHardwareInterface()
{
    // Tear down whichever HAL path was opened: the legacy libhardware
    // device and/or the HIDL session.
    ALOGI("Destroying camera %s", mName.string());
    if (mDevice) {
        int rc = mDevice->common.close(&mDevice->common);
        if (rc != OK)
            ALOGE("Could not close camera %s: %d", mName.string(), rc);
    }
    if (mHidlDevice != nullptr) {
        mHidlDevice->close();
        mHidlDevice.clear();
        // Buffer ids previously handed to the HAL are meaningless once the
        // session is gone; drop the bookkeeping.
        cleanupCirculatingBuffers();
    }
}
+
// Open a HIDL device session for this camera via the provider manager.
// Mutually exclusive with the legacy libhardware path (mDevice): fails with
// INVALID_OPERATION if that path was already initialized.
status_t CameraHardwareInterface::initialize(sp<CameraProviderManager> manager) {
    if (mDevice) {
        ALOGE("%s: camera hardware interface has been initialized to libhardware path!",
                __FUNCTION__);
        return INVALID_OPERATION;
    }

    ALOGI("Opening camera %s", mName.string());

    // 'this' is registered as the ICameraDeviceCallback for the session.
    status_t ret = manager->openSession(mName.string(), this, &mHidlDevice);
    if (ret != OK) {
        ALOGE("%s: openSession failed! %s (%d)", __FUNCTION__, strerror(-ret), ret);
    }
    return ret;
}
+
// Remember the requested scaling mode and apply it immediately if a preview
// window is attached; otherwise it is applied when the window is set.
status_t CameraHardwareInterface::setPreviewScalingMode(int scalingMode)
{
    int rc = OK;
    mPreviewScalingMode = scalingMode;
    if (mPreviewWindow != nullptr) {
        rc = native_window_set_scaling_mode(mPreviewWindow.get(),
                scalingMode);
    }
    return rc;
}
+
// Remember the requested transform and apply it immediately if a preview
// window is attached; otherwise it is applied when the window is set.
status_t CameraHardwareInterface::setPreviewTransform(int transform) {
    int rc = OK;
    mPreviewTransform = transform;
    if (mPreviewWindow != nullptr) {
        rc = native_window_set_buffers_transform(mPreviewWindow.get(),
                mPreviewTransform);
    }
    return rc;
}
+
/**
 * Implementation of android::hardware::camera::device::V1_0::ICameraDeviceCallback
 */
hardware::Return<void> CameraHardwareInterface::notifyCallback(
        NotifyCallbackMsg msgType, int32_t ext1, int32_t ext2) {
    // Forward the HIDL notification into the legacy camera notify callback
    // glue (sNotifyCb), passing 'this' as the callback cookie.
    sNotifyCb((int32_t) msgType, ext1, ext2, (void*) this);
    return hardware::Void();
}
+
+hardware::Return<uint32_t> CameraHardwareInterface::registerMemory(
+ const hardware::hidl_handle& descriptor,
+ uint32_t bufferSize, uint32_t bufferCount) {
+ if (descriptor->numFds != 1) {
+ ALOGE("%s: camera memory descriptor has numFds %d (expect 1)",
+ __FUNCTION__, descriptor->numFds);
+ return 0;
+ }
+ if (descriptor->data[0] < 0) {
+ ALOGE("%s: camera memory descriptor has FD %d (expect >= 0)",
+ __FUNCTION__, descriptor->data[0]);
+ return 0;
+ }
+
+ camera_memory_t* mem = sGetMemory(descriptor->data[0], bufferSize, bufferCount, this);
+ sp<CameraHeapMemory> camMem(static_cast<CameraHeapMemory *>(mem->handle));
+ int memPoolId = camMem->mHeap->getHeapID();
+ if (memPoolId < 0) {
+ ALOGE("%s: CameraHeapMemory has FD %d (expect >= 0)", __FUNCTION__, memPoolId);
+ return 0;
+ }
+ mHidlMemPoolMap.insert(std::make_pair(memPoolId, mem));
+ return memPoolId;
+}
+
// Release a memory pool previously created by registerMemory(). Unknown ids
// are logged and ignored.
hardware::Return<void> CameraHardwareInterface::unregisterMemory(uint32_t memId) {
    if (mHidlMemPoolMap.count(memId) == 0) {
        ALOGE("%s: memory pool ID %d not found", __FUNCTION__, memId);
        return hardware::Void();
    }
    camera_memory_t* mem = mHidlMemPoolMap.at(memId);
    sPutMemory(mem);
    mHidlMemPoolMap.erase(memId);
    return hardware::Void();
}
+
// Deliver a HAL data callback: 'data' names the memory pool registered via
// registerMemory(), 'bufferIndex' selects the buffer within that pool.
hardware::Return<void> CameraHardwareInterface::dataCallback(
        DataCallbackMsg msgType, uint32_t data, uint32_t bufferIndex,
        const hardware::camera::device::V1_0::CameraFrameMetadata& metadata) {
    if (mHidlMemPoolMap.count(data) == 0) {
        ALOGE("%s: memory pool ID %d not found", __FUNCTION__, data);
        return hardware::Void();
    }
    // Repackage the HIDL face metadata as the legacy camera_frame_metadata_t;
    // the faces array is aliased, not copied, so it is only valid for the
    // duration of the callback.
    camera_frame_metadata_t md;
    md.number_of_faces = metadata.faces.size();
    md.faces = (camera_face_t*) metadata.faces.data();
    sDataCb((int32_t) msgType, mHidlMemPoolMap.at(data), bufferIndex, &md, this);
    return hardware::Void();
}
+
// Deliver a timestamped HAL data callback (e.g. recording frames) from the
// memory pool named by 'data'.
hardware::Return<void> CameraHardwareInterface::dataCallbackTimestamp(
        DataCallbackMsg msgType, uint32_t data,
        uint32_t bufferIndex, int64_t timestamp) {
    if (mHidlMemPoolMap.count(data) == 0) {
        ALOGE("%s: memory pool ID %d not found", __FUNCTION__, data);
        return hardware::Void();
    }
    sDataCbTimestamp(timestamp, (int32_t) msgType, mHidlMemPoolMap.at(data), bufferIndex, this);
    return hardware::Void();
}
+
+hardware::Return<void> CameraHardwareInterface::handleCallbackTimestamp(
+ DataCallbackMsg msgType, const hidl_handle& frameData, uint32_t data,
+ uint32_t bufferIndex, int64_t timestamp) {
+ if (mHidlMemPoolMap.count(data) == 0) {
+ ALOGE("%s: memory pool ID %d not found", __FUNCTION__, data);
+ return hardware::Void();
+ }
+ sp<CameraHeapMemory> mem(static_cast<CameraHeapMemory *>(mHidlMemPoolMap.at(data)->handle));
+ VideoNativeHandleMetadata* md = (VideoNativeHandleMetadata*)
+ mem->mBuffers[bufferIndex]->pointer();
+ md->pHandle = const_cast<native_handle_t*>(frameData.getNativeHandle());
+ sDataCbTimestamp(timestamp, (int32_t) msgType, mHidlMemPoolMap.at(data), bufferIndex, this);
+ return hardware::Void();
+}
+
// Batched variant of handleCallbackTimestamp(): validate every message, set
// up each buffer's native-handle metadata, then deliver the whole batch in a
// single callback. Any invalid message drops the entire batch.
hardware::Return<void> CameraHardwareInterface::handleCallbackTimestampBatch(
        DataCallbackMsg msgType,
        const hardware::hidl_vec<hardware::camera::device::V1_0::HandleTimestampMessage>& messages) {
    std::vector<android::HandleTimestampMessage> msgs;
    msgs.reserve(messages.size());

    for (const auto& hidl_msg : messages) {
        if (mHidlMemPoolMap.count(hidl_msg.data) == 0) {
            ALOGE("%s: memory pool ID %d not found", __FUNCTION__, hidl_msg.data);
            return hardware::Void();
        }
        sp<CameraHeapMemory> mem(
                static_cast<CameraHeapMemory *>(mHidlMemPoolMap.at(hidl_msg.data)->handle));

        // Reject HAL-supplied out-of-range buffer indices.
        if (hidl_msg.bufferIndex >= mem->mNumBufs) {
            ALOGE("%s: invalid buffer index %d, max allowed is %d", __FUNCTION__,
                    hidl_msg.bufferIndex, mem->mNumBufs);
            return hardware::Void();
        }
        // Stash this frame's native handle into the buffer's metadata before
        // handing the buffer to the client.
        VideoNativeHandleMetadata* md = (VideoNativeHandleMetadata*)
                mem->mBuffers[hidl_msg.bufferIndex]->pointer();
        md->pHandle = const_cast<native_handle_t*>(hidl_msg.frameData.getNativeHandle());

        msgs.push_back({hidl_msg.timestamp, mem->mBuffers[hidl_msg.bufferIndex]});
    }

    mDataCbTimestampBatch((int32_t) msgType, msgs, mCbUser);
    return hardware::Void();
}
+
+std::pair<bool, uint64_t> CameraHardwareInterface::getBufferId(
+ ANativeWindowBuffer* anb) {
+ std::lock_guard<std::mutex> lock(mBufferIdMapLock);
+
+ buffer_handle_t& buf = anb->handle;
+ auto it = mBufferIdMap.find(buf);
+ if (it == mBufferIdMap.end()) {
+ uint64_t bufId = mNextBufferId++;
+ mBufferIdMap[buf] = bufId;
+ mReversedBufMap[bufId] = anb;
+ return std::make_pair(true, bufId);
+ } else {
+ return std::make_pair(false, it->second);
+ }
+}
+
// Drop all cached buffer-id mappings; called whenever the buffer set is
// invalidated (window reconfiguration, geometry/usage change, teardown).
void CameraHardwareInterface::cleanupCirculatingBuffers() {
    std::lock_guard<std::mutex> lock(mBufferIdMapLock);
    mBufferIdMap.clear();
    mReversedBufMap.clear();
}
+
+hardware::Return<void>
+CameraHardwareInterface::dequeueBuffer(dequeueBuffer_cb _hidl_cb) {
+ ANativeWindow *a = mPreviewWindow.get();
+ if (a == nullptr) {
+ ALOGE("%s: preview window is null", __FUNCTION__);
+ return hardware::Void();
+ }
+ ANativeWindowBuffer* anb;
+ int rc = native_window_dequeue_buffer_and_wait(a, &anb);
+ Status s = Status::INTERNAL_ERROR;
+ uint64_t bufferId = 0;
+ uint32_t stride = 0;
+ hidl_handle buf = nullptr;
+ if (rc == OK) {
+ s = Status::OK;
+ auto pair = getBufferId(anb);
+ buf = (pair.first) ? anb->handle : nullptr;
+ bufferId = pair.second;
+ stride = anb->stride;
+ }
+
+ _hidl_cb(s, bufferId, buf, stride);
+ return hardware::Void();
+}
+
// HAL request to display (queue) a previously dequeued buffer, identified by
// the id handed out in dequeueBuffer().
hardware::Return<Status>
CameraHardwareInterface::enqueueBuffer(uint64_t bufferId) {
    ANativeWindow *a = mPreviewWindow.get();
    if (a == nullptr) {
        ALOGE("%s: preview window is null", __FUNCTION__);
        return Status::INTERNAL_ERROR;
    }
    if (mReversedBufMap.count(bufferId) == 0) {
        ALOGE("%s: bufferId %" PRIu64 " not found", __FUNCTION__, bufferId);
        return Status::ILLEGAL_ARGUMENT;
    }
    int rc = a->queueBuffer(a, mReversedBufMap.at(bufferId), -1);
    if (rc == 0) {
        return Status::OK;
    }
    return Status::INTERNAL_ERROR;
}
+
+hardware::Return<Status>
+CameraHardwareInterface::cancelBuffer(uint64_t bufferId) {
+ ANativeWindow *a = mPreviewWindow.get();
+ if (a == nullptr) {
+ ALOGE("%s: preview window is null", __FUNCTION__);
+ return Status::INTERNAL_ERROR;
+ }
+ if (mReversedBufMap.count(bufferId) == 0) {
+ ALOGE("%s: bufferId %" PRIu64 " not found", __FUNCTION__, bufferId);
+ return Status::ILLEGAL_ARGUMENT;
+ }
+ int rc = a->cancelBuffer(a, mReversedBufMap.at(bufferId), -1);
+ if (rc == 0) {
+ return Status::OK;
+ }
+ return Status::INTERNAL_ERROR;
+}
+
+hardware::Return<Status>
+CameraHardwareInterface::setBufferCount(uint32_t count) {
+ ANativeWindow *a = mPreviewWindow.get();
+ if (a != nullptr) {
+ // Workaround for b/27039775
+ // Previously, setting the buffer count would reset the buffer
+ // queue's flag that allows for all buffers to be dequeued on the
+ // producer side, instead of just the producer's declared max count,
+ // if no filled buffers have yet been queued by the producer. This
+ // reset no longer happens, but some HALs depend on this behavior,
+ // so it needs to be maintained for HAL backwards compatibility.
+ // Simulate the prior behavior by disconnecting/reconnecting to the
+ // window and setting the values again. This has the drawback of
+ // actually causing memory reallocation, which may not have happened
+ // in the past.
+ native_window_api_disconnect(a, NATIVE_WINDOW_API_CAMERA);
+ native_window_api_connect(a, NATIVE_WINDOW_API_CAMERA);
+ if (mPreviewScalingMode != NOT_SET) {
+ native_window_set_scaling_mode(a, mPreviewScalingMode);
+ }
+ if (mPreviewTransform != NOT_SET) {
+ native_window_set_buffers_transform(a, mPreviewTransform);
+ }
+ if (mPreviewWidth != NOT_SET) {
+ native_window_set_buffers_dimensions(a,
+ mPreviewWidth, mPreviewHeight);
+ native_window_set_buffers_format(a, mPreviewFormat);
+ }
+ if (mPreviewUsage != 0) {
+ native_window_set_usage(a, mPreviewUsage);
+ }
+ if (mPreviewSwapInterval != NOT_SET) {
+ a->setSwapInterval(a, mPreviewSwapInterval);
+ }
+ if (mPreviewCrop.left != NOT_SET) {
+ native_window_set_crop(a, &(mPreviewCrop));
+ }
+ }
+ int rc = native_window_set_buffer_count(a, count);
+ if (rc == OK) {
+ cleanupCirculatingBuffers();
+ return Status::OK;
+ }
+ return Status::INTERNAL_ERROR;
+}
+
// HAL request to set the preview buffer geometry. The values are recorded so
// they can be replayed by setBufferCount()'s reconnect workaround.
hardware::Return<Status>
CameraHardwareInterface::setBuffersGeometry(
        uint32_t w, uint32_t h, hardware::graphics::common::V1_0::PixelFormat format) {
    Status s = Status::INTERNAL_ERROR;
    ANativeWindow *a = mPreviewWindow.get();
    if (a == nullptr) {
        ALOGE("%s: preview window is null", __FUNCTION__);
        return s;
    }
    mPreviewWidth = w;
    mPreviewHeight = h;
    mPreviewFormat = (int) format;
    int rc = native_window_set_buffers_dimensions(a, w, h);
    if (rc == OK) {
        rc = native_window_set_buffers_format(a, mPreviewFormat);
    }
    if (rc == OK) {
        // Old buffer ids are invalid once the geometry changes.
        cleanupCirculatingBuffers();
        s = Status::OK;
    }
    return s;
}
+
// HAL request to set the preview crop rectangle; recorded for replay by
// setBufferCount()'s reconnect workaround.
hardware::Return<Status>
CameraHardwareInterface::setCrop(int32_t left, int32_t top, int32_t right, int32_t bottom) {
    Status s = Status::INTERNAL_ERROR;
    ANativeWindow *a = mPreviewWindow.get();
    if (a == nullptr) {
        ALOGE("%s: preview window is null", __FUNCTION__);
        return s;
    }
    mPreviewCrop.left = left;
    mPreviewCrop.top = top;
    mPreviewCrop.right = right;
    mPreviewCrop.bottom = bottom;
    int rc = native_window_set_crop(a, &mPreviewCrop);
    if (rc == OK) {
        s = Status::OK;
    }
    return s;
}
+
// HAL request to set the gralloc usage bits on the preview window; recorded
// for replay by setBufferCount()'s reconnect workaround.
hardware::Return<Status>
CameraHardwareInterface::setUsage(hardware::graphics::common::V1_0::BufferUsage usage) {
    Status s = Status::INTERNAL_ERROR;
    ANativeWindow *a = mPreviewWindow.get();
    if (a == nullptr) {
        ALOGE("%s: preview window is null", __FUNCTION__);
        return s;
    }
    mPreviewUsage = (int) usage;
    int rc = native_window_set_usage(a, mPreviewUsage);
    if (rc == OK) {
        // Usage change re-allocates buffers, so old ids are stale.
        cleanupCirculatingBuffers();
        s = Status::OK;
    }
    return s;
}
+
// HAL request to set the window swap interval; recorded for replay by
// setBufferCount()'s reconnect workaround.
hardware::Return<Status>
CameraHardwareInterface::setSwapInterval(int32_t interval) {
    Status s = Status::INTERNAL_ERROR;
    ANativeWindow *a = mPreviewWindow.get();
    if (a == nullptr) {
        ALOGE("%s: preview window is null", __FUNCTION__);
        return s;
    }
    mPreviewSwapInterval = interval;
    int rc = a->setSwapInterval(a, interval);
    if (rc == OK) {
        s = Status::OK;
    }
    return s;
}
+
+hardware::Return<void>
+CameraHardwareInterface::getMinUndequeuedBufferCount(getMinUndequeuedBufferCount_cb _hidl_cb) {
+ ANativeWindow *a = mPreviewWindow.get();
+ if (a == nullptr) {
+ ALOGE("%s: preview window is null", __FUNCTION__);
+ return hardware::Void();
+ }
+ int count = 0;
+ int rc = a->query(a, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &count);
+ Status s = Status::INTERNAL_ERROR;
+ if (rc == OK) {
+ s = Status::OK;
+ }
+ _hidl_cb(s, count);
+ return hardware::Void();
+}
+
+hardware::Return<Status>
+CameraHardwareInterface::setTimestamp(int64_t timestamp) {
+ Status s = Status::INTERNAL_ERROR;
+ ANativeWindow *a = mPreviewWindow.get();
+ if (a == nullptr) {
+ ALOGE("%s: preview window is null", __FUNCTION__);
+ return s;
+ }
+ int rc = native_window_set_buffers_timestamp(a, timestamp);
+ if (rc == OK) {
+ s = Status::OK;
+ }
+ return s;
+}
+
+status_t CameraHardwareInterface::setPreviewWindow(const sp<ANativeWindow>& buf)
+{
+ ALOGV("%s(%s) buf %p", __FUNCTION__, mName.string(), buf.get());
+ if (CC_LIKELY(mHidlDevice != nullptr)) {
+ mPreviewWindow = buf;
+ if (buf != nullptr) {
+ if (mPreviewScalingMode != NOT_SET) {
+ setPreviewScalingMode(mPreviewScalingMode);
+ }
+ if (mPreviewTransform != NOT_SET) {
+ setPreviewTransform(mPreviewTransform);
+ }
+ }
+ return CameraProviderManager::mapToStatusT(
+ mHidlDevice->setPreviewWindow(buf.get() ? this : nullptr));
+ } else if (mDevice) {
+ if (mDevice->ops->set_preview_window) {
+ mPreviewWindow = buf;
+ if (buf != nullptr) {
+ if (mPreviewScalingMode != NOT_SET) {
+ setPreviewScalingMode(mPreviewScalingMode);
+ }
+ if (mPreviewTransform != NOT_SET) {
+ setPreviewTransform(mPreviewTransform);
+ }
+ }
+ mHalPreviewWindow.user = this;
+ ALOGV("%s &mHalPreviewWindow %p mHalPreviewWindow.user %p",__FUNCTION__,
+ &mHalPreviewWindow, mHalPreviewWindow.user);
+ return mDevice->ops->set_preview_window(mDevice,
+ buf.get() ? &mHalPreviewWindow.nw : 0);
+ }
+ }
+ return INVALID_OPERATION;
+}
+
+void CameraHardwareInterface::setCallbacks(notify_callback notify_cb,
+ data_callback data_cb,
+ data_callback_timestamp data_cb_timestamp,
+ data_callback_timestamp_batch data_cb_timestamp_batch,
+ void* user)
+{
+ mNotifyCb = notify_cb;
+ mDataCb = data_cb;
+ mDataCbTimestamp = data_cb_timestamp;
+ mDataCbTimestampBatch = data_cb_timestamp_batch;
+ mCbUser = user;
+
+ ALOGV("%s(%s)", __FUNCTION__, mName.string());
+
+ if (mDevice && mDevice->ops->set_callbacks) {
+ mDevice->ops->set_callbacks(mDevice,
+ sNotifyCb,
+ sDataCb,
+ sDataCbTimestamp,
+ sGetMemory,
+ this);
+ }
+}
+
+void CameraHardwareInterface::enableMsgType(int32_t msgType)
+{
+ ALOGV("%s(%s)", __FUNCTION__, mName.string());
+ if (CC_LIKELY(mHidlDevice != nullptr)) {
+ mHidlDevice->enableMsgType(msgType);
+ } else if (mDevice && mDevice->ops->enable_msg_type) {
+ mDevice->ops->enable_msg_type(mDevice, msgType);
+ }
+}
+
+void CameraHardwareInterface::disableMsgType(int32_t msgType)
+{
+ ALOGV("%s(%s)", __FUNCTION__, mName.string());
+ if (CC_LIKELY(mHidlDevice != nullptr)) {
+ mHidlDevice->disableMsgType(msgType);
+ } else if (mDevice && mDevice->ops->disable_msg_type) {
+ mDevice->ops->disable_msg_type(mDevice, msgType);
+ }
+}
+
+int CameraHardwareInterface::msgTypeEnabled(int32_t msgType)
+{
+ ALOGV("%s(%s)", __FUNCTION__, mName.string());
+ if (CC_LIKELY(mHidlDevice != nullptr)) {
+ return mHidlDevice->msgTypeEnabled(msgType);
+ } else if (mDevice && mDevice->ops->msg_type_enabled) {
+ return mDevice->ops->msg_type_enabled(mDevice, msgType);
+ }
+ return false;
+}
+
+status_t CameraHardwareInterface::startPreview()
+{
+ ALOGV("%s(%s)", __FUNCTION__, mName.string());
+ if (CC_LIKELY(mHidlDevice != nullptr)) {
+ return CameraProviderManager::mapToStatusT(
+ mHidlDevice->startPreview());
+ } else if (mDevice && mDevice->ops->start_preview) {
+ return mDevice->ops->start_preview(mDevice);
+ }
+ return INVALID_OPERATION;
+}
+
+void CameraHardwareInterface::stopPreview()
+{
+ ALOGV("%s(%s)", __FUNCTION__, mName.string());
+ if (CC_LIKELY(mHidlDevice != nullptr)) {
+ mHidlDevice->stopPreview();
+ } else if (mDevice && mDevice->ops->stop_preview) {
+ mDevice->ops->stop_preview(mDevice);
+ }
+}
+
+int CameraHardwareInterface::previewEnabled()
+{
+ ALOGV("%s(%s)", __FUNCTION__, mName.string());
+ if (CC_LIKELY(mHidlDevice != nullptr)) {
+ return mHidlDevice->previewEnabled();
+ } else if (mDevice && mDevice->ops->preview_enabled) {
+ return mDevice->ops->preview_enabled(mDevice);
+ }
+ return false;
+}
+
+status_t CameraHardwareInterface::storeMetaDataInBuffers(int enable)
+{
+ ALOGV("%s(%s)", __FUNCTION__, mName.string());
+ if (CC_LIKELY(mHidlDevice != nullptr)) {
+ return CameraProviderManager::mapToStatusT(
+ mHidlDevice->storeMetaDataInBuffers(enable));
+ } else if (mDevice && mDevice->ops->store_meta_data_in_buffers) {
+ return mDevice->ops->store_meta_data_in_buffers(mDevice, enable);
+ }
+ return enable ? INVALID_OPERATION: OK;
+}
+
+status_t CameraHardwareInterface::startRecording()
+{
+ ALOGV("%s(%s)", __FUNCTION__, mName.string());
+ if (CC_LIKELY(mHidlDevice != nullptr)) {
+ return CameraProviderManager::mapToStatusT(
+ mHidlDevice->startRecording());
+ } else if (mDevice && mDevice->ops->start_recording) {
+ return mDevice->ops->start_recording(mDevice);
+ }
+ return INVALID_OPERATION;
+}
+
+/**
+ * Stop a previously started recording.
+ */
+void CameraHardwareInterface::stopRecording()
+{
+ ALOGV("%s(%s)", __FUNCTION__, mName.string());
+ if (CC_LIKELY(mHidlDevice != nullptr)) {
+ mHidlDevice->stopRecording();
+ } else if (mDevice && mDevice->ops->stop_recording) {
+ mDevice->ops->stop_recording(mDevice);
+ }
+}
+
+/**
+ * Returns true if recording is enabled.
+ */
+int CameraHardwareInterface::recordingEnabled()
+{
+ ALOGV("%s(%s)", __FUNCTION__, mName.string());
+ if (CC_LIKELY(mHidlDevice != nullptr)) {
+ return mHidlDevice->recordingEnabled();
+ } else if (mDevice && mDevice->ops->recording_enabled) {
+ return mDevice->ops->recording_enabled(mDevice);
+ }
+ return false;
+}
+
+void CameraHardwareInterface::releaseRecordingFrame(const sp<IMemory>& mem)
+{
+ ALOGV("%s(%s)", __FUNCTION__, mName.string());
+ ssize_t offset;
+ size_t size;
+ sp<IMemoryHeap> heap = mem->getMemory(&offset, &size);
+ int heapId = heap->getHeapID();
+ int bufferIndex = offset / size;
+ if (CC_LIKELY(mHidlDevice != nullptr)) {
+ if (size == sizeof(VideoNativeHandleMetadata)) {
+ VideoNativeHandleMetadata* md = (VideoNativeHandleMetadata*) mem->pointer();
+ // Caching the handle here because md->pHandle will be subject to HAL's edit
+ native_handle_t* nh = md->pHandle;
+ hidl_handle frame = nh;
+ mHidlDevice->releaseRecordingFrameHandle(heapId, bufferIndex, frame);
+ native_handle_close(nh);
+ native_handle_delete(nh);
+ } else {
+ mHidlDevice->releaseRecordingFrame(heapId, bufferIndex);
+ }
+ } else if (mDevice && mDevice->ops->release_recording_frame) {
+ void *data = ((uint8_t *)heap->base()) + offset;
+ return mDevice->ops->release_recording_frame(mDevice, data);
+ }
+}
+
+void CameraHardwareInterface::releaseRecordingFrameBatch(const std::vector<sp<IMemory>>& frames)
+{
+ ALOGV("%s(%s)", __FUNCTION__, mName.string());
+ size_t n = frames.size();
+ std::vector<VideoFrameMessage> msgs;
+ msgs.reserve(n);
+ for (auto& mem : frames) {
+ if (CC_LIKELY(mHidlDevice != nullptr)) {
+ ssize_t offset;
+ size_t size;
+ sp<IMemoryHeap> heap = mem->getMemory(&offset, &size);
+ if (size == sizeof(VideoNativeHandleMetadata)) {
+ uint32_t heapId = heap->getHeapID();
+ uint32_t bufferIndex = offset / size;
+ VideoNativeHandleMetadata* md = (VideoNativeHandleMetadata*) mem->pointer();
+ // Caching the handle here because md->pHandle will be subject to HAL's edit
+ native_handle_t* nh = md->pHandle;
+ VideoFrameMessage msg;
+ msgs.push_back({nh, heapId, bufferIndex});
+ } else {
+ ALOGE("%s only supports VideoNativeHandleMetadata mode", __FUNCTION__);
+ return;
+ }
+ } else {
+ ALOGE("Non HIDL mode do not support %s", __FUNCTION__);
+ return;
+ }
+ }
+
+ mHidlDevice->releaseRecordingFrameHandleBatch(msgs);
+
+ for (auto& msg : msgs) {
+ native_handle_t* nh = const_cast<native_handle_t*>(msg.frameData.getNativeHandle());
+ native_handle_close(nh);
+ native_handle_delete(nh);
+ }
+}
+
+status_t CameraHardwareInterface::autoFocus()
+{
+ ALOGV("%s(%s)", __FUNCTION__, mName.string());
+ if (CC_LIKELY(mHidlDevice != nullptr)) {
+ return CameraProviderManager::mapToStatusT(
+ mHidlDevice->autoFocus());
+ } else if (mDevice && mDevice->ops->auto_focus) {
+ return mDevice->ops->auto_focus(mDevice);
+ }
+ return INVALID_OPERATION;
+}
+
+status_t CameraHardwareInterface::cancelAutoFocus()
+{
+ ALOGV("%s(%s)", __FUNCTION__, mName.string());
+ if (CC_LIKELY(mHidlDevice != nullptr)) {
+ return CameraProviderManager::mapToStatusT(
+ mHidlDevice->cancelAutoFocus());
+ } else if (mDevice && mDevice->ops->cancel_auto_focus) {
+ return mDevice->ops->cancel_auto_focus(mDevice);
+ }
+ return INVALID_OPERATION;
+}
+
+status_t CameraHardwareInterface::takePicture()
+{
+ ALOGV("%s(%s)", __FUNCTION__, mName.string());
+ if (CC_LIKELY(mHidlDevice != nullptr)) {
+ return CameraProviderManager::mapToStatusT(
+ mHidlDevice->takePicture());
+ } else if (mDevice && mDevice->ops->take_picture) {
+ return mDevice->ops->take_picture(mDevice);
+ }
+ return INVALID_OPERATION;
+}
+
+status_t CameraHardwareInterface::cancelPicture()
+{
+ ALOGV("%s(%s)", __FUNCTION__, mName.string());
+ if (CC_LIKELY(mHidlDevice != nullptr)) {
+ return CameraProviderManager::mapToStatusT(
+ mHidlDevice->cancelPicture());
+ } else if (mDevice && mDevice->ops->cancel_picture) {
+ return mDevice->ops->cancel_picture(mDevice);
+ }
+ return INVALID_OPERATION;
+}
+
+status_t CameraHardwareInterface::setParameters(const CameraParameters ¶ms)
+{
+ ALOGV("%s(%s)", __FUNCTION__, mName.string());
+ if (CC_LIKELY(mHidlDevice != nullptr)) {
+ return CameraProviderManager::mapToStatusT(
+ mHidlDevice->setParameters(params.flatten().string()));
+ } else if (mDevice && mDevice->ops->set_parameters) {
+ return mDevice->ops->set_parameters(mDevice, params.flatten().string());
+ }
+ return INVALID_OPERATION;
+}
+
+CameraParameters CameraHardwareInterface::getParameters() const
+{
+ ALOGV("%s(%s)", __FUNCTION__, mName.string());
+ CameraParameters parms;
+ if (CC_LIKELY(mHidlDevice != nullptr)) {
+ hardware::hidl_string outParam;
+ mHidlDevice->getParameters(
+ [&outParam](const auto& outStr) {
+ outParam = outStr;
+ });
+ String8 tmp(outParam.c_str());
+ parms.unflatten(tmp);
+ } else if (mDevice && mDevice->ops->get_parameters) {
+ char *temp = mDevice->ops->get_parameters(mDevice);
+ String8 str_parms(temp);
+ if (mDevice->ops->put_parameters)
+ mDevice->ops->put_parameters(mDevice, temp);
+ else
+ free(temp);
+ parms.unflatten(str_parms);
+ }
+ return parms;
+}
+
+status_t CameraHardwareInterface::sendCommand(int32_t cmd, int32_t arg1, int32_t arg2)
+{
+ ALOGV("%s(%s)", __FUNCTION__, mName.string());
+ if (CC_LIKELY(mHidlDevice != nullptr)) {
+ return CameraProviderManager::mapToStatusT(
+ mHidlDevice->sendCommand((CommandType) cmd, arg1, arg2));
+ } else if (mDevice && mDevice->ops->send_command) {
+ return mDevice->ops->send_command(mDevice, cmd, arg1, arg2);
+ }
+ return INVALID_OPERATION;
+}
+
+/**
+ * Release the hardware resources owned by this object. Note that this is
+ * *not* done in the destructor.
+ */
+void CameraHardwareInterface::release() {
+ ALOGV("%s(%s)", __FUNCTION__, mName.string());
+ if (CC_LIKELY(mHidlDevice != nullptr)) {
+ mHidlDevice->close();
+ mHidlDevice.clear();
+ } else if (mDevice && mDevice->ops->release) {
+ mDevice->ops->release(mDevice);
+ }
+}
+
+/**
+ * Dump state of the camera hardware
+ */
+status_t CameraHardwareInterface::dump(int fd, const Vector<String16>& /*args*/) const
+{
+ ALOGV("%s(%s)", __FUNCTION__, mName.string());
+ if (CC_LIKELY(mHidlDevice != nullptr)) {
+ native_handle_t* handle = native_handle_create(1,0);
+ handle->data[0] = fd;
+ Status s = mHidlDevice->dumpState(handle);
+ native_handle_delete(handle);
+ return CameraProviderManager::mapToStatusT(s);
+ } else if (mDevice && mDevice->ops->dump) {
+ return mDevice->ops->dump(mDevice, fd);
+ }
+ return OK; // It's fine if the HAL doesn't implement dump()
+}
+
+/**
+ * Methods for legacy (non-HIDL) path follows
+ */
+void CameraHardwareInterface::sNotifyCb(int32_t msg_type, int32_t ext1,
+ int32_t ext2, void *user)
+{
+ ALOGV("%s", __FUNCTION__);
+ CameraHardwareInterface *object =
+ static_cast<CameraHardwareInterface *>(user);
+ object->mNotifyCb(msg_type, ext1, ext2, object->mCbUser);
+}
+
+void CameraHardwareInterface::sDataCb(int32_t msg_type,
+ const camera_memory_t *data, unsigned int index,
+ camera_frame_metadata_t *metadata,
+ void *user)
+{
+ ALOGV("%s", __FUNCTION__);
+ CameraHardwareInterface *object =
+ static_cast<CameraHardwareInterface *>(user);
+ sp<CameraHeapMemory> mem(static_cast<CameraHeapMemory *>(data->handle));
+ if (index >= mem->mNumBufs) {
+ ALOGE("%s: invalid buffer index %d, max allowed is %d", __FUNCTION__,
+ index, mem->mNumBufs);
+ return;
+ }
+ object->mDataCb(msg_type, mem->mBuffers[index], metadata, object->mCbUser);
+}
+
+void CameraHardwareInterface::sDataCbTimestamp(nsecs_t timestamp, int32_t msg_type,
+ const camera_memory_t *data, unsigned index,
+ void *user)
+{
+ ALOGV("%s", __FUNCTION__);
+ CameraHardwareInterface *object =
+ static_cast<CameraHardwareInterface *>(user);
+ // Start refcounting the heap object from here on. When the clients
+ // drop all references, it will be destroyed (as well as the enclosed
+ // MemoryHeapBase.
+ sp<CameraHeapMemory> mem(static_cast<CameraHeapMemory *>(data->handle));
+ if (index >= mem->mNumBufs) {
+ ALOGE("%s: invalid buffer index %d, max allowed is %d", __FUNCTION__,
+ index, mem->mNumBufs);
+ return;
+ }
+ object->mDataCbTimestamp(timestamp, msg_type, mem->mBuffers[index], object->mCbUser);
+}
+
+camera_memory_t* CameraHardwareInterface::sGetMemory(
+ int fd, size_t buf_size, uint_t num_bufs,
+ void *user __attribute__((unused)))
+{
+ CameraHeapMemory *mem;
+ if (fd < 0) {
+ mem = new CameraHeapMemory(buf_size, num_bufs);
+ } else {
+ mem = new CameraHeapMemory(fd, buf_size, num_bufs);
+ }
+ mem->incStrong(mem);
+ return &mem->handle;
+}
+
+void CameraHardwareInterface::sPutMemory(camera_memory_t *data)
+{
+ if (!data) {
+ return;
+ }
+
+ CameraHeapMemory *mem = static_cast<CameraHeapMemory *>(data->handle);
+ mem->decStrong(mem);
+}
+
+ANativeWindow* CameraHardwareInterface::sToAnw(void *user)
+{
+ CameraHardwareInterface *object =
+ reinterpret_cast<CameraHardwareInterface *>(user);
+ return object->mPreviewWindow.get();
+}
+#define anw(n) sToAnw(((struct camera_preview_window *)(n))->user)
+#define hwi(n) reinterpret_cast<CameraHardwareInterface *>(\
+ ((struct camera_preview_window *)(n))->user)
+
+int CameraHardwareInterface::sDequeueBuffer(struct preview_stream_ops* w,
+ buffer_handle_t** buffer, int *stride)
+{
+ int rc;
+ ANativeWindow *a = anw(w);
+ ANativeWindowBuffer* anb;
+ rc = native_window_dequeue_buffer_and_wait(a, &anb);
+ if (rc == OK) {
+ *buffer = &anb->handle;
+ *stride = anb->stride;
+ }
+ return rc;
+}
+
+#ifndef container_of
+#define container_of(ptr, type, member) ({ \
+ const __typeof__(((type *) 0)->member) *__mptr = (ptr); \
+ (type *) ((char *) __mptr - (char *)(&((type *)0)->member)); })
+#endif
+
+int CameraHardwareInterface::sLockBuffer(struct preview_stream_ops* w,
+ buffer_handle_t* /*buffer*/)
+{
+ ANativeWindow *a = anw(w);
+ (void)a;
+ return 0;
+}
+
+int CameraHardwareInterface::sEnqueueBuffer(struct preview_stream_ops* w,
+ buffer_handle_t* buffer)
+{
+ ANativeWindow *a = anw(w);
+ return a->queueBuffer(a,
+ container_of(buffer, ANativeWindowBuffer, handle), -1);
+}
+
+int CameraHardwareInterface::sCancelBuffer(struct preview_stream_ops* w,
+ buffer_handle_t* buffer)
+{
+ ANativeWindow *a = anw(w);
+ return a->cancelBuffer(a,
+ container_of(buffer, ANativeWindowBuffer, handle), -1);
+}
+
+int CameraHardwareInterface::sSetBufferCount(struct preview_stream_ops* w, int count)
+{
+ ANativeWindow *a = anw(w);
+
+ if (a != nullptr) {
+ // Workaround for b/27039775
+ // Previously, setting the buffer count would reset the buffer
+ // queue's flag that allows for all buffers to be dequeued on the
+ // producer side, instead of just the producer's declared max count,
+ // if no filled buffers have yet been queued by the producer. This
+ // reset no longer happens, but some HALs depend on this behavior,
+ // so it needs to be maintained for HAL backwards compatibility.
+ // Simulate the prior behavior by disconnecting/reconnecting to the
+ // window and setting the values again. This has the drawback of
+ // actually causing memory reallocation, which may not have happened
+ // in the past.
+ CameraHardwareInterface *hw = hwi(w);
+ native_window_api_disconnect(a, NATIVE_WINDOW_API_CAMERA);
+ native_window_api_connect(a, NATIVE_WINDOW_API_CAMERA);
+ if (hw->mPreviewScalingMode != NOT_SET) {
+ native_window_set_scaling_mode(a, hw->mPreviewScalingMode);
+ }
+ if (hw->mPreviewTransform != NOT_SET) {
+ native_window_set_buffers_transform(a, hw->mPreviewTransform);
+ }
+ if (hw->mPreviewWidth != NOT_SET) {
+ native_window_set_buffers_dimensions(a,
+ hw->mPreviewWidth, hw->mPreviewHeight);
+ native_window_set_buffers_format(a, hw->mPreviewFormat);
+ }
+ if (hw->mPreviewUsage != 0) {
+ native_window_set_usage(a, hw->mPreviewUsage);
+ }
+ if (hw->mPreviewSwapInterval != NOT_SET) {
+ a->setSwapInterval(a, hw->mPreviewSwapInterval);
+ }
+ if (hw->mPreviewCrop.left != NOT_SET) {
+ native_window_set_crop(a, &(hw->mPreviewCrop));
+ }
+ }
+
+ return native_window_set_buffer_count(a, count);
+}
+
+int CameraHardwareInterface::sSetBuffersGeometry(struct preview_stream_ops* w,
+ int width, int height, int format)
+{
+ int rc;
+ ANativeWindow *a = anw(w);
+ CameraHardwareInterface *hw = hwi(w);
+ hw->mPreviewWidth = width;
+ hw->mPreviewHeight = height;
+ hw->mPreviewFormat = format;
+ rc = native_window_set_buffers_dimensions(a, width, height);
+ if (rc == OK) {
+ rc = native_window_set_buffers_format(a, format);
+ }
+ return rc;
+}
+
+int CameraHardwareInterface::sSetCrop(struct preview_stream_ops *w,
+ int left, int top, int right, int bottom)
+{
+ ANativeWindow *a = anw(w);
+ CameraHardwareInterface *hw = hwi(w);
+ hw->mPreviewCrop.left = left;
+ hw->mPreviewCrop.top = top;
+ hw->mPreviewCrop.right = right;
+ hw->mPreviewCrop.bottom = bottom;
+ return native_window_set_crop(a, &(hw->mPreviewCrop));
+}
+
+int CameraHardwareInterface::sSetTimestamp(struct preview_stream_ops *w,
+ int64_t timestamp) {
+ ANativeWindow *a = anw(w);
+ return native_window_set_buffers_timestamp(a, timestamp);
+}
+
+int CameraHardwareInterface::sSetUsage(struct preview_stream_ops* w, int usage)
+{
+ ANativeWindow *a = anw(w);
+ CameraHardwareInterface *hw = hwi(w);
+ hw->mPreviewUsage = usage;
+ return native_window_set_usage(a, usage);
+}
+
+int CameraHardwareInterface::sSetSwapInterval(struct preview_stream_ops *w, int interval)
+{
+ ANativeWindow *a = anw(w);
+ CameraHardwareInterface *hw = hwi(w);
+ hw->mPreviewSwapInterval = interval;
+ return a->setSwapInterval(a, interval);
+}
+
+int CameraHardwareInterface::sGetMinUndequeuedBufferCount(
+ const struct preview_stream_ops *w,
+ int *count)
+{
+ ANativeWindow *a = anw(w);
+ return a->query(a, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, count);
+}
+
+void CameraHardwareInterface::initHalPreviewWindow()
+{
+ mHalPreviewWindow.nw.cancel_buffer = sCancelBuffer;
+ mHalPreviewWindow.nw.lock_buffer = sLockBuffer;
+ mHalPreviewWindow.nw.dequeue_buffer = sDequeueBuffer;
+ mHalPreviewWindow.nw.enqueue_buffer = sEnqueueBuffer;
+ mHalPreviewWindow.nw.set_buffer_count = sSetBufferCount;
+ mHalPreviewWindow.nw.set_buffers_geometry = sSetBuffersGeometry;
+ mHalPreviewWindow.nw.set_crop = sSetCrop;
+ mHalPreviewWindow.nw.set_timestamp = sSetTimestamp;
+ mHalPreviewWindow.nw.set_usage = sSetUsage;
+ mHalPreviewWindow.nw.set_swap_interval = sSetSwapInterval;
+
+ mHalPreviewWindow.nw.get_min_undequeued_buffer_count =
+ sGetMinUndequeuedBufferCount;
+}
+
+}; // namespace android
diff --git a/services/camera/libcameraservice/device1/CameraHardwareInterface.h b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
index 952bae1..1c38d00 100644
--- a/services/camera/libcameraservice/device1/CameraHardwareInterface.h
+++ b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
@@ -17,6 +17,7 @@
#ifndef ANDROID_HARDWARE_CAMERA_HARDWARE_INTERFACE_H
#define ANDROID_HARDWARE_CAMERA_HARDWARE_INTERFACE_H
+#include <unordered_map>
#include <binder/IMemory.h>
#include <binder/MemoryBase.h>
#include <binder/MemoryHeapBase.h>
@@ -27,6 +28,8 @@
#include <system/window.h>
#include <hardware/camera.h>
+#include <common/CameraProviderManager.h>
+
namespace android {
typedef void (*notify_callback)(int32_t msgType,
@@ -44,6 +47,15 @@
const sp<IMemory> &dataPtr,
void *user);
+struct HandleTimestampMessage {
+ nsecs_t timestamp;
+ const sp<IMemory> dataPtr;
+};
+
+typedef void (*data_callback_timestamp_batch)(
+ int32_t msgType,
+ const std::vector<HandleTimestampMessage>&, void* user);
+
/**
* CameraHardwareInterface.h defines the interface to the
* camera hardware abstraction layer, used for setting and getting
@@ -71,10 +83,15 @@
* provided in a data callback must be copied if it's needed after returning.
*/
-class CameraHardwareInterface : public virtual RefBase {
+class CameraHardwareInterface :
+ public virtual RefBase,
+ public virtual hardware::camera::device::V1_0::ICameraDeviceCallback,
+ public virtual hardware::camera::device::V1_0::ICameraDevicePreviewCallback {
+
public:
explicit CameraHardwareInterface(const char *name):
mDevice(nullptr),
+ mHidlDevice(nullptr),
mName(name),
mPreviewScalingMode(NOT_SET),
mPreviewTransform(NOT_SET),
@@ -87,109 +104,23 @@
{
}
- ~CameraHardwareInterface()
- {
- ALOGI("Destroying camera %s", mName.string());
- if(mDevice) {
- int rc = mDevice->common.close(&mDevice->common);
- if (rc != OK)
- ALOGE("Could not close camera %s: %d", mName.string(), rc);
- }
- }
+ ~CameraHardwareInterface();
- status_t initialize(CameraModule *module)
- {
- ALOGI("Opening camera %s", mName.string());
- camera_info info;
- status_t res = module->getCameraInfo(atoi(mName.string()), &info);
- if (res != OK) {
- return res;
- }
-
- int rc = OK;
- if (module->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_3 &&
- info.device_version > CAMERA_DEVICE_API_VERSION_1_0) {
- // Open higher version camera device as HAL1.0 device.
- rc = module->openLegacy(mName.string(),
- CAMERA_DEVICE_API_VERSION_1_0,
- (hw_device_t **)&mDevice);
- } else {
- rc = module->open(mName.string(), (hw_device_t **)&mDevice);
- }
- if (rc != OK) {
- ALOGE("Could not open camera %s: %d", mName.string(), rc);
- return rc;
- }
- initHalPreviewWindow();
- return rc;
- }
+ status_t initialize(sp<CameraProviderManager> manager);
/** Set the ANativeWindow to which preview frames are sent */
- status_t setPreviewWindow(const sp<ANativeWindow>& buf)
- {
- ALOGV("%s(%s) buf %p", __FUNCTION__, mName.string(), buf.get());
- if (mDevice->ops->set_preview_window) {
- mPreviewWindow = buf;
- if (buf != nullptr) {
- if (mPreviewScalingMode != NOT_SET) {
- setPreviewScalingMode(mPreviewScalingMode);
- }
- if (mPreviewTransform != NOT_SET) {
- setPreviewTransform(mPreviewTransform);
- }
- }
- mHalPreviewWindow.user = this;
- ALOGV("%s &mHalPreviewWindow %p mHalPreviewWindow.user %p", __FUNCTION__,
- &mHalPreviewWindow, mHalPreviewWindow.user);
- return mDevice->ops->set_preview_window(mDevice,
- buf.get() ? &mHalPreviewWindow.nw : 0);
- }
- return INVALID_OPERATION;
- }
+ status_t setPreviewWindow(const sp<ANativeWindow>& buf);
- status_t setPreviewScalingMode(int scalingMode)
- {
- int rc = OK;
- mPreviewScalingMode = scalingMode;
- if (mPreviewWindow != nullptr) {
- rc = native_window_set_scaling_mode(mPreviewWindow.get(),
- scalingMode);
- }
- return rc;
- }
+ status_t setPreviewScalingMode(int scalingMode);
- status_t setPreviewTransform(int transform) {
- int rc = OK;
- mPreviewTransform = transform;
- if (mPreviewWindow != nullptr) {
- rc = native_window_set_buffers_transform(mPreviewWindow.get(),
- mPreviewTransform);
- }
- return rc;
- }
+ status_t setPreviewTransform(int transform);
/** Set the notification and data callbacks */
void setCallbacks(notify_callback notify_cb,
data_callback data_cb,
data_callback_timestamp data_cb_timestamp,
- void* user)
- {
- mNotifyCb = notify_cb;
- mDataCb = data_cb;
- mDataCbTimestamp = data_cb_timestamp;
- mCbUser = user;
-
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
-
- if (mDevice->ops->set_callbacks) {
- mDevice->ops->set_callbacks(mDevice,
- __notify_cb,
- __data_cb,
- __data_cb_timestamp,
- __get_memory,
- this);
- }
- }
+ data_callback_timestamp_batch data_cb_timestamp_batch,
+ void* user);
/**
* The following three functions all take a msgtype,
@@ -200,12 +131,7 @@
/**
* Enable a message, or set of messages.
*/
- void enableMsgType(int32_t msgType)
- {
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (mDevice->ops->enable_msg_type)
- mDevice->ops->enable_msg_type(mDevice, msgType);
- }
+ void enableMsgType(int32_t msgType);
/**
* Disable a message, or a set of messages.
@@ -217,57 +143,29 @@
* modify/access any video recording frame after calling
* disableMsgType(CAMERA_MSG_VIDEO_FRAME).
*/
- void disableMsgType(int32_t msgType)
- {
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (mDevice->ops->disable_msg_type)
- mDevice->ops->disable_msg_type(mDevice, msgType);
- }
+ void disableMsgType(int32_t msgType);
/**
* Query whether a message, or a set of messages, is enabled.
* Note that this is operates as an AND, if any of the messages
* queried are off, this will return false.
*/
- int msgTypeEnabled(int32_t msgType)
- {
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (mDevice->ops->msg_type_enabled)
- return mDevice->ops->msg_type_enabled(mDevice, msgType);
- return false;
- }
+ int msgTypeEnabled(int32_t msgType);
/**
* Start preview mode.
*/
- status_t startPreview()
- {
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (mDevice->ops->start_preview)
- return mDevice->ops->start_preview(mDevice);
- return INVALID_OPERATION;
- }
+ status_t startPreview();
/**
* Stop a previously started preview.
*/
- void stopPreview()
- {
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (mDevice->ops->stop_preview)
- mDevice->ops->stop_preview(mDevice);
- }
+ void stopPreview();
/**
* Returns true if preview is enabled.
*/
- int previewEnabled()
- {
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (mDevice->ops->preview_enabled)
- return mDevice->ops->preview_enabled(mDevice);
- return false;
- }
+ int previewEnabled();
/**
* Request the camera hal to store meta data or real YUV data in
@@ -301,13 +199,7 @@
* @return OK on success.
*/
- status_t storeMetaDataInBuffers(int enable)
- {
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (mDevice->ops->store_meta_data_in_buffers)
- return mDevice->ops->store_meta_data_in_buffers(mDevice, enable);
- return enable ? INVALID_OPERATION: OK;
- }
+ status_t storeMetaDataInBuffers(int enable);
/**
* Start record mode. When a record image is available a CAMERA_MSG_VIDEO_FRAME
@@ -318,34 +210,17 @@
* to manage the life-cycle of the video recording frames, and the client must
* not modify/access any video recording frames.
*/
- status_t startRecording()
- {
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (mDevice->ops->start_recording)
- return mDevice->ops->start_recording(mDevice);
- return INVALID_OPERATION;
- }
+ status_t startRecording();
/**
* Stop a previously started recording.
*/
- void stopRecording()
- {
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (mDevice->ops->stop_recording)
- mDevice->ops->stop_recording(mDevice);
- }
+ void stopRecording();
/**
* Returns true if recording is enabled.
*/
- int recordingEnabled()
- {
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (mDevice->ops->recording_enabled)
- return mDevice->ops->recording_enabled(mDevice);
- return false;
- }
+ int recordingEnabled();
/**
* Release a record frame previously returned by CAMERA_MSG_VIDEO_FRAME.
@@ -357,30 +232,28 @@
* responsibility of managing the life-cycle of the video recording
* frames.
*/
- void releaseRecordingFrame(const sp<IMemory>& mem)
- {
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (mDevice->ops->release_recording_frame) {
- ssize_t offset;
- size_t size;
- sp<IMemoryHeap> heap = mem->getMemory(&offset, &size);
- void *data = ((uint8_t *)heap->base()) + offset;
- return mDevice->ops->release_recording_frame(mDevice, data);
- }
- }
+ void releaseRecordingFrame(const sp<IMemory>& mem);
+
+ /**
+ * Release a batch of recording frames previously returned by
+ * CAMERA_MSG_VIDEO_FRAME. This method only supports frames that are
+ * stored as VideoNativeHandleMetadata.
+ *
+ * It is camera hal client's responsibility to release video recording
+ * frames sent out by the camera hal before the camera hal receives
+ * a call to disableMsgType(CAMERA_MSG_VIDEO_FRAME). After it receives
+ * the call to disableMsgType(CAMERA_MSG_VIDEO_FRAME), it is camera hal's
+ * responsibility of managing the life-cycle of the video recording
+ * frames.
+ */
+ void releaseRecordingFrameBatch(const std::vector<sp<IMemory>>& frames);
/**
* Start auto focus, the notification callback routine is called
* with CAMERA_MSG_FOCUS once when focusing is complete. autoFocus()
* will be called again if another auto focus is needed.
*/
- status_t autoFocus()
- {
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (mDevice->ops->auto_focus)
- return mDevice->ops->auto_focus(mDevice);
- return INVALID_OPERATION;
- }
+ status_t autoFocus();
/**
* Cancels auto-focus function. If the auto-focus is still in progress,
@@ -388,151 +261,63 @@
* or not, this function will return the focus position to the default.
* If the camera does not support auto-focus, this is a no-op.
*/
- status_t cancelAutoFocus()
- {
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (mDevice->ops->cancel_auto_focus)
- return mDevice->ops->cancel_auto_focus(mDevice);
- return INVALID_OPERATION;
- }
+ status_t cancelAutoFocus();
/**
* Take a picture.
*/
- status_t takePicture()
- {
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (mDevice->ops->take_picture)
- return mDevice->ops->take_picture(mDevice);
- return INVALID_OPERATION;
- }
+ status_t takePicture();
/**
* Cancel a picture that was started with takePicture. Calling this
* method when no picture is being taken is a no-op.
*/
- status_t cancelPicture()
- {
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (mDevice->ops->cancel_picture)
- return mDevice->ops->cancel_picture(mDevice);
- return INVALID_OPERATION;
- }
+ status_t cancelPicture();
/**
* Set the camera parameters. This returns BAD_VALUE if any parameter is
* invalid or not supported. */
- status_t setParameters(const CameraParameters ¶ms)
- {
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (mDevice->ops->set_parameters)
- return mDevice->ops->set_parameters(mDevice,
- params.flatten().string());
- return INVALID_OPERATION;
- }
+ status_t setParameters(const CameraParameters ¶ms);
/** Return the camera parameters. */
- CameraParameters getParameters() const
- {
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- CameraParameters parms;
- if (mDevice->ops->get_parameters) {
- char *temp = mDevice->ops->get_parameters(mDevice);
- String8 str_parms(temp);
- if (mDevice->ops->put_parameters)
- mDevice->ops->put_parameters(mDevice, temp);
- else
- free(temp);
- parms.unflatten(str_parms);
- }
- return parms;
- }
+ CameraParameters getParameters() const;
/**
* Send command to camera driver.
*/
- status_t sendCommand(int32_t cmd, int32_t arg1, int32_t arg2)
- {
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (mDevice->ops->send_command)
- return mDevice->ops->send_command(mDevice, cmd, arg1, arg2);
- return INVALID_OPERATION;
- }
+ status_t sendCommand(int32_t cmd, int32_t arg1, int32_t arg2);
/**
* Release the hardware resources owned by this object. Note that this is
* *not* done in the destructor.
*/
- void release() {
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (mDevice->ops->release)
- mDevice->ops->release(mDevice);
- }
+ void release();
/**
* Dump state of the camera hardware
*/
- status_t dump(int fd, const Vector<String16>& /*args*/) const
- {
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (mDevice->ops->dump)
- return mDevice->ops->dump(mDevice, fd);
- return OK; // It's fine if the HAL doesn't implement dump()
- }
+ status_t dump(int fd, const Vector<String16>& /*args*/) const;
private:
camera_device_t *mDevice;
+ sp<hardware::camera::device::V1_0::ICameraDevice> mHidlDevice;
String8 mName;
- static void __notify_cb(int32_t msg_type, int32_t ext1,
- int32_t ext2, void *user)
- {
- ALOGV("%s", __FUNCTION__);
- CameraHardwareInterface *__this =
- static_cast<CameraHardwareInterface *>(user);
- __this->mNotifyCb(msg_type, ext1, ext2, __this->mCbUser);
- }
+ static void sNotifyCb(int32_t msg_type, int32_t ext1,
+ int32_t ext2, void *user);
- static void __data_cb(int32_t msg_type,
+ static void sDataCb(int32_t msg_type,
const camera_memory_t *data, unsigned int index,
camera_frame_metadata_t *metadata,
- void *user)
- {
- ALOGV("%s", __FUNCTION__);
- CameraHardwareInterface *__this =
- static_cast<CameraHardwareInterface *>(user);
- sp<CameraHeapMemory> mem(static_cast<CameraHeapMemory *>(data->handle));
- if (index >= mem->mNumBufs) {
- ALOGE("%s: invalid buffer index %d, max allowed is %d", __FUNCTION__,
- index, mem->mNumBufs);
- return;
- }
- __this->mDataCb(msg_type, mem->mBuffers[index], metadata, __this->mCbUser);
- }
+ void *user);
- static void __data_cb_timestamp(nsecs_t timestamp, int32_t msg_type,
+ static void sDataCbTimestamp(nsecs_t timestamp, int32_t msg_type,
const camera_memory_t *data, unsigned index,
- void *user)
- {
- ALOGV("%s", __FUNCTION__);
- CameraHardwareInterface *__this =
- static_cast<CameraHardwareInterface *>(user);
- // Start refcounting the heap object from here on. When the clients
- // drop all references, it will be destroyed (as well as the enclosed
- // MemoryHeapBase.
- sp<CameraHeapMemory> mem(static_cast<CameraHeapMemory *>(data->handle));
- if (index >= mem->mNumBufs) {
- ALOGE("%s: invalid buffer index %d, max allowed is %d", __FUNCTION__,
- index, mem->mNumBufs);
- return;
- }
- __this->mDataCbTimestamp(timestamp, msg_type, mem->mBuffers[index], __this->mCbUser);
- }
+ void *user);
// This is a utility class that combines a MemoryHeapBase and a MemoryBase
// in one. Since we tend to use them in a one-to-one relationship, this is
// handy.
-
class CameraHeapMemory : public RefBase {
public:
CameraHeapMemory(int fd, size_t buf_size, uint_t num_buffers = 1) :
@@ -563,7 +348,7 @@
i * mBufSize,
mBufSize);
- handle.release = __put_memory;
+ handle.release = sPutMemory;
}
virtual ~CameraHeapMemory()
@@ -579,199 +364,98 @@
camera_memory_t handle;
};
- static camera_memory_t* __get_memory(int fd, size_t buf_size, uint_t num_bufs,
- void *user __attribute__((unused)))
- {
- CameraHeapMemory *mem;
- if (fd < 0)
- mem = new CameraHeapMemory(buf_size, num_bufs);
- else
- mem = new CameraHeapMemory(fd, buf_size, num_bufs);
- mem->incStrong(mem);
- return &mem->handle;
- }
+ static camera_memory_t* sGetMemory(int fd, size_t buf_size, uint_t num_bufs,
+ void *user __attribute__((unused)));
- static void __put_memory(camera_memory_t *data)
- {
- if (!data)
- return;
+ static void sPutMemory(camera_memory_t *data);
- CameraHeapMemory *mem = static_cast<CameraHeapMemory *>(data->handle);
- mem->decStrong(mem);
- }
+ static ANativeWindow *sToAnw(void *user);
- static ANativeWindow *__to_anw(void *user)
- {
- CameraHardwareInterface *__this =
- reinterpret_cast<CameraHardwareInterface *>(user);
- return __this->mPreviewWindow.get();
- }
-#define anw(n) __to_anw(((struct camera_preview_window *)(n))->user)
-#define hwi(n) reinterpret_cast<CameraHardwareInterface *>(\
- ((struct camera_preview_window *)(n))->user)
+ static int sDequeueBuffer(struct preview_stream_ops* w,
+ buffer_handle_t** buffer, int *stride);
- static int __dequeue_buffer(struct preview_stream_ops* w,
- buffer_handle_t** buffer, int *stride)
- {
- int rc;
- ANativeWindow *a = anw(w);
- ANativeWindowBuffer* anb;
- rc = native_window_dequeue_buffer_and_wait(a, &anb);
- if (!rc) {
- *buffer = &anb->handle;
- *stride = anb->stride;
- }
- return rc;
- }
+ static int sLockBuffer(struct preview_stream_ops* w,
+ buffer_handle_t* /*buffer*/);
-#ifndef container_of
-#define container_of(ptr, type, member) ({ \
- const __typeof__(((type *) 0)->member) *__mptr = (ptr); \
- (type *) ((char *) __mptr - (char *)(&((type *)0)->member)); })
-#endif
+ static int sEnqueueBuffer(struct preview_stream_ops* w,
+ buffer_handle_t* buffer);
- static int __lock_buffer(struct preview_stream_ops* w,
- buffer_handle_t* /*buffer*/)
- {
- ANativeWindow *a = anw(w);
- (void)a;
- return 0;
- }
+ static int sCancelBuffer(struct preview_stream_ops* w,
+ buffer_handle_t* buffer);
- static int __enqueue_buffer(struct preview_stream_ops* w,
- buffer_handle_t* buffer)
- {
- ANativeWindow *a = anw(w);
- return a->queueBuffer(a,
- container_of(buffer, ANativeWindowBuffer, handle), -1);
- }
+ static int sSetBufferCount(struct preview_stream_ops* w, int count);
- static int __cancel_buffer(struct preview_stream_ops* w,
- buffer_handle_t* buffer)
- {
- ANativeWindow *a = anw(w);
- return a->cancelBuffer(a,
- container_of(buffer, ANativeWindowBuffer, handle), -1);
- }
+ static int sSetBuffersGeometry(struct preview_stream_ops* w,
+ int width, int height, int format);
- static int __set_buffer_count(struct preview_stream_ops* w, int count)
- {
- ANativeWindow *a = anw(w);
+ static int sSetCrop(struct preview_stream_ops *w,
+ int left, int top, int right, int bottom);
- if (a != nullptr) {
- // Workaround for b/27039775
- // Previously, setting the buffer count would reset the buffer
- // queue's flag that allows for all buffers to be dequeued on the
- // producer side, instead of just the producer's declared max count,
- // if no filled buffers have yet been queued by the producer. This
- // reset no longer happens, but some HALs depend on this behavior,
- // so it needs to be maintained for HAL backwards compatibility.
- // Simulate the prior behavior by disconnecting/reconnecting to the
- // window and setting the values again. This has the drawback of
- // actually causing memory reallocation, which may not have happened
- // in the past.
- CameraHardwareInterface *hw = hwi(w);
- native_window_api_disconnect(a, NATIVE_WINDOW_API_CAMERA);
- native_window_api_connect(a, NATIVE_WINDOW_API_CAMERA);
- if (hw->mPreviewScalingMode != NOT_SET) {
- native_window_set_scaling_mode(a, hw->mPreviewScalingMode);
- }
- if (hw->mPreviewTransform != NOT_SET) {
- native_window_set_buffers_transform(a, hw->mPreviewTransform);
- }
- if (hw->mPreviewWidth != NOT_SET) {
- native_window_set_buffers_dimensions(a,
- hw->mPreviewWidth, hw->mPreviewHeight);
- native_window_set_buffers_format(a, hw->mPreviewFormat);
- }
- if (hw->mPreviewUsage != 0) {
- native_window_set_usage(a, hw->mPreviewUsage);
- }
- if (hw->mPreviewSwapInterval != NOT_SET) {
- a->setSwapInterval(a, hw->mPreviewSwapInterval);
- }
- if (hw->mPreviewCrop.left != NOT_SET) {
- native_window_set_crop(a, &(hw->mPreviewCrop));
- }
- }
+ static int sSetTimestamp(struct preview_stream_ops *w,
+ int64_t timestamp);
- return native_window_set_buffer_count(a, count);
- }
+ static int sSetUsage(struct preview_stream_ops* w, int usage);
- static int __set_buffers_geometry(struct preview_stream_ops* w,
- int width, int height, int format)
- {
- int rc;
- ANativeWindow *a = anw(w);
- CameraHardwareInterface *hw = hwi(w);
- hw->mPreviewWidth = width;
- hw->mPreviewHeight = height;
- hw->mPreviewFormat = format;
- rc = native_window_set_buffers_dimensions(a, width, height);
- if (!rc) {
- rc = native_window_set_buffers_format(a, format);
- }
- return rc;
- }
+ static int sSetSwapInterval(struct preview_stream_ops *w, int interval);
- static int __set_crop(struct preview_stream_ops *w,
- int left, int top, int right, int bottom)
- {
- ANativeWindow *a = anw(w);
- CameraHardwareInterface *hw = hwi(w);
- hw->mPreviewCrop.left = left;
- hw->mPreviewCrop.top = top;
- hw->mPreviewCrop.right = right;
- hw->mPreviewCrop.bottom = bottom;
- return native_window_set_crop(a, &(hw->mPreviewCrop));
- }
-
- static int __set_timestamp(struct preview_stream_ops *w,
- int64_t timestamp) {
- ANativeWindow *a = anw(w);
- return native_window_set_buffers_timestamp(a, timestamp);
- }
-
- static int __set_usage(struct preview_stream_ops* w, int usage)
- {
- ANativeWindow *a = anw(w);
- CameraHardwareInterface *hw = hwi(w);
- hw->mPreviewUsage = usage;
- return native_window_set_usage(a, usage);
- }
-
- static int __set_swap_interval(struct preview_stream_ops *w, int interval)
- {
- ANativeWindow *a = anw(w);
- CameraHardwareInterface *hw = hwi(w);
- hw->mPreviewSwapInterval = interval;
- return a->setSwapInterval(a, interval);
- }
-
- static int __get_min_undequeued_buffer_count(
+ static int sGetMinUndequeuedBufferCount(
const struct preview_stream_ops *w,
- int *count)
- {
- ANativeWindow *a = anw(w);
- return a->query(a, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, count);
- }
+ int *count);
- void initHalPreviewWindow()
- {
- mHalPreviewWindow.nw.cancel_buffer = __cancel_buffer;
- mHalPreviewWindow.nw.lock_buffer = __lock_buffer;
- mHalPreviewWindow.nw.dequeue_buffer = __dequeue_buffer;
- mHalPreviewWindow.nw.enqueue_buffer = __enqueue_buffer;
- mHalPreviewWindow.nw.set_buffer_count = __set_buffer_count;
- mHalPreviewWindow.nw.set_buffers_geometry = __set_buffers_geometry;
- mHalPreviewWindow.nw.set_crop = __set_crop;
- mHalPreviewWindow.nw.set_timestamp = __set_timestamp;
- mHalPreviewWindow.nw.set_usage = __set_usage;
- mHalPreviewWindow.nw.set_swap_interval = __set_swap_interval;
+ void initHalPreviewWindow();
- mHalPreviewWindow.nw.get_min_undequeued_buffer_count =
- __get_min_undequeued_buffer_count;
- }
+ std::pair<bool, uint64_t> getBufferId(ANativeWindowBuffer* anb);
+ void cleanupCirculatingBuffers();
+
+ /**
+ * Implementation of android::hardware::camera::device::V1_0::ICameraDeviceCallback
+ */
+ hardware::Return<void> notifyCallback(
+ hardware::camera::device::V1_0::NotifyCallbackMsg msgType,
+ int32_t ext1, int32_t ext2) override;
+ hardware::Return<uint32_t> registerMemory(
+ const hardware::hidl_handle& descriptor,
+ uint32_t bufferSize, uint32_t bufferCount) override;
+ hardware::Return<void> unregisterMemory(uint32_t memId) override;
+ hardware::Return<void> dataCallback(
+ hardware::camera::device::V1_0::DataCallbackMsg msgType,
+ uint32_t data, uint32_t bufferIndex,
+ const hardware::camera::device::V1_0::CameraFrameMetadata& metadata) override;
+ hardware::Return<void> dataCallbackTimestamp(
+ hardware::camera::device::V1_0::DataCallbackMsg msgType,
+ uint32_t data, uint32_t bufferIndex, int64_t timestamp) override;
+ hardware::Return<void> handleCallbackTimestamp(
+ hardware::camera::device::V1_0::DataCallbackMsg msgType,
+ const hardware::hidl_handle& frameData, uint32_t data,
+ uint32_t bufferIndex, int64_t timestamp) override;
+ hardware::Return<void> handleCallbackTimestampBatch(
+ hardware::camera::device::V1_0::DataCallbackMsg msgType,
+ const hardware::hidl_vec<
+ hardware::camera::device::V1_0::HandleTimestampMessage>&) override;
+
+ /**
+ * Implementation of android::hardware::camera::device::V1_0::ICameraDevicePreviewCallback
+ */
+ hardware::Return<void> dequeueBuffer(dequeueBuffer_cb _hidl_cb) override;
+ hardware::Return<hardware::camera::common::V1_0::Status>
+ enqueueBuffer(uint64_t bufferId) override;
+ hardware::Return<hardware::camera::common::V1_0::Status>
+ cancelBuffer(uint64_t bufferId) override;
+ hardware::Return<hardware::camera::common::V1_0::Status>
+ setBufferCount(uint32_t count) override;
+ hardware::Return<hardware::camera::common::V1_0::Status>
+ setBuffersGeometry(uint32_t w, uint32_t h,
+ hardware::graphics::common::V1_0::PixelFormat format) override;
+ hardware::Return<hardware::camera::common::V1_0::Status>
+ setCrop(int32_t left, int32_t top, int32_t right, int32_t bottom) override;
+ hardware::Return<hardware::camera::common::V1_0::Status>
+ setUsage(hardware::graphics::common::V1_0::BufferUsage usage) override;
+ hardware::Return<hardware::camera::common::V1_0::Status>
+ setSwapInterval(int32_t interval) override;
+ hardware::Return<void> getMinUndequeuedBufferCount(
+ getMinUndequeuedBufferCount_cb _hidl_cb) override;
+ hardware::Return<hardware::camera::common::V1_0::Status>
+ setTimestamp(int64_t timestamp) override;
sp<ANativeWindow> mPreviewWindow;
@@ -782,9 +466,10 @@
struct camera_preview_window mHalPreviewWindow;
- notify_callback mNotifyCb;
- data_callback mDataCb;
- data_callback_timestamp mDataCbTimestamp;
+ notify_callback mNotifyCb;
+ data_callback mDataCb;
+ data_callback_timestamp mDataCbTimestamp;
+ data_callback_timestamp_batch mDataCbTimestampBatch;
void *mCbUser;
// Cached values for preview stream parameters
@@ -797,6 +482,48 @@
int mPreviewUsage;
int mPreviewSwapInterval;
android_native_rect_t mPreviewCrop;
+
+ struct BufferHasher {
+ size_t operator()(const buffer_handle_t& buf) const {
+ if (buf == nullptr)
+ return 0;
+
+ size_t result = 1;
+ result = 31 * result + buf->numFds;
+ result = 31 * result + buf->numInts;
+ int length = buf->numFds + buf->numInts;
+ for (int i = 0; i < length; i++) {
+ result = 31 * result + buf->data[i];
+ }
+ return result;
+ }
+ };
+
+ struct BufferComparator {
+ bool operator()(const buffer_handle_t& buf1, const buffer_handle_t& buf2) const {
+ if (buf1->numFds == buf2->numFds && buf1->numInts == buf2->numInts) {
+ int length = buf1->numFds + buf1->numInts;
+ for (int i = 0; i < length; i++) {
+ if (buf1->data[i] != buf2->data[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+ return false;
+ }
+ };
+
+ std::mutex mBufferIdMapLock; // protecting mBufferIdMap and mNextBufferId
+ typedef std::unordered_map<const buffer_handle_t, uint64_t,
+ BufferHasher, BufferComparator> BufferIdMap;
+ // stream ID -> per stream buffer ID map
+ BufferIdMap mBufferIdMap;
+ std::unordered_map<uint64_t, ANativeWindowBuffer*> mReversedBufMap;
+ uint64_t mNextBufferId = 1;
+ static const uint64_t BUFFER_ID_NO_BUFFER = 0;
+
+ std::unordered_map<int, camera_memory_t*> mHidlMemPoolMap;
};
}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3BufferManager.cpp b/services/camera/libcameraservice/device3/Camera3BufferManager.cpp
index 1f01144..8c8b97a 100644
--- a/services/camera/libcameraservice/device3/Camera3BufferManager.cpp
+++ b/services/camera/libcameraservice/device3/Camera3BufferManager.cpp
@@ -29,15 +29,7 @@
namespace camera3 {
-Camera3BufferManager::Camera3BufferManager(const sp<IGraphicBufferAlloc>& allocator) :
- mAllocator(allocator) {
- if (allocator == NULL) {
- sp<ISurfaceComposer> composer(ComposerService::getComposerService());
- mAllocator = composer->createGraphicBufferAlloc();
- if (mAllocator == NULL) {
- ALOGE("createGraphicBufferAlloc failed");
- }
- }
+Camera3BufferManager::Camera3BufferManager() {
}
Camera3BufferManager::~Camera3BufferManager() {
@@ -78,10 +70,6 @@
}
Mutex::Autolock l(mLock);
- if (mAllocator == NULL) {
- ALOGE("%s: allocator is NULL, buffer manager is bad state.", __FUNCTION__);
- return INVALID_OPERATION;
- }
// Check if this stream was registered with different stream set ID, if so, error out.
for (size_t i = 0; i < mStreamSetMap.size(); i++) {
@@ -131,10 +119,6 @@
Mutex::Autolock l(mLock);
ALOGV("%s: unregister stream %d with stream set %d", __FUNCTION__,
streamId, streamSetId);
- if (mAllocator == NULL) {
- ALOGE("%s: allocator is NULL, buffer manager is bad state.", __FUNCTION__);
- return INVALID_OPERATION;
- }
if (!checkIfStreamRegisteredLocked(streamId, streamSetId)){
ALOGE("%s: stream %d with set id %d wasn't properly registered to this buffer manager!",
@@ -144,11 +128,9 @@
// De-list all the buffers associated with this stream first.
StreamSet& currentSet = mStreamSetMap.editValueFor(streamSetId);
- BufferList& freeBufs = currentSet.freeBuffers;
BufferCountMap& handOutBufferCounts = currentSet.handoutBufferCountMap;
BufferCountMap& attachedBufferCounts = currentSet.attachedBufferCountMap;
InfoMap& infoMap = currentSet.streamInfoMap;
- removeBuffersFromBufferListLocked(freeBufs, streamId);
handOutBufferCounts.removeItem(streamId);
attachedBufferCounts.removeItem(streamId);
@@ -167,13 +149,93 @@
currentSet.allocatedBufferWaterMark = 0;
// Remove this stream set if all its streams have been removed.
- if (freeBufs.size() == 0 && handOutBufferCounts.size() == 0 && infoMap.size() == 0) {
+ if (handOutBufferCounts.size() == 0 && infoMap.size() == 0) {
mStreamSetMap.removeItem(streamSetId);
}
return OK;
}
+void Camera3BufferManager::notifyBufferRemoved(int streamId, int streamSetId) {
+ Mutex::Autolock l(mLock);
+ StreamSet &streamSet = mStreamSetMap.editValueFor(streamSetId);
+ size_t& attachedBufferCount =
+ streamSet.attachedBufferCountMap.editValueFor(streamId);
+ attachedBufferCount--;
+}
+
+status_t Camera3BufferManager::checkAndFreeBufferOnOtherStreamsLocked(
+ int streamId, int streamSetId) {
+ StreamId firstOtherStreamId = CAMERA3_STREAM_ID_INVALID;
+ StreamSet &streamSet = mStreamSetMap.editValueFor(streamSetId);
+ if (streamSet.streamInfoMap.size() == 1) {
+ ALOGV("StreamSet %d has no other stream available to free", streamSetId);
+ return OK;
+ }
+
+ bool freeBufferIsAttached = false;
+ for (size_t i = 0; i < streamSet.streamInfoMap.size(); i++) {
+ firstOtherStreamId = streamSet.streamInfoMap[i].streamId;
+ if (firstOtherStreamId != streamId) {
+
+ size_t otherBufferCount =
+ streamSet.handoutBufferCountMap.valueFor(firstOtherStreamId);
+ size_t otherAttachedBufferCount =
+ streamSet.attachedBufferCountMap.valueFor(firstOtherStreamId);
+ if (otherAttachedBufferCount > otherBufferCount) {
+ freeBufferIsAttached = true;
+ break;
+ }
+ }
+ firstOtherStreamId = CAMERA3_STREAM_ID_INVALID;
+ }
+ if (firstOtherStreamId == CAMERA3_STREAM_ID_INVALID || !freeBufferIsAttached) {
+ ALOGV("StreamSet %d has no buffer available to free", streamSetId);
+ return OK;
+ }
+
+
+ // This will drop the reference to one free buffer, which will effectively free one
+ // buffer (from the free buffer list) for the inactive streams.
+ size_t totalAllocatedBufferCount = 0;
+ for (size_t i = 0; i < streamSet.attachedBufferCountMap.size(); i++) {
+ totalAllocatedBufferCount += streamSet.attachedBufferCountMap[i];
+ }
+ if (totalAllocatedBufferCount > streamSet.allocatedBufferWaterMark) {
+ ALOGV("Stream %d: Freeing buffer: detach", firstOtherStreamId);
+ sp<Camera3OutputStream> stream =
+ mStreamMap.valueFor(firstOtherStreamId).promote();
+ if (stream == nullptr) {
+ ALOGE("%s: unable to promote stream %d to detach buffer", __FUNCTION__,
+ firstOtherStreamId);
+ return INVALID_OPERATION;
+ }
+
+ // Detach and then drop the buffer.
+ //
+ // Need to unlock because the stream may also be calling
+ // into the buffer manager in parallel to signal buffer
+ // release, or acquire a new buffer.
+ bool bufferFreed = false;
+ {
+ mLock.unlock();
+ sp<GraphicBuffer> buffer;
+ stream->detachBuffer(&buffer, /*fenceFd*/ nullptr);
+ mLock.lock();
+ if (buffer.get() != nullptr) {
+ bufferFreed = true;
+ }
+ }
+ if (bufferFreed) {
+ size_t& otherAttachedBufferCount =
+ streamSet.attachedBufferCountMap.editValueFor(firstOtherStreamId);
+ otherAttachedBufferCount--;
+ }
+ }
+
+ return OK;
+}
+
status_t Camera3BufferManager::getBufferForStream(int streamId, int streamSetId,
sp<GraphicBuffer>* gb, int* fenceFd) {
ATRACE_CALL();
@@ -181,10 +243,6 @@
Mutex::Autolock l(mLock);
ALOGV("%s: get buffer for stream %d with stream set %d", __FUNCTION__,
streamId, streamSetId);
- if (mAllocator == NULL) {
- ALOGE("%s: allocator is NULL, buffer manager is bad state.", __FUNCTION__);
- return INVALID_OPERATION;
- }
if (!checkIfStreamRegisteredLocked(streamId, streamSetId)) {
ALOGE("%s: stream %d is not registered with stream set %d yet!!!",
@@ -211,27 +269,25 @@
}
ALOGV("Stream %d set %d: Get buffer for stream: Allocate new", streamId, streamSetId);
- GraphicBufferEntry buffer =
- getFirstBufferFromBufferListLocked(streamSet.freeBuffers, streamId);
-
if (mGrallocVersion < HARDWARE_DEVICE_API_VERSION(1,0)) {
- // Allocate one if there is no free buffer available.
- if (buffer.graphicBuffer == nullptr) {
- const StreamInfo& info = streamSet.streamInfoMap.valueFor(streamId);
- status_t res = OK;
- buffer.fenceFd = -1;
- buffer.graphicBuffer = mAllocator->createGraphicBuffer(
- info.width, info.height, info.format, info.combinedUsage, &res);
- ALOGV("%s: allocating a new graphic buffer (%dx%d, format 0x%x) %p with handle %p",
- __FUNCTION__, info.width, info.height, info.format,
- buffer.graphicBuffer.get(), buffer.graphicBuffer->handle);
- if (res != OK) {
- ALOGE("%s: graphic buffer allocation failed: (error %d %s) ",
- __FUNCTION__, res, strerror(-res));
- return res;
- }
- ALOGV("%s: allocation done", __FUNCTION__);
+ const StreamInfo& info = streamSet.streamInfoMap.valueFor(streamId);
+ GraphicBufferEntry buffer;
+ buffer.fenceFd = -1;
+ buffer.graphicBuffer = new GraphicBuffer(
+ info.width, info.height, PixelFormat(info.format), info.combinedUsage,
+ std::string("Camera3BufferManager pid [") +
+ std::to_string(getpid()) + "]");
+ status_t res = buffer.graphicBuffer->initCheck();
+
+ ALOGV("%s: allocating a new graphic buffer (%dx%d, format 0x%x) %p with handle %p",
+ __FUNCTION__, info.width, info.height, info.format,
+ buffer.graphicBuffer.get(), buffer.graphicBuffer->handle);
+ if (res < 0) {
+ ALOGE("%s: graphic buffer allocation failed: (error %d %s) ",
+ __FUNCTION__, res, strerror(-res));
+ return res;
}
+ ALOGV("%s: allocation done", __FUNCTION__);
// Increase the hand-out and attached buffer counts for tracking purposes.
bufferCount++;
@@ -252,69 +308,15 @@
// in returnBufferForStream() if we want to free buffer more quickly.
// TODO: probably should find out all the inactive stream IDs, and free the firstly found
// buffers for them.
- StreamId firstOtherStreamId = CAMERA3_STREAM_ID_INVALID;
- if (streamSet.streamInfoMap.size() > 1) {
- bool freeBufferIsAttached = false;
- for (size_t i = 0; i < streamSet.streamInfoMap.size(); i++) {
- firstOtherStreamId = streamSet.streamInfoMap[i].streamId;
- if (firstOtherStreamId != streamId) {
-
- size_t otherBufferCount =
- streamSet.handoutBufferCountMap.valueFor(firstOtherStreamId);
- size_t otherAttachedBufferCount =
- streamSet.attachedBufferCountMap.valueFor(firstOtherStreamId);
- if (otherAttachedBufferCount > otherBufferCount) {
- freeBufferIsAttached = true;
- break;
- }
- if (hasBufferForStreamLocked(streamSet.freeBuffers, firstOtherStreamId)) {
- freeBufferIsAttached = false;
- break;
- }
- }
- firstOtherStreamId = CAMERA3_STREAM_ID_INVALID;
- }
- if (firstOtherStreamId == CAMERA3_STREAM_ID_INVALID) {
- return OK;
- }
-
- // This will drop the reference to one free buffer, which will effectively free one
- // buffer (from the free buffer list) for the inactive streams.
- size_t totalAllocatedBufferCount = streamSet.freeBuffers.size();
- for (size_t i = 0; i < streamSet.attachedBufferCountMap.size(); i++) {
- totalAllocatedBufferCount += streamSet.attachedBufferCountMap[i];
- }
- if (totalAllocatedBufferCount > streamSet.allocatedBufferWaterMark) {
- ALOGV("%s: free a buffer from stream %d", __FUNCTION__, firstOtherStreamId);
- if (freeBufferIsAttached) {
- ALOGV("Stream %d: Freeing buffer: detach", firstOtherStreamId);
- sp<Camera3OutputStream> stream =
- mStreamMap.valueFor(firstOtherStreamId).promote();
- if (stream == nullptr) {
- ALOGE("%s: unable to promote stream %d to detach buffer", __FUNCTION__,
- firstOtherStreamId);
- return INVALID_OPERATION;
- }
-
- // Detach and then drop the buffer.
- //
- // Need to unlock because the stream may also be calling
- // into the buffer manager in parallel to signal buffer
- // release, or acquire a new buffer.
- {
- mLock.unlock();
- sp<GraphicBuffer> buffer;
- stream->detachBuffer(&buffer, /*fenceFd*/ nullptr);
- mLock.lock();
- }
- size_t& otherAttachedBufferCount =
- streamSet.attachedBufferCountMap.editValueFor(firstOtherStreamId);
- otherAttachedBufferCount--;
- } else {
- // Droppable buffer is in the free buffer list, grab and drop
- getFirstBufferFromBufferListLocked(streamSet.freeBuffers, firstOtherStreamId);
- }
- }
+ res = checkAndFreeBufferOnOtherStreamsLocked(streamId, streamSetId);
+ if (res != OK) {
+ return res;
+ }
+ // Since we just allocated one new buffer above, try to free one more buffer from other streams
+ // to prevent total buffer count from growing
+ res = checkAndFreeBufferOnOtherStreamsLocked(streamId, streamSetId);
+ if (res != OK) {
+ return res;
}
} else {
// TODO: implement this.
@@ -324,16 +326,19 @@
return OK;
}
-status_t Camera3BufferManager::onBufferReleased(int streamId, int streamSetId) {
+status_t Camera3BufferManager::onBufferReleased(
+ int streamId, int streamSetId, bool* shouldFreeBuffer) {
ATRACE_CALL();
- Mutex::Autolock l(mLock);
- ALOGV("Stream %d set %d: Buffer released", streamId, streamSetId);
- if (mAllocator == NULL) {
- ALOGE("%s: allocator is NULL, buffer manager is bad state.", __FUNCTION__);
- return INVALID_OPERATION;
+ if (shouldFreeBuffer == nullptr) {
+ ALOGE("%s: shouldFreeBuffer is null", __FUNCTION__);
+ return BAD_VALUE;
}
+ Mutex::Autolock l(mLock);
+ ALOGV("Stream %d set %d: Buffer released", streamId, streamSetId);
+ *shouldFreeBuffer = false;
+
if (!checkIfStreamRegisteredLocked(streamId, streamSetId)){
ALOGV("%s: signaling buffer release for an already unregistered stream "
"(stream %d with set id %d)", __FUNCTION__, streamId, streamSetId);
@@ -347,6 +352,36 @@
bufferCount--;
ALOGV("%s: Stream %d set %d: Buffer count now %zu", __FUNCTION__, streamId, streamSetId,
bufferCount);
+
+ size_t totalAllocatedBufferCount = 0;
+ size_t totalHandOutBufferCount = 0;
+ for (size_t i = 0; i < streamSet.attachedBufferCountMap.size(); i++) {
+ totalAllocatedBufferCount += streamSet.attachedBufferCountMap[i];
+ totalHandOutBufferCount += streamSet.handoutBufferCountMap[i];
+ }
+
+ size_t newWaterMark = totalHandOutBufferCount + BUFFER_WATERMARK_DEC_THRESHOLD;
+ if (totalAllocatedBufferCount > newWaterMark &&
+ streamSet.allocatedBufferWaterMark > newWaterMark) {
+ // BufferManager got more than enough buffers, so decrease watermark
+ // to trigger more buffers free operation.
+ streamSet.allocatedBufferWaterMark = newWaterMark;
+ ALOGV("%s: Stream %d set %d: watermark--; now %zu",
+ __FUNCTION__, streamId, streamSetId, streamSet.allocatedBufferWaterMark);
+ }
+
+ size_t attachedBufferCount = streamSet.attachedBufferCountMap.valueFor(streamId);
+ if (attachedBufferCount <= bufferCount) {
+ ALOGV("%s: stream %d has no buffer available to free.", __FUNCTION__, streamId);
+ }
+
+ bool freeBufferIsAttached = (attachedBufferCount > bufferCount);
+ if (freeBufferIsAttached &&
+ totalAllocatedBufferCount > streamSet.allocatedBufferWaterMark &&
+ attachedBufferCount > bufferCount + BUFFER_FREE_THRESHOLD) {
+ ALOGV("%s: free a buffer from stream %d", __FUNCTION__, streamId);
+ *shouldFreeBuffer = true;
+ }
} else {
// TODO: implement gralloc V1 support
return BAD_VALUE;
@@ -355,44 +390,42 @@
return OK;
}
-status_t Camera3BufferManager::returnBufferForStream(int streamId,
- int streamSetId, const sp<GraphicBuffer>& buffer, int fenceFd) {
+status_t Camera3BufferManager::onBuffersRemoved(int streamId, int streamSetId, size_t count) {
ATRACE_CALL();
Mutex::Autolock l(mLock);
- ALOGV_IF(buffer != 0, "%s: return buffer (%p) with handle (%p) for stream %d and stream set %d",
- __FUNCTION__, buffer.get(), buffer->handle, streamId, streamSetId);
- if (mAllocator == NULL) {
- ALOGE("%s: allocator is NULL, buffer manager is bad state.", __FUNCTION__);
- return INVALID_OPERATION;
- }
+
+ ALOGV("Stream %d set %d: Buffer removed", streamId, streamSetId);
if (!checkIfStreamRegisteredLocked(streamId, streamSetId)){
- ALOGV("%s: returning buffer for an already unregistered stream (stream %d with set id %d),"
- "buffer will be dropped right away!", __FUNCTION__, streamId, streamSetId);
+ ALOGV("%s: signaling buffer removal for an already unregistered stream "
+ "(stream %d with set id %d)", __FUNCTION__, streamId, streamSetId);
return OK;
}
if (mGrallocVersion < HARDWARE_DEVICE_API_VERSION(1,0)) {
- // Add to the freeBuffer list.
StreamSet& streamSet = mStreamSetMap.editValueFor(streamSetId);
- if (buffer != 0) {
- BufferEntry entry;
- entry.add(streamId, GraphicBufferEntry(buffer, fenceFd));
- status_t res = addBufferToBufferListLocked(streamSet.freeBuffers, entry);
- if (res != OK) {
- ALOGE("%s: add buffer to free buffer list failed", __FUNCTION__);
- return res;
- }
+ BufferCountMap& handOutBufferCounts = streamSet.handoutBufferCountMap;
+ size_t& totalHandoutCount = handOutBufferCounts.editValueFor(streamId);
+ BufferCountMap& attachedBufferCounts = streamSet.attachedBufferCountMap;
+ size_t& totalAttachedCount = attachedBufferCounts.editValueFor(streamId);
+
+ if (count > totalHandoutCount) {
+ ALOGE("%s: Removed buffer count %zu greater than current handout count %zu",
+ __FUNCTION__, count, totalHandoutCount);
+ return BAD_VALUE;
+ }
+ if (count > totalAttachedCount) {
+ ALOGE("%s: Removed buffer count %zu greater than current attached count %zu",
+ __FUNCTION__, count, totalAttachedCount);
+ return BAD_VALUE;
}
- // Update the handed out and attached buffer count for this buffer.
- BufferCountMap& handOutBufferCounts = streamSet.handoutBufferCountMap;
- size_t& bufferCount = handOutBufferCounts.editValueFor(streamId);
- bufferCount--;
- size_t& attachedBufferCount = streamSet.attachedBufferCountMap.editValueFor(streamId);
- attachedBufferCount--;
+ totalHandoutCount -= count;
+ totalAttachedCount -= count;
+ ALOGV("%s: Stream %d set %d: Buffer count now %zu, attached buffer count now %zu",
+ __FUNCTION__, streamId, streamSetId, totalHandoutCount, totalAttachedCount);
} else {
- // TODO: implement this.
+ // TODO: implement gralloc V1 support
return BAD_VALUE;
}
@@ -428,17 +461,6 @@
lines.appendFormat(" stream id: %d, attached buffer count: %zu.\n",
streamId, bufferCount);
}
-
- lines.appendFormat(" Free buffer count: %zu\n",
- mStreamSetMap[i].freeBuffers.size());
- for (auto& bufEntry : mStreamSetMap[i].freeBuffers) {
- for (size_t m = 0; m < bufEntry.size(); m++) {
- const sp<GraphicBuffer>& buffer = bufEntry.valueAt(m).graphicBuffer;
- int streamId = bufEntry.keyAt(m);
- lines.appendFormat(" stream id: %d, buffer: %p, handle: %p.\n",
- streamId, buffer.get(), buffer->handle);
- }
- }
}
write(fd, lines.string(), lines.size());
}
@@ -468,67 +490,5 @@
return true;
}
-status_t Camera3BufferManager::addBufferToBufferListLocked(BufferList& bufList,
- const BufferEntry& buffer) {
- // TODO: need add some sanity check here.
- bufList.push_back(buffer);
-
- return OK;
-}
-
-status_t Camera3BufferManager::removeBuffersFromBufferListLocked(BufferList& bufferList,
- int streamId) {
- BufferList::iterator i = bufferList.begin();
- while (i != bufferList.end()) {
- ssize_t idx = i->indexOfKey(streamId);
- if (idx != NAME_NOT_FOUND) {
- ALOGV("%s: Remove a buffer for stream %d, free buffer total count: %zu",
- __FUNCTION__, streamId, bufferList.size());
- i->removeItem(streamId);
- if (i->isEmpty()) {
- i = bufferList.erase(i);
- }
- } else {
- i++;
- }
- }
-
- return OK;
-}
-
-bool Camera3BufferManager::hasBufferForStreamLocked(BufferList& buffers, int streamId) {
- BufferList::iterator i = buffers.begin();
- while (i != buffers.end()) {
- ssize_t idx = i->indexOfKey(streamId);
- if (idx != NAME_NOT_FOUND) {
- return true;
- }
- i++;
- }
-
- return false;
-}
-
-Camera3BufferManager::GraphicBufferEntry Camera3BufferManager::getFirstBufferFromBufferListLocked(
- BufferList& buffers, int streamId) {
- // Try to get the first buffer from the free buffer list if there is one.
- GraphicBufferEntry entry;
- BufferList::iterator i = buffers.begin();
- while (i != buffers.end()) {
- ssize_t idx = i->indexOfKey(streamId);
- if (idx != NAME_NOT_FOUND) {
- entry = GraphicBufferEntry(i->valueAt(idx));
- i = buffers.erase(i);
- break;
- } else {
- i++;
- }
- }
-
- ALOGV_IF(entry.graphicBuffer == 0, "%s: Unable to find free buffer for stream %d",
- __FUNCTION__, streamId);
- return entry;
-}
-
} // namespace camera3
} // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3BufferManager.h b/services/camera/libcameraservice/device3/Camera3BufferManager.h
index b5b86a3..025062e 100644
--- a/services/camera/libcameraservice/device3/Camera3BufferManager.h
+++ b/services/camera/libcameraservice/device3/Camera3BufferManager.h
@@ -44,7 +44,7 @@
*/
class Camera3BufferManager: public virtual RefBase {
public:
- explicit Camera3BufferManager(const sp<IGraphicBufferAlloc>& allocator = NULL);
+ explicit Camera3BufferManager();
virtual ~Camera3BufferManager();
@@ -137,41 +137,41 @@
* buffer has been reused. The manager will call detachBuffer on the stream
* if it needs the released buffer otherwise.
*
+ * When shouldFreeBuffer is set to true, caller must detach and free one buffer from the
+ * buffer queue, and then call notifyBufferRemoved to update the manager.
+ *
* Return values:
*
* OK: Buffer release was processed succesfully
* BAD_VALUE: stream ID or streamSetId are invalid, or stream ID and stream set ID
* combination doesn't match what was registered, or this stream wasn't registered
- * to this buffer manager before.
+ * to this buffer manager before, or shouldFreeBuffer is null.
*/
- status_t onBufferReleased(int streamId, int streamSetId);
+ status_t onBufferReleased(int streamId, int streamSetId, /*out*/bool* shouldFreeBuffer);
/**
- * This method returns a buffer for a stream to this buffer manager.
+ * This method notifies the manager that certain buffers have been removed from the
+ * buffer queue by detachBuffer from the consumer.
*
- * When a buffer is returned, it is treated as a free buffer and may either be reused for future
- * getBufferForStream() calls, or freed if there total number of outstanding allocated buffers
- * is too large. The latter only applies to the case where the buffer are physically shared
- * between streams in the same stream set. A physically shared buffer is the buffer that has one
- * physical back store but multiple handles. Multiple stream can access the same physical memory
- * with their own handles. Physically shared buffer can only be supported by Gralloc HAL V1.
- * See hardware/libhardware/include/hardware/gralloc1.h for more details.
+ * The notification lets the manager update its internal handout buffer count and
+ * attached buffer counts accordingly. When buffers are detached from
+ * consumer, both handout and attached counts are decremented.
*
+ * Return values:
*
- * This call takes the ownership of the returned buffer if it was allocated by this buffer
- * manager; clients should not use this buffer after this call. Attempting to access this buffer
- * after this call will have undefined behavior. Holding a reference to this buffer after this
- * call may cause memory leakage. If a BufferQueue is used to track the buffers handed out by
- * this buffer queue, it is recommended to call detachNextBuffer() from the buffer queue after
- * BufferQueueProducer onBufferReleased callback is fired, and return it to this buffer manager.
- *
- * OK: Buffer return for this stream was successful.
- * BAD_VALUE: stream ID or streamSetId are invalid, or stream ID and stream set ID combination
- * doesn't match what was registered, or this stream wasn't registered to this
- * buffer manager before.
+ * OK: Buffer removal was processed successfully
+ * BAD_VALUE: stream ID or streamSetId are invalid, or stream ID and stream set ID
+ * combination doesn't match what was registered, or this stream wasn't registered
+ * to this buffer manager before, or the removed buffer count is larger than
+ * current total handoutCount or attachedCount.
*/
- status_t returnBufferForStream(int streamId, int streamSetId, const sp<GraphicBuffer>& buffer,
- int fenceFd);
+ status_t onBuffersRemoved(int streamId, int streamSetId, size_t count);
+
+ /**
+ * This method notifies the manager that a buffer is freed from the buffer queue, usually
+ * because onBufferReleased signals the caller to free a buffer via the shouldFreeBuffer flag.
+ */
+ void notifyBufferRemoved(int streamId, int streamSetId);
/**
* Dump the buffer manager statistics.
@@ -179,6 +179,18 @@
void dump(int fd, const Vector<String16> &args) const;
private:
+ // allocatedBufferWaterMark will be decreased when:
+ // numAllocatedBuffersThisSet > numHandoutBuffersThisSet + BUFFER_WATERMARK_DEC_THRESHOLD
+ // This allows the watermark to go back down after a burst of buffer requests
+ static const int BUFFER_WATERMARK_DEC_THRESHOLD = 3;
+
+ // onBufferReleased will set shouldFreeBuffer to true when:
+ // numAllocatedBuffersThisSet > allocatedBufferWaterMark AND
+ // numAllocatedBuffersThisStream > numHandoutBuffersThisStream + BUFFER_FREE_THRESHOLD
+ // So after a burst of buffer requests and back to steady state, the buffer queue should have
+ // (BUFFER_FREE_THRESHOLD + steady state handout buffer count) buffers.
+ static const int BUFFER_FREE_THRESHOLD = 3;
+
/**
* Lock to synchronize the access to the methods of this class.
*/
@@ -186,12 +198,6 @@
static const size_t kMaxBufferCount = BufferQueueDefs::NUM_BUFFER_SLOTS;
- /**
- * mAllocator is the connection to SurfaceFlinger that is used to allocate new GraphicBuffer
- * objects.
- */
- sp<IGraphicBufferAlloc> mAllocator;
-
struct GraphicBufferEntry {
sp<GraphicBuffer> graphicBuffer;
int fenceFd;
@@ -262,11 +268,6 @@
*/
InfoMap streamInfoMap;
/**
- * The free buffer list for all the buffers belong to this set. The free buffers are
- * returned by the returnBufferForStream() call, and available for reuse.
- */
- BufferList freeBuffers;
- /**
* The count of the buffers that were handed out to the streams of this set.
*/
BufferCountMap handoutBufferCountMap;
@@ -300,37 +301,10 @@
bool checkIfStreamRegisteredLocked(int streamId, int streamSetId) const;
/**
- * Add a buffer entry to the BufferList. This method needs to be called with mLock held.
+ * Check if other streams in the stream set have an extra buffer available to be freed, and
+ * free one if so.
*/
- status_t addBufferToBufferListLocked(BufferList &bufList, const BufferEntry &buffer);
-
- /**
- * Remove all buffers from the BufferList.
- *
- * Note that this doesn't mean that the buffers are freed after this call. A buffer is freed
- * only if all other references to it are dropped.
- *
- * This method needs to be called with mLock held.
- */
- status_t removeBuffersFromBufferListLocked(BufferList &bufList, int streamId);
-
- /**
- * Get the first available buffer from the buffer list for this stream. The graphicBuffer inside
- * this entry will be NULL if there is no any GraphicBufferEntry found. After this call, the
- * GraphicBufferEntry will be removed from the BufferList if a GraphicBufferEntry is found.
- *
- * This method needs to be called with mLock held.
- *
- */
- GraphicBufferEntry getFirstBufferFromBufferListLocked(BufferList& buffers, int streamId);
-
- /**
- * Check if there is any buffer associated with this stream in the given buffer list.
- *
- * This method needs to be called with mLock held.
- *
- */
- bool inline hasBufferForStreamLocked(BufferList& buffers, int streamId);
+ status_t checkAndFreeBufferOnOtherStreamsLocked(int streamId, int streamSetId);
};
} // namespace camera3
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 495de44..e8b9b20 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -26,7 +26,7 @@
#endif
// Convenience macro for transient errors
-#define CLOGE(fmt, ...) ALOGE("Camera %d: %s: " fmt, mId, __FUNCTION__, \
+#define CLOGE(fmt, ...) ALOGE("Camera %s: %s: " fmt, mId.string(), __FUNCTION__, \
##__VA_ARGS__)
// Convenience macros for transitioning to the error state
@@ -51,18 +51,20 @@
#include "device3/Camera3Device.h"
#include "device3/Camera3OutputStream.h"
#include "device3/Camera3InputStream.h"
-#include "device3/Camera3ZslStream.h"
#include "device3/Camera3DummyStream.h"
+#include "device3/Camera3SharedOutputStream.h"
#include "CameraService.h"
using namespace android::camera3;
+using namespace android::hardware::camera;
+using namespace android::hardware::camera::device::V3_2;
namespace android {
-Camera3Device::Camera3Device(int id):
+Camera3Device::Camera3Device(const String8 &id):
mId(id),
+ mOperatingMode(NO_MODE),
mIsConstrainedHighSpeedConfiguration(false),
- mHal3Device(NULL),
mStatus(STATUS_UNINITIALIZED),
mStatusWaiters(0),
mUsePartialResult(false),
@@ -72,100 +74,101 @@
mNextReprocessResultFrameNumber(0),
mNextShutterFrameNumber(0),
mNextReprocessShutterFrameNumber(0),
- mListener(NULL)
+ mListener(NULL),
+ mVendorTagId(CAMERA_METADATA_INVALID_VENDOR_ID)
{
ATRACE_CALL();
camera3_callback_ops::notify = &sNotify;
camera3_callback_ops::process_capture_result = &sProcessCaptureResult;
- ALOGV("%s: Created device for camera %d", __FUNCTION__, id);
+ ALOGV("%s: Created device for camera %s", __FUNCTION__, mId.string());
}
Camera3Device::~Camera3Device()
{
ATRACE_CALL();
- ALOGV("%s: Tearing down for camera id %d", __FUNCTION__, mId);
+ ALOGV("%s: Tearing down for camera id %s", __FUNCTION__, mId.string());
disconnect();
}
-int Camera3Device::getId() const {
+const String8& Camera3Device::getId() const {
return mId;
}
-/**
- * CameraDeviceBase interface
- */
-
-status_t Camera3Device::initialize(CameraModule *module)
-{
+status_t Camera3Device::initialize(sp<CameraProviderManager> manager) {
ATRACE_CALL();
Mutex::Autolock il(mInterfaceLock);
Mutex::Autolock l(mLock);
- ALOGV("%s: Initializing device for camera %d", __FUNCTION__, mId);
+ ALOGV("%s: Initializing HIDL device for camera %s", __FUNCTION__, mId.string());
if (mStatus != STATUS_UNINITIALIZED) {
CLOGE("Already initialized!");
return INVALID_OPERATION;
}
+ if (manager == nullptr) return INVALID_OPERATION;
- /** Open HAL device */
-
- status_t res;
- String8 deviceName = String8::format("%d", mId);
-
- camera3_device_t *device;
-
- ATRACE_BEGIN("camera3->open");
- res = module->open(deviceName.string(),
- reinterpret_cast<hw_device_t**>(&device));
+ sp<ICameraDeviceSession> session;
+ ATRACE_BEGIN("CameraHal::openSession");
+ status_t res = manager->openSession(mId.string(), this,
+ /*out*/ &session);
ATRACE_END();
-
if (res != OK) {
- SET_ERR_L("Could not open camera: %s (%d)", strerror(-res), res);
+ SET_ERR_L("Could not open camera session: %s (%d)", strerror(-res), res);
return res;
}
- /** Cross-check device version */
- if (device->common.version < CAMERA_DEVICE_API_VERSION_3_0) {
- SET_ERR_L("Could not open camera: "
- "Camera device should be at least %x, reports %x instead",
- CAMERA_DEVICE_API_VERSION_3_0,
- device->common.version);
- device->common.close(&device->common);
- return BAD_VALUE;
- }
-
- camera_info info;
- res = module->getCameraInfo(mId, &info);
- if (res != OK) return res;
-
- if (info.device_version != device->common.version) {
- SET_ERR_L("HAL reporting mismatched camera_info version (%x)"
- " and device version (%x).",
- info.device_version, device->common.version);
- device->common.close(&device->common);
- return BAD_VALUE;
- }
-
- /** Initialize device with callback functions */
-
- ATRACE_BEGIN("camera3->initialize");
- res = device->ops->initialize(device, this);
- ATRACE_END();
-
+ res = manager->getCameraCharacteristics(mId.string(), &mDeviceInfo);
if (res != OK) {
- SET_ERR_L("Unable to initialize HAL device: %s (%d)",
- strerror(-res), res);
- device->common.close(&device->common);
- return BAD_VALUE;
+ SET_ERR_L("Could not retrive camera characteristics: %s (%d)", strerror(-res), res);
+ session->close();
+ return res;
}
+ std::shared_ptr<RequestMetadataQueue> queue;
+ auto requestQueueRet = session->getCaptureRequestMetadataQueue(
+ [&queue](const auto& descriptor) {
+ queue = std::make_shared<RequestMetadataQueue>(descriptor);
+ if (!queue->isValid() || queue->availableToWrite() <= 0) {
+ ALOGE("HAL returns empty request metadata fmq, not use it");
+ queue = nullptr;
+ // don't use the queue onwards.
+ }
+ });
+ if (!requestQueueRet.isOk()) {
+ ALOGE("Transaction error when getting request metadata fmq: %s, not use it",
+ requestQueueRet.description().c_str());
+ return DEAD_OBJECT;
+ }
+ auto resultQueueRet = session->getCaptureResultMetadataQueue(
+ [&queue = mResultMetadataQueue](const auto& descriptor) {
+ queue = std::make_unique<ResultMetadataQueue>(descriptor);
+ if (!queue->isValid() || queue->availableToWrite() <= 0) {
+ ALOGE("HAL returns empty result metadata fmq, not use it");
+ queue = nullptr;
+ // Don't use the queue onwards.
+ }
+ });
+ if (!resultQueueRet.isOk()) {
+ ALOGE("Transaction error when getting result metadata queue from camera session: %s",
+ resultQueueRet.description().c_str());
+ return DEAD_OBJECT;
+ }
+
+ mInterface = std::make_unique<HalInterface>(session, queue);
+ std::string providerType;
+ mVendorTagId = manager->getProviderTagIdLocked(mId.string());
+
+ return initializeCommonLocked();
+}
+
+status_t Camera3Device::initializeCommonLocked() {
+
/** Start up status tracker thread */
mStatusTracker = new StatusTracker(this);
- res = mStatusTracker->run(String8::format("C3Dev-%d-Status", mId).string());
+ status_t res = mStatusTracker->run(String8::format("C3Dev-%s-Status", mId.string()).string());
if (res != OK) {
SET_ERR_L("Unable to start status tracking thread: %s (%d)",
strerror(-res), res);
- device->common.close(&device->common);
+ mInterface->close();
mStatusTracker.clear();
return res;
}
@@ -176,42 +179,21 @@
/** Create buffer manager */
mBufferManager = new Camera3BufferManager();
- bool aeLockAvailable = false;
- camera_metadata_ro_entry aeLockAvailableEntry;
- res = find_camera_metadata_ro_entry(info.static_camera_characteristics,
- ANDROID_CONTROL_AE_LOCK_AVAILABLE, &aeLockAvailableEntry);
- if (res == OK && aeLockAvailableEntry.count > 0) {
- aeLockAvailable = (aeLockAvailableEntry.data.u8[0] ==
- ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE);
- }
+ mTagMonitor.initialize(mVendorTagId);
/** Start up request queue thread */
- mRequestThread = new RequestThread(this, mStatusTracker, device, aeLockAvailable);
- res = mRequestThread->run(String8::format("C3Dev-%d-ReqQueue", mId).string());
+ mRequestThread = new RequestThread(this, mStatusTracker, mInterface.get());
+ res = mRequestThread->run(String8::format("C3Dev-%s-ReqQueue", mId.string()).string());
if (res != OK) {
SET_ERR_L("Unable to start request queue thread: %s (%d)",
strerror(-res), res);
- device->common.close(&device->common);
+ mInterface->close();
mRequestThread.clear();
return res;
}
mPreparerThread = new PreparerThread();
- /** Everything is good to go */
-
- mDeviceVersion = device->common.version;
- mDeviceInfo = info.static_camera_characteristics;
- mHal3Device = device;
-
- // Determine whether we need to derive sensitivity boost values for older devices.
- // If post-RAW sensitivity boost range is listed, so should post-raw sensitivity control
- // be listed (as the default value 100)
- if (mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_4 &&
- mDeviceInfo.exists(ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST_RANGE)) {
- mDerivePostRawSensKey = true;
- }
-
internalUpdateStatusLocked(STATUS_UNCONFIGURED);
mNextStreamId = 0;
mDummyStreamId = NO_STREAM;
@@ -227,19 +209,11 @@
}
// Will the HAL be sending in early partial result metadata?
- if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
- camera_metadata_entry partialResultsCount =
- mDeviceInfo.find(ANDROID_REQUEST_PARTIAL_RESULT_COUNT);
- if (partialResultsCount.count > 0) {
- mNumPartialResults = partialResultsCount.data.i32[0];
- mUsePartialResult = (mNumPartialResults > 1);
- }
- } else {
- camera_metadata_entry partialResultsQuirk =
- mDeviceInfo.find(ANDROID_QUIRKS_USE_PARTIAL_RESULT);
- if (partialResultsQuirk.count > 0 && partialResultsQuirk.data.u8[0] == 1) {
- mUsePartialResult = true;
- }
+ camera_metadata_entry partialResultsCount =
+ mDeviceInfo.find(ANDROID_REQUEST_PARTIAL_RESULT_COUNT);
+ if (partialResultsCount.count > 0) {
+ mNumPartialResults = partialResultsCount.data.i32[0];
+ mUsePartialResult = (mNumPartialResults > 1);
}
camera_metadata_entry configs =
@@ -312,7 +286,7 @@
mStatusTracker->join();
}
- camera3_device_t *hal3Device;
+ HalInterface* interface;
{
Mutex::Autolock l(mLock);
@@ -320,20 +294,16 @@
mStatusTracker.clear();
mBufferManager.clear();
- hal3Device = mHal3Device;
+ interface = mInterface.get();
}
// Call close without internal mutex held, as the HAL close may need to
// wait on assorted callbacks,etc, to complete before it can return.
- if (hal3Device != NULL) {
- ATRACE_BEGIN("camera3->close");
- hal3Device->common.close(&hal3Device->common);
- ATRACE_END();
- }
+ interface->close();
{
Mutex::Autolock l(mLock);
- mHal3Device = NULL;
+ mInterface->clear();
internalUpdateStatusLocked(STATUS_UNINITIALIZED);
}
@@ -359,48 +329,32 @@
Camera3Device::Size Camera3Device::getMaxJpegResolution() const {
int32_t maxJpegWidth = 0, maxJpegHeight = 0;
- if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
- const int STREAM_CONFIGURATION_SIZE = 4;
- const int STREAM_FORMAT_OFFSET = 0;
- const int STREAM_WIDTH_OFFSET = 1;
- const int STREAM_HEIGHT_OFFSET = 2;
- const int STREAM_IS_INPUT_OFFSET = 3;
- camera_metadata_ro_entry_t availableStreamConfigs =
- mDeviceInfo.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
- if (availableStreamConfigs.count == 0 ||
- availableStreamConfigs.count % STREAM_CONFIGURATION_SIZE != 0) {
- return Size(0, 0);
- }
+ const int STREAM_CONFIGURATION_SIZE = 4;
+ const int STREAM_FORMAT_OFFSET = 0;
+ const int STREAM_WIDTH_OFFSET = 1;
+ const int STREAM_HEIGHT_OFFSET = 2;
+ const int STREAM_IS_INPUT_OFFSET = 3;
+ camera_metadata_ro_entry_t availableStreamConfigs =
+ mDeviceInfo.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
+ if (availableStreamConfigs.count == 0 ||
+ availableStreamConfigs.count % STREAM_CONFIGURATION_SIZE != 0) {
+ return Size(0, 0);
+ }
- // Get max jpeg size (area-wise).
- for (size_t i=0; i < availableStreamConfigs.count; i+= STREAM_CONFIGURATION_SIZE) {
- int32_t format = availableStreamConfigs.data.i32[i + STREAM_FORMAT_OFFSET];
- int32_t width = availableStreamConfigs.data.i32[i + STREAM_WIDTH_OFFSET];
- int32_t height = availableStreamConfigs.data.i32[i + STREAM_HEIGHT_OFFSET];
- int32_t isInput = availableStreamConfigs.data.i32[i + STREAM_IS_INPUT_OFFSET];
- if (isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT
- && format == HAL_PIXEL_FORMAT_BLOB &&
- (width * height > maxJpegWidth * maxJpegHeight)) {
- maxJpegWidth = width;
- maxJpegHeight = height;
- }
- }
- } else {
- camera_metadata_ro_entry availableJpegSizes =
- mDeviceInfo.find(ANDROID_SCALER_AVAILABLE_JPEG_SIZES);
- if (availableJpegSizes.count == 0 || availableJpegSizes.count % 2 != 0) {
- return Size(0, 0);
- }
-
- // Get max jpeg size (area-wise).
- for (size_t i = 0; i < availableJpegSizes.count; i += 2) {
- if ((availableJpegSizes.data.i32[i] * availableJpegSizes.data.i32[i + 1])
- > (maxJpegWidth * maxJpegHeight)) {
- maxJpegWidth = availableJpegSizes.data.i32[i];
- maxJpegHeight = availableJpegSizes.data.i32[i + 1];
- }
+ // Get max jpeg size (area-wise).
+ for (size_t i=0; i < availableStreamConfigs.count; i+= STREAM_CONFIGURATION_SIZE) {
+ int32_t format = availableStreamConfigs.data.i32[i + STREAM_FORMAT_OFFSET];
+ int32_t width = availableStreamConfigs.data.i32[i + STREAM_WIDTH_OFFSET];
+ int32_t height = availableStreamConfigs.data.i32[i + STREAM_HEIGHT_OFFSET];
+ int32_t isInput = availableStreamConfigs.data.i32[i + STREAM_IS_INPUT_OFFSET];
+ if (isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT
+ && format == HAL_PIXEL_FORMAT_BLOB &&
+ (width * height > maxJpegWidth * maxJpegHeight)) {
+ maxJpegWidth = width;
+ maxJpegHeight = height;
}
}
+
return Size(maxJpegWidth, maxJpegHeight);
}
@@ -422,37 +376,86 @@
return measured;
}
-/**
- * Map Android N dataspace definitions back to Android M definitions, for
- * use with HALv3.3 or older.
- *
- * Only map where correspondences exist, and otherwise preserve the value.
- */
-android_dataspace Camera3Device::mapToLegacyDataspace(android_dataspace dataSpace) {
- switch (dataSpace) {
- case HAL_DATASPACE_V0_SRGB_LINEAR:
- return HAL_DATASPACE_SRGB_LINEAR;
- case HAL_DATASPACE_V0_SRGB:
- return HAL_DATASPACE_SRGB;
- case HAL_DATASPACE_V0_JFIF:
- return HAL_DATASPACE_JFIF;
- case HAL_DATASPACE_V0_BT601_625:
- return HAL_DATASPACE_BT601_625;
- case HAL_DATASPACE_V0_BT601_525:
- return HAL_DATASPACE_BT601_525;
- case HAL_DATASPACE_V0_BT709:
- return HAL_DATASPACE_BT709;
- default:
- return dataSpace;
+hardware::graphics::common::V1_0::PixelFormat Camera3Device::mapToPixelFormat(
+ int frameworkFormat) {
+ return (hardware::graphics::common::V1_0::PixelFormat) frameworkFormat;
+}
+
+DataspaceFlags Camera3Device::mapToHidlDataspace(
+ android_dataspace dataSpace) {
+ return dataSpace;
+}
+
+BufferUsageFlags Camera3Device::mapToConsumerUsage(
+ uint32_t usage) {
+ return usage;
+}
+
+StreamRotation Camera3Device::mapToStreamRotation(camera3_stream_rotation_t rotation) {
+ switch (rotation) {
+ case CAMERA3_STREAM_ROTATION_0:
+ return StreamRotation::ROTATION_0;
+ case CAMERA3_STREAM_ROTATION_90:
+ return StreamRotation::ROTATION_90;
+ case CAMERA3_STREAM_ROTATION_180:
+ return StreamRotation::ROTATION_180;
+ case CAMERA3_STREAM_ROTATION_270:
+ return StreamRotation::ROTATION_270;
}
+ ALOGE("%s: Unknown stream rotation %d", __FUNCTION__, rotation);
+ return StreamRotation::ROTATION_0;
+}
+
+status_t Camera3Device::mapToStreamConfigurationMode(
+ camera3_stream_configuration_mode_t operationMode, StreamConfigurationMode *mode) {
+ if (mode == nullptr) return BAD_VALUE;
+ if (operationMode < CAMERA3_VENDOR_STREAM_CONFIGURATION_MODE_START) {
+ switch(operationMode) {
+ case CAMERA3_STREAM_CONFIGURATION_NORMAL_MODE:
+ *mode = StreamConfigurationMode::NORMAL_MODE;
+ break;
+ case CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE:
+ *mode = StreamConfigurationMode::CONSTRAINED_HIGH_SPEED_MODE;
+ break;
+ default:
+ ALOGE("%s: Unknown stream configuration mode %d", __FUNCTION__, operationMode);
+ return BAD_VALUE;
+ }
+ } else {
+ *mode = static_cast<StreamConfigurationMode>(operationMode);
+ }
+ return OK;
+}
+
+camera3_buffer_status_t Camera3Device::mapHidlBufferStatus(BufferStatus status) {
+ switch (status) {
+ case BufferStatus::OK: return CAMERA3_BUFFER_STATUS_OK;
+ case BufferStatus::ERROR: return CAMERA3_BUFFER_STATUS_ERROR;
+ }
+ return CAMERA3_BUFFER_STATUS_ERROR;
+}
+
+int Camera3Device::mapToFrameworkFormat(
+ hardware::graphics::common::V1_0::PixelFormat pixelFormat) {
+ return static_cast<uint32_t>(pixelFormat);
+}
+
+uint32_t Camera3Device::mapConsumerToFrameworkUsage(
+ BufferUsageFlags usage) {
+ return usage;
+}
+
+uint32_t Camera3Device::mapProducerToFrameworkUsage(
+ BufferUsageFlags usage) {
+ return usage;
}
ssize_t Camera3Device::getJpegBufferSize(uint32_t width, uint32_t height) const {
// Get max jpeg size (area-wise).
Size maxJpegResolution = getMaxJpegResolution();
if (maxJpegResolution.width == 0) {
- ALOGE("%s: Camera %d: Can't find valid available jpeg sizes in static metadata!",
- __FUNCTION__, mId);
+ ALOGE("%s: Camera %s: Can't find valid available jpeg sizes in static metadata!",
+ __FUNCTION__, mId.string());
return BAD_VALUE;
}
@@ -460,7 +463,8 @@
ssize_t maxJpegBufferSize = 0;
camera_metadata_ro_entry jpegBufMaxSize = mDeviceInfo.find(ANDROID_JPEG_MAX_SIZE);
if (jpegBufMaxSize.count == 0) {
- ALOGE("%s: Camera %d: Can't find maximum JPEG size in static metadata!", __FUNCTION__, mId);
+ ALOGE("%s: Camera %s: Can't find maximum JPEG size in static metadata!", __FUNCTION__,
+ mId.string());
return BAD_VALUE;
}
maxJpegBufferSize = jpegBufMaxSize.data.i32[0];
@@ -482,8 +486,8 @@
const int FLOATS_PER_POINT=4;
camera_metadata_ro_entry maxPointCount = mDeviceInfo.find(ANDROID_DEPTH_MAX_DEPTH_SAMPLES);
if (maxPointCount.count == 0) {
- ALOGE("%s: Camera %d: Can't find maximum depth point cloud size in static metadata!",
- __FUNCTION__, mId);
+ ALOGE("%s: Camera %s: Can't find maximum depth point cloud size in static metadata!",
+ __FUNCTION__, mId.string());
return BAD_VALUE;
}
ssize_t maxBytesForPointCloud = sizeof(android_depth_points) +
@@ -500,8 +504,8 @@
mDeviceInfo.find(ANDROID_SENSOR_OPAQUE_RAW_SIZE);
size_t count = rawOpaqueSizes.count;
if (count == 0 || (count % PER_CONFIGURATION_SIZE)) {
- ALOGE("%s: Camera %d: bad opaque RAW size static metadata length(%zu)!",
- __FUNCTION__, mId, count);
+ ALOGE("%s: Camera %s: bad opaque RAW size static metadata length(%zu)!",
+ __FUNCTION__, mId.string(), count);
return BAD_VALUE;
}
@@ -512,8 +516,8 @@
}
}
- ALOGE("%s: Camera %d: cannot find size for %dx%d opaque RAW image!",
- __FUNCTION__, mId, width, height);
+ ALOGE("%s: Camera %s: cannot find size for %dx%d opaque RAW image!",
+ __FUNCTION__, mId.string(), width, height);
return BAD_VALUE;
}
@@ -527,11 +531,11 @@
bool gotLock = tryLockSpinRightRound(mLock);
ALOGW_IF(!gotInterfaceLock,
- "Camera %d: %s: Unable to lock interface lock, proceeding anyway",
- mId, __FUNCTION__);
+ "Camera %s: %s: Unable to lock interface lock, proceeding anyway",
+ mId.string(), __FUNCTION__);
ALOGW_IF(!gotLock,
- "Camera %d: %s: Unable to lock main lock, proceeding anyway",
- mId, __FUNCTION__);
+ "Camera %s: %s: Unable to lock main lock, proceeding anyway",
+ mId.string(), __FUNCTION__);
bool dumpTemplates = false;
@@ -571,8 +575,12 @@
lines.appendFormat(" Error cause: %s\n", mErrorCause.string());
}
lines.appendFormat(" Stream configuration:\n");
- lines.appendFormat(" Operation mode: %s \n", mIsConstrainedHighSpeedConfiguration ?
- "CONSTRAINED HIGH SPEED VIDEO" : "NORMAL");
+ const char *mode =
+ mOperatingMode == static_cast<int>(StreamConfigurationMode::NORMAL_MODE) ? "NORMAL" :
+ mOperatingMode == static_cast<int>(
+ StreamConfigurationMode::CONSTRAINED_HIGH_SPEED_MODE) ? "CONSTRAINED_HIGH_SPEED" :
+ "CUSTOM";
+ lines.appendFormat(" Operation mode: %s (%d) \n", mode, mOperatingMode);
if (mInputStream != NULL) {
write(fd, lines.string(), lines.size());
@@ -605,6 +613,11 @@
}
write(fd, lines.string(), lines.size());
+ if (mRequestThread != NULL) {
+ mRequestThread->dumpCaptureRequestLatency(fd,
+ " ProcessCaptureRequest latency histogram:");
+ }
+
{
lines = String8(" Last request sent:\n");
write(fd, lines.string(), lines.size());
@@ -624,12 +637,11 @@
};
for (int i = 1; i < CAMERA3_TEMPLATE_COUNT; i++) {
- const camera_metadata_t *templateRequest;
- templateRequest =
- mHal3Device->ops->construct_default_request_settings(
- mHal3Device, i);
+ camera_metadata_t *templateRequest = nullptr;
+ mInterface->constructDefaultRequestSettings(
+ (camera3_request_template_t) i, &templateRequest);
lines = String8::format(" HAL Request %s:\n", templateNames[i-1]);
- if (templateRequest == NULL) {
+ if (templateRequest == nullptr) {
lines.append(" Not supported\n");
write(fd, lines.string(), lines.size());
} else {
@@ -637,15 +649,16 @@
dump_indented_camera_metadata(templateRequest,
fd, /*verbosity*/2, /*indentation*/8);
}
+ free_camera_metadata(templateRequest);
}
}
mTagMonitor.dumpMonitoredMetadata(fd);
- if (mHal3Device != NULL) {
- lines = String8(" HAL device dump:\n");
+ if (mInterface->valid()) {
+ lines = String8(" HAL device dump:\n");
write(fd, lines.string(), lines.size());
- mHal3Device->ops->dump(mHal3Device, fd);
+ mInterface->dump(fd);
}
if (gotLock) mLock.unlock();
@@ -686,29 +699,36 @@
}
status_t Camera3Device::convertMetadataListToRequestListLocked(
- const List<const CameraMetadata> &metadataList, RequestList *requestList) {
+ const List<const CameraMetadata> &metadataList,
+ const std::list<const SurfaceMap> &surfaceMaps,
+ bool repeating,
+ RequestList *requestList) {
if (requestList == NULL) {
CLOGE("requestList cannot be NULL.");
return BAD_VALUE;
}
int32_t burstId = 0;
- for (List<const CameraMetadata>::const_iterator it = metadataList.begin();
- it != metadataList.end(); ++it) {
- sp<CaptureRequest> newRequest = setUpRequestLocked(*it);
+ List<const CameraMetadata>::const_iterator metadataIt = metadataList.begin();
+ std::list<const SurfaceMap>::const_iterator surfaceMapIt = surfaceMaps.begin();
+ for (; metadataIt != metadataList.end() && surfaceMapIt != surfaceMaps.end();
+ ++metadataIt, ++surfaceMapIt) {
+ sp<CaptureRequest> newRequest = setUpRequestLocked(*metadataIt, *surfaceMapIt);
if (newRequest == 0) {
CLOGE("Can't create capture request");
return BAD_VALUE;
}
+ newRequest->mRepeating = repeating;
+
// Setup burst Id and request Id
newRequest->mResultExtras.burstId = burstId++;
- if (it->exists(ANDROID_REQUEST_ID)) {
- if (it->find(ANDROID_REQUEST_ID).count == 0) {
+ if (metadataIt->exists(ANDROID_REQUEST_ID)) {
+ if (metadataIt->find(ANDROID_REQUEST_ID).count == 0) {
CLOGE("RequestID entry exists; but must not be empty in metadata");
return BAD_VALUE;
}
- newRequest->mResultExtras.requestId = it->find(ANDROID_REQUEST_ID).data.i32[0];
+ newRequest->mResultExtras.requestId = metadataIt->find(ANDROID_REQUEST_ID).data.i32[0];
} else {
CLOGE("RequestID does not exist in metadata");
return BAD_VALUE;
@@ -718,6 +738,10 @@
ALOGV("%s: requestId = %" PRId32, __FUNCTION__, newRequest->mResultExtras.requestId);
}
+ if (metadataIt != metadataList.end() || surfaceMapIt != surfaceMaps.end()) {
+ ALOGE("%s: metadataList and surfaceMaps are not the same size!", __FUNCTION__);
+ return BAD_VALUE;
+ }
// Setup batch size if this is a high speed video recording request.
if (mIsConstrainedHighSpeedConfiguration && requestList->size() > 0) {
@@ -737,12 +761,31 @@
ATRACE_CALL();
List<const CameraMetadata> requests;
+ std::list<const SurfaceMap> surfaceMaps;
+ convertToRequestList(requests, surfaceMaps, request);
+
+ return captureList(requests, surfaceMaps, /*lastFrameNumber*/NULL);
+}
+
+void Camera3Device::convertToRequestList(List<const CameraMetadata>& requests,
+ std::list<const SurfaceMap>& surfaceMaps,
+ const CameraMetadata& request) {
requests.push_back(request);
- return captureList(requests, /*lastFrameNumber*/NULL);
+
+ SurfaceMap surfaceMap;
+ camera_metadata_ro_entry streams = request.find(ANDROID_REQUEST_OUTPUT_STREAMS);
+ // With no surface list passed in, stream and surface will have 1-to-1
+ // mapping. So the surface index is 0 for each stream in the surfaceMap.
+ for (size_t i = 0; i < streams.count; i++) {
+ surfaceMap[streams.data.i32[i]].push_back(0);
+ }
+ surfaceMaps.push_back(surfaceMap);
}
status_t Camera3Device::submitRequestsHelper(
- const List<const CameraMetadata> &requests, bool repeating,
+ const List<const CameraMetadata> &requests,
+ const std::list<const SurfaceMap> &surfaceMaps,
+ bool repeating,
/*out*/
int64_t *lastFrameNumber) {
ATRACE_CALL();
@@ -757,7 +800,8 @@
RequestList requestList;
- res = convertMetadataListToRequestListLocked(requests, /*out*/&requestList);
+ res = convertMetadataListToRequestListLocked(requests, surfaceMaps,
+ repeating, /*out*/&requestList);
if (res != OK) {
// error logged by previous call
return res;
@@ -775,7 +819,7 @@
SET_ERR_L("Can't transition to active in %f seconds!",
kActiveTimeout/1e9);
}
- ALOGV("Camera %d: Capture request %" PRId32 " enqueued", mId,
+ ALOGV("Camera %s: Capture request %" PRId32 " enqueued", mId.string(),
(*(requestList.begin()))->mResultExtras.requestId);
} else {
CLOGE("Cannot queue request. Impossible.");
@@ -785,11 +829,197 @@
return res;
}
+// Only one processCaptureResult should be called at a time, so
+// the locks won't block. The locks are present here simply to enforce this.
+hardware::Return<void> Camera3Device::processCaptureResult(
+ const hardware::hidl_vec<
+ hardware::camera::device::V3_2::CaptureResult>& results) {
+
+ if (mProcessCaptureResultLock.tryLock() != OK) {
+ // This should never happen; it indicates a wrong client implementation
+ // that doesn't follow the contract. But, we can be tolerant here.
+ ALOGE("%s: callback overlapped! waiting 1s...",
+ __FUNCTION__);
+ if (mProcessCaptureResultLock.timedLock(1000000000 /* 1s */) != OK) {
+ ALOGE("%s: cannot acquire lock in 1s, dropping results",
+ __FUNCTION__);
+ // really don't know what to do, so bail out.
+ return hardware::Void();
+ }
+ }
+ for (const auto& result : results) {
+ processOneCaptureResultLocked(result);
+ }
+ mProcessCaptureResultLock.unlock();
+ return hardware::Void();
+}
+
+void Camera3Device::processOneCaptureResultLocked(
+ const hardware::camera::device::V3_2::CaptureResult& result) {
+ camera3_capture_result r;
+ status_t res;
+ r.frame_number = result.frameNumber;
+
+ hardware::camera::device::V3_2::CameraMetadata resultMetadata;
+ if (result.fmqResultSize > 0) {
+ resultMetadata.resize(result.fmqResultSize);
+ if (mResultMetadataQueue == nullptr) {
+ return; // logged in initialize()
+ }
+ if (!mResultMetadataQueue->read(resultMetadata.data(), result.fmqResultSize)) {
+ ALOGE("%s: Frame %d: Cannot read camera metadata from fmq, size = %" PRIu64,
+ __FUNCTION__, result.frameNumber, result.fmqResultSize);
+ return;
+ }
+ } else {
+ resultMetadata.setToExternal(const_cast<uint8_t *>(result.result.data()),
+ result.result.size());
+ }
+
+ if (resultMetadata.size() != 0) {
+ r.result = reinterpret_cast<const camera_metadata_t*>(resultMetadata.data());
+ size_t expected_metadata_size = resultMetadata.size();
+ if ((res = validate_camera_metadata_structure(r.result, &expected_metadata_size)) != OK) {
+ ALOGE("%s: Frame %d: Invalid camera metadata received by camera service from HAL: %s (%d)",
+ __FUNCTION__, result.frameNumber, strerror(-res), res);
+ return;
+ }
+ } else {
+ r.result = nullptr;
+ }
+
+ std::vector<camera3_stream_buffer_t> outputBuffers(result.outputBuffers.size());
+ std::vector<buffer_handle_t> outputBufferHandles(result.outputBuffers.size());
+ for (size_t i = 0; i < result.outputBuffers.size(); i++) {
+ auto& bDst = outputBuffers[i];
+ const StreamBuffer &bSrc = result.outputBuffers[i];
+
+ ssize_t idx = mOutputStreams.indexOfKey(bSrc.streamId);
+ if (idx == NAME_NOT_FOUND) {
+ ALOGE("%s: Frame %d: Buffer %zu: Invalid output stream id %d",
+ __FUNCTION__, result.frameNumber, i, bSrc.streamId);
+ return;
+ }
+ bDst.stream = mOutputStreams.valueAt(idx)->asHalStream();
+
+ buffer_handle_t *buffer;
+ res = mInterface->popInflightBuffer(result.frameNumber, bSrc.streamId, &buffer);
+ if (res != OK) {
+ ALOGE("%s: Frame %d: Buffer %zu: No in-flight buffer for stream %d",
+ __FUNCTION__, result.frameNumber, i, bSrc.streamId);
+ return;
+ }
+ bDst.buffer = buffer;
+ bDst.status = mapHidlBufferStatus(bSrc.status);
+ bDst.acquire_fence = -1;
+ if (bSrc.releaseFence == nullptr) {
+ bDst.release_fence = -1;
+ } else if (bSrc.releaseFence->numFds == 1) {
+ bDst.release_fence = dup(bSrc.releaseFence->data[0]);
+ } else {
+ ALOGE("%s: Frame %d: Invalid release fence for buffer %zu, fd count is %d, not 1",
+ __FUNCTION__, result.frameNumber, i, bSrc.releaseFence->numFds);
+ return;
+ }
+ }
+ r.num_output_buffers = outputBuffers.size();
+ r.output_buffers = outputBuffers.data();
+
+ camera3_stream_buffer_t inputBuffer;
+ if (result.inputBuffer.streamId == -1) {
+ r.input_buffer = nullptr;
+ } else {
+ if (mInputStream->getId() != result.inputBuffer.streamId) {
+ ALOGE("%s: Frame %d: Invalid input stream id %d", __FUNCTION__,
+ result.frameNumber, result.inputBuffer.streamId);
+ return;
+ }
+ inputBuffer.stream = mInputStream->asHalStream();
+ buffer_handle_t *buffer;
+ res = mInterface->popInflightBuffer(result.frameNumber, result.inputBuffer.streamId,
+ &buffer);
+ if (res != OK) {
+ ALOGE("%s: Frame %d: Input buffer: No in-flight buffer for stream %d",
+ __FUNCTION__, result.frameNumber, result.inputBuffer.streamId);
+ return;
+ }
+ inputBuffer.buffer = buffer;
+ inputBuffer.status = mapHidlBufferStatus(result.inputBuffer.status);
+ inputBuffer.acquire_fence = -1;
+ if (result.inputBuffer.releaseFence == nullptr) {
+ inputBuffer.release_fence = -1;
+ } else if (result.inputBuffer.releaseFence->numFds == 1) {
+ inputBuffer.release_fence = dup(result.inputBuffer.releaseFence->data[0]);
+ } else {
+ ALOGE("%s: Frame %d: Invalid release fence for input buffer, fd count is %d, not 1",
+ __FUNCTION__, result.frameNumber, result.inputBuffer.releaseFence->numFds);
+ return;
+ }
+ r.input_buffer = &inputBuffer;
+ }
+
+ r.partial_result = result.partialResult;
+
+ processCaptureResult(&r);
+}
+
+hardware::Return<void> Camera3Device::notify(
+ const hardware::hidl_vec<hardware::camera::device::V3_2::NotifyMsg>& msgs) {
+ for (const auto& msg : msgs) {
+ notify(msg);
+ }
+ return hardware::Void();
+}
+
+void Camera3Device::notify(
+ const hardware::camera::device::V3_2::NotifyMsg& msg) {
+
+ camera3_notify_msg m;
+ switch (msg.type) {
+ case MsgType::ERROR:
+ m.type = CAMERA3_MSG_ERROR;
+ m.message.error.frame_number = msg.msg.error.frameNumber;
+ if (msg.msg.error.errorStreamId >= 0) {
+ ssize_t idx = mOutputStreams.indexOfKey(msg.msg.error.errorStreamId);
+ if (idx == NAME_NOT_FOUND) {
+ ALOGE("%s: Frame %d: Invalid error stream id %d",
+ __FUNCTION__, m.message.error.frame_number, msg.msg.error.errorStreamId);
+ return;
+ }
+ m.message.error.error_stream = mOutputStreams.valueAt(idx)->asHalStream();
+ } else {
+ m.message.error.error_stream = nullptr;
+ }
+ switch (msg.msg.error.errorCode) {
+ case ErrorCode::ERROR_DEVICE:
+ m.message.error.error_code = CAMERA3_MSG_ERROR_DEVICE;
+ break;
+ case ErrorCode::ERROR_REQUEST:
+ m.message.error.error_code = CAMERA3_MSG_ERROR_REQUEST;
+ break;
+ case ErrorCode::ERROR_RESULT:
+ m.message.error.error_code = CAMERA3_MSG_ERROR_RESULT;
+ break;
+ case ErrorCode::ERROR_BUFFER:
+ m.message.error.error_code = CAMERA3_MSG_ERROR_BUFFER;
+ break;
+ }
+ break;
+ case MsgType::SHUTTER:
+ m.type = CAMERA3_MSG_SHUTTER;
+ m.message.shutter.frame_number = msg.msg.shutter.frameNumber;
+ m.message.shutter.timestamp = msg.msg.shutter.timestamp;
+ break;
+ }
+ notify(&m);
+}
+
status_t Camera3Device::captureList(const List<const CameraMetadata> &requests,
+ const std::list<const SurfaceMap> &surfaceMaps,
int64_t *lastFrameNumber) {
ATRACE_CALL();
- return submitRequestsHelper(requests, /*repeating*/false, lastFrameNumber);
+ return submitRequestsHelper(requests, surfaceMaps, /*repeating*/false, lastFrameNumber);
}
status_t Camera3Device::setStreamingRequest(const CameraMetadata &request,
@@ -797,23 +1027,29 @@
ATRACE_CALL();
List<const CameraMetadata> requests;
- requests.push_back(request);
- return setStreamingRequestList(requests, /*lastFrameNumber*/NULL);
+ std::list<const SurfaceMap> surfaceMaps;
+ convertToRequestList(requests, surfaceMaps, request);
+
+ return setStreamingRequestList(requests, /*surfaceMap*/surfaceMaps,
+ /*lastFrameNumber*/NULL);
}
status_t Camera3Device::setStreamingRequestList(const List<const CameraMetadata> &requests,
+ const std::list<const SurfaceMap> &surfaceMaps,
int64_t *lastFrameNumber) {
ATRACE_CALL();
- return submitRequestsHelper(requests, /*repeating*/true, lastFrameNumber);
+ return submitRequestsHelper(requests, surfaceMaps, /*repeating*/true, lastFrameNumber);
}
sp<Camera3Device::CaptureRequest> Camera3Device::setUpRequestLocked(
- const CameraMetadata &request) {
+ const CameraMetadata &request, const SurfaceMap &surfaceMap) {
status_t res;
if (mStatus == STATUS_UNCONFIGURED || mNeedConfig) {
- res = configureStreamsLocked();
+ // This point should only be reached via API1 (API2 must explicitly call configureStreams)
+ // so unilaterally select normal operating mode.
+ res = configureStreamsLocked(CAMERA3_STREAM_CONFIGURATION_NORMAL_MODE);
// Stream configuration failed. Client might try other configuraitons.
if (res != OK) {
CLOGE("Can't set up streams: %s (%d)", strerror(-res), res);
@@ -825,7 +1061,7 @@
}
}
- sp<CaptureRequest> newRequest = createCaptureRequest(request);
+ sp<CaptureRequest> newRequest = createCaptureRequest(request, surfaceMap);
return newRequest;
}
@@ -850,7 +1086,7 @@
SET_ERR_L("Unexpected status: %d", mStatus);
return INVALID_OPERATION;
}
- ALOGV("Camera %d: Clearing repeating request", mId);
+ ALOGV("Camera %s: Clearing repeating request", mId.string());
return mRequestThread->clearRepeatingRequests(lastFrameNumber);
}
@@ -867,8 +1103,8 @@
ATRACE_CALL();
Mutex::Autolock il(mInterfaceLock);
Mutex::Autolock l(mLock);
- ALOGV("Camera %d: Creating new input stream %d: %d x %d, format %d",
- mId, mNextStreamId, width, height, format);
+ ALOGV("Camera %s: Creating new input stream %d: %d x %d, format %d",
+ mId.string(), mNextStreamId, width, height, format);
status_t res;
bool wasActive = false;
@@ -915,7 +1151,8 @@
// Continue captures if active at start
if (wasActive) {
ALOGV("%s: Restarting activity to reconfigure streams", __FUNCTION__);
- res = configureStreamsLocked();
+ // Reuse current operating mode for new stream config
+ res = configureStreamsLocked(mOperatingMode);
if (res != OK) {
ALOGE("%s: Can't reconfigure device for new stream %d: %s (%d)",
__FUNCTION__, mNextStreamId, strerror(-res), res);
@@ -924,99 +1161,38 @@
internalResumeLocked();
}
- ALOGV("Camera %d: Created input stream", mId);
- return OK;
-}
-
-
-status_t Camera3Device::createZslStream(
- uint32_t width, uint32_t height,
- int depth,
- /*out*/
- int *id,
- sp<Camera3ZslStream>* zslStream) {
- ATRACE_CALL();
- Mutex::Autolock il(mInterfaceLock);
- Mutex::Autolock l(mLock);
- ALOGV("Camera %d: Creating ZSL stream %d: %d x %d, depth %d",
- mId, mNextStreamId, width, height, depth);
-
- status_t res;
- bool wasActive = false;
-
- switch (mStatus) {
- case STATUS_ERROR:
- ALOGE("%s: Device has encountered a serious error", __FUNCTION__);
- return INVALID_OPERATION;
- case STATUS_UNINITIALIZED:
- ALOGE("%s: Device not initialized", __FUNCTION__);
- return INVALID_OPERATION;
- case STATUS_UNCONFIGURED:
- case STATUS_CONFIGURED:
- // OK
- break;
- case STATUS_ACTIVE:
- ALOGV("%s: Stopping activity to reconfigure streams", __FUNCTION__);
- res = internalPauseAndWaitLocked();
- if (res != OK) {
- SET_ERR_L("Can't pause captures to reconfigure streams!");
- return res;
- }
- wasActive = true;
- break;
- default:
- SET_ERR_L("Unexpected status: %d", mStatus);
- return INVALID_OPERATION;
- }
- assert(mStatus != STATUS_ACTIVE);
-
- if (mInputStream != 0) {
- ALOGE("%s: Cannot create more than 1 input stream", __FUNCTION__);
- return INVALID_OPERATION;
- }
-
- sp<Camera3ZslStream> newStream = new Camera3ZslStream(mNextStreamId,
- width, height, depth);
- newStream->setStatusTracker(mStatusTracker);
-
- res = mOutputStreams.add(mNextStreamId, newStream);
- if (res < 0) {
- ALOGE("%s: Can't add new stream to set: %s (%d)",
- __FUNCTION__, strerror(-res), res);
- return res;
- }
- mInputStream = newStream;
-
- mNeedConfig = true;
-
- *id = mNextStreamId++;
- *zslStream = newStream;
-
- // Continue captures if active at start
- if (wasActive) {
- ALOGV("%s: Restarting activity to reconfigure streams", __FUNCTION__);
- res = configureStreamsLocked();
- if (res != OK) {
- ALOGE("%s: Can't reconfigure device for new stream %d: %s (%d)",
- __FUNCTION__, mNextStreamId, strerror(-res), res);
- return res;
- }
- internalResumeLocked();
- }
-
- ALOGV("Camera %d: Created ZSL stream", mId);
+ ALOGV("Camera %s: Created input stream", mId.string());
return OK;
}
status_t Camera3Device::createStream(sp<Surface> consumer,
- uint32_t width, uint32_t height, int format, android_dataspace dataSpace,
- camera3_stream_rotation_t rotation, int *id, int streamSetId, uint32_t consumerUsage) {
+ uint32_t width, uint32_t height, int format,
+ android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+ int streamSetId, bool isShared, uint32_t consumerUsage) {
+ ATRACE_CALL();
+
+ if (consumer == nullptr) {
+ ALOGE("%s: consumer must not be null", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ std::vector<sp<Surface>> consumers;
+ consumers.push_back(consumer);
+
+ return createStream(consumers, /*hasDeferredConsumer*/ false, width, height,
+ format, dataSpace, rotation, id, streamSetId, isShared, consumerUsage);
+}
+
+status_t Camera3Device::createStream(const std::vector<sp<Surface>>& consumers,
+ bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+ android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+ int streamSetId, bool isShared, uint32_t consumerUsage) {
ATRACE_CALL();
Mutex::Autolock il(mInterfaceLock);
Mutex::Autolock l(mLock);
- ALOGV("Camera %d: Creating new stream %d: %d x %d, format %d, dataspace %d rotation %d"
- " consumer usage 0x%x", mId, mNextStreamId, width, height, format, dataSpace, rotation,
- consumerUsage);
+ ALOGV("Camera %s: Creating new stream %d: %d x %d, format %d, dataspace %d rotation %d"
+ " consumer usage 0x%x, isShared %d", mId.string(), mNextStreamId, width, height, format,
+ dataSpace, rotation, consumerUsage, isShared);
status_t res;
bool wasActive = false;
@@ -1048,28 +1224,17 @@
assert(mStatus != STATUS_ACTIVE);
sp<Camera3OutputStream> newStream;
- // Overwrite stream set id to invalid for HAL3.2 or lower, as buffer manager does support
- // such devices.
- if (mDeviceVersion <= CAMERA_DEVICE_API_VERSION_3_2) {
- streamSetId = CAMERA3_STREAM_SET_ID_INVALID;
- }
- // HAL3.1 doesn't support deferred consumer stream creation as it requires buffer registration
- // which requires a consumer surface to be available.
- if (consumer == nullptr && mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) {
- ALOGE("HAL3.1 doesn't support deferred consumer stream creation");
+ if (consumers.size() == 0 && !hasDeferredConsumer) {
+ ALOGE("%s: Number of consumers cannot be smaller than 1", __FUNCTION__);
return BAD_VALUE;
}
- if (consumer == nullptr && format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+ if (hasDeferredConsumer && format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
ALOGE("Deferred consumer stream creation only support IMPLEMENTATION_DEFINED format");
return BAD_VALUE;
}
- // Use legacy dataspace values for older HALs
- if (mDeviceVersion <= CAMERA_DEVICE_API_VERSION_3_3) {
- dataSpace = mapToLegacyDataspace(dataSpace);
- }
if (format == HAL_PIXEL_FORMAT_BLOB) {
ssize_t blobBufferSize;
if (dataSpace != HAL_DATASPACE_DEPTH) {
@@ -1085,7 +1250,7 @@
return BAD_VALUE;
}
}
- newStream = new Camera3OutputStream(mNextStreamId, consumer,
+ newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
width, height, blobBufferSize, format, dataSpace, rotation,
mTimestampOffset, streamSetId);
} else if (format == HAL_PIXEL_FORMAT_RAW_OPAQUE) {
@@ -1094,29 +1259,25 @@
SET_ERR_L("Invalid RAW opaque buffer size %zd", rawOpaqueBufferSize);
return BAD_VALUE;
}
- newStream = new Camera3OutputStream(mNextStreamId, consumer,
+ newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
width, height, rawOpaqueBufferSize, format, dataSpace, rotation,
mTimestampOffset, streamSetId);
- } else if (consumer == nullptr) {
+ } else if (isShared) {
+ newStream = new Camera3SharedOutputStream(mNextStreamId, consumers,
+ width, height, format, consumerUsage, dataSpace, rotation,
+ mTimestampOffset, streamSetId);
+ } else if (consumers.size() == 0 && hasDeferredConsumer) {
newStream = new Camera3OutputStream(mNextStreamId,
width, height, format, consumerUsage, dataSpace, rotation,
mTimestampOffset, streamSetId);
} else {
- newStream = new Camera3OutputStream(mNextStreamId, consumer,
+ newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
width, height, format, dataSpace, rotation,
mTimestampOffset, streamSetId);
}
newStream->setStatusTracker(mStatusTracker);
- /**
- * Camera3 Buffer manager is only supported by HAL3.3 onwards, as the older HALs ( < HAL3.2)
- * requires buffers to be statically allocated for internal static buffer registration, while
- * the buffers provided by buffer manager are really dynamically allocated. For HAL3.2, because
- * not all HAL implementation supports dynamic buffer registeration, exlude it as well.
- */
- if (mDeviceVersion > CAMERA_DEVICE_API_VERSION_3_2) {
- newStream->setBufferManager(mBufferManager);
- }
+ newStream->setBufferManager(mBufferManager);
res = mOutputStreams.add(mNextStreamId, newStream);
if (res < 0) {
@@ -1130,7 +1291,8 @@
// Continue captures if active at start
if (wasActive) {
ALOGV("%s: Restarting activity to reconfigure streams", __FUNCTION__);
- res = configureStreamsLocked();
+ // Reuse current operating mode for new stream config
+ res = configureStreamsLocked(mOperatingMode);
if (res != OK) {
CLOGE("Can't reconfigure device for new stream %d: %s (%d)",
mNextStreamId, strerror(-res), res);
@@ -1138,19 +1300,10 @@
}
internalResumeLocked();
}
- ALOGV("Camera %d: Created new stream", mId);
+ ALOGV("Camera %s: Created new stream", mId.string());
return OK;
}
-status_t Camera3Device::createReprocessStreamFromStream(int outputId, int *id) {
- ATRACE_CALL();
- (void)outputId; (void)id;
-
- CLOGE("Unimplemented");
- return INVALID_OPERATION;
-}
-
-
status_t Camera3Device::getStreamInfo(int id,
uint32_t *width, uint32_t *height,
uint32_t *format, android_dataspace *dataSpace) {
@@ -1227,12 +1380,12 @@
Mutex::Autolock l(mLock);
status_t res;
- ALOGV("%s: Camera %d: Deleting stream %d", __FUNCTION__, mId, id);
+ ALOGV("%s: Camera %s: Deleting stream %d", __FUNCTION__, mId.string(), id);
// CameraDevice semantics require device to already be idle before
// deleteStream is called, unlike for createStream.
if (mStatus == STATUS_ACTIVE) {
- ALOGV("%s: Camera %d: Device not idle", __FUNCTION__, mId);
+ ALOGV("%s: Camera %s: Device not idle", __FUNCTION__, mId.string());
return -EBUSY;
}
@@ -1266,27 +1419,14 @@
return res;
}
-status_t Camera3Device::deleteReprocessStream(int id) {
- ATRACE_CALL();
- (void)id;
-
- CLOGE("Unimplemented");
- return INVALID_OPERATION;
-}
-
-status_t Camera3Device::configureStreams(bool isConstrainedHighSpeed) {
+status_t Camera3Device::configureStreams(int operatingMode) {
ATRACE_CALL();
ALOGV("%s: E", __FUNCTION__);
Mutex::Autolock il(mInterfaceLock);
Mutex::Autolock l(mLock);
- if (mIsConstrainedHighSpeedConfiguration != isConstrainedHighSpeed) {
- mNeedConfig = true;
- mIsConstrainedHighSpeedConfiguration = isConstrainedHighSpeed;
- }
-
- return configureStreamsLocked();
+ return configureStreamsLocked(operatingMode);
}
status_t Camera3Device::getInputBufferProducer(
@@ -1339,27 +1479,21 @@
return OK;
}
- const camera_metadata_t *rawRequest;
- ATRACE_BEGIN("camera3->construct_default_request_settings");
- rawRequest = mHal3Device->ops->construct_default_request_settings(
- mHal3Device, templateId);
- ATRACE_END();
- if (rawRequest == NULL) {
+ camera_metadata_t *rawRequest;
+ status_t res = mInterface->constructDefaultRequestSettings(
+ (camera3_request_template_t) templateId, &rawRequest);
+ if (res == BAD_VALUE) {
ALOGI("%s: template %d is not supported on this camera device",
__FUNCTION__, templateId);
- return BAD_VALUE;
+ return res;
+ } else if (res != OK) {
+ CLOGE("Unable to construct request template %d: %s (%d)",
+ templateId, strerror(-res), res);
+ return res;
}
- mRequestTemplateCache[templateId] = rawRequest;
-
- // Derive some new keys for backward compatibility
- if (mDerivePostRawSensKey && !mRequestTemplateCache[templateId].exists(
- ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST)) {
- int32_t defaultBoost[1] = {100};
- mRequestTemplateCache[templateId].update(
- ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST,
- defaultBoost, 1);
- }
+ set_camera_metadata_vendor_id(rawRequest, mVendorTagId);
+ mRequestTemplateCache[templateId].acquire(rawRequest);
*request = mRequestTemplateCache[templateId];
return OK;
@@ -1390,7 +1524,7 @@
return INVALID_OPERATION;
}
- ALOGV("%s: Camera %d: Waiting until idle", __FUNCTION__, mId);
+ ALOGV("%s: Camera %s: Waiting until idle", __FUNCTION__, mId.string());
status_t res = waitUntilStateThenRelock(/*active*/ false, kShutdownTimeout);
if (res != OK) {
SET_ERR_L("Error waiting for HAL to drain: %s (%d)", strerror(-res),
@@ -1411,7 +1545,7 @@
mRequestThread->setPaused(true);
mPauseStateNotify = true;
- ALOGV("%s: Camera %d: Internal wait until idle", __FUNCTION__, mId);
+ ALOGV("%s: Camera %s: Internal wait until idle", __FUNCTION__, mId.string());
status_t res = waitUntilStateThenRelock(/*active*/ false, kShutdownTimeout);
if (res != OK) {
SET_ERR_L("Can't idle device in %f seconds!",
@@ -1510,8 +1644,8 @@
if (res == TIMED_OUT) {
return res;
} else if (res != OK) {
- ALOGW("%s: Camera %d: No frame in %" PRId64 " ns: %s (%d)",
- __FUNCTION__, mId, timeout, strerror(-res), res);
+ ALOGW("%s: Camera %s: No frame in %" PRId64 " ns: %s (%d)",
+ __FUNCTION__, mId.string(), timeout, strerror(-res), res);
return res;
}
}
@@ -1602,18 +1736,9 @@
sizeof(trigger)/sizeof(trigger[0]));
}
-status_t Camera3Device::pushReprocessBuffer(int reprocessStreamId,
- buffer_handle_t *buffer, wp<BufferReleasedListener> listener) {
- ATRACE_CALL();
- (void)reprocessStreamId; (void)buffer; (void)listener;
-
- CLOGE("Unimplemented");
- return INVALID_OPERATION;
-}
-
status_t Camera3Device::flush(int64_t *frameNumber) {
ATRACE_CALL();
- ALOGV("%s: Camera %d: Flushing all requests", __FUNCTION__, mId);
+ ALOGV("%s: Camera %s: Flushing all requests", __FUNCTION__, mId.string());
Mutex::Autolock il(mInterfaceLock);
{
@@ -1621,15 +1746,7 @@
mRequestThread->clear(/*out*/frameNumber);
}
- status_t res;
- if (mHal3Device->common.version >= CAMERA_DEVICE_API_VERSION_3_1) {
- res = mRequestThread->flush();
- } else {
- Mutex::Autolock l(mLock);
- res = waitUntilDrainedLocked();
- }
-
- return res;
+ return mRequestThread->flush();
}
status_t Camera3Device::prepare(int streamId) {
@@ -1638,7 +1755,7 @@
status_t Camera3Device::prepare(int maxCount, int streamId) {
ATRACE_CALL();
- ALOGV("%s: Camera %d: Preparing stream %d", __FUNCTION__, mId, streamId);
+ ALOGV("%s: Camera %s: Preparing stream %d", __FUNCTION__, mId.string(), streamId);
Mutex::Autolock il(mInterfaceLock);
Mutex::Autolock l(mLock);
@@ -1666,18 +1783,10 @@
status_t Camera3Device::tearDown(int streamId) {
ATRACE_CALL();
- ALOGV("%s: Camera %d: Tearing down stream %d", __FUNCTION__, mId, streamId);
+ ALOGV("%s: Camera %s: Tearing down stream %d", __FUNCTION__, mId.string(), streamId);
Mutex::Autolock il(mInterfaceLock);
Mutex::Autolock l(mLock);
- // Teardown can only be accomplished on devices that don't require register_stream_buffers,
- // since we cannot call register_stream_buffers except right after configure_streams.
- if (mHal3Device->common.version < CAMERA_DEVICE_API_VERSION_3_2) {
- ALOGE("%s: Unable to tear down streams on device HAL v%x",
- __FUNCTION__, mHal3Device->common.version);
- return NO_INIT;
- }
-
sp<Camera3StreamInterface> stream;
ssize_t outputStreamIdx = mOutputStreams.indexOfKey(streamId);
if (outputStreamIdx == NAME_NOT_FOUND) {
@@ -1698,7 +1807,7 @@
status_t Camera3Device::addBufferListenerForStream(int streamId,
wp<Camera3StreamBufferListener> listener) {
ATRACE_CALL();
- ALOGV("%s: Camera %d: Adding buffer listener for stream %d", __FUNCTION__, mId, streamId);
+ ALOGV("%s: Camera %s: Adding buffer listener for stream %d", __FUNCTION__, mId.string(), streamId);
Mutex::Autolock il(mInterfaceLock);
Mutex::Autolock l(mLock);
@@ -1715,12 +1824,6 @@
return OK;
}
-uint32_t Camera3Device::getDeviceVersion() {
- ATRACE_CALL();
- Mutex::Autolock il(mInterfaceLock);
- return mDeviceVersion;
-}
-
/**
* Methods called by subclasses
*/
@@ -1736,7 +1839,7 @@
if (mStatus != STATUS_ACTIVE && mStatus != STATUS_CONFIGURED) {
return;
}
- ALOGV("%s: Camera %d: Now %s", __FUNCTION__, mId,
+ ALOGV("%s: Camera %s: Now %s", __FUNCTION__, mId.string(),
idle ? "idle" : "active");
internalUpdateStatusLocked(idle ? STATUS_CONFIGURED : STATUS_ACTIVE);
@@ -1755,14 +1858,16 @@
}
}
-status_t Camera3Device::setConsumerSurface(int streamId, sp<Surface> consumer) {
+status_t Camera3Device::setConsumerSurfaces(int streamId,
+ const std::vector<sp<Surface>>& consumers) {
ATRACE_CALL();
- ALOGV("%s: Camera %d: set consumer surface for stream %d", __FUNCTION__, mId, streamId);
+ ALOGV("%s: Camera %s: set consumer surface for stream %d",
+ __FUNCTION__, mId.string(), streamId);
Mutex::Autolock il(mInterfaceLock);
Mutex::Autolock l(mLock);
- if (consumer == nullptr) {
- CLOGE("Null consumer is passed!");
+ if (consumers.size() == 0) {
+ CLOGE("No consumer is passed!");
return BAD_VALUE;
}
@@ -1772,22 +1877,24 @@
return idx;
}
sp<Camera3OutputStreamInterface> stream = mOutputStreams[idx];
- status_t res = stream->setConsumer(consumer);
+ status_t res = stream->setConsumers(consumers);
if (res != OK) {
CLOGE("Stream %d set consumer failed (error %d %s) ", streamId, res, strerror(-res));
return res;
}
- if (!stream->isConfiguring()) {
- CLOGE("Stream %d was already fully configured.", streamId);
- return INVALID_OPERATION;
- }
+ if (stream->isConsumerConfigurationDeferred()) {
+ if (!stream->isConfiguring()) {
+ CLOGE("Stream %d was already fully configured.", streamId);
+ return INVALID_OPERATION;
+ }
- res = stream->finishConfiguration(mHal3Device);
- if (res != OK) {
- SET_ERR_L("Can't finish configuring output stream %d: %s (%d)",
- stream->getId(), strerror(-res), res);
- return res;
+ res = stream->finishConfiguration();
+ if (res != OK) {
+ SET_ERR_L("Can't finish configuring output stream %d: %s (%d)",
+ stream->getId(), strerror(-res), res);
+ return res;
+ }
}
return OK;
@@ -1798,7 +1905,7 @@
*/
sp<Camera3Device::CaptureRequest> Camera3Device::createCaptureRequest(
- const CameraMetadata &request) {
+ const CameraMetadata &request, const SurfaceMap &surfaceMap) {
ATRACE_CALL();
status_t res;
@@ -1817,7 +1924,7 @@
// Lazy completion of stream configuration (allocation/registration)
// on first use
if (mInputStream->isConfiguring()) {
- res = mInputStream->finishConfiguration(mHal3Device);
+ res = mInputStream->finishConfiguration();
if (res != OK) {
SET_ERR_L("Unable to finish configuring input stream %d:"
" %s (%d)",
@@ -1853,16 +1960,23 @@
mOutputStreams.editValueAt(idx);
// It is illegal to include a deferred consumer output stream into a request
- if (stream->isConsumerConfigurationDeferred()) {
- CLOGE("Stream %d hasn't finished configuration yet due to deferred consumer",
- stream->getId());
- return NULL;
+ auto iter = surfaceMap.find(streams.data.i32[i]);
+ if (iter != surfaceMap.end()) {
+ const std::vector<size_t>& surfaces = iter->second;
+ for (const auto& surface : surfaces) {
+ if (stream->isConsumerConfigurationDeferred(surface)) {
+ CLOGE("Stream %d surface %zu hasn't finished configuration yet "
+ "due to deferred consumer", stream->getId(), surface);
+ return NULL;
+ }
+ }
+ newRequest->mOutputSurfaces[i] = surfaces;
}
// Lazy completion of stream configuration (allocation/registration)
// on first use
if (stream->isConfiguring()) {
- res = stream->finishConfiguration(mHal3Device);
+ res = stream->finishConfiguration();
if (res != OK) {
SET_ERR_L("Unable to finish configuring stream %d: %s (%d)",
stream->getId(), strerror(-res), res);
@@ -1921,7 +2035,7 @@
mNeedConfig = true;
}
-status_t Camera3Device::configureStreamsLocked() {
+status_t Camera3Device::configureStreamsLocked(int operatingMode) {
ATRACE_CALL();
status_t res;
@@ -1930,6 +2044,21 @@
return INVALID_OPERATION;
}
+ if (operatingMode < 0) {
+ CLOGE("Invalid operating mode: %d", operatingMode);
+ return BAD_VALUE;
+ }
+
+ bool isConstrainedHighSpeed =
+ static_cast<int>(StreamConfigurationMode::CONSTRAINED_HIGH_SPEED_MODE) ==
+ operatingMode;
+
+ if (mOperatingMode != operatingMode) {
+ mNeedConfig = true;
+ mIsConstrainedHighSpeedConfiguration = isConstrainedHighSpeed;
+ mOperatingMode = operatingMode;
+ }
+
if (!mNeedConfig) {
ALOGV("%s: Skipping config, no stream changes", __FUNCTION__);
return OK;
@@ -1945,12 +2074,10 @@
}
// Start configuring the streams
- ALOGV("%s: Camera %d: Starting stream configuration", __FUNCTION__, mId);
+ ALOGV("%s: Camera %s: Starting stream configuration", __FUNCTION__, mId.string());
camera3_stream_configuration config;
- config.operation_mode = mIsConstrainedHighSpeedConfiguration ?
- CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE :
- CAMERA3_STREAM_CONFIGURATION_NORMAL_MODE;
+ config.operation_mode = mOperatingMode;
config.num_streams = (mInputStream != NULL) + mOutputStreams.size();
Vector<camera3_stream_t*> streams;
@@ -1991,9 +2118,8 @@
// Do the HAL configuration; will potentially touch stream
// max_buffers, usage, priv fields.
- ATRACE_BEGIN("camera3->configure_streams");
- res = mHal3Device->ops->configure_streams(mHal3Device, &config);
- ATRACE_END();
+
+ res = mInterface->configureStreams(&config);
if (res == BAD_VALUE) {
// HAL rejected this set of streams as unsupported, clean up config
@@ -2014,7 +2140,7 @@
// faster
if (mInputStream != NULL && mInputStream->isConfiguring()) {
- res = mInputStream->finishConfiguration(mHal3Device);
+ res = mInputStream->finishConfiguration();
if (res != OK) {
CLOGE("Can't finish configuring input stream %d: %s (%d)",
mInputStream->getId(), strerror(-res), res);
@@ -2027,7 +2153,7 @@
sp<Camera3OutputStreamInterface> outputStream =
mOutputStreams.editValueAt(i);
if (outputStream->isConfiguring() && !outputStream->isConsumerConfigurationDeferred()) {
- res = outputStream->finishConfiguration(mHal3Device);
+ res = outputStream->finishConfiguration();
if (res != OK) {
CLOGE("Can't finish configuring output stream %d: %s (%d)",
outputStream->getId(), strerror(-res), res);
@@ -2048,7 +2174,7 @@
// Boost priority of request thread to SCHED_FIFO.
pid_t requestThreadTid = mRequestThread->getTid();
res = requestPriority(getpid(), requestThreadTid,
- kRequestThreadPriority, /*asynchronous*/ false);
+ kRequestThreadPriority, /*isForApp*/ false, /*asynchronous*/ false);
if (res != OK) {
ALOGW("Can't set realtime priority for request processing thread: %s (%d)",
strerror(-res), res);
@@ -2064,7 +2190,7 @@
internalUpdateStatusLocked((mDummyStreamId == NO_STREAM) ?
STATUS_CONFIGURED : STATUS_UNCONFIGURED);
- ALOGV("%s: Camera %d: Stream configuration complete", __FUNCTION__, mId);
+ ALOGV("%s: Camera %s: Stream configuration complete", __FUNCTION__, mId.string());
// tear down the deleted streams after configure streams.
mDeletedStreams.clear();
@@ -2079,12 +2205,12 @@
if (mDummyStreamId != NO_STREAM) {
// Should never be adding a second dummy stream when one is already
// active
- SET_ERR_L("%s: Camera %d: A dummy stream already exists!",
- __FUNCTION__, mId);
+ SET_ERR_L("%s: Camera %s: A dummy stream already exists!",
+ __FUNCTION__, mId.string());
return INVALID_OPERATION;
}
- ALOGV("%s: Camera %d: Adding a dummy stream", __FUNCTION__, mId);
+ ALOGV("%s: Camera %s: Adding a dummy stream", __FUNCTION__, mId.string());
sp<Camera3OutputStreamInterface> dummyStream =
new Camera3DummyStream(mNextStreamId);
@@ -2108,7 +2234,7 @@
if (mDummyStreamId == NO_STREAM) return OK;
if (mOutputStreams.size() == 1) return OK;
- ALOGV("%s: Camera %d: Removing the dummy stream", __FUNCTION__, mId);
+ ALOGV("%s: Camera %s: Removing the dummy stream", __FUNCTION__, mId.string());
// Ok, have a dummy stream and there's at least one other output stream,
// so remove the dummy
@@ -2162,7 +2288,7 @@
void Camera3Device::setErrorStateLockedV(const char *fmt, va_list args) {
// Print out all error messages to log
String8 errorCause = String8::formatV(fmt, args);
- ALOGE("Camera %d: %s", mId, errorCause.string());
+ ALOGE("Camera %s: %s", mId.string(), errorCause.string());
// But only do error state transition steps for the first error
if (mStatus == STATUS_ERROR || mStatus == STATUS_UNINITIALIZED) return;
@@ -2190,13 +2316,13 @@
status_t Camera3Device::registerInFlight(uint32_t frameNumber,
int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput,
- const AeTriggerCancelOverride_t &aeTriggerCancelOverride) {
+ bool hasAppCallback) {
ATRACE_CALL();
Mutex::Autolock l(mInFlightLock);
ssize_t res;
res = mInFlightMap.add(frameNumber, InFlightRequest(numBuffers, resultExtras, hasInput,
- aeTriggerCancelOverride));
+ hasAppCallback));
if (res < 0) return res;
if (mInFlightMap.size() == 1) {
@@ -2250,8 +2376,9 @@
(request.haveResultMetadata && shutterTimestamp != 0))) {
ATRACE_ASYNC_END("frame capture", frameNumber);
- // Sanity check - if sensor timestamp matches shutter timestamp
- if (request.requestStatus == OK &&
+ // Sanity check - if sensor timestamp matches shutter timestamp in the
+ // case of request having callback.
+ if (request.hasCallback && request.requestStatus == OK &&
sensorTimestamp != shutterTimestamp) {
SET_ERR("sensor timestamp (%" PRId64
") for frame %d doesn't match shutter timestamp (%" PRId64 ")",
@@ -2280,10 +2407,15 @@
}
}
-void Camera3Device::insertResultLocked(CaptureResult *result, uint32_t frameNumber,
- const AeTriggerCancelOverride_t &aeTriggerCancelOverride) {
+void Camera3Device::insertResultLocked(CaptureResult *result,
+ uint32_t frameNumber) {
if (result == nullptr) return;
+ camera_metadata_t *meta = const_cast<camera_metadata_t *>(
+ result->mMetadata.getAndLock());
+ set_camera_metadata_vendor_id(meta, mVendorTagId);
+ result->mMetadata.unlock(meta);
+
if (result->mMetadata.update(ANDROID_REQUEST_FRAME_COUNT,
(int32_t*)&frameNumber, 1) != OK) {
SET_ERR("Failed to set frame number %d in metadata", frameNumber);
@@ -2295,8 +2427,6 @@
return;
}
- overrideResultForPrecaptureCancel(&result->mMetadata, aeTriggerCancelOverride);
-
// Valid result, insert into queue
List<CaptureResult>::iterator queuedResult =
mResultQueue.insert(mResultQueue.end(), CaptureResult(*result));
@@ -2311,15 +2441,14 @@
void Camera3Device::sendPartialCaptureResult(const camera_metadata_t * partialResult,
- const CaptureResultExtras &resultExtras, uint32_t frameNumber,
- const AeTriggerCancelOverride_t &aeTriggerCancelOverride) {
+ const CaptureResultExtras &resultExtras, uint32_t frameNumber) {
Mutex::Autolock l(mOutputLock);
CaptureResult captureResult;
captureResult.mResultExtras = resultExtras;
captureResult.mMetadata = partialResult;
- insertResultLocked(&captureResult, frameNumber, aeTriggerCancelOverride);
+ insertResultLocked(&captureResult, frameNumber);
}
@@ -2327,8 +2456,7 @@
CaptureResultExtras &resultExtras,
CameraMetadata &collectedPartialResult,
uint32_t frameNumber,
- bool reprocess,
- const AeTriggerCancelOverride_t &aeTriggerCancelOverride) {
+ bool reprocess) {
if (pendingMetadata.isEmpty())
return;
@@ -2362,15 +2490,6 @@
captureResult.mMetadata.append(collectedPartialResult);
}
- // Derive some new keys for backward compaibility
- if (mDerivePostRawSensKey && !captureResult.mMetadata.exists(
- ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST)) {
- int32_t defaultBoost[1] = {100};
- captureResult.mMetadata.update(
- ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST,
- defaultBoost, 1);
- }
-
captureResult.mMetadata.sort();
// Check that there's a timestamp in the result metadata
@@ -2384,7 +2503,7 @@
mTagMonitor.monitorMetadata(TagMonitor::RESULT,
frameNumber, timestamp.data.i64[0], captureResult.mMetadata);
- insertResultLocked(&captureResult, frameNumber, aeTriggerCancelOverride);
+ insertResultLocked(&captureResult, frameNumber);
}
/**
@@ -2404,10 +2523,7 @@
return;
}
- // For HAL3.2 or above, If HAL doesn't support partial, it must always set
- // partial_result to 1 when metadata is included in this result.
if (!mUsePartialResult &&
- mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2 &&
result->result != NULL &&
result->partial_result != 1) {
SET_ERR("Result is malformed for frame %d: partial_result %u must be 1"
@@ -2440,10 +2556,10 @@
InFlightRequest &request = mInFlightMap.editValueAt(idx);
ALOGVV("%s: got InFlightRequest requestId = %" PRId32
", frameNumber = %" PRId64 ", burstId = %" PRId32
- ", partialResultCount = %d",
+ ", partialResultCount = %d, hasCallback = %d",
__FUNCTION__, request.resultExtras.requestId,
request.resultExtras.frameNumber, request.resultExtras.burstId,
- result->partial_result);
+ result->partial_result, request.hasCallback);
// Always update the partial count to the latest one if it's not 0
// (buffers only). When framework aggregates adjacent partial results
// into one, the latest partial count will be used.
@@ -2452,39 +2568,21 @@
// Check if this result carries only partial metadata
if (mUsePartialResult && result->result != NULL) {
- if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
- if (result->partial_result > mNumPartialResults || result->partial_result < 1) {
- SET_ERR("Result is malformed for frame %d: partial_result %u must be in"
- " the range of [1, %d] when metadata is included in the result",
- frameNumber, result->partial_result, mNumPartialResults);
- return;
- }
- isPartialResult = (result->partial_result < mNumPartialResults);
- if (isPartialResult) {
- request.collectedPartialResult.append(result->result);
- }
- } else {
- camera_metadata_ro_entry_t partialResultEntry;
- res = find_camera_metadata_ro_entry(result->result,
- ANDROID_QUIRKS_PARTIAL_RESULT, &partialResultEntry);
- if (res != NAME_NOT_FOUND &&
- partialResultEntry.count > 0 &&
- partialResultEntry.data.u8[0] ==
- ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL) {
- // A partial result. Flag this as such, and collect this
- // set of metadata into the in-flight entry.
- isPartialResult = true;
- request.collectedPartialResult.append(
- result->result);
- request.collectedPartialResult.erase(
- ANDROID_QUIRKS_PARTIAL_RESULT);
- }
+ if (result->partial_result > mNumPartialResults || result->partial_result < 1) {
+ SET_ERR("Result is malformed for frame %d: partial_result %u must be in"
+ " the range of [1, %d] when metadata is included in the result",
+ frameNumber, result->partial_result, mNumPartialResults);
+ return;
+ }
+ isPartialResult = (result->partial_result < mNumPartialResults);
+ if (isPartialResult) {
+ request.collectedPartialResult.append(result->result);
}
- if (isPartialResult) {
+ if (isPartialResult && request.hasCallback) {
// Send partial capture result
- sendPartialCaptureResult(result->result, request.resultExtras, frameNumber,
- request.aeTriggerCancelOverride);
+ sendPartialCaptureResult(result->result, request.resultExtras,
+ frameNumber);
}
}
@@ -2545,12 +2643,12 @@
if (shutterTimestamp == 0) {
request.pendingMetadata = result->result;
request.collectedPartialResult = collectedPartialResult;
- } else {
+ } else if (request.hasCallback) {
CameraMetadata metadata;
metadata = result->result;
sendCaptureResult(metadata, request.resultExtras,
- collectedPartialResult, frameNumber, hasInputBufferInRequest,
- request.aeTriggerCancelOverride);
+ collectedPartialResult, frameNumber,
+ hasInputBufferInRequest);
}
}
@@ -2635,8 +2733,8 @@
Camera3Stream::cast(msg.error_stream);
streamId = stream->getId();
}
- ALOGV("Camera %d: %s: HAL error, frame %d, stream %d: %d",
- mId, __FUNCTION__, msg.frame_number,
+ ALOGV("Camera %s: %s: HAL error, frame %d, stream %d: %d",
+ mId.string(), __FUNCTION__, msg.frame_number,
streamId, msg.error_code);
CaptureResultExtras resultExtras;
@@ -2655,10 +2753,17 @@
InFlightRequest &r = mInFlightMap.editValueAt(idx);
r.requestStatus = msg.error_code;
resultExtras = r.resultExtras;
+ if (hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT ==
+ errorCode) {
+ // In case of missing result check whether the buffers
+ // returned. If they returned, then remove inflight
+ // request.
+ removeInFlightRequestIfReadyLocked(idx);
+ }
} else {
resultExtras.frameNumber = msg.frame_number;
- ALOGE("Camera %d: %s: cannot find in-flight request on "
- "frame %" PRId64 " error", mId, __FUNCTION__,
+ ALOGE("Camera %s: %s: cannot find in-flight request on "
+ "frame %" PRId64 " error", mId.string(), __FUNCTION__,
resultExtras.frameNumber);
}
}
@@ -2666,7 +2771,7 @@
if (listener != NULL) {
listener->notifyError(errorCode, resultExtras);
} else {
- ALOGE("Camera %d: %s: no listener available", mId, __FUNCTION__);
+ ALOGE("Camera %s: %s: no listener available", mId.string(), __FUNCTION__);
}
break;
default:
@@ -2711,20 +2816,20 @@
}
}
- ALOGVV("Camera %d: %s: Shutter fired for frame %d (id %d) at %" PRId64,
- mId, __FUNCTION__,
- msg.frame_number, r.resultExtras.requestId, msg.timestamp);
- // Call listener, if any
- if (listener != NULL) {
- listener->notifyShutter(r.resultExtras, msg.timestamp);
- }
-
r.shutterTimestamp = msg.timestamp;
-
- // send pending result and buffers
- sendCaptureResult(r.pendingMetadata, r.resultExtras,
- r.collectedPartialResult, msg.frame_number,
- r.hasInputBuffer, r.aeTriggerCancelOverride);
+ if (r.hasCallback) {
+ ALOGVV("Camera %s: %s: Shutter fired for frame %d (id %d) at %" PRId64,
+ mId.string(), __FUNCTION__,
+ msg.frame_number, r.resultExtras.requestId, msg.timestamp);
+ // Call listener, if any
+ if (listener != NULL) {
+ listener->notifyShutter(r.resultExtras, msg.timestamp);
+ }
+ // send pending result and buffers
+ sendCaptureResult(r.pendingMetadata, r.resultExtras,
+ r.collectedPartialResult, msg.frame_number,
+ r.hasInputBuffer);
+ }
returnOutputBuffers(r.pendingOutputBuffers.array(),
r.pendingOutputBuffers.size(), r.shutterTimestamp);
r.pendingOutputBuffers.clear();
@@ -2758,17 +2863,544 @@
}
/**
+ * HalInterface inner class methods
+ */
+
+Camera3Device::HalInterface::HalInterface(
+ sp<ICameraDeviceSession> &session,
+ std::shared_ptr<RequestMetadataQueue> queue) :
+ mHal3Device(nullptr),
+ mHidlSession(session),
+ mRequestMetadataQueue(queue) {}
+
+Camera3Device::HalInterface::HalInterface() :
+ mHal3Device(nullptr) {}
+
+Camera3Device::HalInterface::HalInterface(const HalInterface& other) :
+ mHal3Device(other.mHal3Device),
+ mHidlSession(other.mHidlSession),
+ mRequestMetadataQueue(other.mRequestMetadataQueue) {}
+
+bool Camera3Device::HalInterface::valid() {
+ return (mHal3Device != nullptr) || (mHidlSession != nullptr);
+}
+
+void Camera3Device::HalInterface::clear() {
+ mHal3Device = nullptr;
+ mHidlSession.clear();
+}
+
+bool Camera3Device::HalInterface::supportBatchRequest() {
+ return mHidlSession != nullptr;
+}
+
+status_t Camera3Device::HalInterface::constructDefaultRequestSettings(
+ camera3_request_template_t templateId,
+ /*out*/ camera_metadata_t **requestTemplate) {
+ ATRACE_NAME("CameraHal::constructDefaultRequestSettings");
+ if (!valid()) return INVALID_OPERATION;
+ status_t res = OK;
+
+ if (mHal3Device != nullptr) {
+ const camera_metadata *r;
+ r = mHal3Device->ops->construct_default_request_settings(
+ mHal3Device, templateId);
+ if (r == nullptr) return BAD_VALUE;
+ *requestTemplate = clone_camera_metadata(r);
+ if (requestTemplate == nullptr) {
+ ALOGE("%s: Unable to clone camera metadata received from HAL",
+ __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+ } else {
+ common::V1_0::Status status;
+ RequestTemplate id;
+ switch (templateId) {
+ case CAMERA3_TEMPLATE_PREVIEW:
+ id = RequestTemplate::PREVIEW;
+ break;
+ case CAMERA3_TEMPLATE_STILL_CAPTURE:
+ id = RequestTemplate::STILL_CAPTURE;
+ break;
+ case CAMERA3_TEMPLATE_VIDEO_RECORD:
+ id = RequestTemplate::VIDEO_RECORD;
+ break;
+ case CAMERA3_TEMPLATE_VIDEO_SNAPSHOT:
+ id = RequestTemplate::VIDEO_SNAPSHOT;
+ break;
+ case CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG:
+ id = RequestTemplate::ZERO_SHUTTER_LAG;
+ break;
+ case CAMERA3_TEMPLATE_MANUAL:
+ id = RequestTemplate::MANUAL;
+ break;
+ default:
+ // Unknown template ID
+ return BAD_VALUE;
+ }
+ auto err = mHidlSession->constructDefaultRequestSettings(id,
+ [&status, &requestTemplate]
+ (common::V1_0::Status s, const device::V3_2::CameraMetadata& request) {
+ status = s;
+ if (status == common::V1_0::Status::OK) {
+ const camera_metadata *r =
+ reinterpret_cast<const camera_metadata_t*>(request.data());
+ size_t expectedSize = request.size();
+ int ret = validate_camera_metadata_structure(r, &expectedSize);
+ if (ret == OK || ret == CAMERA_METADATA_VALIDATION_SHIFTED) {
+ *requestTemplate = clone_camera_metadata(r);
+ if (*requestTemplate == nullptr) {
+ ALOGE("%s: Unable to clone camera metadata received from HAL",
+ __FUNCTION__);
+ status = common::V1_0::Status::INTERNAL_ERROR;
+ }
+ } else {
+ ALOGE("%s: Malformed camera metadata received from HAL", __FUNCTION__);
+ status = common::V1_0::Status::INTERNAL_ERROR;
+ }
+ }
+ });
+ if (!err.isOk()) {
+ ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
+ res = DEAD_OBJECT;
+ } else {
+ res = CameraProviderManager::mapToStatusT(status);
+ }
+ }
+ return res;
+}
+
+status_t Camera3Device::HalInterface::configureStreams(camera3_stream_configuration *config) {
+ ATRACE_NAME("CameraHal::configureStreams");
+ if (!valid()) return INVALID_OPERATION;
+ status_t res = OK;
+
+ if (mHal3Device != nullptr) {
+ res = mHal3Device->ops->configure_streams(mHal3Device, config);
+ } else {
+ // Convert stream config to HIDL
+ std::set<int> activeStreams;
+ StreamConfiguration requestedConfiguration;
+ requestedConfiguration.streams.resize(config->num_streams);
+ for (size_t i = 0; i < config->num_streams; i++) {
+ Stream &dst = requestedConfiguration.streams[i];
+ camera3_stream_t *src = config->streams[i];
+
+ Camera3Stream* cam3stream = Camera3Stream::cast(src);
+ cam3stream->setBufferFreedListener(this);
+ int streamId = cam3stream->getId();
+ StreamType streamType;
+ switch (src->stream_type) {
+ case CAMERA3_STREAM_OUTPUT:
+ streamType = StreamType::OUTPUT;
+ break;
+ case CAMERA3_STREAM_INPUT:
+ streamType = StreamType::INPUT;
+ break;
+ default:
+ ALOGE("%s: Stream %d: Unsupported stream type %d",
+ __FUNCTION__, streamId, config->streams[i]->stream_type);
+ return BAD_VALUE;
+ }
+ dst.id = streamId;
+ dst.streamType = streamType;
+ dst.width = src->width;
+ dst.height = src->height;
+ dst.format = mapToPixelFormat(src->format);
+ dst.usage = mapToConsumerUsage(src->usage);
+ dst.dataSpace = mapToHidlDataspace(src->data_space);
+ dst.rotation = mapToStreamRotation((camera3_stream_rotation_t) src->rotation);
+
+ activeStreams.insert(streamId);
+ // Create Buffer ID map if necessary
+ if (mBufferIdMaps.count(streamId) == 0) {
+ mBufferIdMaps.emplace(streamId, BufferIdMap{});
+ }
+ }
+ // remove BufferIdMap for deleted streams
+ for(auto it = mBufferIdMaps.begin(); it != mBufferIdMaps.end();) {
+ int streamId = it->first;
+ bool active = activeStreams.count(streamId) > 0;
+ if (!active) {
+ it = mBufferIdMaps.erase(it);
+ } else {
+ ++it;
+ }
+ }
+
+ res = mapToStreamConfigurationMode(
+ (camera3_stream_configuration_mode_t) config->operation_mode,
+ /*out*/ &requestedConfiguration.operationMode);
+ if (res != OK) {
+ return res;
+ }
+
+ // Invoke configureStreams
+
+ HalStreamConfiguration finalConfiguration;
+ common::V1_0::Status status;
+ auto err = mHidlSession->configureStreams(requestedConfiguration,
+ [&status, &finalConfiguration]
+ (common::V1_0::Status s, const HalStreamConfiguration& halConfiguration) {
+ finalConfiguration = halConfiguration;
+ status = s;
+ });
+ if (!err.isOk()) {
+ ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
+ return DEAD_OBJECT;
+ }
+
+ if (status != common::V1_0::Status::OK ) {
+ return CameraProviderManager::mapToStatusT(status);
+ }
+
+ // And convert output stream configuration from HIDL
+
+ for (size_t i = 0; i < config->num_streams; i++) {
+ camera3_stream_t *dst = config->streams[i];
+ int streamId = Camera3Stream::cast(dst)->getId();
+
+ // Start scan at i, with the assumption that the stream order matches
+ size_t realIdx = i;
+ bool found = false;
+ for (size_t idx = 0; idx < finalConfiguration.streams.size(); idx++) {
+ if (finalConfiguration.streams[realIdx].id == streamId) {
+ found = true;
+ break;
+ }
+ realIdx = (realIdx >= finalConfiguration.streams.size()) ? 0 : realIdx + 1;
+ }
+ if (!found) {
+ ALOGE("%s: Stream %d not found in stream configuration response from HAL",
+ __FUNCTION__, streamId);
+ return INVALID_OPERATION;
+ }
+ HalStream &src = finalConfiguration.streams[realIdx];
+
+ int overrideFormat = mapToFrameworkFormat(src.overrideFormat);
+ if (dst->format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+ if (dst->format != overrideFormat) {
+ ALOGE("%s: Stream %d: Format override not allowed for format 0x%x", __FUNCTION__,
+ streamId, dst->format);
+ }
+ } else {
+ // Override allowed with IMPLEMENTATION_DEFINED
+ dst->format = overrideFormat;
+ }
+
+ if (dst->stream_type == CAMERA3_STREAM_INPUT) {
+ if (src.producerUsage != 0) {
+ ALOGE("%s: Stream %d: INPUT streams must have 0 for producer usage",
+ __FUNCTION__, streamId);
+ return INVALID_OPERATION;
+ }
+ dst->usage = mapConsumerToFrameworkUsage(src.consumerUsage);
+ } else {
+ // OUTPUT
+ if (src.consumerUsage != 0) {
+ ALOGE("%s: Stream %d: OUTPUT streams must have 0 for consumer usage",
+ __FUNCTION__, streamId);
+ return INVALID_OPERATION;
+ }
+ dst->usage = mapProducerToFrameworkUsage(src.producerUsage);
+ }
+ dst->max_buffers = src.maxBuffers;
+ }
+ }
+ return res;
+}
+
+void Camera3Device::HalInterface::wrapAsHidlRequest(camera3_capture_request_t* request,
+ /*out*/device::V3_2::CaptureRequest* captureRequest,
+ /*out*/std::vector<native_handle_t*>* handlesCreated) {
+
+ if (captureRequest == nullptr || handlesCreated == nullptr) {
+ ALOGE("%s: captureRequest (%p) and handlesCreated (%p) must not be null",
+ __FUNCTION__, captureRequest, handlesCreated);
+ return;
+ }
+
+ captureRequest->frameNumber = request->frame_number;
+
+ captureRequest->fmqSettingsSize = 0;
+
+ {
+ std::lock_guard<std::mutex> lock(mInflightLock);
+ if (request->input_buffer != nullptr) {
+ int32_t streamId = Camera3Stream::cast(request->input_buffer->stream)->getId();
+ buffer_handle_t buf = *(request->input_buffer->buffer);
+ auto pair = getBufferId(buf, streamId);
+ bool isNewBuffer = pair.first;
+ uint64_t bufferId = pair.second;
+ captureRequest->inputBuffer.streamId = streamId;
+ captureRequest->inputBuffer.bufferId = bufferId;
+ captureRequest->inputBuffer.buffer = (isNewBuffer) ? buf : nullptr;
+ captureRequest->inputBuffer.status = BufferStatus::OK;
+ native_handle_t *acquireFence = nullptr;
+ if (request->input_buffer->acquire_fence != -1) {
+ acquireFence = native_handle_create(1,0);
+ acquireFence->data[0] = request->input_buffer->acquire_fence;
+ handlesCreated->push_back(acquireFence);
+ }
+ captureRequest->inputBuffer.acquireFence = acquireFence;
+ captureRequest->inputBuffer.releaseFence = nullptr;
+
+ pushInflightBufferLocked(captureRequest->frameNumber, streamId,
+ request->input_buffer->buffer,
+ request->input_buffer->acquire_fence);
+ } else {
+ captureRequest->inputBuffer.streamId = -1;
+ captureRequest->inputBuffer.bufferId = BUFFER_ID_NO_BUFFER;
+ }
+
+ captureRequest->outputBuffers.resize(request->num_output_buffers);
+ for (size_t i = 0; i < request->num_output_buffers; i++) {
+ const camera3_stream_buffer_t *src = request->output_buffers + i;
+ StreamBuffer &dst = captureRequest->outputBuffers[i];
+ int32_t streamId = Camera3Stream::cast(src->stream)->getId();
+ buffer_handle_t buf = *(src->buffer);
+ auto pair = getBufferId(buf, streamId);
+ bool isNewBuffer = pair.first;
+ dst.streamId = streamId;
+ dst.bufferId = pair.second;
+ dst.buffer = isNewBuffer ? buf : nullptr;
+ dst.status = BufferStatus::OK;
+ native_handle_t *acquireFence = nullptr;
+ if (src->acquire_fence != -1) {
+ acquireFence = native_handle_create(1,0);
+ acquireFence->data[0] = src->acquire_fence;
+ handlesCreated->push_back(acquireFence);
+ }
+ dst.acquireFence = acquireFence;
+ dst.releaseFence = nullptr;
+
+ pushInflightBufferLocked(captureRequest->frameNumber, streamId,
+ src->buffer, src->acquire_fence);
+ }
+ }
+}
+
+status_t Camera3Device::HalInterface::processBatchCaptureRequests(
+ std::vector<camera3_capture_request_t*>& requests,/*out*/uint32_t* numRequestProcessed) {
+ ATRACE_NAME("CameraHal::processBatchCaptureRequests");
+ if (!valid()) return INVALID_OPERATION;
+
+ hardware::hidl_vec<device::V3_2::CaptureRequest> captureRequests;
+ size_t batchSize = requests.size();
+ captureRequests.resize(batchSize);
+ std::vector<native_handle_t*> handlesCreated;
+
+ for (size_t i = 0; i < batchSize; i++) {
+ wrapAsHidlRequest(requests[i], /*out*/&captureRequests[i], /*out*/&handlesCreated);
+ }
+
+ std::vector<device::V3_2::BufferCache> cachesToRemove;
+ {
+ std::lock_guard<std::mutex> lock(mBufferIdMapLock);
+ for (auto& pair : mFreedBuffers) {
+ // The stream might have been removed since onBufferFreed
+ if (mBufferIdMaps.find(pair.first) != mBufferIdMaps.end()) {
+ cachesToRemove.push_back({pair.first, pair.second});
+ }
+ }
+ mFreedBuffers.clear();
+ }
+
+ common::V1_0::Status status = common::V1_0::Status::INTERNAL_ERROR;
+ *numRequestProcessed = 0;
+
+ // Write metadata to FMQ.
+ for (size_t i = 0; i < batchSize; i++) {
+ camera3_capture_request_t* request = requests[i];
+ device::V3_2::CaptureRequest* captureRequest = &captureRequests[i];
+
+ if (request->settings != nullptr) {
+ size_t settingsSize = get_camera_metadata_size(request->settings);
+ if (mRequestMetadataQueue != nullptr && mRequestMetadataQueue->write(
+ reinterpret_cast<const uint8_t*>(request->settings), settingsSize)) {
+ captureRequest->settings.resize(0);
+ captureRequest->fmqSettingsSize = settingsSize;
+ } else {
+ if (mRequestMetadataQueue != nullptr) {
+ ALOGW("%s: couldn't utilize fmq, fallback to hwbinder", __FUNCTION__);
+ }
+ captureRequest->settings.setToExternal(
+ reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(request->settings)),
+ get_camera_metadata_size(request->settings));
+ captureRequest->fmqSettingsSize = 0u;
+ }
+ } else {
+ // A null request settings maps to a size-0 CameraMetadata
+ captureRequest->settings.resize(0);
+ captureRequest->fmqSettingsSize = 0u;
+ }
+ }
+ auto err = mHidlSession->processCaptureRequest(captureRequests, cachesToRemove,
+ [&status, &numRequestProcessed] (auto s, uint32_t n) {
+ status = s;
+ *numRequestProcessed = n;
+ });
+ if (!err.isOk()) {
+ ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
+ return DEAD_OBJECT;
+ }
+ if (status == common::V1_0::Status::OK && *numRequestProcessed != batchSize) {
+ ALOGE("%s: processCaptureRequest returns OK but processed %d/%zu requests",
+ __FUNCTION__, *numRequestProcessed, batchSize);
+ status = common::V1_0::Status::INTERNAL_ERROR;
+ }
+
+ for (auto& handle : handlesCreated) {
+ native_handle_delete(handle);
+ }
+ return CameraProviderManager::mapToStatusT(status);
+}
+
+status_t Camera3Device::HalInterface::processCaptureRequest(
+ camera3_capture_request_t *request) {
+ ATRACE_NAME("CameraHal::processCaptureRequest");
+ if (!valid()) return INVALID_OPERATION;
+ status_t res = OK;
+
+ if (mHal3Device != nullptr) {
+ res = mHal3Device->ops->process_capture_request(mHal3Device, request);
+ } else {
+ uint32_t numRequestProcessed = 0;
+ std::vector<camera3_capture_request_t*> requests(1);
+ requests[0] = request;
+ res = processBatchCaptureRequests(requests, &numRequestProcessed);
+ }
+ return res;
+}
+
+status_t Camera3Device::HalInterface::flush() {
+ ATRACE_NAME("CameraHal::flush");
+ if (!valid()) return INVALID_OPERATION;
+ status_t res = OK;
+
+ if (mHal3Device != nullptr) {
+ res = mHal3Device->ops->flush(mHal3Device);
+ } else {
+ auto err = mHidlSession->flush();
+ if (!err.isOk()) {
+ ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
+ res = DEAD_OBJECT;
+ } else {
+ res = CameraProviderManager::mapToStatusT(err);
+ }
+ }
+ return res;
+}
+
+status_t Camera3Device::HalInterface::dump(int fd) {
+ ATRACE_NAME("CameraHal::dump");
+ if (!valid()) return INVALID_OPERATION;
+ status_t res = OK;
+
+ if (mHal3Device != nullptr) {
+ mHal3Device->ops->dump(mHal3Device, fd);
+ } else {
+ // Handled by CameraProviderManager::dump
+ }
+ return res;
+}
+
+status_t Camera3Device::HalInterface::close() {
+ ATRACE_NAME("CameraHal::close()");
+ if (!valid()) return INVALID_OPERATION;
+ status_t res = OK;
+
+ if (mHal3Device != nullptr) {
+ mHal3Device->common.close(&mHal3Device->common);
+ } else {
+ auto err = mHidlSession->close();
+ // Interface will be dead shortly anyway, so don't log errors
+ if (!err.isOk()) {
+ res = DEAD_OBJECT;
+ }
+ }
+ return res;
+}
+
+status_t Camera3Device::HalInterface::pushInflightBufferLocked(
+ int32_t frameNumber, int32_t streamId, buffer_handle_t *buffer, int acquireFence) {
+ uint64_t key = static_cast<uint64_t>(frameNumber) << 32 | static_cast<uint64_t>(streamId);
+ auto pair = std::make_pair(buffer, acquireFence);
+ mInflightBufferMap[key] = pair;
+ return OK;
+}
+
+status_t Camera3Device::HalInterface::popInflightBuffer(
+ int32_t frameNumber, int32_t streamId,
+ /*out*/ buffer_handle_t **buffer) {
+ std::lock_guard<std::mutex> lock(mInflightLock);
+
+ uint64_t key = static_cast<uint64_t>(frameNumber) << 32 | static_cast<uint64_t>(streamId);
+ auto it = mInflightBufferMap.find(key);
+ if (it == mInflightBufferMap.end()) return NAME_NOT_FOUND;
+ auto pair = it->second;
+ *buffer = pair.first;
+ int acquireFence = pair.second;
+ if (acquireFence > 0) {
+ ::close(acquireFence);
+ }
+ mInflightBufferMap.erase(it);
+ return OK;
+}
+
+std::pair<bool, uint64_t> Camera3Device::HalInterface::getBufferId(
+ const buffer_handle_t& buf, int streamId) {
+ std::lock_guard<std::mutex> lock(mBufferIdMapLock);
+
+ BufferIdMap& bIdMap = mBufferIdMaps.at(streamId);
+ auto it = bIdMap.find(buf);
+ if (it == bIdMap.end()) {
+ bIdMap[buf] = mNextBufferId++;
+ ALOGV("stream %d now have %zu buffer caches, buf %p",
+ streamId, bIdMap.size(), buf);
+ return std::make_pair(true, mNextBufferId - 1);
+ } else {
+ return std::make_pair(false, it->second);
+ }
+}
+
+void Camera3Device::HalInterface::onBufferFreed(
+ int streamId, const native_handle_t* handle) {
+ std::lock_guard<std::mutex> lock(mBufferIdMapLock);
+ uint64_t bufferId = BUFFER_ID_NO_BUFFER;
+ auto mapIt = mBufferIdMaps.find(streamId);
+ if (mapIt == mBufferIdMaps.end()) {
+ // streamId might be from a deleted stream here
+ ALOGI("%s: stream %d has been removed",
+ __FUNCTION__, streamId);
+ return;
+ }
+ BufferIdMap& bIdMap = mapIt->second;
+ auto it = bIdMap.find(handle);
+ if (it == bIdMap.end()) {
+ ALOGW("%s: cannot find buffer %p in stream %d",
+ __FUNCTION__, handle, streamId);
+ return;
+ } else {
+ bufferId = it->second;
+ bIdMap.erase(it);
+ ALOGV("%s: stream %d now have %zu buffer caches after removing buf %p",
+ __FUNCTION__, streamId, bIdMap.size(), handle);
+ }
+ mFreedBuffers.push_back(std::make_pair(streamId, bufferId));
+}
+
+/**
* RequestThread inner class methods
*/
Camera3Device::RequestThread::RequestThread(wp<Camera3Device> parent,
sp<StatusTracker> statusTracker,
- camera3_device_t *hal3Device,
- bool aeLockAvailable) :
+ HalInterface* interface) :
Thread(/*canCallJava*/false),
mParent(parent),
mStatusTracker(statusTracker),
- mHal3Device(hal3Device),
+ mInterface(interface),
mListener(nullptr),
mId(getId(parent)),
mReconfigured(false),
@@ -2780,11 +3412,13 @@
mCurrentPreCaptureTriggerId(0),
mRepeatingLastFrameNumber(
hardware::camera2::ICameraDeviceUser::NO_IN_FLIGHT_REPEATING_FRAMES),
- mAeLockAvailable(aeLockAvailable),
- mPrepareVideoStream(false) {
+ mPrepareVideoStream(false),
+ mRequestLatency(kRequestLatencyBinSize) {
mStatusId = statusTracker->addComponent();
}
+Camera3Device::RequestThread::~RequestThread() {}
+
void Camera3Device::RequestThread::setNotificationListener(
wp<NotificationListener> listener) {
Mutex::Autolock l(mRequestLock);
@@ -2839,10 +3473,11 @@
return OK;
}
-int Camera3Device::RequestThread::getId(const wp<Camera3Device> &device) {
+const String8& Camera3Device::RequestThread::getId(const wp<Camera3Device> &device) {
+ static String8 deadId("<DeadDevice>");
sp<Camera3Device> d = device.promote();
- if (d != NULL) return d->mId;
- return 0;
+ if (d != nullptr) return d->mId;
+ return deadId;
}
status_t Camera3Device::RequestThread::queueTriggerLocked(
@@ -2972,11 +3607,7 @@
ATRACE_CALL();
Mutex::Autolock l(mFlushLock);
- if (mHal3Device->common.version >= CAMERA_DEVICE_API_VERSION_3_1) {
- return mHal3Device->ops->flush(mHal3Device);
- }
-
- return -ENOTSUP;
+ return mInterface->flush();
}
void Camera3Device::RequestThread::setPaused(bool paused) {
@@ -3007,65 +3638,9 @@
// The exit from any possible waits
mDoPauseSignal.signal();
mRequestSignal.signal();
-}
-
-/**
- * For devices <= CAMERA_DEVICE_API_VERSION_3_2, AE_PRECAPTURE_TRIGGER_CANCEL is not supported so
- * we need to override AE_PRECAPTURE_TRIGGER_CANCEL to AE_PRECAPTURE_TRIGGER_IDLE and AE_LOCK_OFF
- * to AE_LOCK_ON to start cancelling AE precapture. If AE lock is not available, it still overrides
- * AE_PRECAPTURE_TRIGGER_CANCEL to AE_PRECAPTURE_TRIGGER_IDLE but doesn't add AE_LOCK_ON to the
- * request.
- */
-void Camera3Device::RequestThread::handleAePrecaptureCancelRequest(const sp<CaptureRequest>& request) {
- request->mAeTriggerCancelOverride.applyAeLock = false;
- request->mAeTriggerCancelOverride.applyAePrecaptureTrigger = false;
-
- if (mHal3Device->common.version > CAMERA_DEVICE_API_VERSION_3_2) {
- return;
- }
-
- camera_metadata_entry_t aePrecaptureTrigger =
- request->mSettings.find(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER);
- if (aePrecaptureTrigger.count > 0 &&
- aePrecaptureTrigger.data.u8[0] == ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_CANCEL) {
- // Always override CANCEL to IDLE
- uint8_t aePrecaptureTrigger = ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE;
- request->mSettings.update(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, &aePrecaptureTrigger, 1);
- request->mAeTriggerCancelOverride.applyAePrecaptureTrigger = true;
- request->mAeTriggerCancelOverride.aePrecaptureTrigger =
- ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_CANCEL;
-
- if (mAeLockAvailable == true) {
- camera_metadata_entry_t aeLock = request->mSettings.find(ANDROID_CONTROL_AE_LOCK);
- if (aeLock.count == 0 || aeLock.data.u8[0] == ANDROID_CONTROL_AE_LOCK_OFF) {
- uint8_t aeLock = ANDROID_CONTROL_AE_LOCK_ON;
- request->mSettings.update(ANDROID_CONTROL_AE_LOCK, &aeLock, 1);
- request->mAeTriggerCancelOverride.applyAeLock = true;
- request->mAeTriggerCancelOverride.aeLock = ANDROID_CONTROL_AE_LOCK_OFF;
- }
- }
- }
-}
-
-/**
- * Override result metadata for cancelling AE precapture trigger applied in
- * handleAePrecaptureCancelRequest().
- */
-void Camera3Device::overrideResultForPrecaptureCancel(
- CameraMetadata *result, const AeTriggerCancelOverride_t &aeTriggerCancelOverride) {
- if (aeTriggerCancelOverride.applyAeLock) {
- // Only devices <= v3.2 should have this override
- assert(mDeviceVersion <= CAMERA_DEVICE_API_VERSION_3_2);
- result->update(ANDROID_CONTROL_AE_LOCK, &aeTriggerCancelOverride.aeLock, 1);
- }
-
- if (aeTriggerCancelOverride.applyAePrecaptureTrigger) {
- // Only devices <= v3.2 should have this override
- assert(mDeviceVersion <= CAMERA_DEVICE_API_VERSION_3_2);
- result->update(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER,
- &aeTriggerCancelOverride.aePrecaptureTrigger, 1);
- }
+ mRequestLatency.log("ProcessCaptureRequest latency histogram");
+ mRequestLatency.reset();
}
void Camera3Device::RequestThread::checkAndStopRepeatingRequest() {
@@ -3096,6 +3671,128 @@
}
}
+bool Camera3Device::RequestThread::sendRequestsBatch() {
+ status_t res;
+ size_t batchSize = mNextRequests.size();
+ std::vector<camera3_capture_request_t*> requests(batchSize);
+ uint32_t numRequestProcessed = 0;
+ for (size_t i = 0; i < batchSize; i++) {
+ requests[i] = &mNextRequests.editItemAt(i).halRequest;
+ }
+
+ ATRACE_ASYNC_BEGIN("batch frame capture", mNextRequests[0].halRequest.frame_number);
+ res = mInterface->processBatchCaptureRequests(requests, &numRequestProcessed);
+
+ bool triggerRemoveFailed = false;
+ NextRequest& triggerFailedRequest = mNextRequests.editItemAt(0);
+ for (size_t i = 0; i < numRequestProcessed; i++) {
+ NextRequest& nextRequest = mNextRequests.editItemAt(i);
+ nextRequest.submitted = true;
+
+
+ // Update the latest request sent to HAL
+ if (nextRequest.halRequest.settings != NULL) { // Don't update if they were unchanged
+ Mutex::Autolock al(mLatestRequestMutex);
+
+ camera_metadata_t* cloned = clone_camera_metadata(nextRequest.halRequest.settings);
+ mLatestRequest.acquire(cloned);
+
+ sp<Camera3Device> parent = mParent.promote();
+ if (parent != NULL) {
+ parent->monitorMetadata(TagMonitor::REQUEST,
+ nextRequest.halRequest.frame_number,
+ 0, mLatestRequest);
+ }
+ }
+
+ if (nextRequest.halRequest.settings != NULL) {
+ nextRequest.captureRequest->mSettings.unlock(nextRequest.halRequest.settings);
+ }
+
+ if (!triggerRemoveFailed) {
+ // Remove any previously queued triggers (after unlock)
+ status_t removeTriggerRes = removeTriggers(mPrevRequest);
+ if (removeTriggerRes != OK) {
+ triggerRemoveFailed = true;
+ triggerFailedRequest = nextRequest;
+ }
+ }
+ }
+
+ if (triggerRemoveFailed) {
+ SET_ERR("RequestThread: Unable to remove triggers "
+ "(capture request %d, HAL device: %s (%d)",
+ triggerFailedRequest.halRequest.frame_number, strerror(-res), res);
+ cleanUpFailedRequests(/*sendRequestError*/ false);
+ return false;
+ }
+
+ if (res != OK) {
+ // Should only get a failure here for malformed requests or device-level
+ // errors, so consider all errors fatal. Bad metadata failures should
+ // come through notify.
+ SET_ERR("RequestThread: Unable to submit capture request %d to HAL device: %s (%d)",
+ mNextRequests[numRequestProcessed].halRequest.frame_number,
+ strerror(-res), res);
+ cleanUpFailedRequests(/*sendRequestError*/ false);
+ return false;
+ }
+ return true;
+}
+
+bool Camera3Device::RequestThread::sendRequestsOneByOne() {
+ status_t res;
+
+ for (auto& nextRequest : mNextRequests) {
+ // Submit request and block until ready for next one
+ ATRACE_ASYNC_BEGIN("frame capture", nextRequest.halRequest.frame_number);
+ res = mInterface->processCaptureRequest(&nextRequest.halRequest);
+
+ if (res != OK) {
+ // Should only get a failure here for malformed requests or device-level
+ // errors, so consider all errors fatal. Bad metadata failures should
+ // come through notify.
+ SET_ERR("RequestThread: Unable to submit capture request %d to HAL"
+ " device: %s (%d)", nextRequest.halRequest.frame_number, strerror(-res),
+ res);
+ cleanUpFailedRequests(/*sendRequestError*/ false);
+ return false;
+ }
+
+        // Mark that the request has been submitted successfully.
+ nextRequest.submitted = true;
+
+ // Update the latest request sent to HAL
+ if (nextRequest.halRequest.settings != NULL) { // Don't update if they were unchanged
+ Mutex::Autolock al(mLatestRequestMutex);
+
+ camera_metadata_t* cloned = clone_camera_metadata(nextRequest.halRequest.settings);
+ mLatestRequest.acquire(cloned);
+
+ sp<Camera3Device> parent = mParent.promote();
+ if (parent != NULL) {
+ parent->monitorMetadata(TagMonitor::REQUEST, nextRequest.halRequest.frame_number,
+ 0, mLatestRequest);
+ }
+ }
+
+ if (nextRequest.halRequest.settings != NULL) {
+ nextRequest.captureRequest->mSettings.unlock(nextRequest.halRequest.settings);
+ }
+
+ // Remove any previously queued triggers (after unlock)
+ res = removeTriggers(mPrevRequest);
+ if (res != OK) {
+ SET_ERR("RequestThread: Unable to remove triggers "
+ "(capture request %d, HAL device: %s (%d)",
+ nextRequest.halRequest.frame_number, strerror(-res), res);
+ cleanUpFailedRequests(/*sendRequestError*/ false);
+ return false;
+ }
+ }
+ return true;
+}
+
bool Camera3Device::RequestThread::threadLoop() {
ATRACE_CALL();
status_t res;
@@ -3158,61 +3855,16 @@
ALOGVV("%s: %d: submitting %zu requests in a batch.", __FUNCTION__, __LINE__,
mNextRequests.size());
- for (auto& nextRequest : mNextRequests) {
- // Submit request and block until ready for next one
- ATRACE_ASYNC_BEGIN("frame capture", nextRequest.halRequest.frame_number);
- ATRACE_BEGIN("camera3->process_capture_request");
- res = mHal3Device->ops->process_capture_request(mHal3Device, &nextRequest.halRequest);
- ATRACE_END();
- if (res != OK) {
- // Should only get a failure here for malformed requests or device-level
- // errors, so consider all errors fatal. Bad metadata failures should
- // come through notify.
- SET_ERR("RequestThread: Unable to submit capture request %d to HAL"
- " device: %s (%d)", nextRequest.halRequest.frame_number, strerror(-res),
- res);
- cleanUpFailedRequests(/*sendRequestError*/ false);
- if (useFlushLock) {
- mFlushLock.unlock();
- }
- return false;
- }
-
- // Mark that the request has be submitted successfully.
- nextRequest.submitted = true;
-
- // Update the latest request sent to HAL
- if (nextRequest.halRequest.settings != NULL) { // Don't update if they were unchanged
- Mutex::Autolock al(mLatestRequestMutex);
-
- camera_metadata_t* cloned = clone_camera_metadata(nextRequest.halRequest.settings);
- mLatestRequest.acquire(cloned);
-
- sp<Camera3Device> parent = mParent.promote();
- if (parent != NULL) {
- parent->monitorMetadata(TagMonitor::REQUEST, nextRequest.halRequest.frame_number,
- 0, mLatestRequest);
- }
- }
-
- if (nextRequest.halRequest.settings != NULL) {
- nextRequest.captureRequest->mSettings.unlock(nextRequest.halRequest.settings);
- }
-
- // Remove any previously queued triggers (after unlock)
- res = removeTriggers(mPrevRequest);
- if (res != OK) {
- SET_ERR("RequestThread: Unable to remove triggers "
- "(capture request %d, HAL device: %s (%d)",
- nextRequest.halRequest.frame_number, strerror(-res), res);
- cleanUpFailedRequests(/*sendRequestError*/ false);
- if (useFlushLock) {
- mFlushLock.unlock();
- }
- return false;
- }
+ bool submitRequestSuccess = false;
+ nsecs_t tRequestStart = systemTime(SYSTEM_TIME_MONOTONIC);
+ if (mInterface->supportBatchRequest()) {
+ submitRequestSuccess = sendRequestsBatch();
+ } else {
+ submitRequestSuccess = sendRequestsOneByOne();
}
+ nsecs_t tRequestEnd = systemTime(SYSTEM_TIME_MONOTONIC);
+ mRequestLatency.add(tRequestStart, tRequestEnd);
if (useFlushLock) {
mFlushLock.unlock();
@@ -3224,13 +3876,14 @@
mNextRequests.clear();
}
- return true;
+ return submitRequestSuccess;
}
status_t Camera3Device::RequestThread::prepareHalRequests() {
ATRACE_CALL();
- for (auto& nextRequest : mNextRequests) {
+ for (size_t i = 0; i < mNextRequests.size(); i++) {
+ auto& nextRequest = mNextRequests.editItemAt(i);
sp<CaptureRequest> captureRequest = nextRequest.captureRequest;
camera3_capture_request_t* halRequest = &nextRequest.halRequest;
Vector<camera3_stream_buffer_t>* outputBuffers = &nextRequest.outputBuffers;
@@ -3307,8 +3960,8 @@
outputBuffers->insertAt(camera3_stream_buffer_t(), 0,
captureRequest->mOutputStreams.size());
halRequest->output_buffers = outputBuffers->array();
- for (size_t i = 0; i < captureRequest->mOutputStreams.size(); i++) {
- sp<Camera3OutputStreamInterface> outputStream = captureRequest->mOutputStreams.editItemAt(i);
+ for (size_t j = 0; j < captureRequest->mOutputStreams.size(); j++) {
+ sp<Camera3OutputStreamInterface> outputStream = captureRequest->mOutputStreams.editItemAt(j);
// Prepare video buffers for high speed recording on the first video request.
if (mPrepareVideoStream && outputStream->isVideoStream()) {
@@ -3326,7 +3979,8 @@
}
}
- res = outputStream->getBuffer(&outputBuffers->editItemAt(i));
+ res = outputStream->getBuffer(&outputBuffers->editItemAt(j),
+ captureRequest->mOutputSurfaces[j]);
if (res != OK) {
// Can't get output buffer from gralloc queue - this could be due to
// abandoned queue or other consumer misbehavior, so not a fatal
@@ -3337,6 +3991,7 @@
return TIMED_OUT;
}
halRequest->num_output_buffers++;
+
}
totalNumBuffers += halRequest->num_output_buffers;
@@ -3347,10 +4002,18 @@
CLOGE("RequestThread: Parent is gone");
return INVALID_OPERATION;
}
+
+ // If this request list is for constrained high speed recording (not
+ // preview), and the current request is not the last one in the batch,
+ // do not send callback to the app.
+ bool hasCallback = true;
+ if (mNextRequests[0].captureRequest->mBatchSize > 1 && i != mNextRequests.size()-1) {
+ hasCallback = false;
+ }
res = parent->registerInFlight(halRequest->frame_number,
totalNumBuffers, captureRequest->mResultExtras,
/*hasInput*/halRequest->input_buffer != NULL,
- captureRequest->mAeTriggerCancelOverride);
+ hasCallback);
ALOGVV("%s: registered in flight requestId = %" PRId32 ", frameNumber = %" PRId64
", burstId = %" PRId32 ".",
__FUNCTION__,
@@ -3429,6 +4092,13 @@
}
for (size_t i = 0; i < halRequest->num_output_buffers; i++) {
+ //Buffers that failed processing could still have
+ //valid acquire fence.
+ int acquireFence = (*outputBuffers)[i].acquire_fence;
+ if (0 <= acquireFence) {
+ close(acquireFence);
+ outputBuffers->editItemAt(i).acquire_fence = -1;
+ }
outputBuffers->editItemAt(i).status = CAMERA3_BUFFER_STATUS_ERROR;
captureRequest->mOutputStreams.editItemAt(i)->returnBuffer((*outputBuffers)[i], 0);
}
@@ -3553,6 +4223,12 @@
mRequestQueue.begin();
nextRequest = *firstRequest;
mRequestQueue.erase(firstRequest);
+ if (mRequestQueue.empty() && !nextRequest->mRepeating) {
+ sp<NotificationListener> listener = mListener.promote();
+ if (listener != NULL) {
+ listener->notifyRequestQueueEmpty();
+ }
+ }
}
// In case we've been unpaused by setPaused clearing mDoPause, need to
@@ -3602,8 +4278,6 @@
}
}
- handleAePrecaptureCancelRequest(nextRequest);
-
return nextRequest;
}
@@ -3689,9 +4363,7 @@
request->mResultExtras.afTriggerId = triggerId;
mCurrentAfTriggerId = triggerId;
}
- if (parent->mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
- continue; // Trigger ID tag is deprecated since device HAL 3.2
- }
+ continue;
}
camera_metadata_entry entry = metadata.find(tag);
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 87c43f3..bfb58c6 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -17,6 +17,9 @@
#ifndef ANDROID_SERVERS_CAMERA3DEVICE_H
#define ANDROID_SERVERS_CAMERA3DEVICE_H
+#include <utility>
+#include <unordered_map>
+
#include <utils/Condition.h>
#include <utils/Errors.h>
#include <utils/List.h>
@@ -24,13 +27,21 @@
#include <utils/Thread.h>
#include <utils/KeyedVector.h>
#include <utils/Timers.h>
+
+#include <android/hardware/camera/device/3.2/ICameraDevice.h>
+#include <android/hardware/camera/device/3.2/ICameraDeviceSession.h>
+#include <android/hardware/camera/device/3.2/ICameraDeviceCallback.h>
+#include <fmq/MessageQueue.h>
#include <hardware/camera3.h>
+
#include <camera/CaptureResult.h>
#include "common/CameraDeviceBase.h"
#include "device3/StatusTracker.h"
#include "device3/Camera3BufferManager.h"
#include "utils/TagMonitor.h"
+#include "utils/LatencyHistogram.h"
+#include <camera_metadata_hidden.h>
/**
* Function pointer types with C calling convention to
@@ -55,17 +66,18 @@
class Camera3OutputStreamInterface;
class Camera3StreamInterface;
-}
+} // namespace camera3
/**
* CameraDevice for HAL devices with version CAMERA_DEVICE_API_VERSION_3_0 or higher.
*/
class Camera3Device :
public CameraDeviceBase,
+ virtual public hardware::camera::device::V3_2::ICameraDeviceCallback,
private camera3_callback_ops {
public:
- explicit Camera3Device(int id);
+ explicit Camera3Device(const String8& id);
virtual ~Camera3Device();
@@ -73,91 +85,88 @@
* CameraDeviceBase interface
*/
- virtual int getId() const;
+ const String8& getId() const override;
// Transitions to idle state on success.
- virtual status_t initialize(CameraModule *module);
- virtual status_t disconnect();
- virtual status_t dump(int fd, const Vector<String16> &args);
- virtual const CameraMetadata& info() const;
+ status_t initialize(sp<CameraProviderManager> manager) override;
+ status_t disconnect() override;
+ status_t dump(int fd, const Vector<String16> &args) override;
+ const CameraMetadata& info() const override;
// Capture and setStreamingRequest will configure streams if currently in
// idle state
- virtual status_t capture(CameraMetadata &request, int64_t *lastFrameNumber = NULL);
- virtual status_t captureList(const List<const CameraMetadata> &requests,
- int64_t *lastFrameNumber = NULL);
- virtual status_t setStreamingRequest(const CameraMetadata &request,
- int64_t *lastFrameNumber = NULL);
- virtual status_t setStreamingRequestList(const List<const CameraMetadata> &requests,
- int64_t *lastFrameNumber = NULL);
- virtual status_t clearStreamingRequest(int64_t *lastFrameNumber = NULL);
+ status_t capture(CameraMetadata &request, int64_t *lastFrameNumber = NULL) override;
+ status_t captureList(const List<const CameraMetadata> &requests,
+ const std::list<const SurfaceMap> &surfaceMaps,
+ int64_t *lastFrameNumber = NULL) override;
+ status_t setStreamingRequest(const CameraMetadata &request,
+ int64_t *lastFrameNumber = NULL) override;
+ status_t setStreamingRequestList(const List<const CameraMetadata> &requests,
+ const std::list<const SurfaceMap> &surfaceMaps,
+ int64_t *lastFrameNumber = NULL) override;
+ status_t clearStreamingRequest(int64_t *lastFrameNumber = NULL) override;
- virtual status_t waitUntilRequestReceived(int32_t requestId, nsecs_t timeout);
+ status_t waitUntilRequestReceived(int32_t requestId, nsecs_t timeout) override;
// Actual stream creation/deletion is delayed until first request is submitted
// If adding streams while actively capturing, will pause device before adding
// stream, reconfiguring device, and unpausing. If the client create a stream
- // with nullptr consumer surface, the client must then call setConsumer()
+ // with nullptr consumer surface, the client must then call setConsumers()
// and finish the stream configuration before starting output streaming.
- virtual status_t createStream(sp<Surface> consumer,
+ status_t createStream(sp<Surface> consumer,
uint32_t width, uint32_t height, int format,
android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
- uint32_t consumerUsage = 0);
- virtual status_t createInputStream(
+ bool isShared = false, uint32_t consumerUsage = 0) override;
+ status_t createStream(const std::vector<sp<Surface>>& consumers,
+ bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+ android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
+ int streamSetId = camera3::CAMERA3_STREAM_SET_ID_INVALID,
+ bool isShared = false, uint32_t consumerUsage = 0) override;
+
+ status_t createInputStream(
uint32_t width, uint32_t height, int format,
- int *id);
- virtual status_t createZslStream(
- uint32_t width, uint32_t height,
- int depth,
- /*out*/
- int *id,
- sp<camera3::Camera3ZslStream>* zslStream);
- virtual status_t createReprocessStreamFromStream(int outputId, int *id);
+ int *id) override;
- virtual status_t getStreamInfo(int id,
+ status_t getStreamInfo(int id,
uint32_t *width, uint32_t *height,
- uint32_t *format, android_dataspace *dataSpace);
- virtual status_t setStreamTransform(int id, int transform);
+ uint32_t *format, android_dataspace *dataSpace) override;
+ status_t setStreamTransform(int id, int transform) override;
- virtual status_t deleteStream(int id);
- virtual status_t deleteReprocessStream(int id);
+ status_t deleteStream(int id) override;
- virtual status_t configureStreams(bool isConstraiedHighSpeed = false);
- virtual status_t getInputBufferProducer(
- sp<IGraphicBufferProducer> *producer);
+ status_t configureStreams(int operatingMode =
+ static_cast<int>(hardware::camera::device::V3_2::StreamConfigurationMode::NORMAL_MODE))
+ override;
+ status_t getInputBufferProducer(
+ sp<IGraphicBufferProducer> *producer) override;
- virtual status_t createDefaultRequest(int templateId, CameraMetadata *request);
+ status_t createDefaultRequest(int templateId, CameraMetadata *request) override;
// Transitions to the idle state on success
- virtual status_t waitUntilDrained();
+ status_t waitUntilDrained() override;
- virtual status_t setNotifyCallback(wp<NotificationListener> listener);
- virtual bool willNotify3A();
- virtual status_t waitForNextFrame(nsecs_t timeout);
- virtual status_t getNextResult(CaptureResult *frame);
+ status_t setNotifyCallback(wp<NotificationListener> listener) override;
+ bool willNotify3A() override;
+ status_t waitForNextFrame(nsecs_t timeout) override;
+ status_t getNextResult(CaptureResult *frame) override;
- virtual status_t triggerAutofocus(uint32_t id);
- virtual status_t triggerCancelAutofocus(uint32_t id);
- virtual status_t triggerPrecaptureMetering(uint32_t id);
+ status_t triggerAutofocus(uint32_t id) override;
+ status_t triggerCancelAutofocus(uint32_t id) override;
+ status_t triggerPrecaptureMetering(uint32_t id) override;
- virtual status_t pushReprocessBuffer(int reprocessStreamId,
- buffer_handle_t *buffer, wp<BufferReleasedListener> listener);
+ status_t flush(int64_t *lastFrameNumber = NULL) override;
- virtual status_t flush(int64_t *lastFrameNumber = NULL);
+ status_t prepare(int streamId) override;
- virtual status_t prepare(int streamId);
+ status_t tearDown(int streamId) override;
- virtual status_t tearDown(int streamId);
+ status_t addBufferListenerForStream(int streamId,
+ wp<camera3::Camera3StreamBufferListener> listener) override;
- virtual status_t addBufferListenerForStream(int streamId,
- wp<camera3::Camera3StreamBufferListener> listener);
+ status_t prepare(int maxCount, int streamId) override;
- virtual status_t prepare(int maxCount, int streamId);
-
- virtual uint32_t getDeviceVersion();
-
- virtual ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const;
+ ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const override;
ssize_t getPointCloudBufferSize() const;
ssize_t getRawOpaqueBufferSize(int32_t width, int32_t height) const;
@@ -165,12 +174,17 @@
void notifyStatus(bool idle); // updates from StatusTracker
/**
- * Set the deferred consumer surface to the output stream and finish the deferred
+ * Set the deferred consumer surfaces to the output stream and finish the deferred
* consumer configuration.
*/
- virtual status_t setConsumerSurface(int streamId, sp<Surface> consumer);
+ status_t setConsumerSurfaces(int streamId, const std::vector<sp<Surface>>& consumers) override;
private:
+
+ // internal typedefs
+ using RequestMetadataQueue = hardware::MessageQueue<uint8_t, hardware::kSynchronizedReadWrite>;
+ using ResultMetadataQueue = hardware::MessageQueue<uint8_t, hardware::kSynchronizedReadWrite>;
+
static const size_t kDumpLockAttempts = 10;
static const size_t kDumpSleepDuration = 100000; // 0.10 sec
static const nsecs_t kShutdownTimeout = 5000000000; // 5 sec
@@ -198,25 +212,133 @@
Mutex mLock;
// Camera device ID
- const int mId;
+ const String8 mId;
+
+ // Current stream configuration mode;
+ int mOperatingMode;
+ // Constant to use for no set operating mode
+ static const int NO_MODE = -1;
// Flag indicating is the current active stream configuration is constrained high speed.
bool mIsConstrainedHighSpeedConfiguration;
+ // FMQ to write result on. Must be guarded by mProcessCaptureResultLock.
+ std::unique_ptr<ResultMetadataQueue> mResultMetadataQueue;
+
/**** Scope for mLock ****/
- camera3_device_t *mHal3Device;
+ /**
+ * Adapter for legacy HAL / HIDL HAL interface calls; calls either into legacy HALv3 or the
+ * HIDL HALv3 interfaces.
+ */
+ class HalInterface : public camera3::Camera3StreamBufferFreedListener {
+ public:
+ HalInterface(sp<hardware::camera::device::V3_2::ICameraDeviceSession> &session,
+ std::shared_ptr<RequestMetadataQueue> queue);
+ HalInterface(const HalInterface &other);
+ HalInterface();
+
+ // Returns true if constructed with a valid device or session, and not yet cleared
+ bool valid();
+
+ // Reset this HalInterface object (does not call close())
+ void clear();
+
+ // Check if HalInterface support sending requests in batch
+ bool supportBatchRequest();
+
+ // Calls into the HAL interface
+
+ // Caller takes ownership of requestTemplate
+ status_t constructDefaultRequestSettings(camera3_request_template_t templateId,
+ /*out*/ camera_metadata_t **requestTemplate);
+ status_t configureStreams(/*inout*/ camera3_stream_configuration *config);
+ status_t processCaptureRequest(camera3_capture_request_t *request);
+ status_t processBatchCaptureRequests(
+ std::vector<camera3_capture_request_t*>& requests,
+ /*out*/uint32_t* numRequestProcessed);
+ status_t flush();
+ status_t dump(int fd);
+ status_t close();
+
+ // Find a buffer_handle_t based on frame number and stream ID
+ status_t popInflightBuffer(int32_t frameNumber, int32_t streamId,
+ /*out*/ buffer_handle_t **buffer);
+
+ private:
+ camera3_device_t *mHal3Device;
+ sp<hardware::camera::device::V3_2::ICameraDeviceSession> mHidlSession;
+ std::shared_ptr<RequestMetadataQueue> mRequestMetadataQueue;
+
+ std::mutex mInflightLock;
+
+ // The output HIDL request still depends on input camera3_capture_request_t
+ // Do not free input camera3_capture_request_t before output HIDL request
+ void wrapAsHidlRequest(camera3_capture_request_t* in,
+ /*out*/hardware::camera::device::V3_2::CaptureRequest* out,
+ /*out*/std::vector<native_handle_t*>* handlesCreated);
+
+ status_t pushInflightBufferLocked(int32_t frameNumber, int32_t streamId,
+ buffer_handle_t *buffer, int acquireFence);
+ // Cache of buffer handles keyed off (frameNumber << 32 | streamId)
+ // value is a pair of (buffer_handle_t*, acquire_fence FD)
+ std::unordered_map<uint64_t, std::pair<buffer_handle_t*, int>> mInflightBufferMap;
+
+ struct BufferHasher {
+ size_t operator()(const buffer_handle_t& buf) const {
+ if (buf == nullptr)
+ return 0;
+
+ size_t result = 1;
+ result = 31 * result + buf->numFds;
+ for (int i = 0; i < buf->numFds; i++) {
+ result = 31 * result + buf->data[i];
+ }
+ return result;
+ }
+ };
+
+ struct BufferComparator {
+ bool operator()(const buffer_handle_t& buf1, const buffer_handle_t& buf2) const {
+ if (buf1->numFds == buf2->numFds) {
+ for (int i = 0; i < buf1->numFds; i++) {
+ if (buf1->data[i] != buf2->data[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+ return false;
+ }
+ };
+
+ std::mutex mBufferIdMapLock; // protecting mBufferIdMaps and mNextBufferId
+ typedef std::unordered_map<const buffer_handle_t, uint64_t,
+ BufferHasher, BufferComparator> BufferIdMap;
+ // stream ID -> per stream buffer ID map
+ std::unordered_map<int, BufferIdMap> mBufferIdMaps;
+ uint64_t mNextBufferId = 1; // 0 means no buffer
+ static const uint64_t BUFFER_ID_NO_BUFFER = 0;
+
+ // method to extract buffer's unique ID
+ // TODO: we should switch to use gralloc mapper's getBackingStore API
+        // once we run in binderized gralloc mode, but before that is ready,
+ // we need to rely on the conventional buffer queue behavior where
+ // buffer_handle_t's FD won't change.
+ // return pair of (newlySeenBuffer?, bufferId)
+ std::pair<bool, uint64_t> getBufferId(const buffer_handle_t& buf, int streamId);
+
+ virtual void onBufferFreed(int streamId, const native_handle_t* handle) override;
+
+ std::vector<std::pair<int, uint64_t>> mFreedBuffers;
+ };
+
+ std::unique_ptr<HalInterface> mInterface;
CameraMetadata mDeviceInfo;
CameraMetadata mRequestTemplateCache[CAMERA3_TEMPLATE_COUNT];
- uint32_t mDeviceVersion;
-
- // whether Camera3Device should derive ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST for
- // backward compatibility. Should not be changed after initialization.
- bool mDerivePostRawSensKey = false;
-
struct Size {
uint32_t width;
uint32_t height;
@@ -274,13 +396,6 @@
// words, camera device shouldn't be open during CPU suspend.
nsecs_t mTimestampOffset;
- typedef struct AeTriggerCancelOverride {
- bool applyAeLock;
- uint8_t aeLock;
- bool applyAePrecaptureTrigger;
- uint8_t aePrecaptureTrigger;
- } AeTriggerCancelOverride_t;
-
class CaptureRequest : public LightRefBase<CaptureRequest> {
public:
CameraMetadata mSettings;
@@ -288,15 +403,15 @@
camera3_stream_buffer_t mInputBuffer;
Vector<sp<camera3::Camera3OutputStreamInterface> >
mOutputStreams;
+ SurfaceMap mOutputSurfaces;
CaptureResultExtras mResultExtras;
- // Used to cancel AE precapture trigger for devices doesn't support
- // CONTROL_AE_PRECAPTURE_TRIGGER_CANCEL
- AeTriggerCancelOverride_t mAeTriggerCancelOverride;
// The number of requests that should be submitted to HAL at a time.
// For example, if batch size is 8, this request and the following 7
// requests will be submitted to HAL at a time. The batch size for
// the following 7 requests will be ignored by the request thread.
int mBatchSize;
+        // Whether this request is from a repeating request or repeating burst.
+ bool mRepeating;
};
typedef List<sp<CaptureRequest> > RequestList;
@@ -304,16 +419,52 @@
status_t convertMetadataListToRequestListLocked(
const List<const CameraMetadata> &metadataList,
+ const std::list<const SurfaceMap> &surfaceMaps,
+ bool repeating,
/*out*/
RequestList *requestList);
- status_t submitRequestsHelper(const List<const CameraMetadata> &requests, bool repeating,
+ void convertToRequestList(List<const CameraMetadata>& requests,
+ std::list<const SurfaceMap>& surfaceMaps,
+ const CameraMetadata& request);
+
+ status_t submitRequestsHelper(const List<const CameraMetadata> &requests,
+ const std::list<const SurfaceMap> &surfaceMaps,
+ bool repeating,
int64_t *lastFrameNumber = NULL);
+
+ /**
+ * Implementation of android::hardware::camera::device::V3_2::ICameraDeviceCallback
+ */
+
+ hardware::Return<void> processCaptureResult(
+ const hardware::hidl_vec<
+ hardware::camera::device::V3_2::CaptureResult>& results) override;
+ hardware::Return<void> notify(
+ const hardware::hidl_vec<
+ hardware::camera::device::V3_2::NotifyMsg>& msgs) override;
+
+ // Handle one capture result. Assume that mProcessCaptureResultLock is held.
+ void processOneCaptureResultLocked(
+ const hardware::camera::device::V3_2::CaptureResult& results);
+ // Handle one notify message
+ void notify(const hardware::camera::device::V3_2::NotifyMsg& msg);
+
+ // lock to ensure only one processCaptureResult is called at a time.
+ Mutex mProcessCaptureResultLock;
+
+ /**
+ * Common initialization code shared by both HAL paths
+ *
+ * Must be called with mLock and mInterfaceLock held.
+ */
+ status_t initializeCommonLocked();
+
/**
* Get the last request submitted to the hal by the request thread.
*
- * Takes mLock.
+ * Must be called with mLock held.
*/
virtual CameraMetadata getLatestRequestLocked();
@@ -362,19 +513,21 @@
* Do common work for setting up a streaming or single capture request.
* On success, will transition to ACTIVE if in IDLE.
*/
- sp<CaptureRequest> setUpRequestLocked(const CameraMetadata &request);
+ sp<CaptureRequest> setUpRequestLocked(const CameraMetadata &request,
+ const SurfaceMap &surfaceMap);
/**
* Build a CaptureRequest request from the CameraDeviceBase request
* settings.
*/
- sp<CaptureRequest> createCaptureRequest(const CameraMetadata &request);
+ sp<CaptureRequest> createCaptureRequest(const CameraMetadata &request,
+ const SurfaceMap &surfaceMap);
/**
* Take the currently-defined set of streams and configure the HAL to use
* them. This is a long-running operation (may be several hundered ms).
*/
- status_t configureStreamsLocked();
+ status_t configureStreamsLocked(int operatingMode);
/**
* Cancel stream configuration that did not finish successfully.
@@ -427,9 +580,23 @@
static nsecs_t getMonoToBoottimeOffset();
/**
- * Helper function to map between legacy and new dataspace enums
+ * Helper functions to map between framework and HIDL values
*/
- static android_dataspace mapToLegacyDataspace(android_dataspace dataSpace);
+ static hardware::graphics::common::V1_0::PixelFormat mapToPixelFormat(int frameworkFormat);
+ static hardware::camera::device::V3_2::DataspaceFlags mapToHidlDataspace(
+ android_dataspace dataSpace);
+ static hardware::camera::device::V3_2::BufferUsageFlags mapToConsumerUsage(uint32_t usage);
+ static hardware::camera::device::V3_2::StreamRotation mapToStreamRotation(
+ camera3_stream_rotation_t rotation);
+ // Returns a negative error code if the passed-in operation mode is not valid.
+ static status_t mapToStreamConfigurationMode(camera3_stream_configuration_mode_t operationMode,
+ /*out*/ hardware::camera::device::V3_2::StreamConfigurationMode *mode);
+ static camera3_buffer_status_t mapHidlBufferStatus(hardware::camera::device::V3_2::BufferStatus status);
+ static int mapToFrameworkFormat(hardware::graphics::common::V1_0::PixelFormat pixelFormat);
+ static uint32_t mapConsumerToFrameworkUsage(
+ hardware::camera::device::V3_2::BufferUsageFlags usage);
+ static uint32_t mapProducerToFrameworkUsage(
+ hardware::camera::device::V3_2::BufferUsageFlags usage);
struct RequestTrigger {
// Metadata tag number, e.g. android.control.aePrecaptureTrigger
@@ -457,8 +624,8 @@
RequestThread(wp<Camera3Device> parent,
sp<camera3::StatusTracker> statusTracker,
- camera3_device_t *hal3Device,
- bool aeLockAvailable);
+ HalInterface* interface);
+ ~RequestThread();
void setNotificationListener(wp<NotificationListener> listener);
@@ -533,12 +700,17 @@
*/
bool isStreamPending(sp<camera3::Camera3StreamInterface>& stream);
+ // dump processCaptureRequest latency
+ void dumpCaptureRequestLatency(int fd, const char* name) {
+ mRequestLatency.dump(fd, name);
+ }
+
protected:
virtual bool threadLoop();
private:
- static int getId(const wp<Camera3Device> &device);
+ static const String8& getId(const wp<Camera3Device> &device);
status_t queueTriggerLocked(RequestTrigger trigger);
// Mix-in queued triggers into this request
@@ -591,19 +763,22 @@
// If the input request is in mRepeatingRequests. Must be called with mRequestLock hold
bool isRepeatingRequestLocked(const sp<CaptureRequest>&);
- // Handle AE precapture trigger cancel for devices <= CAMERA_DEVICE_API_VERSION_3_2.
- void handleAePrecaptureCancelRequest(const sp<CaptureRequest>& request);
-
// Clear repeating requests. Must be called with mRequestLock held.
status_t clearRepeatingRequestsLocked(/*out*/ int64_t *lastFrameNumber = NULL);
+        // Send the requests in mNextRequests to HAL one by one. Return true = success
+ bool sendRequestsOneByOne();
+
+        // Send the requests in mNextRequests to HAL in a batch. Return true = success
+ bool sendRequestsBatch();
+
wp<Camera3Device> mParent;
wp<camera3::StatusTracker> mStatusTracker;
- camera3_device_t *mHal3Device;
+ HalInterface* mInterface;
wp<NotificationListener> mListener;
- const int mId; // The camera ID
+ const String8& mId; // The camera ID
int mStatusId; // The RequestThread's component ID for
// status tracking
@@ -649,11 +824,11 @@
int64_t mRepeatingLastFrameNumber;
- // Whether the device supports AE lock
- bool mAeLockAvailable;
-
// Flag indicating if we should prepare video stream for video requests.
bool mPrepareVideoStream;
+
+ static const int32_t kRequestLatencyBinSize = 40; // in ms
+ CameraLatencyHistogram mRequestLatency;
};
sp<RequestThread> mRequestThread;
@@ -692,9 +867,11 @@
// the shutter event.
Vector<camera3_stream_buffer_t> pendingOutputBuffers;
- // Used to cancel AE precapture trigger for devices doesn't support
- // CONTROL_AE_PRECAPTURE_TRIGGER_CANCEL
- AeTriggerCancelOverride_t aeTriggerCancelOverride;
+ // Whether this inflight request's shutter and result callback are to be
+ // called. The policy is that if the request is the last one in the constrained
+ // high speed recording request list, this flag will be true. If the request list
+ // is not for constrained high speed recording, this flag will also be true.
+ bool hasCallback;
// Default constructor needed by KeyedVector
InFlightRequest() :
@@ -704,11 +881,11 @@
haveResultMetadata(false),
numBuffersLeft(0),
hasInputBuffer(false),
- aeTriggerCancelOverride({false, 0, false, 0}){
+ hasCallback(true) {
}
InFlightRequest(int numBuffers, CaptureResultExtras extras, bool hasInput,
- AeTriggerCancelOverride aeTriggerCancelOverride) :
+ bool hasAppCallback) :
shutterTimestamp(0),
sensorTimestamp(0),
requestStatus(OK),
@@ -716,7 +893,7 @@
numBuffersLeft(numBuffers),
resultExtras(extras),
hasInputBuffer(hasInput),
- aeTriggerCancelOverride(aeTriggerCancelOverride){
+ hasCallback(hasAppCallback) {
}
};
@@ -729,14 +906,7 @@
status_t registerInFlight(uint32_t frameNumber,
int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput,
- const AeTriggerCancelOverride_t &aeTriggerCancelOverride);
-
- /**
- * Override result metadata for cancelling AE precapture trigger applied in
- * handleAePrecaptureCancelRequest().
- */
- void overrideResultForPrecaptureCancel(CameraMetadata* result,
- const AeTriggerCancelOverride_t &aeTriggerCancelOverride);
+ bool callback);
/**
* Tracking for idle detection
@@ -831,21 +1001,19 @@
// Send a partial capture result.
void sendPartialCaptureResult(const camera_metadata_t * partialResult,
- const CaptureResultExtras &resultExtras, uint32_t frameNumber,
- const AeTriggerCancelOverride_t &aeTriggerCancelOverride);
+ const CaptureResultExtras &resultExtras, uint32_t frameNumber);
// Send a total capture result given the pending metadata and result extras,
// partial results, and the frame number to the result queue.
void sendCaptureResult(CameraMetadata &pendingMetadata,
CaptureResultExtras &resultExtras,
CameraMetadata &collectedPartialResult, uint32_t frameNumber,
- bool reprocess, const AeTriggerCancelOverride_t &aeTriggerCancelOverride);
+ bool reprocess);
// Insert the result to the result queue after updating frame number and overriding AE
// trigger cancel.
// mOutputLock must be held when calling this function.
- void insertResultLocked(CaptureResult *result, uint32_t frameNumber,
- const AeTriggerCancelOverride_t &aeTriggerCancelOverride);
+ void insertResultLocked(CaptureResult *result, uint32_t frameNumber);
/**** Scope for mInFlightLock ****/
@@ -868,6 +1036,8 @@
void monitorMetadata(TagMonitor::eventSource source, int64_t frameNumber,
nsecs_t timestamp, const CameraMetadata& metadata);
+ metadata_vendor_id_t mVendorTagId;
+
/**
* Static callback forwarding methods from HAL to instance
*/
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
index 5123785..9c951b7 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
@@ -36,7 +36,8 @@
}
-status_t Camera3DummyStream::getBufferLocked(camera3_stream_buffer *) {
+status_t Camera3DummyStream::getBufferLocked(camera3_stream_buffer *,
+ const std::vector<size_t>&) {
ATRACE_CALL();
ALOGE("%s: Stream %d: Dummy stream cannot produce buffers!", __FUNCTION__, mId);
return INVALID_OPERATION;
@@ -103,13 +104,13 @@
return false;
}
-bool Camera3DummyStream::isConsumerConfigurationDeferred() const {
+bool Camera3DummyStream::isConsumerConfigurationDeferred(size_t /*surface_id*/) const {
return false;
}
-status_t Camera3DummyStream::setConsumer(sp<Surface> consumer) {
- ALOGE("%s: Stream %d: Dummy stream doesn't support set consumer surface %p!",
- __FUNCTION__, mId, consumer.get());
+status_t Camera3DummyStream::setConsumers(const std::vector<sp<Surface>>& /*consumers*/) {
+ ALOGE("%s: Stream %d: Dummy stream doesn't support set consumer surface!",
+ __FUNCTION__, mId);
return INVALID_OPERATION;
}
}; // namespace camera3
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.h b/services/camera/libcameraservice/device3/Camera3DummyStream.h
index 18e8a23..35a6a18 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.h
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.h
@@ -64,12 +64,12 @@
/**
* Return if the consumer configuration of this stream is deferred.
*/
- virtual bool isConsumerConfigurationDeferred() const;
+ virtual bool isConsumerConfigurationDeferred(size_t surface_id) const;
/**
- * Set the consumer surface to the output stream.
+ * Set the consumer surfaces to the output stream.
*/
- virtual status_t setConsumer(sp<Surface> consumer);
+ virtual status_t setConsumers(const std::vector<sp<Surface>>& consumers);
protected:
@@ -99,7 +99,8 @@
/**
* Internal Camera3Stream interface
*/
- virtual status_t getBufferLocked(camera3_stream_buffer *buffer);
+ virtual status_t getBufferLocked(camera3_stream_buffer *buffer,
+ const std::vector<size_t>& surface_ids = std::vector<size_t>());
virtual status_t returnBufferLocked(
const camera3_stream_buffer &buffer,
nsecs_t timestamp);
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
index cb39244..7ad2300 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
@@ -85,6 +85,8 @@
lines.appendFormat(" Total buffers: %zu, currently dequeued: %zu\n",
mTotalBufferCount, mHandoutTotalBufferCount);
write(fd, lines.string(), lines.size());
+
+ Camera3Stream::dump(fd, args);
}
status_t Camera3IOStreamBase::configureQueueLocked() {
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
index f781ded..4eb15ad 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
@@ -85,6 +85,9 @@
/*releaseFence*/-1, CAMERA3_BUFFER_STATUS_OK, /*output*/false);
mBuffersInFlight.push_back(bufferItem);
+ mFrameCount++;
+ mLastTimestamp = bufferItem.mTimestamp;
+
return OK;
}
@@ -220,6 +223,7 @@
mHandoutTotalBufferCount = 0;
mFrameCount = 0;
+ mLastTimestamp = 0;
if (mConsumer.get() == 0) {
sp<IGraphicBufferProducer> producer;
@@ -259,6 +263,8 @@
mConsumer->setName(String8::format("Camera3-InputStream-%d", mId));
mProducer = producer;
+
+ mConsumer->setBufferFreedListener(this);
}
res = mConsumer->setDefaultBufferSize(camera3_stream::width,
@@ -284,6 +290,17 @@
return OK;
}
+void Camera3InputStream::onBufferFreed(const wp<GraphicBuffer>& gb) {
+ const sp<GraphicBuffer> buffer = gb.promote();
+ if (buffer != nullptr) {
+ if (mBufferFreedListener != nullptr) {
+ mBufferFreedListener->onBufferFreed(mId, buffer->handle);
+ }
+ } else {
+ ALOGE("%s: GraphicBuffer is freed before onBufferFreed callback finishes!", __FUNCTION__);
+ }
+}
+
}; // namespace camera3
}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.h b/services/camera/libcameraservice/device3/Camera3InputStream.h
index 9f3de10..8f5b431 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.h
@@ -34,7 +34,8 @@
* buffers by feeding them into the HAL, as well as releasing the buffers back
* the buffers once the HAL is done with them.
*/
-class Camera3InputStream : public Camera3IOStreamBase {
+class Camera3InputStream : public Camera3IOStreamBase,
+ public BufferItemConsumer::BufferFreedListener {
public:
/**
* Set up a stream for formats that have fixed size, such as RAW and YUV.
@@ -77,6 +78,11 @@
virtual status_t getEndpointUsage(uint32_t *usage) const;
+ /**
+ * BufferItemConsumer::BufferFreedListener interface
+ */
+ virtual void onBufferFreed(const wp<GraphicBuffer>&) override;
+
}; // class Camera3InputStream
}; // namespace camera3
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 7229929..e15aa43 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -43,7 +43,8 @@
mTraceFirstBuffer(true),
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
- mConsumerUsage(0) {
+ mConsumerUsage(0),
+ mDequeueBufferLatency(kDequeueLatencyBinSize) {
if (mConsumer == NULL) {
ALOGE("%s: Consumer is NULL!", __FUNCTION__);
@@ -68,7 +69,8 @@
mUseMonoTimestamp(false),
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
- mConsumerUsage(0) {
+ mConsumerUsage(0),
+ mDequeueBufferLatency(kDequeueLatencyBinSize) {
if (format != HAL_PIXEL_FORMAT_BLOB && format != HAL_PIXEL_FORMAT_RAW_OPAQUE) {
ALOGE("%s: Bad format for size-only stream: %d", __FUNCTION__,
@@ -97,7 +99,8 @@
mTraceFirstBuffer(true),
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
- mConsumerUsage(consumerUsage) {
+ mConsumerUsage(consumerUsage),
+ mDequeueBufferLatency(kDequeueLatencyBinSize) {
// Deferred consumer only support preview surface format now.
if (format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
ALOGE("%s: Deferred consumer only supports IMPLEMENTATION_DEFINED format now!",
@@ -124,6 +127,7 @@
int format,
android_dataspace dataSpace,
camera3_stream_rotation_t rotation,
+ uint32_t consumerUsage, nsecs_t timestampOffset,
int setId) :
Camera3IOStreamBase(id, type, width, height,
/*maxSize*/0,
@@ -132,7 +136,9 @@
mTraceFirstBuffer(true),
mUseMonoTimestamp(false),
mUseBufferManager(false),
- mConsumerUsage(0) {
+ mTimestampOffset(timestampOffset),
+ mConsumerUsage(consumerUsage),
+ mDequeueBufferLatency(kDequeueLatencyBinSize) {
if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
mBufferReleasedListener = new BufferReleasedListener(this);
@@ -146,73 +152,17 @@
disconnectLocked();
}
-status_t Camera3OutputStream::getBufferLocked(camera3_stream_buffer *buffer) {
+status_t Camera3OutputStream::getBufferLocked(camera3_stream_buffer *buffer,
+ const std::vector<size_t>&) {
ATRACE_CALL();
- status_t res;
-
- if ((res = getBufferPreconditionCheckLocked()) != OK) {
- return res;
- }
ANativeWindowBuffer* anb;
int fenceFd = -1;
- bool gotBufferFromManager = false;
- if (mUseBufferManager) {
- sp<GraphicBuffer> gb;
- res = mBufferManager->getBufferForStream(getId(), getStreamSetId(), &gb, &fenceFd);
- if (res == OK) {
- // Attach this buffer to the bufferQueue: the buffer will be in dequeue state after a
- // successful return.
- anb = gb.get();
- res = mConsumer->attachBuffer(anb);
- if (res != OK) {
- ALOGE("%s: Stream %d: Can't attach the output buffer to this surface: %s (%d)",
- __FUNCTION__, mId, strerror(-res), res);
- return res;
- }
- gotBufferFromManager = true;
- ALOGV("Stream %d: Attached new buffer", getId());
- } else if (res == ALREADY_EXISTS) {
- // Have sufficient free buffers already attached, can just
- // dequeue from buffer queue
- ALOGV("Stream %d: Reusing attached buffer", getId());
- gotBufferFromManager = false;
- } else if (res != OK) {
- ALOGE("%s: Stream %d: Can't get next output buffer from buffer manager: %s (%d)",
- __FUNCTION__, mId, strerror(-res), res);
- return res;
- }
- }
- if (!gotBufferFromManager) {
- /**
- * Release the lock briefly to avoid deadlock for below scenario:
- * Thread 1: StreamingProcessor::startStream -> Camera3Stream::isConfiguring().
- * This thread acquired StreamingProcessor lock and try to lock Camera3Stream lock.
- * Thread 2: Camera3Stream::returnBuffer->StreamingProcessor::onFrameAvailable().
- * This thread acquired Camera3Stream lock and bufferQueue lock, and try to lock
- * StreamingProcessor lock.
- * Thread 3: Camera3Stream::getBuffer(). This thread acquired Camera3Stream lock
- * and try to lock bufferQueue lock.
- * Then there is circular locking dependency.
- */
- sp<ANativeWindow> currentConsumer = mConsumer;
- mLock.unlock();
-
- res = currentConsumer->dequeueBuffer(currentConsumer.get(), &anb, &fenceFd);
- mLock.lock();
- if (res != OK) {
- ALOGE("%s: Stream %d: Can't dequeue next output buffer: %s (%d)",
- __FUNCTION__, mId, strerror(-res), res);
-
- // Only transition to STATE_ABANDONED from STATE_CONFIGURED. (If it is STATE_PREPARING,
- // let prepareNextBuffer handle the error.)
- if (res == NO_INIT && mState == STATE_CONFIGURED) {
- mState = STATE_ABANDONED;
- }
-
- return res;
- }
+ status_t res;
+ res = getBufferLockedCommon(&anb, &fenceFd);
+ if (res != OK) {
+ return res;
}
/**
@@ -225,6 +175,11 @@
return OK;
}
+status_t Camera3OutputStream::queueBufferToConsumer(sp<ANativeWindow>& consumer,
+ ANativeWindowBuffer* buffer, int anwReleaseFence) {
+ return consumer->queueBuffer(consumer.get(), buffer, anwReleaseFence);
+}
+
status_t Camera3OutputStream::returnBufferLocked(
const camera3_stream_buffer &buffer,
nsecs_t timestamp) {
@@ -237,6 +192,7 @@
}
mLastTimestamp = timestamp;
+ mFrameCount++;
return OK;
}
@@ -266,6 +222,7 @@
sp<ANativeWindow> currentConsumer = mConsumer;
mLock.unlock();
+ ANativeWindowBuffer *anwBuffer = container_of(buffer.buffer, ANativeWindowBuffer, handle);
/**
* Return buffer back to ANativeWindow
*/
@@ -273,13 +230,14 @@
// Cancel buffer
ALOGW("A frame is dropped for stream %d", mId);
res = currentConsumer->cancelBuffer(currentConsumer.get(),
- container_of(buffer.buffer, ANativeWindowBuffer, handle),
+ anwBuffer,
anwReleaseFence);
if (res != OK) {
ALOGE("%s: Stream %d: Error cancelling buffer to native window:"
" %s (%d)", __FUNCTION__, mId, strerror(-res), res);
}
+ notifyBufferReleased(anwBuffer);
if (mUseBufferManager) {
// Return this buffer back to buffer manager.
mBufferReleasedListener->onBufferReleased();
@@ -305,9 +263,7 @@
return res;
}
- res = currentConsumer->queueBuffer(currentConsumer.get(),
- container_of(buffer.buffer, ANativeWindowBuffer, handle),
- anwReleaseFence);
+ res = queueBufferToConsumer(currentConsumer, anwBuffer, anwReleaseFence);
if (res != OK) {
ALOGE("%s: Stream %d: Error queueing buffer to native window: "
"%s (%d)", __FUNCTION__, mId, strerror(-res), res);
@@ -338,6 +294,9 @@
write(fd, lines.string(), lines.size());
Camera3IOStreamBase::dump(fd, args);
+
+ mDequeueBufferLatency.dump(fd,
+ " DequeueBuffer latency histogram:");
}
status_t Camera3OutputStream::setTransform(int transform) {
@@ -373,11 +332,31 @@
return res;
}
+ if ((res = configureConsumerQueueLocked()) != OK) {
+ return res;
+ }
+
+ // Set dequeueBuffer/attachBuffer timeout if the consumer is not hw composer or hw texture.
+ // We need skip these cases as timeout will disable the non-blocking (async) mode.
+ if (!(isConsumedByHWComposer() || isConsumedByHWTexture())) {
+ mConsumer->setDequeueTimeout(kDequeueBufferTimeout);
+ }
+
+ return OK;
+}
+
+status_t Camera3OutputStream::configureConsumerQueueLocked() {
+ status_t res;
+
+ mTraceFirstBuffer = true;
+
ALOG_ASSERT(mConsumer != 0, "mConsumer should never be NULL");
// Configure consumer-side ANativeWindow interface. The listener may be used
// to notify buffer manager (if it is used) of the returned buffers.
- res = mConsumer->connect(NATIVE_WINDOW_API_CAMERA, /*listener*/mBufferReleasedListener);
+ res = mConsumer->connect(NATIVE_WINDOW_API_CAMERA,
+ /*listener*/mBufferReleasedListener,
+ /*reportBufferRemoval*/true);
if (res != OK) {
ALOGE("%s: Unable to connect to native window for stream %d",
__FUNCTION__, mId);
@@ -470,12 +449,7 @@
if (res != OK) {
ALOGE("%s: Unable to configure stream transform to %x: %s (%d)",
__FUNCTION__, mTransform, strerror(-res), res);
- }
-
- // Set dequeueBuffer/attachBuffer timeout if the consumer is not hw composer or hw texture.
- // We need skip these cases as timeout will disable the non-blocking (async) mode.
- if (!(isConsumedByHWComposer() || isConsumedByHWTexture())) {
- mConsumer->setDequeueTimeout(kDequeueBufferTimeout);
+ return res;
}
/**
@@ -511,6 +485,92 @@
return OK;
}
+status_t Camera3OutputStream::getBufferLockedCommon(ANativeWindowBuffer** anb, int* fenceFd) {
+ ATRACE_CALL();
+ status_t res;
+
+ if ((res = getBufferPreconditionCheckLocked()) != OK) {
+ return res;
+ }
+
+ bool gotBufferFromManager = false;
+
+ if (mUseBufferManager) {
+ sp<GraphicBuffer> gb;
+ res = mBufferManager->getBufferForStream(getId(), getStreamSetId(), &gb, fenceFd);
+ if (res == OK) {
+ // Attach this buffer to the bufferQueue: the buffer will be in dequeue state after a
+ // successful return.
+ *anb = gb.get();
+ res = mConsumer->attachBuffer(*anb);
+ if (res != OK) {
+ ALOGE("%s: Stream %d: Can't attach the output buffer to this surface: %s (%d)",
+ __FUNCTION__, mId, strerror(-res), res);
+ return res;
+ }
+ gotBufferFromManager = true;
+ ALOGV("Stream %d: Attached new buffer", getId());
+ } else if (res == ALREADY_EXISTS) {
+ // Have sufficient free buffers already attached, can just
+ // dequeue from buffer queue
+ ALOGV("Stream %d: Reusing attached buffer", getId());
+ gotBufferFromManager = false;
+ } else if (res != OK) {
+ ALOGE("%s: Stream %d: Can't get next output buffer from buffer manager: %s (%d)",
+ __FUNCTION__, mId, strerror(-res), res);
+ return res;
+ }
+ }
+ if (!gotBufferFromManager) {
+ /**
+ * Release the lock briefly to avoid deadlock for below scenario:
+ * Thread 1: StreamingProcessor::startStream -> Camera3Stream::isConfiguring().
+ * This thread acquired StreamingProcessor lock and try to lock Camera3Stream lock.
+ * Thread 2: Camera3Stream::returnBuffer->StreamingProcessor::onFrameAvailable().
+ * This thread acquired Camera3Stream lock and bufferQueue lock, and try to lock
+ * StreamingProcessor lock.
+ * Thread 3: Camera3Stream::getBuffer(). This thread acquired Camera3Stream lock
+ * and try to lock bufferQueue lock.
+ * Then there is circular locking dependency.
+ */
+ sp<ANativeWindow> currentConsumer = mConsumer;
+ mLock.unlock();
+
+ nsecs_t dequeueStart = systemTime(SYSTEM_TIME_MONOTONIC);
+ res = currentConsumer->dequeueBuffer(currentConsumer.get(), anb, fenceFd);
+ nsecs_t dequeueEnd = systemTime(SYSTEM_TIME_MONOTONIC);
+ mDequeueBufferLatency.add(dequeueStart, dequeueEnd);
+
+ mLock.lock();
+ if (res != OK) {
+ ALOGE("%s: Stream %d: Can't dequeue next output buffer: %s (%d)",
+ __FUNCTION__, mId, strerror(-res), res);
+
+ // Only transition to STATE_ABANDONED from STATE_CONFIGURED. (If it is STATE_PREPARING,
+ // let prepareNextBuffer handle the error.)
+ if (res == NO_INIT && mState == STATE_CONFIGURED) {
+ mState = STATE_ABANDONED;
+ }
+
+ return res;
+ }
+ }
+
+ if (res == OK) {
+ std::vector<sp<GraphicBuffer>> removedBuffers;
+ res = mConsumer->getAndFlushRemovedBuffers(&removedBuffers);
+ if (res == OK) {
+ onBuffersRemovedLocked(removedBuffers);
+
+ if (mUseBufferManager && removedBuffers.size() > 0) {
+ mBufferManager->onBuffersRemoved(getId(), getStreamSetId(), removedBuffers.size());
+ }
+ }
+ }
+
+ return res;
+}
+
status_t Camera3OutputStream::disconnectLocked() {
status_t res;
@@ -562,20 +622,33 @@
mState = (mState == STATE_IN_RECONFIG) ? STATE_IN_CONFIG
: STATE_CONSTRUCTED;
+
+ mDequeueBufferLatency.log("Stream %d dequeueBuffer latency histogram", mId);
+ mDequeueBufferLatency.reset();
return OK;
}
status_t Camera3OutputStream::getEndpointUsage(uint32_t *usage) const {
status_t res;
- int32_t u = 0;
+
if (mConsumer == nullptr) {
// mConsumerUsage was sanitized before the Camera3OutputStream was constructed.
*usage = mConsumerUsage;
return OK;
}
- res = static_cast<ANativeWindow*>(mConsumer.get())->query(mConsumer.get(),
+ res = getEndpointUsageForSurface(usage, mConsumer);
+
+ return res;
+}
+
+status_t Camera3OutputStream::getEndpointUsageForSurface(uint32_t *usage,
+ const sp<Surface>& surface) const {
+ status_t res;
+ int32_t u = 0;
+
+ res = static_cast<ANativeWindow*>(surface.get())->query(surface.get(),
NATIVE_WINDOW_CONSUMER_USAGE_BITS, &u);
// If an opaque output stream's endpoint is ImageReader, add
@@ -587,8 +660,8 @@
// 3. GRALLOC_USAGE_HW_COMPOSER
// 4. GRALLOC_USAGE_HW_VIDEO_ENCODER
if (camera3_stream::format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED &&
- (u & (GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_HW_COMPOSER |
- GRALLOC_USAGE_HW_VIDEO_ENCODER)) == 0) {
+ (u & (GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER |
+ GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_VIDEO_ENCODER)) == 0) {
u |= GRALLOC_USAGE_HW_CAMERA_ZSL;
}
@@ -632,18 +705,42 @@
}
ALOGV("Stream %d: Buffer released", stream->getId());
+ bool shouldFreeBuffer = false;
status_t res = stream->mBufferManager->onBufferReleased(
- stream->getId(), stream->getStreamSetId());
+ stream->getId(), stream->getStreamSetId(), &shouldFreeBuffer);
if (res != OK) {
ALOGE("%s: signaling buffer release to buffer manager failed: %s (%d).", __FUNCTION__,
strerror(-res), res);
stream->mState = STATE_ERROR;
}
+
+ if (shouldFreeBuffer) {
+ sp<GraphicBuffer> buffer;
+ // Detach and free a buffer (when buffer goes out of scope)
+ stream->detachBufferLocked(&buffer, /*fenceFd*/ nullptr);
+ if (buffer.get() != nullptr) {
+ stream->mBufferManager->notifyBufferRemoved(
+ stream->getId(), stream->getStreamSetId());
+ }
+ }
+}
+
+void Camera3OutputStream::onBuffersRemovedLocked(
+ const std::vector<sp<GraphicBuffer>>& removedBuffers) {
+ Camera3StreamBufferFreedListener* callback = mBufferFreedListener;
+ if (callback != nullptr) {
+ for (auto gb : removedBuffers) {
+ callback->onBufferFreed(mId, gb->handle);
+ }
+ }
}
status_t Camera3OutputStream::detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd) {
Mutex::Autolock l(mLock);
+ return detachBufferLocked(buffer, fenceFd);
+}
+status_t Camera3OutputStream::detachBufferLocked(sp<GraphicBuffer>* buffer, int* fenceFd) {
ALOGV("Stream %d: detachBuffer", getId());
if (buffer == nullptr) {
return BAD_VALUE;
@@ -673,17 +770,36 @@
}
}
+ std::vector<sp<GraphicBuffer>> removedBuffers;
+ res = mConsumer->getAndFlushRemovedBuffers(&removedBuffers);
+ if (res == OK) {
+ onBuffersRemovedLocked(removedBuffers);
+ }
+ return res;
+}
+
+status_t Camera3OutputStream::notifyBufferReleased(ANativeWindowBuffer* /*anwBuffer*/) {
return OK;
}
-bool Camera3OutputStream::isConsumerConfigurationDeferred() const {
+bool Camera3OutputStream::isConsumerConfigurationDeferred(size_t surface_id) const {
Mutex::Autolock l(mLock);
+
+ if (surface_id != 0) {
+ ALOGE("%s: surface_id %zu for Camera3OutputStream should be 0!", __FUNCTION__, surface_id);
+ }
return mConsumer == nullptr;
}
-status_t Camera3OutputStream::setConsumer(sp<Surface> consumer) {
- if (consumer == nullptr) {
- ALOGE("%s: it's illegal to set a null consumer surface!", __FUNCTION__);
+status_t Camera3OutputStream::setConsumers(const std::vector<sp<Surface>>& consumers) {
+ Mutex::Autolock l(mLock);
+ if (consumers.size() != 1) {
+ ALOGE("%s: it's illegal to set %zu consumer surfaces!",
+ __FUNCTION__, consumers.size());
+ return INVALID_OPERATION;
+ }
+ if (consumers[0] == nullptr) {
+ ALOGE("%s: it's illegal to set null consumer surface!", __FUNCTION__);
return INVALID_OPERATION;
}
@@ -692,7 +808,7 @@
return INVALID_OPERATION;
}
- mConsumer = consumer;
+ mConsumer = consumers[0];
return OK;
}
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 12d497e..97aa7d4 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -21,6 +21,7 @@
#include <gui/IProducerListener.h>
#include <gui/Surface.h>
+#include "utils/LatencyHistogram.h"
#include "Camera3Stream.h"
#include "Camera3IOStreamBase.h"
#include "Camera3OutputStreamInterface.h"
@@ -135,12 +136,12 @@
/**
* Return if the consumer configuration of this stream is deferred.
*/
- virtual bool isConsumerConfigurationDeferred() const;
+ virtual bool isConsumerConfigurationDeferred(size_t surface_id) const;
/**
- * Set the consumer surface to the output stream.
+ * Set the consumer surfaces to the output stream.
*/
- virtual status_t setConsumer(sp<Surface> consumer);
+ virtual status_t setConsumers(const std::vector<sp<Surface>>& consumers);
class BufferReleasedListener : public BnProducerListener {
public:
@@ -159,6 +160,12 @@
virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd);
/**
+ * Notify that the buffer is being released to the buffer queue instead of
+ * being queued to the consumer.
+ */
+ virtual status_t notifyBufferReleased(ANativeWindowBuffer *anwBuffer);
+
+ /**
* Set the graphic buffer manager to get/return the stream buffers.
*
* It is only legal to call this method when stream is in STATE_CONSTRUCTED state.
@@ -169,6 +176,7 @@
Camera3OutputStream(int id, camera3_stream_type_t type,
uint32_t width, uint32_t height, int format,
android_dataspace dataSpace, camera3_stream_rotation_t rotation,
+ uint32_t consumerUsage = 0, nsecs_t timestampOffset = 0,
int setId = CAMERA3_STREAM_SET_ID_INVALID);
/**
@@ -183,12 +191,22 @@
virtual status_t disconnectLocked();
+ status_t getEndpointUsageForSurface(uint32_t *usage,
+ const sp<Surface>& surface) const;
+ status_t configureConsumerQueueLocked();
+
+ // Consumer as the output of camera HAL
sp<Surface> mConsumer;
- private:
+ uint32_t getPresetConsumerUsage() const { return mConsumerUsage; }
static const nsecs_t kDequeueBufferTimeout = 1000000000; // 1 sec
+ status_t getBufferLockedCommon(ANativeWindowBuffer** anb, int* fenceFd);
+
+
+ private:
+
int mTransform;
virtual status_t setTransformLocked(int transform);
@@ -232,15 +250,29 @@
/**
* Internal Camera3Stream interface
*/
- virtual status_t getBufferLocked(camera3_stream_buffer *buffer);
+ virtual status_t getBufferLocked(camera3_stream_buffer *buffer,
+ const std::vector<size_t>& surface_ids);
+
virtual status_t returnBufferLocked(
const camera3_stream_buffer &buffer,
nsecs_t timestamp);
+ virtual status_t queueBufferToConsumer(sp<ANativeWindow>& consumer,
+ ANativeWindowBuffer* buffer, int anwReleaseFence);
+
virtual status_t configureQueueLocked();
virtual status_t getEndpointUsage(uint32_t *usage) const;
+ /**
+ * Private methods
+ */
+ void onBuffersRemovedLocked(const std::vector<sp<GraphicBuffer>>&);
+ status_t detachBufferLocked(sp<GraphicBuffer>* buffer, int* fenceFd);
+
+ static const int32_t kDequeueLatencyBinSize = 5; // in ms
+ CameraLatencyHistogram mDequeueBufferLatency;
+
}; // class Camera3OutputStream
} // namespace camera3
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
index 3f83c89..8107dd0 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
@@ -43,12 +43,12 @@
/**
* Return if the consumer configuration of this stream is deferred.
*/
- virtual bool isConsumerConfigurationDeferred() const = 0;
+ virtual bool isConsumerConfigurationDeferred(size_t surface_id = 0) const = 0;
/**
- * Set the consumer surface to the output stream.
+ * Set the consumer surfaces to the output stream.
*/
- virtual status_t setConsumer(sp<Surface> consumer) = 0;
+ virtual status_t setConsumers(const std::vector<sp<Surface>>& consumers) = 0;
/**
* Detach an unused buffer from the stream.
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
new file mode 100644
index 0000000..2ae5660
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Camera3SharedOutputStream.h"
+
+namespace android {
+
+namespace camera3 {
+
+Camera3SharedOutputStream::Camera3SharedOutputStream(int id,
+ const std::vector<sp<Surface>>& surfaces,
+ uint32_t width, uint32_t height, int format,
+ uint32_t consumerUsage, android_dataspace dataSpace,
+ camera3_stream_rotation_t rotation,
+ nsecs_t timestampOffset, int setId) :
+ Camera3OutputStream(id, CAMERA3_STREAM_OUTPUT, width, height,
+ format, dataSpace, rotation, consumerUsage,
+ timestampOffset, setId),
+ mSurfaces(surfaces) {
+}
+
+Camera3SharedOutputStream::~Camera3SharedOutputStream() {
+ disconnectLocked();
+}
+
+status_t Camera3SharedOutputStream::connectStreamSplitterLocked() {
+ status_t res = OK;
+
+ mStreamSplitter = new Camera3StreamSplitter();
+
+ uint32_t usage;
+ getEndpointUsage(&usage);
+
+ res = mStreamSplitter->connect(mSurfaces, usage, camera3_stream::max_buffers, &mConsumer);
+ if (res != OK) {
+ ALOGE("%s: Failed to connect to stream splitter: %s(%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+
+ return res;
+}
+
+status_t Camera3SharedOutputStream::notifyBufferReleased(ANativeWindowBuffer *anwBuffer) {
+ Mutex::Autolock l(mLock);
+ status_t res = OK;
+ const sp<GraphicBuffer> buffer(static_cast<GraphicBuffer*>(anwBuffer));
+
+ if (mStreamSplitter != nullptr) {
+ res = mStreamSplitter->notifyBufferReleased(buffer);
+ }
+
+ return res;
+}
+
+bool Camera3SharedOutputStream::isConsumerConfigurationDeferred(size_t surface_id) const {
+ Mutex::Autolock l(mLock);
+ return (surface_id >= mSurfaces.size());
+}
+
+status_t Camera3SharedOutputStream::setConsumers(const std::vector<sp<Surface>>& surfaces) {
+ Mutex::Autolock l(mLock);
+ if (surfaces.size() == 0) {
+ ALOGE("%s: it's illegal to set zero consumer surfaces!", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
+ status_t ret = OK;
+ for (auto& surface : surfaces) {
+ if (surface == nullptr) {
+ ALOGE("%s: it's illegal to set a null consumer surface!", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
+ mSurfaces.push_back(surface);
+
+ // Only call addOutput if the splitter has been connected.
+ if (mStreamSplitter != nullptr) {
+ ret = mStreamSplitter->addOutput(surface);
+ if (ret != OK) {
+ ALOGE("%s: addOutput failed with error code %d", __FUNCTION__, ret);
+ return ret;
+
+ }
+ }
+ }
+ return ret;
+}
+
+status_t Camera3SharedOutputStream::getBufferLocked(camera3_stream_buffer *buffer,
+ const std::vector<size_t>& surface_ids) {
+ ANativeWindowBuffer* anb;
+ int fenceFd = -1;
+
+ status_t res;
+ res = getBufferLockedCommon(&anb, &fenceFd);
+ if (res != OK) {
+ return res;
+ }
+
+ // Attach the buffer to the splitter output queues. This could block if
+ // the output queue doesn't have any empty slot. So unlock during the course
+ // of attachBufferToOutputs.
+ sp<Camera3StreamSplitter> splitter = mStreamSplitter;
+ mLock.unlock();
+ res = splitter->attachBufferToOutputs(anb, surface_ids);
+ mLock.lock();
+ if (res != OK) {
+ ALOGE("%s: Stream %d: Cannot attach stream splitter buffer to outputs: %s (%d)",
+ __FUNCTION__, mId, strerror(-res), res);
+ // Only transition to STATE_ABANDONED from STATE_CONFIGURED. (If it is STATE_PREPARING,
+ // let prepareNextBuffer handle the error.)
+ if (res == NO_INIT && mState == STATE_CONFIGURED) {
+ mState = STATE_ABANDONED;
+ }
+
+ return res;
+ }
+
+ /**
+ * FenceFD now owned by HAL except in case of error,
+ * in which case we reassign it to acquire_fence
+ */
+ handoutBufferLocked(*buffer, &(anb->handle), /*acquireFence*/fenceFd,
+ /*releaseFence*/-1, CAMERA3_BUFFER_STATUS_OK, /*output*/true);
+
+ return OK;
+}
+
+status_t Camera3SharedOutputStream::queueBufferToConsumer(sp<ANativeWindow>& consumer,
+ ANativeWindowBuffer* buffer, int anwReleaseFence) {
+ status_t res = consumer->queueBuffer(consumer.get(), buffer, anwReleaseFence);
+
+ // After queuing buffer to the internal consumer queue, check whether the buffer is
+ // successfully queued to the output queues.
+ if (res == OK) {
+ res = mStreamSplitter->getOnFrameAvailableResult();
+ if (res != OK) {
+ ALOGE("%s: getOnFrameAvailable returns %d", __FUNCTION__, res);
+ }
+ } else {
+ ALOGE("%s: queueBuffer failed %d", __FUNCTION__, res);
+ }
+
+ return res;
+}
+
+status_t Camera3SharedOutputStream::configureQueueLocked() {
+ status_t res;
+
+ if ((res = Camera3IOStreamBase::configureQueueLocked()) != OK) {
+ return res;
+ }
+
+ res = connectStreamSplitterLocked();
+ if (res != OK) {
+ ALOGE("Cannot connect to stream splitter: %s(%d)", strerror(-res), res);
+ return res;
+ }
+
+ res = configureConsumerQueueLocked();
+ if (res != OK) {
+ ALOGE("Failed to configureConsumerQueueLocked: %s(%d)", strerror(-res), res);
+ return res;
+ }
+
+ return OK;
+}
+
+status_t Camera3SharedOutputStream::disconnectLocked() {
+ status_t res;
+ res = Camera3OutputStream::disconnectLocked();
+
+ if (mStreamSplitter != nullptr) {
+ mStreamSplitter->disconnect();
+ }
+
+ return res;
+}
+
+status_t Camera3SharedOutputStream::getEndpointUsage(uint32_t *usage) const {
+
+ status_t res = OK;
+ uint32_t u = 0;
+
+ if (mConsumer == nullptr) {
+ // Called before shared buffer queue is constructed.
+ *usage = getPresetConsumerUsage();
+
+ for (auto surface : mSurfaces) {
+ if (surface != nullptr) {
+ res = getEndpointUsageForSurface(&u, surface);
+ *usage |= u;
+ }
+ }
+ } else {
+ // Called after shared buffer queue is constructed.
+ res = getEndpointUsageForSurface(&u, mConsumer);
+ *usage |= u;
+ }
+
+ return res;
+}
+
+} // namespace camera3
+
+} // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
new file mode 100644
index 0000000..7be0940
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA3_SHARED_OUTPUT_STREAM_H
+#define ANDROID_SERVERS_CAMERA3_SHARED_OUTPUT_STREAM_H
+
+#include "Camera3StreamSplitter.h"
+#include "Camera3OutputStream.h"
+
+namespace android {
+
+namespace camera3 {
+
+class Camera3SharedOutputStream :
+ public Camera3OutputStream {
+public:
+ /**
+ * Set up a stream for formats that have 2 dimensions, with multiple
+ * surfaces. A valid stream set id needs to be set to support buffer
+ * sharing between multiple streams.
+ */
+ Camera3SharedOutputStream(int id, const std::vector<sp<Surface>>& surfaces,
+ uint32_t width, uint32_t height, int format,
+ uint32_t consumerUsage, android_dataspace dataSpace,
+ camera3_stream_rotation_t rotation, nsecs_t timestampOffset,
+ int setId = CAMERA3_STREAM_SET_ID_INVALID);
+
+ virtual ~Camera3SharedOutputStream();
+
+ virtual status_t notifyBufferReleased(ANativeWindowBuffer *buffer);
+
+ virtual bool isConsumerConfigurationDeferred(size_t surface_id) const;
+
+ virtual status_t setConsumers(const std::vector<sp<Surface>>& consumers);
+
+private:
+ // Surfaces passed in constructor from app
+ std::vector<sp<Surface> > mSurfaces;
+
+ /**
+ * The Camera3StreamSplitter object this stream uses for stream
+ * sharing.
+ */
+ sp<Camera3StreamSplitter> mStreamSplitter;
+
+ /**
+ * Initialize stream splitter.
+ */
+ status_t connectStreamSplitterLocked();
+
+ /**
+ * Internal Camera3Stream interface
+ */
+ virtual status_t getBufferLocked(camera3_stream_buffer *buffer,
+ const std::vector<size_t>& surface_ids);
+
+ virtual status_t queueBufferToConsumer(sp<ANativeWindow>& consumer,
+ ANativeWindowBuffer* buffer, int anwReleaseFence);
+
+ virtual status_t configureQueueLocked();
+
+ virtual status_t disconnectLocked();
+
+ virtual status_t getEndpointUsage(uint32_t *usage) const;
+
+}; // class Camera3SharedOutputStream
+
+} // namespace camera3
+
+} // namespace android
+
+#endif // ANDROID_SERVERS_CAMERA3_SHARED_OUTPUT_STREAM_H
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 3ffd9d1..ba352c4 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -60,7 +60,8 @@
mOldMaxBuffers(0),
mPrepared(false),
mPreparedBufferIdx(0),
- mLastMaxCount(Camera3StreamInterface::ALLOCATE_PIPELINE_MAX) {
+ mLastMaxCount(Camera3StreamInterface::ALLOCATE_PIPELINE_MAX),
+ mBufferLimitLatency(kBufferLimitLatencyBinSize) {
camera3_stream::stream_type = type;
camera3_stream::width = width;
@@ -166,7 +167,7 @@
return (mState == STATE_IN_CONFIG) || (mState == STATE_IN_RECONFIG);
}
-status_t Camera3Stream::finishConfiguration(camera3_device *hal3Device) {
+status_t Camera3Stream::finishConfiguration() {
ATRACE_CALL();
Mutex::Autolock l(mLock);
switch (mState) {
@@ -216,14 +217,6 @@
return res;
}
- res = registerBuffersLocked(hal3Device);
- if (res != OK) {
- ALOGE("%s: Unable to register stream buffers with HAL: %s (%d)",
- __FUNCTION__, strerror(-res), res);
- mState = STATE_ERROR;
- return res;
- }
-
mState = STATE_CONFIGURED;
return res;
@@ -451,7 +444,8 @@
return OK;
}
-status_t Camera3Stream::getBuffer(camera3_stream_buffer *buffer) {
+status_t Camera3Stream::getBuffer(camera3_stream_buffer *buffer,
+ const std::vector<size_t>& surface_ids) {
ATRACE_CALL();
Mutex::Autolock l(mLock);
status_t res = OK;
@@ -466,8 +460,11 @@
// Wait for new buffer returned back if we are running into the limit.
if (getHandoutOutputBufferCountLocked() == camera3_stream::max_buffers) {
ALOGV("%s: Already dequeued max output buffers (%d), wait for next returned one.",
- __FUNCTION__, camera3_stream::max_buffers);
+ __FUNCTION__, camera3_stream::max_buffers);
+ nsecs_t waitStart = systemTime(SYSTEM_TIME_MONOTONIC);
res = mOutputBufferReturnedSignal.waitRelative(mLock, kWaitForBufferDuration);
+ nsecs_t waitEnd = systemTime(SYSTEM_TIME_MONOTONIC);
+ mBufferLimitLatency.add(waitStart, waitEnd);
if (res != OK) {
if (res == TIMED_OUT) {
ALOGE("%s: wait for output buffer return timed out after %lldms (max_buffers %d)",
@@ -478,7 +475,7 @@
}
}
- res = getBufferLocked(buffer);
+ res = getBufferLocked(buffer, surface_ids);
if (res == OK) {
fireBufferListenersLocked(*buffer, /*acquired*/true, /*output*/true);
if (buffer->buffer) {
@@ -662,6 +659,9 @@
ALOGV("%s: Stream %d: Disconnecting...", __FUNCTION__, mId);
status_t res = disconnectLocked();
+ mBufferLimitLatency.log("Stream %d latency histogram for wait on max_buffers", mId);
+ mBufferLimitLatency.reset();
+
if (res == -ENOTCONN) {
// "Already disconnected" -- not an error
return OK;
@@ -670,90 +670,15 @@
}
}
-status_t Camera3Stream::registerBuffersLocked(camera3_device *hal3Device) {
- ATRACE_CALL();
-
- /**
- * >= CAMERA_DEVICE_API_VERSION_3_2:
- *
- * camera3_device_t->ops->register_stream_buffers() is not called and must
- * be NULL.
- */
- if (hal3Device->common.version >= CAMERA_DEVICE_API_VERSION_3_2) {
- ALOGV("%s: register_stream_buffers unused as of HAL3.2", __FUNCTION__);
-
- if (hal3Device->ops->register_stream_buffers != NULL) {
- ALOGE("%s: register_stream_buffers is deprecated in HAL3.2; "
- "must be set to NULL in camera3_device::ops", __FUNCTION__);
- return INVALID_OPERATION;
- }
-
- return OK;
- }
-
- ALOGV("%s: register_stream_buffers using deprecated code path", __FUNCTION__);
-
- status_t res;
-
- size_t bufferCount = getBufferCountLocked();
-
- Vector<buffer_handle_t*> buffers;
- buffers.insertAt(/*prototype_item*/NULL, /*index*/0, bufferCount);
-
- camera3_stream_buffer_set bufferSet = camera3_stream_buffer_set();
- bufferSet.stream = this;
- bufferSet.num_buffers = bufferCount;
- bufferSet.buffers = buffers.editArray();
-
- Vector<camera3_stream_buffer_t> streamBuffers;
- streamBuffers.insertAt(camera3_stream_buffer_t(), /*index*/0, bufferCount);
-
- // Register all buffers with the HAL. This means getting all the buffers
- // from the stream, providing them to the HAL with the
- // register_stream_buffers() method, and then returning them back to the
- // stream in the error state, since they won't have valid data.
- //
- // Only registered buffers can be sent to the HAL.
-
- uint32_t bufferIdx = 0;
- for (; bufferIdx < bufferCount; bufferIdx++) {
- res = getBufferLocked( &streamBuffers.editItemAt(bufferIdx) );
- if (res != OK) {
- ALOGE("%s: Unable to get buffer %d for registration with HAL",
- __FUNCTION__, bufferIdx);
- // Skip registering, go straight to cleanup
- break;
- }
-
- sp<Fence> fence = new Fence(streamBuffers[bufferIdx].acquire_fence);
- fence->waitForever("Camera3Stream::registerBuffers");
-
- buffers.editItemAt(bufferIdx) = streamBuffers[bufferIdx].buffer;
- }
- if (bufferIdx == bufferCount) {
- // Got all buffers, register with HAL
- ALOGV("%s: Registering %zu buffers with camera HAL",
- __FUNCTION__, bufferCount);
- ATRACE_BEGIN("camera3->register_stream_buffers");
- res = hal3Device->ops->register_stream_buffers(hal3Device,
- &bufferSet);
- ATRACE_END();
- }
-
- // Return all valid buffers to stream, in ERROR state to indicate
- // they weren't filled.
- for (size_t i = 0; i < bufferIdx; i++) {
- streamBuffers.editItemAt(i).release_fence = -1;
- streamBuffers.editItemAt(i).status = CAMERA3_BUFFER_STATUS_ERROR;
- returnBufferLocked(streamBuffers[i], 0);
- }
-
- mPrepared = true;
-
- return res;
+void Camera3Stream::dump(int fd, const Vector<String16> &args) const
+{
+ (void)args;
+ mBufferLimitLatency.dump(fd,
+ " Latency histogram for wait on max_buffers");
}
-status_t Camera3Stream::getBufferLocked(camera3_stream_buffer *) {
+status_t Camera3Stream::getBufferLocked(camera3_stream_buffer *,
+ const std::vector<size_t>&) {
ALOGE("%s: This type of stream does not support output", __FUNCTION__);
return INVALID_OPERATION;
}
@@ -818,6 +743,18 @@
}
}
+void Camera3Stream::setBufferFreedListener(
+ Camera3StreamBufferFreedListener* listener) {
+ Mutex::Autolock l(mLock);
+    // Only allow setting the listener during stream configuration, because the stream is
+    // guaranteed to be IDLE in that state, so setBufferFreedListener won't collide with onBufferFreed callbacks
+ if (mState != STATE_IN_CONFIG && mState != STATE_IN_RECONFIG) {
+ ALOGE("%s: listener must be set during stream configuration!",__FUNCTION__);
+ return;
+ }
+ mBufferFreedListener = listener;
+}
+
}; // namespace camera3
}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 1ff215d..b5a9c5d 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -25,6 +25,7 @@
#include "hardware/camera3.h"
+#include "utils/LatencyHistogram.h"
#include "Camera3StreamBufferListener.h"
#include "Camera3StreamInterface.h"
@@ -144,6 +145,10 @@
int getFormat() const;
android_dataspace getDataSpace() const;
+ camera3_stream* asHalStream() override {
+ return this;
+ }
+
/**
* Start the stream configuration process. Returns a handle to the stream's
* information to be passed into the HAL device's configure_streams call.
@@ -165,11 +170,10 @@
bool isConfiguring() const;
/**
- * Completes the stream configuration process. During this call, the stream
- * may call the device's register_stream_buffers() method. The stream
- * information structure returned by startConfiguration() may no longer be
- * modified after this call, but can still be read until the destruction of
- * the stream.
+ * Completes the stream configuration process. The stream information
+ * structure returned by startConfiguration() may no longer be modified
+ * after this call, but can still be read until the destruction of the
+ * stream.
*
* Returns:
* OK on a successful configuration
@@ -178,7 +182,7 @@
* INVALID_OPERATION in case connecting to the consumer failed or consumer
* doesn't exist yet.
*/
- status_t finishConfiguration(camera3_device *hal3Device);
+ status_t finishConfiguration();
/**
* Cancels the stream configuration process. This returns the stream to the
@@ -274,12 +278,18 @@
* Fill in the camera3_stream_buffer with the next valid buffer for this
* stream, to hand over to the HAL.
*
+ * Multiple surfaces could share the same HAL stream, but a request may
+ * be only for a subset of surfaces. In this case, the
+ * Camera3StreamInterface object needs the surface ID information to acquire
+ * buffers for those surfaces.
+ *
* This method may only be called once finishConfiguration has been called.
* For bidirectional streams, this method applies to the output-side
* buffers.
*
*/
- status_t getBuffer(camera3_stream_buffer *buffer);
+ status_t getBuffer(camera3_stream_buffer *buffer,
+ const std::vector<size_t>& surface_ids = std::vector<size_t>());
/**
* Return a buffer to the stream after use by the HAL.
@@ -340,7 +350,7 @@
/**
* Debug dump of the stream's state.
*/
- virtual void dump(int fd, const Vector<String16> &args) const = 0;
+ virtual void dump(int fd, const Vector<String16> &args) const;
/**
* Add a camera3 buffer listener. Adding the same listener twice has
@@ -356,6 +366,11 @@
void removeBufferListener(
const sp<Camera3StreamBufferListener>& listener);
+
+ // Setting listener will remove previous listener (if exists)
+ virtual void setBufferFreedListener(
+ Camera3StreamBufferFreedListener* listener) override;
+
/**
* Return if the buffer queue of the stream is abandoned.
*/
@@ -399,6 +414,8 @@
android_dataspace dataSpace, camera3_stream_rotation_t rotation,
int setId);
+ Camera3StreamBufferFreedListener* mBufferFreedListener;
+
/**
* Interface to be implemented by derived classes
*/
@@ -409,7 +426,8 @@
// cast to camera3_stream*, implementations must increment the
// refcount of the stream manually in getBufferLocked, and decrement it in
// returnBufferLocked.
- virtual status_t getBufferLocked(camera3_stream_buffer *buffer);
+ virtual status_t getBufferLocked(camera3_stream_buffer *buffer,
+ const std::vector<size_t>& surface_ids = std::vector<size_t>());
virtual status_t returnBufferLocked(const camera3_stream_buffer &buffer,
nsecs_t timestamp);
virtual status_t getInputBufferLocked(camera3_stream_buffer *buffer);
@@ -457,9 +475,6 @@
Condition mInputBufferReturnedSignal;
static const nsecs_t kWaitForBufferDuration = 3000000000LL; // 3000 ms
- // Gets all buffers from endpoint and registers them with the HAL.
- status_t registerBuffersLocked(camera3_device *hal3Device);
-
void fireBufferListenersLocked(const camera3_stream_buffer& buffer,
bool acquired, bool output);
List<wp<Camera3StreamBufferListener> > mBufferListenerList;
@@ -488,6 +503,10 @@
// Outstanding buffers dequeued from the stream's buffer queue.
List<buffer_handle_t> mOutstandingBuffers;
+ // Latency histogram of the wait time for handout buffer count to drop below
+ // max_buffers.
+ static const int32_t kBufferLimitLatencyBinSize = 33; //in ms
+ CameraLatencyHistogram mBufferLimitLatency;
}; // class Camera3Stream
}; // namespace camera3
diff --git a/services/camera/libcameraservice/device3/Camera3StreamBufferFreedListener.h b/services/camera/libcameraservice/device3/Camera3StreamBufferFreedListener.h
new file mode 100644
index 0000000..478a752
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3StreamBufferFreedListener.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA3_STREAMBUFFERFREEDLISTENER_H
+#define ANDROID_SERVERS_CAMERA3_STREAMBUFFERFREEDLISTENER_H
+
+#include <gui/Surface.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+namespace camera3 {
+
+class Camera3StreamBufferFreedListener {
+public:
+ // onBufferFreed is called when a buffer is no longer being managed
+ // by this stream. This will not be called in events when all
+ // buffers are freed due to stream disconnection.
+ //
+    //
+    // The input handle may be deleted after this callback ends, so attempting
+    // to dereference the handle after this callback returns is illegal and
+    // might lead to a crash.
+ //
+ // This callback will be called while holding Camera3Stream's lock, so
+ // calling into other Camera3Stream APIs within this callback will
+ // lead to deadlock.
+ virtual void onBufferFreed(int streamId, const native_handle_t* handle) = 0;
+
+ virtual ~Camera3StreamBufferFreedListener() {}
+};
+
+}; //namespace camera3
+}; //namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index 6cb7a54..37b7c36 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -19,6 +19,7 @@
#include <utils/RefBase.h>
#include "Camera3StreamBufferListener.h"
+#include "Camera3StreamBufferFreedListener.h"
struct camera3_stream_buffer;
@@ -72,6 +73,11 @@
virtual android_dataspace getDataSpace() const = 0;
/**
+ * Get a HAL3 handle for the stream, without starting stream configuration.
+ */
+ virtual camera3_stream* asHalStream() = 0;
+
+ /**
* Start the stream configuration process. Returns a handle to the stream's
* information to be passed into the HAL device's configure_streams call.
*
@@ -104,7 +110,7 @@
* NO_MEMORY in case of an error registering buffers
* INVALID_OPERATION in case connecting to the consumer failed
*/
- virtual status_t finishConfiguration(camera3_device *hal3Device) = 0;
+ virtual status_t finishConfiguration() = 0;
/**
* Cancels the stream configuration process. This returns the stream to the
@@ -195,12 +201,19 @@
* Fill in the camera3_stream_buffer with the next valid buffer for this
* stream, to hand over to the HAL.
*
+ * Multiple surfaces could share the same HAL stream, but a request may
+ * be only for a subset of surfaces. In this case, the
+ * Camera3StreamInterface object needs the surface ID information to acquire
+ * buffers for those surfaces. For the case of single surface for a HAL
+ * stream, surface_ids parameter has no effect.
+ *
* This method may only be called once finishConfiguration has been called.
* For bidirectional streams, this method applies to the output-side
* buffers.
*
*/
- virtual status_t getBuffer(camera3_stream_buffer *buffer) = 0;
+ virtual status_t getBuffer(camera3_stream_buffer *buffer,
+ const std::vector<size_t>& surface_ids = std::vector<size_t>()) = 0;
/**
* Return a buffer to the stream after use by the HAL.
@@ -275,6 +288,15 @@
wp<Camera3StreamBufferListener> listener) = 0;
virtual void removeBufferListener(
const sp<Camera3StreamBufferListener>& listener) = 0;
+
+ /**
+     * Setting a listener will remove the previous listener (if it exists).
+     * Only allow setting the listener during stream configuration, because the
+     * stream is guaranteed to be IDLE in that state, so setBufferFreedListener
+     * won't collide with onBufferFreed callbacks. The client is responsible for
+     * keeping the listener object alive throughout the lifecycle of this Camera3Stream.
+ */
+ virtual void setBufferFreedListener(Camera3StreamBufferFreedListener* listener) = 0;
};
} // namespace camera3
diff --git a/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
new file mode 100644
index 0000000..869e93a
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
@@ -0,0 +1,564 @@
+/*
+ * Copyright 2014,2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+
+#define LOG_TAG "Camera3StreamSplitter"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <gui/BufferItem.h>
+#include <gui/IGraphicBufferConsumer.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/BufferQueue.h>
+#include <gui/Surface.h>
+
+#include <ui/GraphicBuffer.h>
+
+#include <binder/ProcessState.h>
+
+#include <utils/Trace.h>
+
+#include <cutils/atomic.h>
+
+#include "Camera3StreamSplitter.h"
+
+namespace android {
+
+status_t Camera3StreamSplitter::connect(const std::vector<sp<Surface> >& surfaces,
+ uint32_t consumerUsage, size_t halMaxBuffers, sp<Surface>* consumer) {
+ ATRACE_CALL();
+ if (consumer == nullptr) {
+ SP_LOGE("%s: consumer pointer is NULL", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ Mutex::Autolock lock(mMutex);
+ status_t res = OK;
+
+ if (mOutputs.size() > 0 || mConsumer != nullptr) {
+ SP_LOGE("%s: already connected", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ if (mBuffers.size() > 0) {
+ SP_LOGE("%s: still has %zu pending buffers", __FUNCTION__, mBuffers.size());
+ return BAD_VALUE;
+ }
+
+ mMaxHalBuffers = halMaxBuffers;
+ mConsumerName = getUniqueConsumerName();
+ // Add output surfaces. This has to be before creating internal buffer queue
+ // in order to get max consumer side buffers.
+ for (size_t i = 0; i < surfaces.size(); i++) {
+ if (surfaces[i] == nullptr) {
+ SP_LOGE("%s: Fatal: surface is NULL", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ res = addOutputLocked(surfaces[i]);
+ if (res != OK) {
+ SP_LOGE("%s: Failed to add output surface: %s(%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+ }
+
+ // Create BufferQueue for input
+ BufferQueue::createBufferQueue(&mProducer, &mConsumer);
+
+ // Allocate 1 extra buffer to handle the case where all buffers are detached
+ // from input, and attached to the outputs. In this case, the input queue's
+ // dequeueBuffer can still allocate 1 extra buffer before being blocked by
+ // the output's attachBuffer().
+ mBufferItemConsumer = new BufferItemConsumer(mConsumer, consumerUsage,
+ mMaxConsumerBuffers+1);
+ if (mBufferItemConsumer == nullptr) {
+ return NO_MEMORY;
+ }
+ mConsumer->setConsumerName(mConsumerName);
+
+ *consumer = new Surface(mProducer);
+ if (*consumer == nullptr) {
+ return NO_MEMORY;
+ }
+
+ res = mConsumer->consumerConnect(this, /* controlledByApp */ false);
+
+ SP_LOGV("%s: connected", __FUNCTION__);
+ return res;
+}
+
+status_t Camera3StreamSplitter::getOnFrameAvailableResult() {
+ ATRACE_CALL();
+ return mOnFrameAvailableRes.load();
+}
+
+void Camera3StreamSplitter::disconnect() {
+ ATRACE_CALL();
+ Mutex::Autolock lock(mMutex);
+
+ for (auto& notifier : mNotifiers) {
+ sp<IGraphicBufferProducer> producer = notifier.first;
+ sp<OutputListener> listener = notifier.second;
+ IInterface::asBinder(producer)->unlinkToDeath(listener);
+ }
+ mNotifiers.clear();
+
+ for (auto& output : mOutputs) {
+ output->disconnect(NATIVE_WINDOW_API_CAMERA);
+ }
+ mOutputs.clear();
+ mOutputSlots.clear();
+
+ mConsumer->consumerDisconnect();
+
+ if (mBuffers.size() > 0) {
+ SP_LOGW("%zu buffers still being tracked", mBuffers.size());
+ mBuffers.clear();
+ }
+
+ mMaxHalBuffers = 0;
+ mMaxConsumerBuffers = 0;
+ SP_LOGV("%s: Disconnected", __FUNCTION__);
+}
+
+
+Camera3StreamSplitter::~Camera3StreamSplitter() {
+ disconnect();
+}
+
+status_t Camera3StreamSplitter::addOutput(const sp<Surface>& outputQueue) {
+ ATRACE_CALL();
+ Mutex::Autolock lock(mMutex);
+ status_t res = addOutputLocked(outputQueue);
+
+ if (res != OK) {
+ SP_LOGE("%s: addOutputLocked failed %d", __FUNCTION__, res);
+ return res;
+ }
+
+ res = mConsumer->setMaxAcquiredBufferCount(mMaxConsumerBuffers+1);
+
+ return res;
+}
+
+status_t Camera3StreamSplitter::addOutputLocked(const sp<Surface>& outputQueue) {
+ ATRACE_CALL();
+ if (outputQueue == nullptr) {
+ SP_LOGE("addOutput: outputQueue must not be NULL");
+ return BAD_VALUE;
+ }
+
+ sp<IGraphicBufferProducer> gbp = outputQueue->getIGraphicBufferProducer();
+ // Connect to the buffer producer
+ sp<OutputListener> listener(new OutputListener(this, gbp));
+ IInterface::asBinder(gbp)->linkToDeath(listener);
+ status_t res = outputQueue->connect(NATIVE_WINDOW_API_CAMERA, listener);
+ if (res != NO_ERROR) {
+ SP_LOGE("addOutput: failed to connect (%d)", res);
+ return res;
+ }
+
+ // Query consumer side buffer count, and update overall buffer count
+ int maxConsumerBuffers = 0;
+ res = static_cast<ANativeWindow*>(outputQueue.get())->query(
+ outputQueue.get(),
+ NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxConsumerBuffers);
+ if (res != OK) {
+ SP_LOGE("%s: Unable to query consumer undequeued buffer count"
+ " for surface", __FUNCTION__);
+ return res;
+ }
+
+ SP_LOGV("%s: Consumer wants %d buffers, Producer wants %zu", __FUNCTION__,
+ maxConsumerBuffers, mMaxHalBuffers);
+ size_t totalBufferCount = maxConsumerBuffers + mMaxHalBuffers;
+ res = native_window_set_buffer_count(outputQueue.get(),
+ totalBufferCount);
+ if (res != OK) {
+ SP_LOGE("%s: Unable to set buffer count for surface %p",
+ __FUNCTION__, outputQueue.get());
+ return res;
+ }
+
+ // Set dequeueBuffer/attachBuffer timeout if the consumer is not hw composer or hw texture.
+ // We need skip these cases as timeout will disable the non-blocking (async) mode.
+ int32_t usage = 0;
+ static_cast<ANativeWindow*>(outputQueue.get())->query(
+ outputQueue.get(),
+ NATIVE_WINDOW_CONSUMER_USAGE_BITS, &usage);
+ if (!(usage & (GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_TEXTURE))) {
+ outputQueue->setDequeueTimeout(kDequeueBufferTimeout);
+ }
+
+ res = gbp->allowAllocation(false);
+ if (res != OK) {
+ SP_LOGE("%s: Failed to turn off allocation for outputQueue", __FUNCTION__);
+ return res;
+ }
+
+ // Add new entry into mOutputs
+ mOutputs.push_back(gbp);
+ mNotifiers[gbp] = listener;
+ mOutputSlots[gbp] = std::make_unique<OutputSlots>(totalBufferCount);
+
+ mMaxConsumerBuffers += maxConsumerBuffers;
+ return NO_ERROR;
+}
+
+status_t Camera3StreamSplitter::outputBufferLocked(const sp<IGraphicBufferProducer>& output,
+ const BufferItem& bufferItem) {
+ ATRACE_CALL();
+ status_t res;
+ IGraphicBufferProducer::QueueBufferInput queueInput(
+ bufferItem.mTimestamp, bufferItem.mIsAutoTimestamp,
+ bufferItem.mDataSpace, bufferItem.mCrop,
+ static_cast<int32_t>(bufferItem.mScalingMode),
+ bufferItem.mTransform, bufferItem.mFence);
+
+ IGraphicBufferProducer::QueueBufferOutput queueOutput;
+
+ uint64_t bufferId = bufferItem.mGraphicBuffer->getId();
+ const BufferTracker& tracker = *(mBuffers[bufferId]);
+ int slot = getSlotForOutputLocked(output, tracker.getBuffer());
+
+ // In case the output BufferQueue has its own lock, if we hold splitter lock while calling
+ // queueBuffer (which will try to acquire the output lock), the output could be holding its
+ // own lock calling releaseBuffer (which will try to acquire the splitter lock), running into
+ // circular lock situation.
+ mMutex.unlock();
+ res = output->queueBuffer(slot, queueInput, &queueOutput);
+ mMutex.lock();
+
+ SP_LOGV("%s: Queuing buffer to buffer queue %p slot %d returns %d",
+ __FUNCTION__, output.get(), slot, res);
+ if (res != OK) {
+ if (res != NO_INIT && res != DEAD_OBJECT) {
+ SP_LOGE("Queuing buffer to output failed (%d)", res);
+ }
+ // If we just discovered that this output has been abandoned, note
+ // that, increment the release count so that we still release this
+ // buffer eventually, and move on to the next output
+ onAbandonedLocked();
+ decrementBufRefCountLocked(bufferItem.mGraphicBuffer->getId(), output);
+ return res;
+ }
+
+ // If the queued buffer replaces a pending buffer in the async
+ // queue, no onBufferReleased is called by the buffer queue.
+ // Proactively trigger the callback to avoid buffer loss.
+ if (queueOutput.bufferReplaced) {
+ onBufferReleasedByOutputLocked(output);
+ }
+
+ return res;
+}
+
+String8 Camera3StreamSplitter::getUniqueConsumerName() {
+ static volatile int32_t counter = 0;
+ return String8::format("Camera3StreamSplitter-%d", android_atomic_inc(&counter));
+}
+
+status_t Camera3StreamSplitter::notifyBufferReleased(const sp<GraphicBuffer>& buffer) {
+ ATRACE_CALL();
+ status_t res = OK;
+
+ Mutex::Autolock lock(mMutex);
+
+ uint64_t bufferId = buffer->getId();
+ std::unique_ptr<BufferTracker> tracker_ptr = std::move(mBuffers[bufferId]);
+ mBuffers.erase(bufferId);
+
+ for (const auto surface : tracker_ptr->requestedSurfaces()) {
+ sp<IGraphicBufferProducer>& gbp = mOutputs[surface];
+ OutputSlots& outputSlots = *(mOutputSlots[gbp]);
+ int slot = getSlotForOutputLocked(gbp, buffer);
+ if (slot != BufferItem::INVALID_BUFFER_SLOT) {
+ gbp->detachBuffer(slot);
+ outputSlots[slot].clear();
+ }
+ }
+
+ return res;
+}
+
+status_t Camera3StreamSplitter::attachBufferToOutputs(ANativeWindowBuffer* anb,
+ const std::vector<size_t>& surface_ids) {
+ ATRACE_CALL();
+ status_t res = OK;
+
+ Mutex::Autolock lock(mMutex);
+
+ sp<GraphicBuffer> gb(static_cast<GraphicBuffer*>(anb));
+ uint64_t bufferId = gb->getId();
+
+ // Initialize buffer tracker for this input buffer
+ auto tracker = std::make_unique<BufferTracker>(gb, surface_ids);
+
+ for (auto& surface_id : surface_ids) {
+ sp<IGraphicBufferProducer>& gbp = mOutputs[surface_id];
+ int slot = BufferItem::INVALID_BUFFER_SLOT;
+        // Temporarily unlock the mutex when trying to attachBuffer to the output
+        // queue, because attachBuffer could block in case of a slow consumer. If
+        // we block while holding the lock, onFrameAvailable and onBufferReleased
+        // will block as well because they need to acquire the same lock.
+ mMutex.unlock();
+ res = gbp->attachBuffer(&slot, gb);
+ mMutex.lock();
+ if (res != OK) {
+ SP_LOGE("%s: Cannot acquireBuffer from GraphicBufferProducer %p: %s (%d)",
+ __FUNCTION__, gbp.get(), strerror(-res), res);
+ return res;
+ }
+ auto& outputSlots = *mOutputSlots[gbp];
+ if (outputSlots[slot] != nullptr) {
+ // If the buffer is attached to a slot which already contains a buffer,
+ // the previous buffer will be removed from the output queue. Decrement
+ // the reference count accordingly.
+ decrementBufRefCountLocked(outputSlots[slot]->getId(), gbp);
+ }
+ SP_LOGV("%s: Attached buffer %p to slot %d on output %p.",__FUNCTION__, gb.get(),
+ slot, gbp.get());
+ outputSlots[slot] = gb;
+ }
+
+ mBuffers[bufferId] = std::move(tracker);
+
+ return res;
+}
+
+void Camera3StreamSplitter::onFrameAvailable(const BufferItem& /*item*/) {
+ ATRACE_CALL();
+ Mutex::Autolock lock(mMutex);
+
+ // Acquire and detach the buffer from the input
+ BufferItem bufferItem;
+ status_t res = mConsumer->acquireBuffer(&bufferItem, /* presentWhen */ 0);
+ if (res != NO_ERROR) {
+ SP_LOGE("%s: Acquiring buffer from input failed (%d)", __FUNCTION__, res);
+ mOnFrameAvailableRes.store(res);
+ return;
+ }
+ if (mBuffers.find(bufferItem.mGraphicBuffer->getId()) == mBuffers.end()) {
+ SP_LOGE("%s: Acquired buffer doesn't exist in attached buffer map",
+ __FUNCTION__);
+ mOnFrameAvailableRes.store(INVALID_OPERATION);
+ return;
+ }
+
+ SP_LOGV("acquired buffer %" PRId64 " from input at slot %d",
+ bufferItem.mGraphicBuffer->getId(), bufferItem.mSlot);
+
+ res = mConsumer->detachBuffer(bufferItem.mSlot);
+ if (res != NO_ERROR) {
+ SP_LOGE("%s: detaching buffer from input failed (%d)", __FUNCTION__, res);
+ mOnFrameAvailableRes.store(res);
+ return;
+ }
+
+ // Attach and queue the buffer to each of the outputs
+ BufferTracker& tracker = *(mBuffers[bufferItem.mGraphicBuffer->getId()]);
+
+ SP_LOGV("%s: BufferTracker for buffer %" PRId64 ", number of requests %zu",
+ __FUNCTION__, bufferItem.mGraphicBuffer->getId(), tracker.requestedSurfaces().size());
+ for (const auto id : tracker.requestedSurfaces()) {
+
+ LOG_ALWAYS_FATAL_IF(id >= mOutputs.size(),
+ "requested surface id exceeding max registered ids");
+
+ res = outputBufferLocked(mOutputs[id], bufferItem);
+ if (res != OK) {
+ SP_LOGE("%s: outputBufferLocked failed %d", __FUNCTION__, res);
+ mOnFrameAvailableRes.store(res);
+ // If we fail to send buffer to certain output, keep sending to
+ // other outputs.
+ continue;
+ }
+ }
+
+ mOnFrameAvailableRes.store(res);
+}
+
+void Camera3StreamSplitter::decrementBufRefCountLocked(uint64_t id,
+ const sp<IGraphicBufferProducer>& from) {
+ ATRACE_CALL();
+ size_t referenceCount = mBuffers[id]->decrementReferenceCountLocked();
+
+ removeSlotForOutputLocked(from, mBuffers[id]->getBuffer());
+ if (referenceCount > 0) {
+ return;
+ }
+
+ // We no longer need to track the buffer now that it is being returned to the
+ // input. Note that this should happen before we unlock the mutex and call
+ // releaseBuffer, to avoid the case where the same bufferId is acquired in
+ // attachBufferToOutputs resulting in a new BufferTracker with same bufferId
+ // overwrites the current one.
+ std::unique_ptr<BufferTracker> tracker_ptr = std::move(mBuffers[id]);
+ mBuffers.erase(id);
+
+ // Attach and release the buffer back to the input
+ int consumerSlot = BufferItem::INVALID_BUFFER_SLOT;
+ status_t res = mConsumer->attachBuffer(&consumerSlot, tracker_ptr->getBuffer());
+ if (res != NO_ERROR) {
+ SP_LOGE("%s: attaching buffer to input failed (%d)", __FUNCTION__, res);
+ return;
+ }
+
+ // Temporarily unlock mutex to avoid circular lock:
+ // 1. This function holds splitter lock, calls releaseBuffer which triggers
+ // onBufferReleased in Camera3OutputStream. onBufferReleased waits on the
+ // OutputStream lock
+ // 2. Camera3SharedOutputStream::getBufferLocked calls
+ // attachBufferToOutputs, which holds the stream lock, and waits for the
+ // splitter lock.
+ sp<IGraphicBufferConsumer> consumer(mConsumer);
+ mMutex.unlock();
+ if (consumer != nullptr) {
+ res = consumer->releaseBuffer(consumerSlot, /* frameNumber */ 0,
+ EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, tracker_ptr->getMergedFence());
+ } else {
+ SP_LOGE("%s: consumer has become null!", __FUNCTION__);
+ }
+ mMutex.lock();
+ // If the producer of this queue is disconnected, -22 error will occur
+ if (res != NO_ERROR) {
+ SP_LOGE("%s: releaseBuffer returns %d", __FUNCTION__, res);
+ }
+}
+
+void Camera3StreamSplitter::onBufferReleasedByOutput(
+ const sp<IGraphicBufferProducer>& from) {
+ ATRACE_CALL();
+ Mutex::Autolock lock(mMutex);
+
+ onBufferReleasedByOutputLocked(from);
+}
+
+void Camera3StreamSplitter::onBufferReleasedByOutputLocked(
+ const sp<IGraphicBufferProducer>& from) {
+ ATRACE_CALL();
+ sp<GraphicBuffer> buffer;
+ sp<Fence> fence;
+ status_t res = from->detachNextBuffer(&buffer, &fence);
+ if (res == NO_INIT) {
+ // If we just discovered that this output has been abandoned, note that,
+ // but we can't do anything else, since buffer is invalid
+ onAbandonedLocked();
+ return;
+ } else if (res == NO_MEMORY) {
+ SP_LOGV("%s: No free buffers", __FUNCTION__);
+ return;
+ } else if (res != OK) {
+ SP_LOGE("%s: detaching buffer from output failed (%d)", __FUNCTION__, res);
+ return;
+ }
+
+ BufferTracker& tracker = *(mBuffers[buffer->getId()]);
+ // Merge the release fence of the incoming buffer so that the fence we send
+ // back to the input includes all of the outputs' fences
+ if (fence != nullptr && fence->isValid()) {
+ tracker.mergeFence(fence);
+ }
+ SP_LOGV("detached buffer %" PRId64 " %p from output %p",
+ buffer->getId(), buffer.get(), from.get());
+
+ // Check to see if this is the last outstanding reference to this buffer
+ decrementBufRefCountLocked(buffer->getId(), from);
+}
+
+void Camera3StreamSplitter::onAbandonedLocked() {
+ // If this is called from binderDied callback, it means the app process
+ // holding the binder has died. CameraService will be notified of the binder
+ // death, and camera device will be closed, which in turn calls
+ // disconnect().
+ //
+ // If this is called from onBufferReleasedByOutput or onFrameAvailable, one
+    // consumer being abandoned shouldn't impact the other consumer. So we won't
+ // stop the buffer flow.
+ //
+ // In both cases, we don't need to do anything here.
+ SP_LOGV("One of my outputs has abandoned me");
+}
+
+int Camera3StreamSplitter::getSlotForOutputLocked(const sp<IGraphicBufferProducer>& gbp,
+ const sp<GraphicBuffer>& gb) {
+ auto& outputSlots = *mOutputSlots[gbp];
+
+ for (size_t i = 0; i < outputSlots.size(); i++) {
+ if (outputSlots[i] == gb) {
+ return (int)i;
+ }
+ }
+
+ SP_LOGE("%s: Cannot find slot for gb %p on output %p", __FUNCTION__, gb.get(),
+ gbp.get());
+ return BufferItem::INVALID_BUFFER_SLOT;
+}
+
+status_t Camera3StreamSplitter::removeSlotForOutputLocked(const sp<IGraphicBufferProducer>& gbp,
+ const sp<GraphicBuffer>& gb) {
+ auto& outputSlots = *mOutputSlots[gbp];
+
+ for (size_t i = 0; i < outputSlots.size(); i++) {
+ if (outputSlots[i] == gb) {
+ outputSlots[i].clear();
+ return NO_ERROR;
+ }
+ }
+
+ SP_LOGE("%s: Cannot find slot for gb %p on output %p", __FUNCTION__, gb.get(),
+ gbp.get());
+ return BAD_VALUE;
+}
+
+Camera3StreamSplitter::OutputListener::OutputListener(
+ wp<Camera3StreamSplitter> splitter,
+ wp<IGraphicBufferProducer> output)
+ : mSplitter(splitter), mOutput(output) {}
+
+void Camera3StreamSplitter::OutputListener::onBufferReleased() {
+ ATRACE_CALL();
+ sp<Camera3StreamSplitter> splitter = mSplitter.promote();
+ sp<IGraphicBufferProducer> output = mOutput.promote();
+ if (splitter != nullptr && output != nullptr) {
+ splitter->onBufferReleasedByOutput(output);
+ }
+}
+
+void Camera3StreamSplitter::OutputListener::binderDied(const wp<IBinder>& /* who */) {
+ sp<Camera3StreamSplitter> splitter = mSplitter.promote();
+ if (splitter != nullptr) {
+ Mutex::Autolock lock(splitter->mMutex);
+ splitter->onAbandonedLocked();
+ }
+}
+
+Camera3StreamSplitter::BufferTracker::BufferTracker(
+ const sp<GraphicBuffer>& buffer, const std::vector<size_t>& requestedSurfaces)
+ : mBuffer(buffer), mMergedFence(Fence::NO_FENCE), mRequestedSurfaces(requestedSurfaces),
+ mReferenceCount(requestedSurfaces.size()) {}
+
+void Camera3StreamSplitter::BufferTracker::mergeFence(const sp<Fence>& with) {
+ mMergedFence = Fence::merge(String8("Camera3StreamSplitter"), mMergedFence, with);
+}
+
+size_t Camera3StreamSplitter::BufferTracker::decrementReferenceCountLocked() {
+ if (mReferenceCount > 0)
+ --mReferenceCount;
+ return mReferenceCount;
+}
+
+} // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3StreamSplitter.h b/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
new file mode 100644
index 0000000..cc623e0
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
@@ -0,0 +1,253 @@
+/*
+ * Copyright 2014,2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_STREAMSPLITTER_H
+#define ANDROID_SERVERS_STREAMSPLITTER_H
+
+#include <gui/IConsumerListener.h>
+#include <gui/IProducerListener.h>
+#include <gui/BufferItemConsumer.h>
+
+#include <utils/Condition.h>
+#include <utils/Mutex.h>
+#include <utils/StrongPointer.h>
+#include <utils/Timers.h>
+
+#define SP_LOGV(x, ...) ALOGV("[%s] " x, mConsumerName.string(), ##__VA_ARGS__)
+#define SP_LOGI(x, ...) ALOGI("[%s] " x, mConsumerName.string(), ##__VA_ARGS__)
+#define SP_LOGW(x, ...) ALOGW("[%s] " x, mConsumerName.string(), ##__VA_ARGS__)
+#define SP_LOGE(x, ...) ALOGE("[%s] " x, mConsumerName.string(), ##__VA_ARGS__)
+
+namespace android {
+
+class GraphicBuffer;
+class IGraphicBufferConsumer;
+class IGraphicBufferProducer;
+
+// Camera3StreamSplitter is an autonomous class that manages one input BufferQueue
+// and multiple output BufferQueues. By using the buffer attach and detach logic
+// in BufferQueue, it is able to present the illusion of a single split
+// BufferQueue, where each buffer queued to the input is available to be
+// acquired by each of the outputs, and is able to be dequeued by the input
+// again only once all of the outputs have released it.
+class Camera3StreamSplitter : public BnConsumerListener {
+public:
+
+ // Constructor
+ Camera3StreamSplitter() = default;
+
+ // Connect to the stream splitter by creating buffer queue and connecting it
+ // with output surfaces.
+ status_t connect(const std::vector<sp<Surface> >& surfaces,
+ uint32_t consumerUsage, size_t halMaxBuffers,
+ sp<Surface>* consumer);
+
+ // addOutput adds an output BufferQueue to the splitter. The splitter
+ // connects to outputQueue as a CPU producer, and any buffers queued
+ // to the input will be queued to each output. It is assumed that all of the
+ // outputs are added before any buffers are queued on the input. If any
+ // output is abandoned by its consumer, the splitter will abandon its input
+ // queue (see onAbandoned).
+ //
+ // A return value other than NO_ERROR means that an error has occurred and
+ // outputQueue has not been added to the splitter. BAD_VALUE is returned if
+ // outputQueue is NULL. See IGraphicBufferProducer::connect for explanations
+ // of other error codes.
+ status_t addOutput(const sp<Surface>& outputQueue);
+
+ // Notification that the graphic buffer has been released to the input
+ // BufferQueue. The buffer should be reused by the camera device instead of
+ // queuing to the outputs.
+ status_t notifyBufferReleased(const sp<GraphicBuffer>& buffer);
+
+ // Attach a buffer to the specified outputs. This call reserves a buffer
+ // slot in the output queue.
+ status_t attachBufferToOutputs(ANativeWindowBuffer* anb,
+ const std::vector<size_t>& surface_ids);
+
+ // Get return value of onFrameAvailable to work around problem that
+ // onFrameAvailable is void. This function should be called by the producer
+ // right after calling queueBuffer().
+ status_t getOnFrameAvailableResult();
+
+ // Disconnect the buffer queue from output surfaces.
+ void disconnect();
+
+private:
+ // From IConsumerListener
+ //
+ // During this callback, we store some tracking information, detach the
+ // buffer from the input, and attach it to each of the outputs. This call
+ // can block if there are too many outstanding buffers. If it blocks, it
+ // will resume when onBufferReleasedByOutput releases a buffer back to the
+ // input.
+ void onFrameAvailable(const BufferItem& item) override;
+
+ // From IConsumerListener
+ // We don't care about released buffers because we detach each buffer as
+ // soon as we acquire it. See the comment for onBufferReleased below for
+ // some clarifying notes about the name.
+ void onBuffersReleased() override {}
+
+ // From IConsumerListener
+ // We don't care about sideband streams, since we won't be splitting them
+ void onSidebandStreamChanged() override {}
+
+ // This is the implementation of the onBufferReleased callback from
+ // IProducerListener. It gets called from an OutputListener (see below), and
+ // 'from' is which producer interface from which the callback was received.
+ //
+ // During this callback, we detach the buffer from the output queue that
+ // generated the callback, update our state tracking to see if this is the
+ // last output releasing the buffer, and if so, release it to the input.
+ // If we release the buffer to the input, we allow a blocked
+ // onFrameAvailable call to proceed.
+ void onBufferReleasedByOutput(const sp<IGraphicBufferProducer>& from);
+
+    // Core implementation of onBufferReleasedByOutput; the caller must hold mMutex.
+ // It could either be called from onBufferReleasedByOutput or from
+ // onFrameAvailable when a buffer in the async buffer queue is overwritten.
+ void onBufferReleasedByOutputLocked(const sp<IGraphicBufferProducer>& from);
+
+ // When this is called, the splitter disconnects from (i.e., abandons) its
+ // input queue and signals any waiting onFrameAvailable calls to wake up.
+ // It still processes callbacks from other outputs, but only detaches their
+ // buffers so they can continue operating until they run out of buffers to
+ // acquire. This must be called with mMutex locked.
+ void onAbandonedLocked();
+
+ // Decrement the buffer's reference count. Once the reference count becomes
+ // 0, return the buffer back to the input BufferQueue.
+ void decrementBufRefCountLocked(uint64_t id, const sp<IGraphicBufferProducer>& from);
+
+ // This is a thin wrapper class that lets us determine which BufferQueue
+ // the IProducerListener::onBufferReleased callback is associated with. We
+ // create one of these per output BufferQueue, and then pass the producer
+ // into onBufferReleasedByOutput above.
+ class OutputListener : public BnProducerListener,
+ public IBinder::DeathRecipient {
+ public:
+ OutputListener(wp<Camera3StreamSplitter> splitter,
+ wp<IGraphicBufferProducer> output);
+ virtual ~OutputListener() = default;
+
+ // From IProducerListener
+ void onBufferReleased() override;
+
+ // From IBinder::DeathRecipient
+ void binderDied(const wp<IBinder>& who) override;
+
+ private:
+ wp<Camera3StreamSplitter> mSplitter;
+ wp<IGraphicBufferProducer> mOutput;
+ };
+
+ class BufferTracker {
+ public:
+ BufferTracker(const sp<GraphicBuffer>& buffer,
+ const std::vector<size_t>& requestedSurfaces);
+ ~BufferTracker() = default;
+
+ const sp<GraphicBuffer>& getBuffer() const { return mBuffer; }
+ const sp<Fence>& getMergedFence() const { return mMergedFence; }
+
+ void mergeFence(const sp<Fence>& with);
+
+ // Returns the new value
+ // Only called while mMutex is held
+ size_t decrementReferenceCountLocked();
+
+ const std::vector<size_t> requestedSurfaces() const { return mRequestedSurfaces; }
+
+ private:
+
+ // Disallow copying
+ BufferTracker(const BufferTracker& other);
+ BufferTracker& operator=(const BufferTracker& other);
+
+ sp<GraphicBuffer> mBuffer; // One instance that holds this native handle
+ sp<Fence> mMergedFence;
+
+        // Surfaces requested for a particular buffer. When the buffer becomes
+        // available from the input queue, these registered surfaces decide
+        // which outputs the buffer is sent to.
+ std::vector<size_t> mRequestedSurfaces;
+ size_t mReferenceCount;
+ };
+
+ // Must be accessed through RefBase
+ virtual ~Camera3StreamSplitter();
+
+ status_t addOutputLocked(const sp<Surface>& outputQueue);
+
+ // Send a buffer to particular output, and increment the reference count
+ // of the buffer. If this output is abandoned, the buffer's reference count
+ // won't be incremented.
+ status_t outputBufferLocked(const sp<IGraphicBufferProducer>& output,
+ const BufferItem& bufferItem);
+
+ // Get unique name for the buffer queue consumer
+ String8 getUniqueConsumerName();
+
+ // Helper function to get the BufferQueue slot where a particular buffer is attached to.
+ int getSlotForOutputLocked(const sp<IGraphicBufferProducer>& gbp,
+ const sp<GraphicBuffer>& gb);
+ // Helper function to remove the buffer from the BufferQueue slot
+ status_t removeSlotForOutputLocked(const sp<IGraphicBufferProducer>& gbp,
+ const sp<GraphicBuffer>& gb);
+
+
+ // Sum of max consumer buffers for all outputs
+ size_t mMaxConsumerBuffers = 0;
+ size_t mMaxHalBuffers = 0;
+
+ static const nsecs_t kDequeueBufferTimeout = s2ns(1); // 1 sec
+
+ Mutex mMutex;
+
+ sp<IGraphicBufferProducer> mProducer;
+ sp<IGraphicBufferConsumer> mConsumer;
+ sp<BufferItemConsumer> mBufferItemConsumer;
+ sp<Surface> mSurface;
+
+ std::vector<sp<IGraphicBufferProducer> > mOutputs;
+ // Map of GraphicBuffer IDs (GraphicBuffer::getId()) to buffer tracking
+ // objects (which are mostly for counting how many outputs have released the
+ // buffer, but also contain merged release fences).
+ std::unordered_map<uint64_t, std::unique_ptr<BufferTracker> > mBuffers;
+
+ struct GBPHash {
+ std::size_t operator()(const sp<IGraphicBufferProducer>& producer) const {
+ return std::hash<IGraphicBufferProducer *>{}(producer.get());
+ }
+ };
+
+ std::unordered_map<sp<IGraphicBufferProducer>, sp<OutputListener>,
+ GBPHash> mNotifiers;
+
+ typedef std::vector<sp<GraphicBuffer>> OutputSlots;
+ std::unordered_map<sp<IGraphicBufferProducer>, std::unique_ptr<OutputSlots>,
+ GBPHash> mOutputSlots;
+
+ // Latest onFrameAvailable return value
+ std::atomic<status_t> mOnFrameAvailableRes{0};
+
+ String8 mConsumerName;
+};
+
+} // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
deleted file mode 100644
index ea138b7..0000000
--- a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
+++ /dev/null
@@ -1,348 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "Camera3-ZslStream"
-#define ATRACE_TAG ATRACE_TAG_CAMERA
-//#define LOG_NDEBUG 0
-
-#include <inttypes.h>
-
-#include <utils/Log.h>
-#include <utils/Trace.h>
-#include "Camera3ZslStream.h"
-
-typedef android::RingBufferConsumer::PinnedBufferItem PinnedBufferItem;
-
-namespace android {
-
-namespace camera3 {
-
-namespace {
-struct TimestampFinder : public RingBufferConsumer::RingBufferComparator {
- typedef RingBufferConsumer::BufferInfo BufferInfo;
-
- enum {
- SELECT_I1 = -1,
- SELECT_I2 = 1,
- SELECT_NEITHER = 0,
- };
-
- explicit TimestampFinder(nsecs_t timestamp) : mTimestamp(timestamp) {}
- ~TimestampFinder() {}
-
- template <typename T>
- static void swap(T& a, T& b) {
- T tmp = a;
- a = b;
- b = tmp;
- }
-
- /**
- * Try to find the best candidate for a ZSL buffer.
- * Match priority from best to worst:
- * 1) Timestamps match.
- * 2) Timestamp is closest to the needle (and lower).
- * 3) Timestamp is closest to the needle (and higher).
- *
- */
- virtual int compare(const BufferInfo *i1,
- const BufferInfo *i2) const {
- // Try to select non-null object first.
- if (i1 == NULL) {
- return SELECT_I2;
- } else if (i2 == NULL) {
- return SELECT_I1;
- }
-
- // Best result: timestamp is identical
- if (i1->mTimestamp == mTimestamp) {
- return SELECT_I1;
- } else if (i2->mTimestamp == mTimestamp) {
- return SELECT_I2;
- }
-
- const BufferInfo* infoPtrs[2] = {
- i1,
- i2
- };
- int infoSelectors[2] = {
- SELECT_I1,
- SELECT_I2
- };
-
- // Order i1,i2 so that always i1.timestamp < i2.timestamp
- if (i1->mTimestamp > i2->mTimestamp) {
- swap(infoPtrs[0], infoPtrs[1]);
- swap(infoSelectors[0], infoSelectors[1]);
- }
-
- // Second best: closest (lower) timestamp
- if (infoPtrs[1]->mTimestamp < mTimestamp) {
- return infoSelectors[1];
- } else if (infoPtrs[0]->mTimestamp < mTimestamp) {
- return infoSelectors[0];
- }
-
- // Worst: closest (higher) timestamp
- return infoSelectors[0];
-
- /**
- * The above cases should cover all the possibilities,
- * and we get an 'empty' result only if the ring buffer
- * was empty itself
- */
- }
-
- const nsecs_t mTimestamp;
-}; // struct TimestampFinder
-} // namespace anonymous
-
-Camera3ZslStream::Camera3ZslStream(int id, uint32_t width, uint32_t height,
- int bufferCount) :
- Camera3OutputStream(id, CAMERA3_STREAM_BIDIRECTIONAL,
- width, height,
- HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
- HAL_DATASPACE_UNKNOWN, CAMERA3_STREAM_ROTATION_0) {
-
- sp<IGraphicBufferProducer> producer;
- sp<IGraphicBufferConsumer> consumer;
- BufferQueue::createBufferQueue(&producer, &consumer);
- mProducer = new RingBufferConsumer(consumer, GRALLOC_USAGE_HW_CAMERA_ZSL, bufferCount);
- mProducer->setName(String8("Camera2-ZslRingBufferConsumer"));
- mConsumer = new Surface(producer);
-}
-
-Camera3ZslStream::~Camera3ZslStream() {
-}
-
-status_t Camera3ZslStream::getInputBufferLocked(camera3_stream_buffer *buffer) {
- ATRACE_CALL();
-
- status_t res;
-
- // TODO: potentially register from inputBufferLocked
- // this should be ok, registerBuffersLocked only calls getBuffer for now
- // register in output mode instead of input mode for ZSL streams.
- if (mState == STATE_IN_CONFIG || mState == STATE_IN_RECONFIG) {
- ALOGE("%s: Stream %d: Buffer registration for input streams"
- " not implemented (state %d)",
- __FUNCTION__, mId, mState);
- return INVALID_OPERATION;
- }
-
- if ((res = getBufferPreconditionCheckLocked()) != OK) {
- return res;
- }
-
- ANativeWindowBuffer* anb;
- int fenceFd;
-
- assert(mProducer != 0);
-
- sp<PinnedBufferItem> bufferItem;
- {
- List<sp<RingBufferConsumer::PinnedBufferItem> >::iterator it, end;
- it = mInputBufferQueue.begin();
- end = mInputBufferQueue.end();
-
- // Need to call enqueueInputBufferByTimestamp as a prerequisite
- if (it == end) {
- ALOGE("%s: Stream %d: No input buffer was queued",
- __FUNCTION__, mId);
- return INVALID_OPERATION;
- }
- bufferItem = *it;
- mInputBufferQueue.erase(it);
- }
-
- anb = bufferItem->getBufferItem().mGraphicBuffer->getNativeBuffer();
- assert(anb != NULL);
- fenceFd = bufferItem->getBufferItem().mFence->dup();
-
- /**
- * FenceFD now owned by HAL except in case of error,
- * in which case we reassign it to acquire_fence
- */
- handoutBufferLocked(*buffer, &(anb->handle), /*acquireFence*/fenceFd,
- /*releaseFence*/-1, CAMERA3_BUFFER_STATUS_OK, /*output*/false);
-
- mBuffersInFlight.push_back(bufferItem);
-
- return OK;
-}
-
-status_t Camera3ZslStream::returnBufferCheckedLocked(
- const camera3_stream_buffer &buffer,
- nsecs_t timestamp,
- bool output,
- /*out*/
- sp<Fence> *releaseFenceOut) {
-
- if (output) {
- // Output stream path
- return Camera3OutputStream::returnBufferCheckedLocked(buffer,
- timestamp,
- output,
- releaseFenceOut);
- }
-
- /**
- * Input stream path
- */
- bool bufferFound = false;
- sp<PinnedBufferItem> bufferItem;
- {
- // Find the buffer we are returning
- Vector<sp<PinnedBufferItem> >::iterator it, end;
- for (it = mBuffersInFlight.begin(), end = mBuffersInFlight.end();
- it != end;
- ++it) {
-
- const sp<PinnedBufferItem>& tmp = *it;
- ANativeWindowBuffer *anb =
- tmp->getBufferItem().mGraphicBuffer->getNativeBuffer();
- if (anb != NULL && &(anb->handle) == buffer.buffer) {
- bufferFound = true;
- bufferItem = tmp;
- mBuffersInFlight.erase(it);
- break;
- }
- }
- }
- if (!bufferFound) {
- ALOGE("%s: Stream %d: Can't return buffer that wasn't sent to HAL",
- __FUNCTION__, mId);
- return INVALID_OPERATION;
- }
-
- int releaseFenceFd = buffer.release_fence;
-
- if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR) {
- if (buffer.release_fence != -1) {
- ALOGE("%s: Stream %d: HAL should not set release_fence(%d) when "
- "there is an error", __FUNCTION__, mId, buffer.release_fence);
- close(buffer.release_fence);
- }
-
- /**
- * Reassign release fence as the acquire fence incase of error
- */
- releaseFenceFd = buffer.acquire_fence;
- }
-
- /**
- * Unconditionally return buffer to the buffer queue.
- * - Fwk takes over the release_fence ownership
- */
- sp<Fence> releaseFence = new Fence(releaseFenceFd);
- bufferItem->getBufferItem().mFence = releaseFence;
- bufferItem.clear(); // dropping last reference unpins buffer
-
- *releaseFenceOut = releaseFence;
-
- return OK;
-}
-
-status_t Camera3ZslStream::returnInputBufferLocked(
- const camera3_stream_buffer &buffer) {
- ATRACE_CALL();
-
- status_t res = returnAnyBufferLocked(buffer, /*timestamp*/0,
- /*output*/false);
-
- return res;
-}
-
-void Camera3ZslStream::dump(int fd, const Vector<String16> &args) const {
- (void) args;
-
- String8 lines;
- lines.appendFormat(" Stream[%d]: ZSL\n", mId);
- write(fd, lines.string(), lines.size());
-
- Camera3IOStreamBase::dump(fd, args);
-
- lines = String8();
- lines.appendFormat(" Input buffers pending: %zu, in flight %zu\n",
- mInputBufferQueue.size(), mBuffersInFlight.size());
- write(fd, lines.string(), lines.size());
-}
-
-status_t Camera3ZslStream::enqueueInputBufferByTimestamp(
- nsecs_t timestamp,
- nsecs_t* actualTimestamp) {
-
- Mutex::Autolock l(mLock);
-
- TimestampFinder timestampFinder = TimestampFinder(timestamp);
-
- sp<RingBufferConsumer::PinnedBufferItem> pinnedBuffer =
- mProducer->pinSelectedBuffer(timestampFinder,
- /*waitForFence*/false);
-
- if (pinnedBuffer == 0) {
- ALOGE("%s: No ZSL buffers were available yet", __FUNCTION__);
- return NO_BUFFER_AVAILABLE;
- }
-
- nsecs_t actual = pinnedBuffer->getBufferItem().mTimestamp;
-
- if (actual != timestamp) {
- // TODO: this is problematic, we'll end up with using wrong result for this pinned buffer.
- ALOGW("%s: ZSL buffer candidate search didn't find an exact match --"
- " requested timestamp = %" PRId64 ", actual timestamp = %" PRId64,
- __FUNCTION__, timestamp, actual);
- }
-
- mInputBufferQueue.push_back(pinnedBuffer);
-
- if (actualTimestamp != NULL) {
- *actualTimestamp = actual;
- }
-
- return OK;
-}
-
-status_t Camera3ZslStream::clearInputRingBuffer(nsecs_t* latestTimestamp) {
- Mutex::Autolock l(mLock);
-
- return clearInputRingBufferLocked(latestTimestamp);
-}
-
-status_t Camera3ZslStream::clearInputRingBufferLocked(nsecs_t* latestTimestamp) {
-
- if (latestTimestamp) {
- *latestTimestamp = mProducer->getLatestTimestamp();
- }
- mInputBufferQueue.clear();
-
- return mProducer->clear();
-}
-
-status_t Camera3ZslStream::disconnectLocked() {
- clearInputRingBufferLocked(NULL);
-
- return Camera3OutputStream::disconnectLocked();
-}
-
-status_t Camera3ZslStream::setTransform(int /*transform*/) {
- ALOGV("%s: Not implemented", __FUNCTION__);
- return INVALID_OPERATION;
-}
-
-}; // namespace camera3
-
-}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3ZslStream.h b/services/camera/libcameraservice/device3/Camera3ZslStream.h
deleted file mode 100644
index 12369cf..0000000
--- a/services/camera/libcameraservice/device3/Camera3ZslStream.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_SERVERS_CAMERA3_ZSL_STREAM_H
-#define ANDROID_SERVERS_CAMERA3_ZSL_STREAM_H
-
-#include <utils/RefBase.h>
-#include <gui/Surface.h>
-#include <gui/RingBufferConsumer.h>
-
-#include "Camera3OutputStream.h"
-
-namespace android {
-
-namespace camera3 {
-
-/**
- * A class for managing a single opaque ZSL stream to/from the camera device.
- * This acts as a bidirectional stream at the HAL layer, caching and discarding
- * most output buffers, and when directed, pushes a buffer back to the HAL for
- * processing.
- */
-class Camera3ZslStream :
- public Camera3OutputStream {
- public:
- /**
- * Set up a ZSL stream of a given resolution. bufferCount is the number of buffers
- * cached within the stream that can be retrieved for input.
- */
- Camera3ZslStream(int id, uint32_t width, uint32_t height, int bufferCount);
- ~Camera3ZslStream();
-
- virtual void dump(int fd, const Vector<String16> &args) const;
-
- enum { NO_BUFFER_AVAILABLE = BufferQueue::NO_BUFFER_AVAILABLE };
-
- /**
- * Locate a buffer matching this timestamp in the RingBufferConsumer,
- * and mark it to be queued at the next getInputBufferLocked invocation.
- *
- * Errors: Returns NO_BUFFER_AVAILABLE if we could not find a match.
- *
- */
- status_t enqueueInputBufferByTimestamp(nsecs_t timestamp,
- nsecs_t* actualTimestamp);
-
- /**
- * Clears the buffers that can be used by enqueueInputBufferByTimestamp
- * latestTimestamp will be filled with the largest timestamp of buffers
- * being cleared, 0 if there is no buffer being clear.
- */
- status_t clearInputRingBuffer(nsecs_t* latestTimestamp);
-
- protected:
-
- /**
- * Camera3OutputStreamInterface implementation
- */
- status_t setTransform(int transform);
-
- private:
-
- // Input buffers pending to be queued into HAL
- List<sp<RingBufferConsumer::PinnedBufferItem> > mInputBufferQueue;
- sp<RingBufferConsumer> mProducer;
-
- // Input buffers in flight to HAL
- Vector<sp<RingBufferConsumer::PinnedBufferItem> > mBuffersInFlight;
-
- /**
- * Camera3Stream interface
- */
-
- // getInputBuffer/returnInputBuffer operate the input stream side of the
- // ZslStream.
- virtual status_t getInputBufferLocked(camera3_stream_buffer *buffer);
- virtual status_t returnInputBufferLocked(
- const camera3_stream_buffer &buffer);
-
- // Actual body to return either input or output buffers
- virtual status_t returnBufferCheckedLocked(
- const camera3_stream_buffer &buffer,
- nsecs_t timestamp,
- bool output,
- /*out*/
- sp<Fence> *releaseFenceOut);
-
- // Disconnet the Camera3ZslStream specific bufferQueues.
- virtual status_t disconnectLocked();
-
- status_t clearInputRingBufferLocked(nsecs_t* latestTimestamp);
-
-}; // class Camera3ZslStream
-
-}; // namespace camera3
-
-}; // namespace android
-
-#endif
diff --git a/services/camera/libcameraservice/gui/RingBufferConsumer.h b/services/camera/libcameraservice/gui/RingBufferConsumer.h
index 28dc5d5..2bafe4a 100644
--- a/services/camera/libcameraservice/gui/RingBufferConsumer.h
+++ b/services/camera/libcameraservice/gui/RingBufferConsumer.h
@@ -19,18 +19,16 @@
#include <gui/BufferItem.h>
#include <gui/ConsumerBase.h>
+#include <gui/BufferQueue.h>
-#include <ui/GraphicBuffer.h>
-
-#include <utils/String8.h>
-#include <utils/Vector.h>
-#include <utils/threads.h>
#include <utils/List.h>
#define ANDROID_GRAPHICS_RINGBUFFERCONSUMER_JNI_ID "mRingBufferConsumer"
namespace android {
+class String8;
+
/**
* The RingBufferConsumer maintains a ring buffer of BufferItem objects,
* (which are 'acquired' as long as they are part of the ring buffer, and
diff --git a/services/camera/libcameraservice/tests/Android.mk b/services/camera/libcameraservice/tests/Android.mk
new file mode 100644
index 0000000..37a05c2
--- /dev/null
+++ b/services/camera/libcameraservice/tests/Android.mk
@@ -0,0 +1,42 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= $(call all-cpp-files-under, .)
+
+LOCAL_SHARED_LIBRARIES := \
+ libcutils \
+ libcameraservice \
+ libhidlbase \
+ liblog \
+ libhidltransport \
+ libcamera_client \
+ libcamera_metadata \
+ libutils \
+ android.hardware.camera.common@1.0 \
+ android.hardware.camera.provider@2.4 \
+ android.hardware.camera.device@1.0 \
+ android.hardware.camera.device@3.2
+
+LOCAL_C_INCLUDES += \
+ system/media/private/camera/include \
+
+LOCAL_CFLAGS += -Wall -Wextra -Werror
+
+LOCAL_MODULE:= cameraservice_test
+LOCAL_MODULE_TAGS := tests
+
+include $(BUILD_NATIVE_TEST)
diff --git a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
new file mode 100644
index 0000000..c1d6e85
--- /dev/null
+++ b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
@@ -0,0 +1,367 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "CameraProviderManagerTest"
+
+#include "../common/CameraProviderManager.h"
+#include <android/hidl/manager/1.0/IServiceManager.h>
+#include <android/hidl/manager/1.0/IServiceNotification.h>
+#include <android/hardware/camera/device/3.2/ICameraDeviceCallback.h>
+#include <android/hardware/camera/device/3.2/ICameraDeviceSession.h>
+#include <camera_metadata_hidden.h>
+#include <gtest/gtest.h>
+
+using namespace android;
+using namespace android::hardware::camera;
+using android::hardware::camera::common::V1_0::Status;
+using android::hardware::camera::common::V1_0::VendorTag;
+using android::hardware::camera::common::V1_0::VendorTagSection;
+using android::hardware::camera::common::V1_0::CameraMetadataType;
+using android::hardware::camera::device::V3_2::ICameraDeviceCallback;
+using android::hardware::camera::device::V3_2::ICameraDeviceSession;
+
+/**
+ * Basic test implementation of a camera ver. 3.2 device interface
+ */
+struct TestDeviceInterface : public device::V3_2::ICameraDevice {
+ std::vector<hardware::hidl_string> mDeviceNames;
+ TestDeviceInterface(std::vector<hardware::hidl_string> deviceNames) :
+ mDeviceNames(deviceNames) {}
+ using getResourceCost_cb = std::function<void(
+ hardware::camera::common::V1_0::Status status,
+ const hardware::camera::common::V1_0::CameraResourceCost& resourceCost)>;
+ virtual ::android::hardware::Return<void> getResourceCost(
+ getResourceCost_cb _hidl_cb) override {
+ hardware::camera::common::V1_0::CameraResourceCost resourceCost = {100,
+ mDeviceNames};
+ _hidl_cb(Status::OK, resourceCost);
+ return hardware::Void();
+ }
+
+ using getCameraCharacteristics_cb = std::function<void(
+ hardware::camera::common::V1_0::Status status,
+ const hardware::hidl_vec<uint8_t>& cameraCharacteristics)>;
+ hardware::Return<void> getCameraCharacteristics(
+ getCameraCharacteristics_cb _hidl_cb) override {
+ hardware::hidl_vec<uint8_t> cameraCharacteristics;
+ _hidl_cb(Status::OK, cameraCharacteristics);
+ return hardware::Void();
+ }
+
+ hardware::Return<hardware::camera::common::V1_0::Status> setTorchMode(
+ ::android::hardware::camera::common::V1_0::TorchMode) override {
+ return Status::OK;
+ }
+
+ using open_cb = std::function<void(
+ ::android::hardware::camera::common::V1_0::Status status,
+ const ::android::sp<ICameraDeviceSession>& session)>;
+ hardware::Return<void> open(
+ const ::android::sp<ICameraDeviceCallback>&,
+ open_cb _hidl_cb) override {
+ sp<ICameraDeviceSession> deviceSession = nullptr;
+ _hidl_cb(Status::OK, deviceSession);
+ return hardware::Void();
+ }
+
+ hardware::Return<void> dumpState(
+ const ::android::hardware::hidl_handle&) override {
+ return hardware::Void();
+ }
+};
+
+/**
+ * Basic test implementation of a camera provider
+ */
+struct TestICameraProvider : virtual public provider::V2_4::ICameraProvider {
+ sp<provider::V2_4::ICameraProviderCallback> mCallbacks;
+ std::vector<hardware::hidl_string> mDeviceNames;
+ sp<device::V3_2::ICameraDevice> mDeviceInterface;
+ hardware::hidl_vec<common::V1_0::VendorTagSection> mVendorTagSections;
+
+ TestICameraProvider(const std::vector<hardware::hidl_string> &devices,
+ const hardware::hidl_vec<common::V1_0::VendorTagSection> &vendorSection) :
+ mDeviceNames(devices),
+ mDeviceInterface(new TestDeviceInterface(devices)),
+ mVendorTagSections (vendorSection) {}
+
+ virtual hardware::Return<Status> setCallback(
+ const sp<provider::V2_4::ICameraProviderCallback>& callbacks) override {
+ mCallbacks = callbacks;
+ return hardware::Return<Status>(Status::OK);
+ }
+
+ using getVendorTags_cb = std::function<void(Status status,
+ const hardware::hidl_vec<common::V1_0::VendorTagSection>& sections)>;
+ hardware::Return<void> getVendorTags(getVendorTags_cb _hidl_cb) override {
+ _hidl_cb(Status::OK, mVendorTagSections);
+ return hardware::Void();
+ }
+
+ using isSetTorchModeSupported_cb = std::function<void(
+ ::android::hardware::camera::common::V1_0::Status status,
+ bool support)>;
+ virtual ::hardware::Return<void> isSetTorchModeSupported(
+ isSetTorchModeSupported_cb _hidl_cb) override {
+ _hidl_cb(Status::OK, false);
+ return hardware::Void();
+ }
+
+ using getCameraIdList_cb = std::function<void(Status status,
+ const hardware::hidl_vec<hardware::hidl_string>& cameraDeviceNames)>;
+ virtual hardware::Return<void> getCameraIdList(getCameraIdList_cb _hidl_cb) override {
+ _hidl_cb(Status::OK, mDeviceNames);
+ return hardware::Void();
+ }
+
+ using getCameraDeviceInterface_V1_x_cb = std::function<void(Status status,
+ const sp<device::V1_0::ICameraDevice>& device)>;
+ virtual hardware::Return<void> getCameraDeviceInterface_V1_x(
+ const hardware::hidl_string& cameraDeviceName,
+ getCameraDeviceInterface_V1_x_cb _hidl_cb) override {
+ (void) cameraDeviceName;
+ _hidl_cb(Status::OK, nullptr); //TODO: impl. of ver. 1.0 device interface
+ // otherwise enumeration will fail.
+ return hardware::Void();
+ }
+
+ using getCameraDeviceInterface_V3_x_cb = std::function<void(Status status,
+ const sp<device::V3_2::ICameraDevice>& device)>;
+ virtual hardware::Return<void> getCameraDeviceInterface_V3_x(
+ const hardware::hidl_string&,
+ getCameraDeviceInterface_V3_x_cb _hidl_cb) override {
+ _hidl_cb(Status::OK, mDeviceInterface);
+ return hardware::Void();
+ }
+
+};
+
+/**
+ * Simple test version of the interaction proxy, used to inject onRegistered calls to the
+ * CameraProviderManager
+ */
+struct TestInteractionProxy : public CameraProviderManager::ServiceInteractionProxy {
+ sp<hidl::manager::V1_0::IServiceNotification> mManagerNotificationInterface;
+ sp<TestICameraProvider> mTestCameraProvider;
+
+ TestInteractionProxy() {}
+ void setProvider(sp<TestICameraProvider> provider) {
+ mTestCameraProvider = provider;
+ }
+
+ std::string mLastRequestedServiceName;
+
+ virtual ~TestInteractionProxy() {}
+
+ virtual bool registerForNotifications(
+ const std::string &serviceName,
+ const sp<hidl::manager::V1_0::IServiceNotification> ¬ification) override {
+ (void) serviceName;
+ mManagerNotificationInterface = notification;
+ return true;
+ }
+
+ virtual sp<hardware::camera::provider::V2_4::ICameraProvider> getService(
+ const std::string &serviceName) override {
+ mLastRequestedServiceName = serviceName;
+ return mTestCameraProvider;
+ }
+
+};
+
+struct TestStatusListener : public CameraProviderManager::StatusListener {
+ ~TestStatusListener() {}
+
+ void onDeviceStatusChanged(const String8 &,
+ hardware::camera::common::V1_0::CameraDeviceStatus) override {}
+ void onTorchStatusChanged(const String8 &,
+ hardware::camera::common::V1_0::TorchModeStatus) override {}
+};
+
+TEST(CameraProviderManagerTest, InitializeTest) {
+ std::vector<hardware::hidl_string> deviceNames;
+ deviceNames.push_back("device@3.2/test/0");
+ deviceNames.push_back("device@1.0/test/0");
+ deviceNames.push_back("device@3.2/test/1");
+ hardware::hidl_vec<common::V1_0::VendorTagSection> vendorSection;
+ status_t res;
+ sp<CameraProviderManager> providerManager = new CameraProviderManager();
+ sp<TestStatusListener> statusListener = new TestStatusListener();
+ TestInteractionProxy serviceProxy;
+ sp<TestICameraProvider> provider = new TestICameraProvider(deviceNames,
+ vendorSection);
+ serviceProxy.setProvider(provider);
+
+ res = providerManager->initialize(statusListener, &serviceProxy);
+ ASSERT_EQ(res, OK) << "Unable to initialize provider manager";
+
+ hardware::hidl_string legacyInstanceName = "legacy/0";
+ ASSERT_EQ(serviceProxy.mLastRequestedServiceName, legacyInstanceName) <<
+ "Legacy instance not requested from service manager";
+
+ hardware::hidl_string testProviderFqInterfaceName =
+ "android.hardware.camera.provider@2.4::ICameraProvider";
+ hardware::hidl_string testProviderInstanceName = "test/0";
+ serviceProxy.mManagerNotificationInterface->onRegistration(
+ testProviderFqInterfaceName,
+ testProviderInstanceName, false);
+
+ ASSERT_EQ(serviceProxy.mLastRequestedServiceName, testProviderInstanceName) <<
+ "Incorrect instance requested from service manager";
+}
+
+TEST(CameraProviderManagerTest, MultipleVendorTagTest) {
+ hardware::hidl_string sectionName = "VendorTestSection";
+ hardware::hidl_string tagName = "VendorTestTag";
+ uint32_t tagId = VENDOR_SECTION << 16;
+ hardware::hidl_vec<common::V1_0::VendorTagSection> vendorSection;
+ CameraMetadataType tagType = CameraMetadataType::BYTE;
+ vendorSection.resize(1);
+ vendorSection[0].sectionName = sectionName;
+ vendorSection[0].tags.resize(1);
+ vendorSection[0].tags[0].tagId = tagId;
+ vendorSection[0].tags[0].tagName = tagName;
+ vendorSection[0].tags[0].tagType = tagType;
+ std::vector<hardware::hidl_string> deviceNames = {"device@3.2/test/0"};
+
+ sp<CameraProviderManager> providerManager = new CameraProviderManager();
+ sp<TestStatusListener> statusListener = new TestStatusListener();
+ TestInteractionProxy serviceProxy;
+
+ sp<TestICameraProvider> provider = new TestICameraProvider(deviceNames,
+ vendorSection);
+ serviceProxy.setProvider(provider);
+
+ auto res = providerManager->initialize(statusListener, &serviceProxy);
+ ASSERT_EQ(res, OK) << "Unable to initialize provider manager";
+
+ hardware::hidl_string testProviderInstanceName = "test/0";
+ hardware::hidl_string testProviderFqInterfaceName =
+ "android.hardware.camera.provider@2.4::ICameraProvider";
+ serviceProxy.mManagerNotificationInterface->onRegistration(
+ testProviderFqInterfaceName, testProviderInstanceName, false);
+ ASSERT_EQ(serviceProxy.mLastRequestedServiceName, testProviderInstanceName) <<
+ "Incorrect instance requested from service manager";
+
+ hardware::hidl_string sectionNameSecond = "SecondVendorTestSection";
+ hardware::hidl_string secondTagName = "SecondVendorTestTag";
+ CameraMetadataType secondTagType = CameraMetadataType::DOUBLE;
+ vendorSection[0].sectionName = sectionNameSecond;
+ vendorSection[0].tags[0].tagId = tagId;
+ vendorSection[0].tags[0].tagName = secondTagName;
+ vendorSection[0].tags[0].tagType = secondTagType;
+ deviceNames = {"device@3.2/test2/1"};
+
+ sp<TestICameraProvider> secondProvider = new TestICameraProvider(
+ deviceNames, vendorSection);
+ serviceProxy.setProvider(secondProvider);
+ hardware::hidl_string testProviderSecondInstanceName = "test2/0";
+ serviceProxy.mManagerNotificationInterface->onRegistration(
+ testProviderFqInterfaceName, testProviderSecondInstanceName, false);
+ ASSERT_EQ(serviceProxy.mLastRequestedServiceName,
+ testProviderSecondInstanceName) <<
+ "Incorrect instance requested from service manager";
+
+ ASSERT_EQ(NO_ERROR , providerManager->setUpVendorTags());
+ sp<VendorTagDescriptorCache> vendorCache =
+ VendorTagDescriptorCache::getGlobalVendorTagCache();
+ ASSERT_NE(nullptr, vendorCache.get());
+
+ metadata_vendor_id_t vendorId = std::hash<std::string> {} (
+ testProviderInstanceName.c_str());
+ metadata_vendor_id_t vendorIdSecond = std::hash<std::string> {} (
+ testProviderSecondInstanceName.c_str());
+
+ hardware::hidl_string resultTag = vendorCache->getTagName(tagId, vendorId);
+ ASSERT_EQ(resultTag, tagName);
+
+ resultTag = vendorCache->getTagName(tagId, vendorIdSecond);
+ ASSERT_EQ(resultTag, secondTagName);
+
+ // Check whether we can create two separate CameraMetadata instances
+ // using different vendor tag providers.
+ camera_metadata *metaBuffer = allocate_camera_metadata(10, 20);
+ ASSERT_NE(nullptr, metaBuffer);
+ set_camera_metadata_vendor_id(metaBuffer, vendorId);
+ CameraMetadata metadata(metaBuffer);
+
+ uint8_t byteVal = 10;
+ ASSERT_TRUE(metadata.isEmpty());
+ ASSERT_EQ(OK, metadata.update(tagId, &byteVal, 1));
+ ASSERT_FALSE(metadata.isEmpty());
+ ASSERT_TRUE(metadata.exists(tagId));
+
+ metaBuffer = allocate_camera_metadata(10, 20);
+ ASSERT_NE(nullptr, metaBuffer);
+ set_camera_metadata_vendor_id(metaBuffer, vendorIdSecond);
+ CameraMetadata secondMetadata(metaBuffer);
+
+ ASSERT_TRUE(secondMetadata.isEmpty());
+ double doubleVal = 1.0f;
+ ASSERT_EQ(OK, secondMetadata.update(tagId, &doubleVal, 1));
+ ASSERT_FALSE(secondMetadata.isEmpty());
+ ASSERT_TRUE(secondMetadata.exists(tagId));
+
+ // Check whether CameraMetadata copying works as expected
+ CameraMetadata metadataCopy(metadata);
+ ASSERT_FALSE(metadataCopy.isEmpty());
+ ASSERT_TRUE(metadataCopy.exists(tagId));
+ ASSERT_EQ(OK, metadataCopy.update(tagId, &byteVal, 1));
+ ASSERT_TRUE(metadataCopy.exists(tagId));
+
+ // Check whether values are as expected
+ camera_metadata_entry_t entry = metadata.find(tagId);
+ ASSERT_EQ(1u, entry.count);
+ ASSERT_EQ(byteVal, entry.data.u8[0]);
+ entry = secondMetadata.find(tagId);
+ ASSERT_EQ(1u, entry.count);
+ ASSERT_EQ(doubleVal, entry.data.d[0]);
+
+ // Swap and erase
+ secondMetadata.swap(metadataCopy);
+ ASSERT_TRUE(metadataCopy.exists(tagId));
+ ASSERT_TRUE(secondMetadata.exists(tagId));
+ ASSERT_EQ(OK, secondMetadata.erase(tagId));
+ ASSERT_TRUE(secondMetadata.isEmpty());
+ doubleVal = 0.0f;
+ ASSERT_EQ(OK, metadataCopy.update(tagId, &doubleVal, 1));
+ entry = metadataCopy.find(tagId);
+ ASSERT_EQ(1u, entry.count);
+ ASSERT_EQ(doubleVal, entry.data.d[0]);
+
+ // Append
+ uint8_t sceneMode = ANDROID_CONTROL_SCENE_MODE_ACTION;
+ secondMetadata.update(ANDROID_CONTROL_SCENE_MODE, &sceneMode, 1);
+ // Append from two different vendor tag providers is not supported!
+ ASSERT_NE(OK, metadataCopy.append(secondMetadata));
+ ASSERT_EQ(OK, metadataCopy.erase(tagId));
+ metadataCopy.update(ANDROID_CONTROL_SCENE_MODE, &sceneMode, 1);
+ // However appending from same vendor tag provider should be fine
+ ASSERT_EQ(OK, metadata.append(secondMetadata));
+ // Append from a metadata without vendor tag provider should be supported
+ CameraMetadata regularMetadata(10, 20);
+ uint8_t controlMode = ANDROID_CONTROL_MODE_AUTO;
+ regularMetadata.update(ANDROID_CONTROL_MODE, &controlMode, 1);
+ ASSERT_EQ(OK, secondMetadata.append(regularMetadata));
+ ASSERT_EQ(2u, secondMetadata.entryCount());
+ ASSERT_EQ(2u, metadata.entryCount());
+
+ // Dump
+ metadata.dump(1, 2);
+ metadataCopy.dump(1, 2);
+ secondMetadata.dump(1, 2);
+}
diff --git a/services/camera/libcameraservice/utils/CameraTraces.cpp b/services/camera/libcameraservice/utils/CameraTraces.cpp
index 374dc5e..0198690 100644
--- a/services/camera/libcameraservice/utils/CameraTraces.cpp
+++ b/services/camera/libcameraservice/utils/CameraTraces.cpp
@@ -74,7 +74,7 @@
return BAD_VALUE;
}
- dprintf(fd, "Camera traces (%zu):\n", pcsList.size());
+ dprintf(fd, "== Camera error traces (%zu): ==\n", pcsList.size());
if (pcsList.empty()) {
dprintf(fd, " No camera traces collected.\n");
diff --git a/services/camera/libcameraservice/utils/ClientManager.h b/services/camera/libcameraservice/utils/ClientManager.h
index 830c50b..d7135f1 100644
--- a/services/camera/libcameraservice/utils/ClientManager.h
+++ b/services/camera/libcameraservice/utils/ClientManager.h
@@ -31,6 +31,43 @@
namespace android {
namespace resource_policy {
+class ClientPriority {
+public:
+ ClientPriority(int32_t score, int32_t state) :
+ mScore(score), mState(state) {}
+
+ int32_t getScore() const { return mScore; }
+ int32_t getState() const { return mState; }
+
+ bool operator==(const ClientPriority& rhs) const {
+ return (this->mScore == rhs.mScore) && (this->mState == rhs.mState);
+ }
+
+ bool operator< (const ClientPriority& rhs) const {
+ if (this->mScore == rhs.mScore) {
+ return this->mState < rhs.mState;
+ } else {
+ return this->mScore < rhs.mScore;
+ }
+ }
+
+ bool operator> (const ClientPriority& rhs) const {
+ return rhs < *this;
+ }
+
+ bool operator<=(const ClientPriority& rhs) const {
+ return !(*this > rhs);
+ }
+
+ bool operator>=(const ClientPriority& rhs) const {
+ return !(*this < rhs);
+ }
+
+private:
+ int32_t mScore;
+ int32_t mState;
+};
+
// --------------------------------------------------------------------------------
/**
@@ -45,9 +82,9 @@
class ClientDescriptor final {
public:
ClientDescriptor(const KEY& key, const VALUE& value, int32_t cost,
- const std::set<KEY>& conflictingKeys, int32_t priority, int32_t ownerId);
+ const std::set<KEY>& conflictingKeys, int32_t score, int32_t ownerId, int32_t state);
ClientDescriptor(KEY&& key, VALUE&& value, int32_t cost, std::set<KEY>&& conflictingKeys,
- int32_t priority, int32_t ownerId);
+ int32_t score, int32_t ownerId, int32_t state);
~ClientDescriptor();
@@ -69,7 +106,7 @@
/**
* Return the priority for this descriptor.
*/
- int32_t getPriority() const;
+ const ClientPriority &getPriority() const;
/**
* Return the owner ID for this descriptor.
@@ -89,7 +126,7 @@
/**
* Set the proirity for this descriptor.
*/
- void setPriority(int32_t priority);
+ void setPriority(const ClientPriority& priority);
// This class is ordered by key
template<class K, class V>
@@ -100,7 +137,7 @@
VALUE mValue;
int32_t mCost;
std::set<KEY> mConflicting;
- int32_t mPriority;
+ ClientPriority mPriority;
int32_t mOwnerId;
}; // class ClientDescriptor
@@ -111,16 +148,17 @@
template<class KEY, class VALUE>
ClientDescriptor<KEY, VALUE>::ClientDescriptor(const KEY& key, const VALUE& value, int32_t cost,
- const std::set<KEY>& conflictingKeys, int32_t priority, int32_t ownerId) : mKey{key},
- mValue{value}, mCost{cost}, mConflicting{conflictingKeys}, mPriority{priority},
+ const std::set<KEY>& conflictingKeys, int32_t score, int32_t ownerId, int32_t state) :
+ mKey{key}, mValue{value}, mCost{cost}, mConflicting{conflictingKeys},
+ mPriority(score, state),
mOwnerId{ownerId} {}
template<class KEY, class VALUE>
ClientDescriptor<KEY, VALUE>::ClientDescriptor(KEY&& key, VALUE&& value, int32_t cost,
- std::set<KEY>&& conflictingKeys, int32_t priority, int32_t ownerId) :
+ std::set<KEY>&& conflictingKeys, int32_t score, int32_t ownerId, int32_t state) :
mKey{std::forward<KEY>(key)}, mValue{std::forward<VALUE>(value)}, mCost{cost},
- mConflicting{std::forward<std::set<KEY>>(conflictingKeys)}, mPriority{priority},
- mOwnerId{ownerId} {}
+ mConflicting{std::forward<std::set<KEY>>(conflictingKeys)},
+ mPriority(score, state), mOwnerId{ownerId} {}
template<class KEY, class VALUE>
ClientDescriptor<KEY, VALUE>::~ClientDescriptor() {}
@@ -141,7 +179,7 @@
}
template<class KEY, class VALUE>
-int32_t ClientDescriptor<KEY, VALUE>::getPriority() const {
+const ClientPriority& ClientDescriptor<KEY, VALUE>::getPriority() const {
return mPriority;
}
@@ -165,7 +203,7 @@
}
template<class KEY, class VALUE>
-void ClientDescriptor<KEY, VALUE>::setPriority(int32_t priority) {
+void ClientDescriptor<KEY, VALUE>::setPriority(const ClientPriority& priority) {
mPriority = priority;
}
@@ -231,7 +269,7 @@
* Given a map containing owner (pid) -> priority mappings, update the priority of each
* ClientDescriptor with an owner in this mapping.
*/
- void updatePriorities(const std::map<int32_t,int32_t>& ownerPriorityList);
+ void updatePriorities(const std::map<int32_t,ClientPriority>& ownerPriorityList);
/**
* Remove all ClientDescriptors.
@@ -383,17 +421,17 @@
const KEY& key = client->getKey();
int32_t cost = client->getCost();
- int32_t priority = client->getPriority();
+ ClientPriority priority = client->getPriority();
int32_t owner = client->getOwnerId();
int64_t totalCost = getCurrentCostLocked() + cost;
// Determine the MRU of the owners tied for having the highest priority
int32_t highestPriorityOwner = owner;
- int32_t highestPriority = priority;
+ ClientPriority highestPriority = priority;
for (const auto& i : mClients) {
- int32_t curPriority = i->getPriority();
- if (curPriority >= highestPriority) {
+ ClientPriority curPriority = i->getPriority();
+ if (curPriority <= highestPriority) {
highestPriority = curPriority;
highestPriorityOwner = i->getOwnerId();
}
@@ -408,7 +446,7 @@
for (const auto& i : mClients) {
const KEY& curKey = i->getKey();
int32_t curCost = i->getCost();
- int32_t curPriority = i->getPriority();
+ ClientPriority curPriority = i->getPriority();
int32_t curOwner = i->getOwnerId();
bool conflicting = (curKey == key || i->isConflicting(key) ||
@@ -417,13 +455,13 @@
if (!returnIncompatibleClients) {
// Find evicted clients
- if (conflicting && curPriority > priority) {
+ if (conflicting && curPriority < priority) {
// Pre-existing conflicting client with higher priority exists
evictList.clear();
evictList.push_back(client);
return evictList;
} else if (conflicting || ((totalCost > mMaxCost && curCost > 0) &&
- (curPriority <= priority) &&
+ (curPriority >= priority) &&
!(highestPriorityOwner == owner && owner == curOwner))) {
// Add a pre-existing client to the eviction list if:
// - We are adding a client with higher priority that conflicts with this one.
@@ -437,7 +475,7 @@
} else {
// Find clients preventing the incoming client from being added
- if (curPriority > priority && (conflicting || (totalCost > mMaxCost && curCost > 0))) {
+ if (curPriority < priority && (conflicting || (totalCost > mMaxCost && curCost > 0))) {
// Pre-existing conflicting client with higher priority exists
evictList.push_back(i);
}
@@ -524,7 +562,7 @@
template<class KEY, class VALUE, class LISTENER>
void ClientManager<KEY, VALUE, LISTENER>::updatePriorities(
- const std::map<int32_t,int32_t>& ownerPriorityList) {
+ const std::map<int32_t,ClientPriority>& ownerPriorityList) {
Mutex::Autolock lock(mLock);
for (auto& i : mClients) {
auto j = ownerPriorityList.find(i->getOwnerId());
diff --git a/services/camera/libcameraservice/utils/LatencyHistogram.cpp b/services/camera/libcameraservice/utils/LatencyHistogram.cpp
new file mode 100644
index 0000000..538bb6e
--- /dev/null
+++ b/services/camera/libcameraservice/utils/LatencyHistogram.cpp
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "CameraLatencyHistogram"
+#include <inttypes.h>
+#include <utils/Log.h>
+#include <utils/String8.h>
+
+#include "LatencyHistogram.h"
+
+namespace android {
+
+CameraLatencyHistogram::CameraLatencyHistogram(int32_t binSizeMs, int32_t binCount) :
+ mBinSizeMs(binSizeMs),
+ mBinCount(binCount),
+ mBins(binCount),
+ mTotalCount(0) {
+}
+
+void CameraLatencyHistogram::add(nsecs_t start, nsecs_t end) {
+ nsecs_t duration = end - start;
+ int32_t durationMs = static_cast<int32_t>(duration / 1000000LL);
+ int32_t binIndex = durationMs / mBinSizeMs;
+
+ if (binIndex < 0) {
+ binIndex = 0;
+ } else if (binIndex >= mBinCount) {
+ binIndex = mBinCount-1;
+ }
+
+ mBins[binIndex]++;
+ mTotalCount++;
+}
+
+void CameraLatencyHistogram::reset() {
+ mBins.assign(mBins.size(), 0); // zero the counts; clear() would empty the vector and make add() index out of bounds
+ mTotalCount = 0;
+}
+
+void CameraLatencyHistogram::dump(int fd, const char* name) const {
+ if (mTotalCount == 0) {
+ return;
+ }
+
+ String8 lines;
+ lines.appendFormat("%s (%" PRId64 ") samples\n", name, mTotalCount);
+
+ String8 lineBins, lineBinCounts;
+ formatHistogramText(lineBins, lineBinCounts);
+
+ lineBins.append("\n");
+ lineBinCounts.append("\n");
+ lines.append(lineBins);
+ lines.append(lineBinCounts);
+
+ write(fd, lines.string(), lines.size());
+}
+
+void CameraLatencyHistogram::log(const char* fmt, ...) {
+ if (mTotalCount == 0) {
+ return;
+ }
+
+ va_list args;
+ va_start(args, fmt);
+ String8 histogramName = String8::formatV(fmt, args);
+ ALOGI("%s (%" PRId64 ") samples:", histogramName.string(), mTotalCount);
+ va_end(args);
+
+ String8 lineBins, lineBinCounts;
+ formatHistogramText(lineBins, lineBinCounts);
+
+ ALOGI("%s", lineBins.c_str());
+ ALOGI("%s", lineBinCounts.c_str());
+}
+
+void CameraLatencyHistogram::formatHistogramText(
+ String8& lineBins, String8& lineBinCounts) const {
+ lineBins = " ";
+ lineBinCounts = " ";
+
+ for (int32_t i = 0; i < mBinCount; i++) {
+ if (i == mBinCount - 1) {
+ lineBins.append(" inf (max ms)");
+ } else {
+ lineBins.appendFormat("%7d", mBinSizeMs*(i+1));
+ }
+ lineBinCounts.appendFormat(" %02.2f", 100.0*mBins[i]/mTotalCount);
+ }
+ lineBinCounts.append(" (%)");
+}
+
+}; //namespace android
diff --git a/services/camera/libcameraservice/utils/LatencyHistogram.h b/services/camera/libcameraservice/utils/LatencyHistogram.h
new file mode 100644
index 0000000..bfd9b1b
--- /dev/null
+++ b/services/camera/libcameraservice/utils/LatencyHistogram.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_LATENCY_HISTOGRAM_H_
+#define ANDROID_SERVERS_CAMERA_LATENCY_HISTOGRAM_H_
+
+#include <vector>
+
+#include <utils/Timers.h>
+#include <utils/Mutex.h>
+
+namespace android {
+
+// Histogram for camera latency characteristic
+class CameraLatencyHistogram {
+public:
+ CameraLatencyHistogram() = delete;
+ CameraLatencyHistogram(int32_t binSizeMs, int32_t binCount=10);
+ void add(nsecs_t start, nsecs_t end);
+ void reset();
+
+ void dump(int fd, const char* name) const;
+ void log(const char* format, ...);
+private:
+ int32_t mBinSizeMs;
+ int32_t mBinCount;
+ std::vector<int64_t> mBins;
+ uint64_t mTotalCount;
+
+ void formatHistogramText(String8& lineBins, String8& lineBinCounts) const;
+}; // class CameraLatencyHistogram
+
+}; // namespace android
+
+#endif // ANDROID_SERVERS_CAMERA_LATENCY_HISTOGRAM_H_
diff --git a/services/camera/libcameraservice/utils/TagMonitor.cpp b/services/camera/libcameraservice/utils/TagMonitor.cpp
index f1b65bd..dec97d7 100644
--- a/services/camera/libcameraservice/utils/TagMonitor.cpp
+++ b/services/camera/libcameraservice/utils/TagMonitor.cpp
@@ -23,12 +23,14 @@
#include <inttypes.h>
#include <utils/Log.h>
#include <camera/VendorTagDescriptor.h>
+#include <camera_metadata_hidden.h>
namespace android {
TagMonitor::TagMonitor():
mMonitoringEnabled(false),
- mMonitoringEvents(kMaxMonitorEvents)
+ mMonitoringEvents(kMaxMonitorEvents),
+ mVendorTagId(CAMERA_METADATA_INVALID_VENDOR_ID)
{}
const char* TagMonitor::k3aTags =
@@ -55,6 +57,13 @@
sp<VendorTagDescriptor> vTags =
VendorTagDescriptor::getGlobalVendorTagDescriptor();
+ if ((nullptr == vTags.get()) || (0 >= vTags->getTagCount())) {
+ sp<VendorTagDescriptorCache> cache =
+ VendorTagDescriptorCache::getGlobalVendorTagCache();
+ if (cache.get()) {
+ cache->getVendorTagDescriptor(mVendorTagId, &vTags);
+ }
+ }
bool gotTag = false;
@@ -104,6 +113,15 @@
camera_metadata_ro_entry entry = metadata.find(tag);
CameraMetadata &lastValues = (source == REQUEST) ?
mLastMonitoredRequestValues : mLastMonitoredResultValues;
+ if (lastValues.isEmpty()) {
+ lastValues = CameraMetadata(mMonitoredTagList.size());
+ const camera_metadata_t *metaBuffer =
+ lastValues.getAndLock();
+ set_camera_metadata_vendor_id(
+ const_cast<camera_metadata_t *> (metaBuffer), mVendorTagId);
+ lastValues.unlock(metaBuffer);
+ }
+
camera_metadata_entry lastEntry = lastValues.find(tag);
if (entry.count > 0) {
@@ -129,16 +147,21 @@
}
if (isDifferent) {
- ALOGV("%s: Tag %s changed", __FUNCTION__, get_camera_metadata_tag_name(tag));
+ ALOGV("%s: Tag %s changed", __FUNCTION__,
+ get_local_camera_metadata_tag_name_vendor_id(
+ tag, mVendorTagId));
lastValues.update(entry);
mMonitoringEvents.emplace(source, frameNumber, timestamp, entry);
}
} else if (lastEntry.count > 0) {
// Value has been removed
- ALOGV("%s: Tag %s removed", __FUNCTION__, get_camera_metadata_tag_name(tag));
+ ALOGV("%s: Tag %s removed", __FUNCTION__,
+ get_local_camera_metadata_tag_name_vendor_id(
+ tag, mVendorTagId));
lastValues.erase(tag);
entry.tag = tag;
- entry.type = get_camera_metadata_tag_type(tag);
+ entry.type = get_local_camera_metadata_tag_type_vendor_id(tag,
+ mVendorTagId);
entry.count = 0;
mMonitoringEvents.emplace(source, frameNumber, timestamp, entry);
}
@@ -152,8 +175,10 @@
dprintf(fd, " Tag monitoring enabled for tags:\n");
for (uint32_t tag : mMonitoredTagList) {
dprintf(fd, " %s.%s\n",
- get_camera_metadata_section_name(tag),
- get_camera_metadata_tag_name(tag));
+ get_local_camera_metadata_section_name_vendor_id(tag,
+ mVendorTagId),
+ get_local_camera_metadata_tag_name_vendor_id(tag,
+ mVendorTagId));
}
} else {
dprintf(fd, " Tag monitoring disabled (enable with -m <name1,..,nameN>)\n");
@@ -166,8 +191,10 @@
event.frameNumber, event.timestamp,
indentation,
event.source == REQUEST ? "REQ:" : "RES:",
- get_camera_metadata_section_name(event.tag),
- get_camera_metadata_tag_name(event.tag));
+ get_local_camera_metadata_section_name_vendor_id(event.tag,
+ mVendorTagId),
+ get_local_camera_metadata_tag_name_vendor_id(event.tag,
+ mVendorTagId));
if (event.newData.size() == 0) {
dprintf(fd, " (Removed)\n");
} else {
diff --git a/services/camera/libcameraservice/utils/TagMonitor.h b/services/camera/libcameraservice/utils/TagMonitor.h
index d7aa419..7155314 100644
--- a/services/camera/libcameraservice/utils/TagMonitor.h
+++ b/services/camera/libcameraservice/utils/TagMonitor.h
@@ -27,6 +27,7 @@
#include <media/RingBuffer.h>
#include <system/camera_metadata.h>
+#include <system/camera_vendor_tags.h>
#include <camera/CameraMetadata.h>
namespace android {
@@ -44,6 +45,8 @@
TagMonitor();
+ void initialize(metadata_vendor_id_t id) { mVendorTagId = id; }
+
// Parse tag name list (comma-separated) and if valid, enable monitoring
// If invalid, do nothing.
// Recognizes "3a" as a shortcut for enabling tracking 3A state, mode, and
@@ -100,6 +103,7 @@
// 3A fields to use with the "3a" option
static const char *k3aTags;
+ metadata_vendor_id_t mVendorTagId;
};
} // namespace android
diff --git a/services/mediaanalytics/Android.mk b/services/mediaanalytics/Android.mk
new file mode 100644
index 0000000..9e2813e
--- /dev/null
+++ b/services/mediaanalytics/Android.mk
@@ -0,0 +1,53 @@
+# Media Statistics service
+#
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+ main_mediametrics.cpp \
+ MetricsSummarizerCodec.cpp \
+ MetricsSummarizerExtractor.cpp \
+ MetricsSummarizerPlayer.cpp \
+ MetricsSummarizerRecorder.cpp \
+ MetricsSummarizer.cpp \
+ MediaAnalyticsService.cpp
+
+LOCAL_SHARED_LIBRARIES := \
+ libcutils \
+ liblog \
+ libmedia \
+ libutils \
+ libbinder \
+ libdl \
+ libgui \
+ libmedia \
+ libmediautils \
+ libmediametrics \
+ libstagefright_foundation \
+ libutils
+
+LOCAL_STATIC_LIBRARIES := \
+ libregistermsext
+
+LOCAL_C_INCLUDES := \
+ $(TOP)/frameworks/av/media/libstagefright/include \
+ $(TOP)/frameworks/av/media/libstagefright/rtsp \
+ $(TOP)/frameworks/av/media/libstagefright/wifi-display \
+ $(TOP)/frameworks/av/media/libstagefright/webm \
+ $(TOP)/frameworks/av/include/media \
+ $(TOP)/frameworks/av/include/camera \
+ $(TOP)/frameworks/native/include/media/openmax \
+ $(TOP)/frameworks/native/include/media/hardware \
+ $(TOP)/external/tremolo/Tremolo \
+ libcore/include
+
+
+LOCAL_MODULE:= mediametrics
+
+LOCAL_INIT_RC := mediametrics.rc
+
+LOCAL_CFLAGS := -Werror -Wall -Wno-error=deprecated-declarations
+LOCAL_CLANG := true
+
+include $(BUILD_EXECUTABLE)
diff --git a/services/mediaanalytics/MediaAnalyticsService.cpp b/services/mediaanalytics/MediaAnalyticsService.cpp
new file mode 100644
index 0000000..876c685
--- /dev/null
+++ b/services/mediaanalytics/MediaAnalyticsService.cpp
@@ -0,0 +1,721 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Service that collects and summarizes media analytics (metrics) records
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaAnalyticsService"
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <dirent.h>
+#include <unistd.h>
+
+#include <string.h>
+
+#include <cutils/atomic.h>
+#include <cutils/properties.h> // for property_get
+
+#include <utils/misc.h>
+
+#include <binder/IPCThreadState.h>
+#include <binder/IServiceManager.h>
+#include <binder/MemoryHeapBase.h>
+#include <binder/MemoryBase.h>
+#include <gui/Surface.h>
+#include <utils/Errors.h> // for status_t
+#include <utils/List.h>
+#include <utils/String8.h>
+#include <utils/SystemClock.h>
+#include <utils/Timers.h>
+#include <utils/Vector.h>
+
+#include <media/AudioPolicyHelper.h>
+#include <media/IMediaHTTPService.h>
+#include <media/IRemoteDisplay.h>
+#include <media/IRemoteDisplayClient.h>
+#include <media/MediaPlayerInterface.h>
+#include <media/mediarecorder.h>
+#include <media/MediaMetadataRetrieverInterface.h>
+#include <media/Metadata.h>
+#include <media/AudioTrack.h>
+#include <media/MemoryLeakTrackUtil.h>
+#include <media/stagefright/MediaCodecList.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/Utils.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooperRoster.h>
+#include <mediautils/BatteryNotifier.h>
+
+//#include <memunreachable/memunreachable.h>
+#include <system/audio.h>
+
+#include <private/android_filesystem_config.h>
+
+#include "MediaAnalyticsService.h"
+
+#include "MetricsSummarizer.h"
+#include "MetricsSummarizerCodec.h"
+#include "MetricsSummarizerExtractor.h"
+#include "MetricsSummarizerPlayer.h"
+#include "MetricsSummarizerRecorder.h"
+
+
+namespace android {
+
+
+
+// summarized records
+// up to 48 sets, each covering an hour -- at least 2 days of coverage
+// (will be longer if there are hours without any media action)
+static const nsecs_t kNewSetIntervalNs = 3600*(1000*1000*1000ll);
+static const int kMaxRecordSets = 48;
+// individual records kept in memory
+static const int kMaxRecords = 100;
+
+
+static const char *kServiceName = "media.metrics";
+
+
+//using android::status_t;
+//using android::OK;
+//using android::BAD_VALUE;
+//using android::NOT_ENOUGH_DATA;
+//using android::Parcel;
+
+
+void MediaAnalyticsService::instantiate() {
+ defaultServiceManager()->addService(
+ String16(kServiceName), new MediaAnalyticsService());
+}
+
+// handle sets of summarizers
+MediaAnalyticsService::SummarizerSet::SummarizerSet() {
+ mSummarizers = new List<MetricsSummarizer *>();
+}
+MediaAnalyticsService::SummarizerSet::~SummarizerSet() {
+ // empty the list
+ List<MetricsSummarizer *> *l = mSummarizers;
+ while (l->size() > 0) {
+ MetricsSummarizer *summarizer = *(l->begin());
+ l->erase(l->begin());
+ delete summarizer;
+ }
+}
+
+void MediaAnalyticsService::newSummarizerSet() {
+ ALOGD("MediaAnalyticsService::newSummarizerSet");
+ MediaAnalyticsService::SummarizerSet *set = new MediaAnalyticsService::SummarizerSet();
+ nsecs_t now = systemTime(SYSTEM_TIME_REALTIME);
+ set->setStarted(now);
+
+ set->appendSummarizer(new MetricsSummarizerExtractor("extractor"));
+ set->appendSummarizer(new MetricsSummarizerCodec("codec"));
+ set->appendSummarizer(new MetricsSummarizerPlayer("nuplayer"));
+ set->appendSummarizer(new MetricsSummarizerRecorder("recorder"));
+
+ // ALWAYS at the end, since it catches everything
+ set->appendSummarizer(new MetricsSummarizer(NULL));
+
+ // inject this set at the BACK of the list.
+ mSummarizerSets->push_back(set);
+ mCurrentSet = set;
+
+ // limit the # that we have
+ if (mMaxRecordSets > 0) {
+ List<SummarizerSet *> *l = mSummarizerSets;
+ while (l->size() > (size_t) mMaxRecordSets) {
+ ALOGD("Deleting oldest record set....");
+ MediaAnalyticsService::SummarizerSet *oset = *(l->begin());
+ l->erase(l->begin());
+ delete oset;
+ mSetsDiscarded++;
+ }
+ }
+}
+
+MediaAnalyticsService::MediaAnalyticsService()
+ : mMaxRecords(kMaxRecords),
+ mMaxRecordSets(kMaxRecordSets),
+ mNewSetInterval(kNewSetIntervalNs) {
+
+ ALOGD("MediaAnalyticsService created");
+ // clear our queues
+ mOpen = new List<MediaAnalyticsItem *>();
+ mFinalized = new List<MediaAnalyticsItem *>();
+
+ mSummarizerSets = new List<MediaAnalyticsService::SummarizerSet *>();
+ newSummarizerSet();
+
+ mItemsSubmitted = 0;
+ mItemsFinalized = 0;
+ mItemsDiscarded = 0;
+
+ mLastSessionID = 0;
+ // recover any persistency we set up
+ // etc
+}
+
+MediaAnalyticsService::~MediaAnalyticsService() {
+ ALOGD("MediaAnalyticsService destroyed");
+
+ // clean out mOpen and mFinalized
+ delete mOpen;
+ mOpen = NULL;
+ delete mFinalized;
+ mFinalized = NULL;
+
+ // XXX: clean out the summaries
+}
+
+
+MediaAnalyticsItem::SessionID_t MediaAnalyticsService::generateUniqueSessionID() {
+ // generate a new sessionid
+
+ Mutex::Autolock _l(mLock_ids);
+ return (++mLastSessionID);
+}
+
+// caller surrenders ownership of 'item'
+MediaAnalyticsItem::SessionID_t MediaAnalyticsService::submit(MediaAnalyticsItem *item, bool forcenew) {
+
+ MediaAnalyticsItem::SessionID_t id = MediaAnalyticsItem::SessionIDInvalid;
+
+ // we control these, generally not trusting user input
+ nsecs_t now = systemTime(SYSTEM_TIME_REALTIME);
+ item->setTimestamp(now);
+ int pid = IPCThreadState::self()->getCallingPid();
+ int uid = IPCThreadState::self()->getCallingUid();
+
+ int uid_given = item->getUid();
+ int pid_given = item->getPid();
+
+ // although we do make exceptions for particular client uids
+ // that we know we trust.
+ //
+ bool isTrusted = false;
+
+ switch (uid) {
+ case AID_MEDIA:
+ case AID_MEDIA_CODEC:
+ case AID_MEDIA_EX:
+ case AID_MEDIA_DRM:
+ // trusted source, only override default values
+ isTrusted = true;
+ if (uid_given == (-1)) {
+ item->setUid(uid);
+ }
+ if (pid_given == (-1)) {
+ item->setPid(pid);
+ }
+ break;
+ default:
+ isTrusted = false;
+ item->setPid(pid);
+ item->setUid(uid);
+ break;
+ }
+
+
+ mItemsSubmitted++;
+
+ // validate the record; we discard if we don't like it
+ if (contentValid(item, isTrusted) == false) {
+ delete item;
+ return MediaAnalyticsItem::SessionIDInvalid;
+ }
+
+
+    // if we have a sessionid in the new record, look to make
+ // sure it doesn't appear in the finalized list.
+ // XXX: this is for security / DOS prevention.
+ // may also require that we persist the unique sessionIDs
+ // across boots [instead of within a single boot]
+
+
+ // match this new record up against records in the open
+ // list...
+ // if there's a match, merge them together
+    // deal with moving the old / merged record into the finalized queue
+
+ bool finalizing = item->getFinalized();
+
+ // if finalizing, we'll remove it
+ MediaAnalyticsItem *oitem = findItem(mOpen, item, finalizing | forcenew);
+ if (oitem != NULL) {
+ if (forcenew) {
+ // old one gets finalized, then we insert the new one
+ // so we'll have 2 records at the end of this.
+ // but don't finalize an empty record
+ if (oitem->count() == 0) {
+ // we're responsible for disposing of the dead record
+ delete oitem;
+ oitem = NULL;
+ } else {
+ oitem->setFinalized(true);
+ summarize(oitem);
+ saveItem(mFinalized, oitem, 0);
+ }
+ // new record could itself be marked finalized...
+ if (finalizing) {
+ summarize(item);
+ saveItem(mFinalized, item, 0);
+ mItemsFinalized++;
+ } else {
+ saveItem(mOpen, item, 1);
+ }
+ id = item->getSessionID();
+ } else {
+ // combine the records, send it to finalized if appropriate
+ oitem->merge(item);
+ if (finalizing) {
+ summarize(oitem);
+ saveItem(mFinalized, oitem, 0);
+ mItemsFinalized++;
+ }
+ id = oitem->getSessionID();
+
+ // we're responsible for disposing of the dead record
+ delete item;
+ item = NULL;
+ }
+ } else {
+ // nothing to merge, save the new record
+ id = item->getSessionID();
+ if (finalizing) {
+ if (item->count() == 0) {
+ // drop empty records
+ delete item;
+ item = NULL;
+ } else {
+ summarize(item);
+ saveItem(mFinalized, item, 0);
+ mItemsFinalized++;
+ }
+ } else {
+ saveItem(mOpen, item, 1);
+ }
+ }
+ return id;
+}
+
+status_t MediaAnalyticsService::dump(int fd, const Vector<String16>& args)
+{
+ const size_t SIZE = 512;
+ char buffer[SIZE];
+ String8 result;
+
+ if (checkCallingPermission(String16("android.permission.DUMP")) == false) {
+ snprintf(buffer, SIZE, "Permission Denial: "
+ "can't dump MediaAnalyticsService from pid=%d, uid=%d\n",
+ IPCThreadState::self()->getCallingPid(),
+ IPCThreadState::self()->getCallingUid());
+ result.append(buffer);
+ write(fd, result.string(), result.size());
+ return NO_ERROR;
+ }
+
+ // crack any parameters
+ bool clear = false;
+ bool summary = false;
+ nsecs_t ts_since = 0;
+ String16 summaryOption("-summary");
+ String16 clearOption("-clear");
+ String16 sinceOption("-since");
+ String16 helpOption("-help");
+ String16 onlyOption("-only");
+ const char *only = NULL;
+ int n = args.size();
+ for (int i = 0; i < n; i++) {
+ String8 myarg(args[i]);
+ if (args[i] == clearOption) {
+ clear = true;
+ } else if (args[i] == summaryOption) {
+ summary = true;
+ } else if (args[i] == sinceOption) {
+ i++;
+ if (i < n) {
+ String8 value(args[i]);
+ char *endp;
+ const char *p = value.string();
+ ts_since = strtoll(p, &endp, 10);
+ if (endp == p || *endp != '\0') {
+ ts_since = 0;
+ }
+ } else {
+ ts_since = 0;
+ }
+ // command line is milliseconds; internal units are nano-seconds
+ ts_since *= 1000*1000;
+ } else if (args[i] == onlyOption) {
+ i++;
+ if (i < n) {
+ String8 value(args[i]);
+ const char *p = value.string();
+ char *q = strdup(p);
+ if (q != NULL) {
+ if (only != NULL) {
+ free((void*)only);
+ }
+ only = q;
+ }
+ }
+ } else if (args[i] == helpOption) {
+ result.append("Recognized parameters:\n");
+ result.append("-help this help message\n");
+ result.append("-summary show summary info\n");
+ result.append("-clear clears out saved records\n");
+ result.append("-only X process records for component X\n");
+ result.append("-since X include records since X\n");
+ result.append(" (X is milliseconds since the UNIX epoch)\n");
+ write(fd, result.string(), result.size());
+ return NO_ERROR;
+ }
+ }
+
+ Mutex::Autolock _l(mLock);
+
+ // we ALWAYS dump this piece
+ snprintf(buffer, SIZE, "Dump of the %s process:\n", kServiceName);
+ result.append(buffer);
+
+ dumpHeaders(result, ts_since);
+
+ // only want 1, to avoid confusing folks that parse the output
+ if (summary) {
+ dumpSummaries(result, ts_since, only);
+ } else {
+ dumpRecent(result, ts_since, only);
+ }
+
+
+ if (clear) {
+ // remove everything from the finalized queue
+ while (mFinalized->size() > 0) {
+ MediaAnalyticsItem * oitem = *(mFinalized->begin());
+ mFinalized->erase(mFinalized->begin());
+ delete oitem;
+ mItemsDiscarded++;
+ }
+
+ // shall we clear the summary data too?
+
+ }
+
+ write(fd, result.string(), result.size());
+ return NO_ERROR;
+}
+
+// dump headers
+void MediaAnalyticsService::dumpHeaders(String8 &result, nsecs_t ts_since) {
+ const size_t SIZE = 512;
+ char buffer[SIZE];
+
+ int enabled = MediaAnalyticsItem::isEnabled();
+ if (enabled) {
+ snprintf(buffer, SIZE, "Metrics gathering: enabled\n");
+ } else {
+ snprintf(buffer, SIZE, "Metrics gathering: DISABLED via property\n");
+ }
+ result.append(buffer);
+
+ snprintf(buffer, SIZE,
+ "Since Boot: Submissions: %" PRId64
+ " Finalizations: %" PRId64
+ " Discarded: %" PRId64 "\n",
+ mItemsSubmitted, mItemsFinalized, mItemsDiscarded);
+ result.append(buffer);
+ snprintf(buffer, SIZE,
+ "Summary Sets Discarded: %" PRId64 "\n", mSetsDiscarded);
+ result.append(buffer);
+ if (ts_since != 0) {
+ snprintf(buffer, SIZE,
+ "Dumping Queue entries more recent than: %" PRId64 "\n",
+ (int64_t) ts_since);
+ result.append(buffer);
+ }
+}
+
+// dump summary info
+void MediaAnalyticsService::dumpSummaries(String8 &result, nsecs_t ts_since, const char *only) {
+ const size_t SIZE = 512;
+ char buffer[SIZE];
+ int slot = 0;
+
+ snprintf(buffer, SIZE, "\nSummarized Metrics:\n");
+ result.append(buffer);
+
+ // have each of the distillers dump records
+ if (mSummarizerSets != NULL) {
+ List<SummarizerSet *>::iterator itSet = mSummarizerSets->begin();
+ for (; itSet != mSummarizerSets->end(); itSet++) {
+ nsecs_t when = (*itSet)->getStarted();
+ if (when < ts_since) {
+ continue;
+ }
+ List<MetricsSummarizer *> *list = (*itSet)->getSummarizers();
+ List<MetricsSummarizer *>::iterator it = list->begin();
+ for (; it != list->end(); it++) {
+ if (only != NULL && strcmp(only, (*it)->getKey()) != 0) {
+ ALOGV("Told to omit '%s'", (*it)->getKey());
+ }
+ AString distilled = (*it)->dumpSummary(slot, only);
+ result.append(distilled.c_str());
+ }
+ }
+ }
+}
+
+// the recent, detailed queues
+void MediaAnalyticsService::dumpRecent(String8 &result, nsecs_t ts_since, const char * only) {
+ const size_t SIZE = 512;
+ char buffer[SIZE];
+
+ // show the recently recorded records
+ snprintf(buffer, sizeof(buffer), "\nFinalized Metrics (oldest first):\n");
+ result.append(buffer);
+ result.append(this->dumpQueue(mFinalized, ts_since, only));
+
+ snprintf(buffer, sizeof(buffer), "\nIn-Progress Metrics (newest first):\n");
+ result.append(buffer);
+ result.append(this->dumpQueue(mOpen, ts_since, only));
+
+ // show who is connected and injecting records?
+ // talk about # records fed to the 'readers'
+ // talk about # records we discarded, perhaps "discarded w/o reading" too
+}
+// caller has locked mLock...
+String8 MediaAnalyticsService::dumpQueue(List<MediaAnalyticsItem *> *theList) {
+ return dumpQueue(theList, (nsecs_t) 0, NULL);
+}
+
+String8 MediaAnalyticsService::dumpQueue(List<MediaAnalyticsItem *> *theList, nsecs_t ts_since, const char * only) {
+ String8 result;
+ int slot = 0;
+
+ if (theList->empty()) {
+ result.append("empty\n");
+ } else {
+ List<MediaAnalyticsItem *>::iterator it = theList->begin();
+ for (; it != theList->end(); it++) {
+ nsecs_t when = (*it)->getTimestamp();
+ if (when < ts_since) {
+ continue;
+ }
+ if (only != NULL &&
+ strcmp(only, (*it)->getKey().c_str()) != 0) {
+ ALOGV("Omit '%s', it's not '%s'", (*it)->getKey().c_str(), only);
+ continue;
+ }
+ AString entry = (*it)->toString();
+ result.appendFormat("%5d: %s\n", slot, entry.c_str());
+ slot++;
+ }
+ }
+
+ return result;
+}
+
+//
+// Our Cheap in-core, non-persistent records management.
+// XXX: rewrite this to manage persistence, etc.
+
+// insert appropriately into queue
+void MediaAnalyticsService::saveItem(List<MediaAnalyticsItem *> *l, MediaAnalyticsItem * item, int front) {
+
+ Mutex::Autolock _l(mLock);
+
+ // adding at back of queue (fifo order)
+ if (front) {
+ l->push_front(item);
+ } else {
+ l->push_back(item);
+ }
+
+    // keep removing old records from the front until we're in-bounds
+ if (mMaxRecords > 0) {
+ while (l->size() > (size_t) mMaxRecords) {
+ MediaAnalyticsItem * oitem = *(l->begin());
+ l->erase(l->begin());
+ delete oitem;
+ mItemsDiscarded++;
+ }
+ }
+}
+
+// are they alike enough that nitem can be folded into oitem?
+static bool compatibleItems(MediaAnalyticsItem * oitem, MediaAnalyticsItem * nitem) {
+
+ if (0) {
+ ALOGD("Compare: o %s n %s",
+ oitem->toString().c_str(), nitem->toString().c_str());
+ }
+
+ // general safety
+ if (nitem->getUid() != oitem->getUid()) {
+ return false;
+ }
+ if (nitem->getPid() != oitem->getPid()) {
+ return false;
+ }
+
+ // key -- needs to match
+ if (nitem->getKey() == oitem->getKey()) {
+ // still in the game.
+ } else {
+ return false;
+ }
+
+ // session id -- empty field in new is allowed
+ MediaAnalyticsItem::SessionID_t osession = oitem->getSessionID();
+ MediaAnalyticsItem::SessionID_t nsession = nitem->getSessionID();
+ if (nsession != osession) {
+ // incoming '0' matches value in osession
+ if (nsession != 0) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// find the incomplete record that this will overlay
+MediaAnalyticsItem *MediaAnalyticsService::findItem(List<MediaAnalyticsItem*> *theList, MediaAnalyticsItem *nitem, bool removeit) {
+ if (nitem == NULL) {
+ return NULL;
+ }
+
+ MediaAnalyticsItem *item = NULL;
+
+ Mutex::Autolock _l(mLock);
+
+ for (List<MediaAnalyticsItem *>::iterator it = theList->begin();
+ it != theList->end(); it++) {
+ MediaAnalyticsItem *tmp = (*it);
+
+ if (!compatibleItems(tmp, nitem)) {
+ continue;
+ }
+
+ // we match! this is the one I want.
+ if (removeit) {
+ theList->erase(it);
+ }
+ item = tmp;
+ break;
+ }
+ return item;
+}
+
+
+// delete the indicated record
+void MediaAnalyticsService::deleteItem(List<MediaAnalyticsItem *> *l, MediaAnalyticsItem *item) {
+
+ Mutex::Autolock _l(mLock);
+
+ for (List<MediaAnalyticsItem *>::iterator it = l->begin();
+ it != l->end(); it++) {
+ if ((*it)->getSessionID() != item->getSessionID())
+ continue;
+ delete *it;
+ l->erase(it);
+ break;
+ }
+}
+
+static AString allowedKeys[] =
+{
+ "codec",
+ "extractor"
+};
+
+static const int nAllowedKeys = sizeof(allowedKeys) / sizeof(allowedKeys[0]);
+
+// are the contents good
+bool MediaAnalyticsService::contentValid(MediaAnalyticsItem *item, bool isTrusted) {
+
+ // untrusted uids can only send us a limited set of keys
+ if (isTrusted == false) {
+ // restrict to a specific set of keys
+ AString key = item->getKey();
+
+ size_t i;
+ for(i = 0; i < nAllowedKeys; i++) {
+ if (key == allowedKeys[i]) {
+ break;
+ }
+ }
+ if (i == nAllowedKeys) {
+ ALOGD("Ignoring (key): %s", item->toString().c_str());
+ return false;
+ }
+ }
+
+ // internal consistency
+
+ return true;
+}
+
+// are we rate limited, normally false
+bool MediaAnalyticsService::rateLimited(MediaAnalyticsItem *) {
+
+ return false;
+}
+
+// insert into the appropriate summarizer.
+// we make our own copy to save/summarize
+void MediaAnalyticsService::summarize(MediaAnalyticsItem *item) {
+
+ ALOGV("MediaAnalyticsService::summarize()");
+
+ if (item == NULL) {
+ return;
+ }
+
+ nsecs_t now = systemTime(SYSTEM_TIME_REALTIME);
+ if (mCurrentSet == NULL
+ || (mCurrentSet->getStarted() + mNewSetInterval < now)) {
+ newSummarizerSet();
+ }
+
+ if (mCurrentSet == NULL) {
+ return;
+ }
+
+ List<MetricsSummarizer *> *summarizers = mCurrentSet->getSummarizers();
+ List<MetricsSummarizer *>::iterator it = summarizers->begin();
+ for (; it != summarizers->end(); it++) {
+ if ((*it)->isMine(*item)) {
+ break;
+ }
+ }
+ if (it == summarizers->end()) {
+ ALOGD("no handler for type %s", item->getKey().c_str());
+ return; // no handler
+ }
+
+ // invoke the summarizer. summarizer will make whatever copies
+ // it wants; the caller retains ownership of item.
+
+ (*it)->handleRecord(item);
+
+}
+
+} // namespace android
diff --git a/services/mediaanalytics/MediaAnalyticsService.h b/services/mediaanalytics/MediaAnalyticsService.h
new file mode 100644
index 0000000..6685967
--- /dev/null
+++ b/services/mediaanalytics/MediaAnalyticsService.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ANDROID_MEDIAANALYTICSSERVICE_H
+#define ANDROID_MEDIAANALYTICSSERVICE_H
+
+#include <arpa/inet.h>
+
+#include <utils/threads.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/List.h>
+
+#include <media/IMediaAnalyticsService.h>
+
+#include "MetricsSummarizer.h"
+
+
+namespace android {
+
+class MediaAnalyticsService : public BnMediaAnalyticsService
+{
+
+ public:
+
+ // on this side, caller surrenders ownership
+ virtual int64_t submit(MediaAnalyticsItem *item, bool forcenew);
+
+ static void instantiate();
+ virtual status_t dump(int fd, const Vector<String16>& args);
+
+ MediaAnalyticsService();
+ virtual ~MediaAnalyticsService();
+
+ private:
+ MediaAnalyticsItem::SessionID_t generateUniqueSessionID();
+
+ // statistics about our analytics
+ int64_t mItemsSubmitted;
+ int64_t mItemsFinalized;
+ int64_t mItemsDiscarded;
+ int64_t mSetsDiscarded;
+ MediaAnalyticsItem::SessionID_t mLastSessionID;
+
+ // partitioned a bit so we don't over serialize
+ mutable Mutex mLock;
+ mutable Mutex mLock_ids;
+
+ // the most we hold in memory
+ // up to this many in each queue (open, finalized)
+ int32_t mMaxRecords;
+ // # of sets of summaries
+ int32_t mMaxRecordSets;
+ // nsecs until we start a new record set
+ nsecs_t mNewSetInterval;
+
+ // input validation after arrival from client
+ bool contentValid(MediaAnalyticsItem *item, bool isTrusted);
+ bool rateLimited(MediaAnalyticsItem *);
+
+ // the ones that are still open
+ // (newest at front) since we keep looking for them
+ List<MediaAnalyticsItem *> *mOpen;
+ // the ones we've finalized
+ // (oldest at front) so it prints nicely for dumpsys
+ List<MediaAnalyticsItem *> *mFinalized;
+ // searching within these queues: queue, key
+ MediaAnalyticsItem *findItem(List<MediaAnalyticsItem *> *,
+ MediaAnalyticsItem *, bool removeit);
+
+ // summarizers
+ void summarize(MediaAnalyticsItem *item);
+ class SummarizerSet {
+ nsecs_t mStarted;
+ List<MetricsSummarizer *> *mSummarizers;
+
+ public:
+ void appendSummarizer(MetricsSummarizer *s) {
+ if (s) {
+ mSummarizers->push_back(s);
+ }
+ };
+ nsecs_t getStarted() { return mStarted;}
+ void setStarted(nsecs_t started) {mStarted = started;}
+ List<MetricsSummarizer *> *getSummarizers() { return mSummarizers;}
+
+ SummarizerSet();
+ ~SummarizerSet();
+ };
+ void newSummarizerSet();
+ List<SummarizerSet *> *mSummarizerSets;
+ SummarizerSet *mCurrentSet;
+ List<MetricsSummarizer *> *getFirstSet() {
+ List<SummarizerSet *>::iterator first = mSummarizerSets->begin();
+ if (first != mSummarizerSets->end()) {
+ return (*first)->getSummarizers();
+ }
+ return NULL;
+ }
+
+ void saveItem(MediaAnalyticsItem);
+ void saveItem(List<MediaAnalyticsItem *> *, MediaAnalyticsItem *, int);
+ void deleteItem(List<MediaAnalyticsItem *> *, MediaAnalyticsItem *);
+
+ // support for generating output
+ String8 dumpQueue(List<MediaAnalyticsItem*> *);
+ String8 dumpQueue(List<MediaAnalyticsItem*> *, nsecs_t, const char *only);
+
+ void dumpHeaders(String8 &result, nsecs_t ts_since);
+ void dumpSummaries(String8 &result, nsecs_t ts_since, const char * only);
+ void dumpRecent(String8 &result, nsecs_t ts_since, const char * only);
+
+};
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
+
+#endif // ANDROID_MEDIAANALYTICSSERVICE_H
diff --git a/services/mediaanalytics/MetricsSummarizer.cpp b/services/mediaanalytics/MetricsSummarizer.cpp
new file mode 100644
index 0000000..6d5787e
--- /dev/null
+++ b/services/mediaanalytics/MetricsSummarizer.cpp
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "MetricsSummarizer"
+#include <utils/Log.h>
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <utils/threads.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/List.h>
+
+#include <media/IMediaAnalyticsService.h>
+
+#include "MetricsSummarizer.h"
+
+
+namespace android {
+
+#define DEBUG_SORT 0
+#define DEBUG_QUEUE 0
+
+
+MetricsSummarizer::MetricsSummarizer(const char *key)
+ : mIgnorables(NULL)
+{
+ ALOGV("MetricsSummarizer::MetricsSummarizer");
+
+ if (key == NULL) {
+ mKey = key;
+ } else {
+ mKey = strdup(key);
+ }
+
+ mSummaries = new List<MediaAnalyticsItem *>();
+}
+
+MetricsSummarizer::~MetricsSummarizer()
+{
+ ALOGV("MetricsSummarizer::~MetricsSummarizer");
+ if (mKey) {
+ free((void *)mKey);
+ mKey = NULL;
+ }
+
+ // clear the list of items we have saved
+ while (mSummaries->size() > 0) {
+ MediaAnalyticsItem * oitem = *(mSummaries->begin());
+ if (DEBUG_QUEUE) {
+ ALOGD("zap old record: key %s sessionID %" PRId64 " ts %" PRId64 "",
+ oitem->getKey().c_str(), oitem->getSessionID(),
+ oitem->getTimestamp());
+ }
+ mSummaries->erase(mSummaries->begin());
+ delete oitem;
+ }
+}
+
+// so we know what summarizer we were using
+const char *MetricsSummarizer::getKey() {
+ const char *value = mKey;
+ if (value == NULL) {
+ value = "unknown";
+ }
+ return value;
+}
+
+// should the record be given to this summarizer
+bool MetricsSummarizer::isMine(MediaAnalyticsItem &item)
+{
+ if (mKey == NULL)
+ return true;
+ AString itemKey = item.getKey();
+ if (strcmp(mKey, itemKey.c_str()) != 0) {
+ return false;
+ }
+ return true;
+}
+
+AString MetricsSummarizer::dumpSummary(int &slot)
+{
+ return dumpSummary(slot, NULL);
+}
+
+AString MetricsSummarizer::dumpSummary(int &slot, const char *only)
+{
+ AString value = "";
+
+ List<MediaAnalyticsItem *>::iterator it = mSummaries->begin();
+ if (it != mSummaries->end()) {
+ char buf[16]; // enough for "#####: "
+ for (; it != mSummaries->end(); it++) {
+ if (only != NULL && strcmp(only, (*it)->getKey().c_str()) != 0) {
+ continue;
+ }
+ AString entry = (*it)->toString();
+ snprintf(buf, sizeof(buf), "%5d: ", slot);
+ value.append(buf);
+ value.append(entry.c_str());
+ value.append("\n");
+ slot++;
+ }
+ }
+ return value;
+}
+
+void MetricsSummarizer::setIgnorables(const char **ignorables) {
+ mIgnorables = ignorables;
+}
+
+const char **MetricsSummarizer::getIgnorables() {
+ return mIgnorables;
+}
+
+void MetricsSummarizer::handleRecord(MediaAnalyticsItem *item) {
+
+ ALOGV("MetricsSummarizer::handleRecord() for %s",
+ item == NULL ? "<nothing>" : item->toString().c_str());
+
+ if (item == NULL) {
+ return;
+ }
+
+ List<MediaAnalyticsItem *>::iterator it = mSummaries->begin();
+ for (; it != mSummaries->end(); it++) {
+ bool good = sameAttributes((*it), item, getIgnorables());
+ ALOGV("Match against %s says %d",
+ (*it)->toString().c_str(), good);
+ if (good)
+ break;
+ }
+ if (it == mSummaries->end()) {
+ ALOGV("save new record");
+ item = item->dup();
+ if (item == NULL) {
+ ALOGE("unable to save MediaMetrics record");
+ }
+ sortProps(item);
+ item->setInt32("aggregated",1);
+ mSummaries->push_back(item);
+ } else {
+ ALOGV("increment existing record");
+ (*it)->addInt32("aggregated",1);
+ mergeRecord(*(*it), *item);
+ }
+}
+
+void MetricsSummarizer::mergeRecord(MediaAnalyticsItem &/*have*/, MediaAnalyticsItem &/*item*/) {
+ // default is no further massaging.
+ ALOGV("MetricsSummarizer::mergeRecord() [default]");
+ return;
+}
+
+
+//
+// Comparators
+//
+
+// testing that all of 'single' is in 'summ'
+// and that the values match.
+// 'summ' may have extra fields.
+// 'ignorable' is a set of things that we don't worry about matching up
+// (usually time- or count-based values we'll sum elsewhere)
+bool MetricsSummarizer::sameAttributes(MediaAnalyticsItem *summ, MediaAnalyticsItem *single, const char **ignorable) {
+
+ if (single == NULL || summ == NULL) {
+ return false;
+ }
+ ALOGV("MetricsSummarizer::sameAttributes(): summ %s", summ->toString().c_str());
+ ALOGV("MetricsSummarizer::sameAttributes(): single %s", single->toString().c_str());
+
+ // this can be made better.
+ for(size_t i=0;i<single->mPropCount;i++) {
+ MediaAnalyticsItem::Prop *prop1 = &(single->mProps[i]);
+ const char *attrName = prop1->mName;
+ ALOGV("compare on attr '%s'", attrName);
+
+ // is it something we should ignore
+ if (ignorable != NULL) {
+ const char **ig = ignorable;
+ while (*ig) {
+ if (strcmp(*ig, attrName) == 0) {
+ break;
+ }
+ ig++;
+ }
+ if (*ig) {
+ ALOGV("we don't mind that it has attr '%s'", attrName);
+ continue;
+ }
+ }
+
+ MediaAnalyticsItem::Prop *prop2 = summ->findProp(attrName);
+ if (prop2 == NULL) {
+ ALOGV("summ doesn't have this attr");
+ return false;
+ }
+ if (prop1->mType != prop2->mType) {
+ ALOGV("mismatched attr types");
+ return false;
+ }
+ switch (prop1->mType) {
+ case MediaAnalyticsItem::kTypeInt32:
+ if (prop1->u.int32Value != prop2->u.int32Value)
+ return false;
+ break;
+ case MediaAnalyticsItem::kTypeInt64:
+ if (prop1->u.int64Value != prop2->u.int64Value)
+ return false;
+ break;
+ case MediaAnalyticsItem::kTypeDouble:
+ // XXX: watch out for floating point comparisons!
+ if (prop1->u.doubleValue != prop2->u.doubleValue)
+ return false;
+ break;
+ case MediaAnalyticsItem::kTypeCString:
+ if (strcmp(prop1->u.CStringValue, prop2->u.CStringValue) != 0)
+ return false;
+ break;
+ case MediaAnalyticsItem::kTypeRate:
+ if (prop1->u.rate.count != prop2->u.rate.count)
+ return false;
+ if (prop1->u.rate.duration != prop2->u.rate.duration)
+ return false;
+ break;
+ default:
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool MetricsSummarizer::sameAttributesId(MediaAnalyticsItem *summ, MediaAnalyticsItem *single, const char **ignorable) {
+
+ // verify same user
+ if (summ->mPid != single->mPid)
+ return false;
+
+ // and finally do the more expensive validation of the attributes
+ return sameAttributes(summ, single, ignorable);
+}
+
+int MetricsSummarizer::PropSorter(const void *a, const void *b) {
+ MediaAnalyticsItem::Prop *ai = (MediaAnalyticsItem::Prop *)a;
+ MediaAnalyticsItem::Prop *bi = (MediaAnalyticsItem::Prop *)b;
+ return strcmp(ai->mName, bi->mName);
+}
+
+// we sort in the summaries so that it looks pretty in the dumpsys
+void MetricsSummarizer::sortProps(MediaAnalyticsItem *item) {
+ if (item->mPropCount != 0) {
+ if (DEBUG_SORT) {
+ ALOGD("sortProps(pre): %s", item->toString().c_str());
+ }
+ qsort(item->mProps, item->mPropCount,
+ sizeof(MediaAnalyticsItem::Prop), MetricsSummarizer::PropSorter);
+ if (DEBUG_SORT) {
+ ALOGD("sortProps(pst): %s", item->toString().c_str());
+ }
+ }
+}
+
+} // namespace android
diff --git a/services/mediaanalytics/MetricsSummarizer.h b/services/mediaanalytics/MetricsSummarizer.h
new file mode 100644
index 0000000..0b64eac
--- /dev/null
+++ b/services/mediaanalytics/MetricsSummarizer.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ANDROID_METRICSSUMMARIZER_H
+#define ANDROID_METRICSSUMMARIZER_H
+
+#include <utils/threads.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/List.h>
+
+#include <media/IMediaAnalyticsService.h>
+
+
+namespace android {
+
+class MetricsSummarizer
+{
+
+ public:
+
+ MetricsSummarizer(const char *key);
+ virtual ~MetricsSummarizer();
+
+ // show the key
+ const char * getKey();
+
+ // should the record be given to this summarizer
+ bool isMine(MediaAnalyticsItem &item);
+
+ // hand the record to this summarizer
+ void handleRecord(MediaAnalyticsItem *item);
+
+ virtual void mergeRecord(MediaAnalyticsItem &have, MediaAnalyticsItem &incoming);
+
+ // dump the summarized records (for dumpsys)
+ AString dumpSummary(int &slot);
+ AString dumpSummary(int &slot, const char *only);
+
+ void setIgnorables(const char **);
+ const char **getIgnorables();
+
+ protected:
+
+ // various comparators
+ // "do these records have same attributes and values in those attrs"
+ // ditto, but watch for "error" fields
+ bool sameAttributes(MediaAnalyticsItem *summ, MediaAnalyticsItem *single, const char **ignoreables);
+ // attributes + from the same app/userid
+ bool sameAttributesId(MediaAnalyticsItem *summ, MediaAnalyticsItem *single, const char **ignoreables);
+
+ static int PropSorter(const void *a, const void *b);
+ void sortProps(MediaAnalyticsItem *item);
+
+ private:
+ const char *mKey;
+ const char **mIgnorables;
+ List<MediaAnalyticsItem *> *mSummaries;
+
+
+};
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
+
+#endif // ANDROID_METRICSSUMMARIZER_H
diff --git a/services/mediaanalytics/MetricsSummarizerCodec.cpp b/services/mediaanalytics/MetricsSummarizerCodec.cpp
new file mode 100644
index 0000000..8c74782
--- /dev/null
+++ b/services/mediaanalytics/MetricsSummarizerCodec.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "MetricsSummarizerCodec"
+#include <utils/Log.h>
+
+
+#include <utils/threads.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/List.h>
+
+#include <media/IMediaAnalyticsService.h>
+
+#include "MetricsSummarizer.h"
+#include "MetricsSummarizerCodec.h"
+
+
+
+
+namespace android {
+
+MetricsSummarizerCodec::MetricsSummarizerCodec(const char *key)
+ : MetricsSummarizer(key)
+{
+ ALOGV("MetricsSummarizerCodec::MetricsSummarizerCodec");
+}
+
+
+} // namespace android
diff --git a/services/mediaanalytics/MetricsSummarizerCodec.h b/services/mediaanalytics/MetricsSummarizerCodec.h
new file mode 100644
index 0000000..c01196f
--- /dev/null
+++ b/services/mediaanalytics/MetricsSummarizerCodec.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ANDROID_METRICSSUMMARIZERCODEC_H
+#define ANDROID_METRICSSUMMARIZERCODEC_H
+
+#include <utils/threads.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/List.h>
+
+#include <media/IMediaAnalyticsService.h>
+#include "MetricsSummarizer.h"
+
+
+namespace android {
+
+class MetricsSummarizerCodec : public MetricsSummarizer
+{
+
+ public:
+
+ MetricsSummarizerCodec(const char *key);
+ virtual ~MetricsSummarizerCodec() {};
+
+};
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
+
+#endif // ANDROID_METRICSSUMMARIZERCODEC_H
diff --git a/services/mediaanalytics/MetricsSummarizerExtractor.cpp b/services/mediaanalytics/MetricsSummarizerExtractor.cpp
new file mode 100644
index 0000000..190f87d
--- /dev/null
+++ b/services/mediaanalytics/MetricsSummarizerExtractor.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "MetricsSummarizerExtractor"
+#include <utils/Log.h>
+
+#include <utils/threads.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/List.h>
+
+#include <media/IMediaAnalyticsService.h>
+
+#include "MetricsSummarizer.h"
+#include "MetricsSummarizerExtractor.h"
+
+
+
+
+namespace android {
+
+MetricsSummarizerExtractor::MetricsSummarizerExtractor(const char *key)
+ : MetricsSummarizer(key)
+{
+ ALOGV("MetricsSummarizerExtractor::MetricsSummarizerExtractor");
+}
+
+} // namespace android
diff --git a/services/mediaanalytics/MetricsSummarizerExtractor.h b/services/mediaanalytics/MetricsSummarizerExtractor.h
new file mode 100644
index 0000000..eee052b
--- /dev/null
+++ b/services/mediaanalytics/MetricsSummarizerExtractor.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ANDROID_METRICSSUMMARIZEREXTRACTOR_H
+#define ANDROID_METRICSSUMMARIZEREXTRACTOR_H
+
+#include <utils/threads.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/List.h>
+
+#include <media/IMediaAnalyticsService.h>
+#include "MetricsSummarizer.h"
+
+
+namespace android {
+
+class MetricsSummarizerExtractor : public MetricsSummarizer
+{
+
+ public:
+
+ MetricsSummarizerExtractor(const char *key);
+ virtual ~MetricsSummarizerExtractor() {};
+
+};
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
+
+#endif // ANDROID_METRICSSUMMARIZEREXTRACTOR_H
diff --git a/services/mediaanalytics/MetricsSummarizerPlayer.cpp b/services/mediaanalytics/MetricsSummarizerPlayer.cpp
new file mode 100644
index 0000000..5162059
--- /dev/null
+++ b/services/mediaanalytics/MetricsSummarizerPlayer.cpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "MetricsSummarizerPlayer"
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <utils/threads.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/List.h>
+
+#include <media/IMediaAnalyticsService.h>
+
+#include "MetricsSummarizer.h"
+#include "MetricsSummarizerPlayer.h"
+
+
+
+
+namespace android {
+
+static const char *player_ignorable[] = {
+ "android.media.mediaplayer.durationMs",
+ "android.media.mediaplayer.playingMs",
+ "android.media.mediaplayer.frames",
+ "android.media.mediaplayer.dropped",
+ 0
+};
+
+MetricsSummarizerPlayer::MetricsSummarizerPlayer(const char *key)
+ : MetricsSummarizer(key)
+{
+ ALOGV("MetricsSummarizerPlayer::MetricsSummarizerPlayer");
+ setIgnorables(player_ignorable);
+}
+
+void MetricsSummarizerPlayer::mergeRecord(MediaAnalyticsItem &summation, MediaAnalyticsItem &item) {
+
+ ALOGV("MetricsSummarizerPlayer::mergeRecord()");
+
+ //
+ // we sum time & frames.
+ // be careful about our special "-1" values that indicate 'unknown'
+ // treat those as 0 [basically, not summing them into the totals].
+ int64_t duration = 0;
+ if (item.getInt64("android.media.mediaplayer.durationMs", &duration)) {
+ ALOGV("found durationMs of %" PRId64, duration);
+ summation.addInt64("android.media.mediaplayer.durationMs",duration);
+ }
+ int64_t playing = 0;
+ if (item.getInt64("android.media.mediaplayer.playingMs", &playing))
+ ALOGV("found playingMs of %" PRId64, playing);
+ if (playing >= 0) {
+ summation.addInt64("android.media.mediaplayer.playingMs",playing);
+ }
+ int64_t frames = 0;
+ if (item.getInt64("android.media.mediaplayer.frames", &frames))
+ ALOGV("found framess of %" PRId64, frames);
+ if (frames >= 0) {
+ summation.addInt64("android.media.mediaplayer.frames",frames);
+ }
+ int64_t dropped = 0;
+ if (item.getInt64("android.media.mediaplayer.dropped", &dropped))
+ ALOGV("found dropped of %" PRId64, dropped);
+ if (dropped >= 0) {
+ summation.addInt64("android.media.mediaplayer.dropped",dropped);
+ }
+}
+
+} // namespace android
diff --git a/services/mediaanalytics/MetricsSummarizerPlayer.h b/services/mediaanalytics/MetricsSummarizerPlayer.h
new file mode 100644
index 0000000..ad1bf74
--- /dev/null
+++ b/services/mediaanalytics/MetricsSummarizerPlayer.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ANDROID_METRICSSUMMARIZERPLAYER_H
+#define ANDROID_METRICSSUMMARIZERPLAYER_H
+
+#include <utils/threads.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/List.h>
+
+#include <media/IMediaAnalyticsService.h>
+#include "MetricsSummarizer.h"
+
+
+namespace android {
+
+class MetricsSummarizerPlayer : public MetricsSummarizer
+{
+
+ public:
+
+ MetricsSummarizerPlayer(const char *key);
+ virtual ~MetricsSummarizerPlayer() {};
+
+ virtual void mergeRecord(MediaAnalyticsItem &have, MediaAnalyticsItem &incoming);
+
+};
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
+
+#endif // ANDROID_METRICSSUMMARIZERPLAYER_H
diff --git a/services/mediaanalytics/MetricsSummarizerRecorder.cpp b/services/mediaanalytics/MetricsSummarizerRecorder.cpp
new file mode 100644
index 0000000..c2919c3
--- /dev/null
+++ b/services/mediaanalytics/MetricsSummarizerRecorder.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "MetricsSummarizerRecorder"
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <utils/threads.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/List.h>
+
+#include <media/IMediaAnalyticsService.h>
+
+#include "MetricsSummarizer.h"
+#include "MetricsSummarizerRecorder.h"
+
+
+
+
+namespace android {
+
+MetricsSummarizerRecorder::MetricsSummarizerRecorder(const char *key)
+ : MetricsSummarizer(key)
+{
+ ALOGV("MetricsSummarizerRecorder::MetricsSummarizerRecorder");
+}
+
+} // namespace android
diff --git a/services/mediaanalytics/MetricsSummarizerRecorder.h b/services/mediaanalytics/MetricsSummarizerRecorder.h
new file mode 100644
index 0000000..963baab
--- /dev/null
+++ b/services/mediaanalytics/MetricsSummarizerRecorder.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ANDROID_METRICSSUMMARIZERRECORDER_H
+#define ANDROID_METRICSSUMMARIZERRECORDER_H
+
+#include <utils/threads.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/List.h>
+
+#include <media/IMediaAnalyticsService.h>
+#include "MetricsSummarizer.h"
+
+
+namespace android {
+
+class MetricsSummarizerRecorder : public MetricsSummarizer
+{
+
+ public:
+
+ MetricsSummarizerRecorder(const char *key);
+ virtual ~MetricsSummarizerRecorder() {};
+
+};
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
+
+#endif // ANDROID_METRICSSUMMARIZERRECORDER_H
diff --git a/services/mediaanalytics/main_mediametrics.cpp b/services/mediaanalytics/main_mediametrics.cpp
new file mode 100644
index 0000000..8020a03
--- /dev/null
+++ b/services/mediaanalytics/main_mediametrics.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "mediametrics"
+//#define LOG_NDEBUG 0
+
+#include <binder/IPCThreadState.h>
+#include <binder/ProcessState.h>
+#include <binder/IServiceManager.h>
+#include <utils/Log.h>
+//#include "RegisterExtensions.h"
+
+// from LOCAL_C_INCLUDES
+#include "MediaAnalyticsService.h"
+
+using namespace android;
+
+int main(int argc __unused, char **argv __unused)
+{
+ signal(SIGPIPE, SIG_IGN);
+
+ // to match the service name
+ // we're replacing "/system/bin/mediametrics" with "media.metrics"
+ // we add a ".", but discard the path components: we finish with a shorter string
+ strcpy(argv[0], "media.metrics");
+
+ sp<ProcessState> proc(ProcessState::self());
+ sp<IServiceManager> sm(defaultServiceManager());
+ ALOGI("ServiceManager: %p", sm.get());
+
+ MediaAnalyticsService::instantiate();
+
+ ProcessState::self()->startThreadPool();
+ IPCThreadState::self()->joinThreadPool();
+}
diff --git a/services/mediaanalytics/mediametrics.rc b/services/mediaanalytics/mediametrics.rc
new file mode 100644
index 0000000..3829f8c
--- /dev/null
+++ b/services/mediaanalytics/mediametrics.rc
@@ -0,0 +1,5 @@
+service mediametrics /system/bin/mediametrics
+ class main
+ user media
+ ioprio rt 4
+ writepid /dev/cpuset/foreground/tasks /dev/stune/foreground/tasks
diff --git a/services/mediacodec/Android.mk b/services/mediacodec/Android.mk
index de0bdfd..d3df52c 100644
--- a/services/mediacodec/Android.mk
+++ b/services/mediacodec/Android.mk
@@ -3,7 +3,13 @@
# service library
include $(CLEAR_VARS)
LOCAL_SRC_FILES := MediaCodecService.cpp
-LOCAL_SHARED_LIBRARIES := libmedia libbinder libutils liblog libstagefright_omx
+LOCAL_SHARED_LIBRARIES := \
+ libmedia \
+ libbinder \
+ libgui \
+ libutils \
+ liblog \
+ libstagefright_omx
LOCAL_C_INCLUDES := \
frameworks/av/media/libstagefright \
frameworks/native/include/media/openmax
@@ -14,17 +20,48 @@
# service executable
include $(CLEAR_VARS)
-LOCAL_REQUIRED_MODULES_arm := mediacodec-seccomp.policy
-LOCAL_SRC_FILES := main_codecservice.cpp minijail/minijail.cpp
-LOCAL_SHARED_LIBRARIES := libmedia libmediacodecservice libbinder libutils \
- liblog libminijail
+LOCAL_REQUIRED_MODULES_arm := mediacodec.policy
+LOCAL_SRC_FILES := main_codecservice.cpp
+LOCAL_SHARED_LIBRARIES := \
+ libmedia \
+ libmediacodecservice \
+ libbinder \
+ libutils \
+ libgui \
+ liblog \
+ libbase \
+ libavservices_minijail \
+ libcutils \
+ libhwbinder \
+ libhidltransport \
+ libstagefright_omx \
+ android.hardware.media.omx@1.0 \
+ android.hidl.memory@1.0
LOCAL_C_INCLUDES := \
frameworks/av/media/libstagefright \
+ frameworks/av/media/libstagefright/include \
frameworks/native/include/media/openmax
-LOCAL_MODULE:= mediacodec
+LOCAL_MODULE := android.hardware.media.omx@1.0-service
+LOCAL_MODULE_RELATIVE_PATH := hw
+LOCAL_PROPRIETARY_MODULE := true
LOCAL_32_BIT_ONLY := true
-LOCAL_INIT_RC := mediacodec.rc
+LOCAL_INIT_RC := android.hardware.media.omx@1.0-service.rc
include $(BUILD_EXECUTABLE)
-include $(call all-makefiles-under, $(LOCAL_PATH))
+# service seccomp policy
+ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), arm arm64))
+include $(CLEAR_VARS)
+LOCAL_MODULE := mediacodec.policy
+LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_PATH := $(TARGET_OUT)/etc/seccomp_policy
+# mediacodec runs in 32-bit compatibility mode. For 64 bit architectures,
+# use the 32 bit policy
+ifdef TARGET_2ND_ARCH
+ LOCAL_SRC_FILES := seccomp_policy/mediacodec-$(TARGET_2ND_ARCH).policy
+else
+ LOCAL_SRC_FILES := seccomp_policy/mediacodec-$(TARGET_ARCH).policy
+endif
+include $(BUILD_PREBUILT)
+endif
+include $(call all-makefiles-under, $(LOCAL_PATH))
diff --git a/services/mediacodec/android.hardware.media.omx@1.0-service.rc b/services/mediacodec/android.hardware.media.omx@1.0-service.rc
new file mode 100644
index 0000000..ec51d65
--- /dev/null
+++ b/services/mediacodec/android.hardware.media.omx@1.0-service.rc
@@ -0,0 +1,6 @@
+service mediacodec /vendor/bin/hw/android.hardware.media.omx@1.0-service
+ class main
+ user mediacodec
+ group camera drmrpc mediadrm
+ ioprio rt 4
+ writepid /dev/cpuset/foreground/tasks
diff --git a/services/mediacodec/main_codecservice.cpp b/services/mediacodec/main_codecservice.cpp
index a2868c1..c59944a 100644
--- a/services/mediacodec/main_codecservice.cpp
+++ b/services/mediacodec/main_codecservice.cpp
@@ -15,33 +15,71 @@
** limitations under the License.
*/
-#define LOG_TAG "mediacodec"
-//#define LOG_NDEBUG 0
-
#include <fcntl.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <binder/IPCThreadState.h>
#include <binder/ProcessState.h>
#include <binder/IServiceManager.h>
-#include <utils/Log.h>
+#include <cutils/properties.h>
+
+#include <string>
+
+#include <android-base/logging.h>
// from LOCAL_C_INCLUDES
#include "MediaCodecService.h"
-#include "minijail/minijail.h"
+#include "minijail.h"
+
+#include <hidl/HidlTransportSupport.h>
+#include <omx/1.0/Omx.h>
+#include <omx/1.0/OmxStore.h>
using namespace android;
+// Must match location in Android.mk.
+static const char kSystemSeccompPolicyPath[] =
+ "/system/etc/seccomp_policy/mediacodec.policy";
+static const char kVendorSeccompPolicyPath[] =
+ "/vendor/etc/seccomp_policy/mediacodec.policy";
+
int main(int argc __unused, char** argv)
{
- ALOGI("@@@ mediacodecservice starting");
+ LOG(INFO) << "mediacodecservice starting";
+ bool treble = property_get_bool("persist.media.treble_omx", true);
+ if (treble) {
+ android::ProcessState::initWithDriver("/dev/vndbinder");
+ }
+
signal(SIGPIPE, SIG_IGN);
- MiniJail();
+ SetUpMinijail(kSystemSeccompPolicyPath, kVendorSeccompPolicyPath);
strcpy(argv[0], "media.codec");
+
+ ::android::hardware::configureRpcThreadpool(64, false);
sp<ProcessState> proc(ProcessState::self());
- sp<IServiceManager> sm = defaultServiceManager();
- MediaCodecService::instantiate();
+
+ if (treble) {
+ using namespace ::android::hardware::media::omx::V1_0;
+ sp<IOmxStore> omxStore = new implementation::OmxStore();
+ if (omxStore == nullptr) {
+ LOG(ERROR) << "Cannot create IOmxStore HAL service.";
+ } else if (omxStore->registerAsService() != OK) {
+ LOG(ERROR) << "Cannot register IOmxStore HAL service.";
+ }
+ sp<IOmx> omx = new implementation::Omx();
+ if (omx == nullptr) {
+ LOG(ERROR) << "Cannot create IOmx HAL service.";
+ } else if (omx->registerAsService() != OK) {
+ LOG(ERROR) << "Cannot register IOmx HAL service.";
+ } else {
+ LOG(INFO) << "Treble OMX service created.";
+ }
+ } else {
+ MediaCodecService::instantiate();
+ LOG(INFO) << "Non-Treble OMX service created.";
+ }
+
ProcessState::self()->startThreadPool();
IPCThreadState::self()->joinThreadPool();
}
diff --git a/services/mediacodec/mediacodec.rc b/services/mediacodec/mediacodec.rc
deleted file mode 100644
index d78e0a4..0000000
--- a/services/mediacodec/mediacodec.rc
+++ /dev/null
@@ -1,6 +0,0 @@
-service mediacodec /system/bin/mediacodec
- class main
- user mediacodec
- group camera drmrpc mediadrm
- ioprio rt 4
- writepid /dev/cpuset/foreground/tasks
diff --git a/services/mediacodec/minijail/Android.mk b/services/mediacodec/minijail/Android.mk
deleted file mode 100644
index de05bc3..0000000
--- a/services/mediacodec/minijail/Android.mk
+++ /dev/null
@@ -1,27 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), arm arm64))
-include $(CLEAR_VARS)
-LOCAL_MODULE := mediacodec-seccomp.policy
-LOCAL_MODULE_CLASS := ETC
-LOCAL_MODULE_PATH := $(TARGET_OUT)/etc/seccomp_policy
-
-# mediacodec runs in 32-bit combatibility mode. For 64 bit architectures,
-# use the 32 bit policy
-ifdef TARGET_2ND_ARCH
- LOCAL_SRC_FILES := $(LOCAL_PATH)/seccomp_policy/mediacodec-seccomp-$(TARGET_2ND_ARCH).policy
-else
- LOCAL_SRC_FILES := $(LOCAL_PATH)/seccomp_policy/mediacodec-seccomp-$(TARGET_ARCH).policy
-endif
-
-# allow device specific additions to the syscall whitelist
-LOCAL_SRC_FILES += $(wildcard $(foreach dir, $(BOARD_SECCOMP_POLICY), \
- $(dir)/mediacodec-seccomp.policy))
-
-include $(BUILD_SYSTEM)/base_rules.mk
-
-$(LOCAL_BUILT_MODULE): $(LOCAL_SRC_FILES)
- @mkdir -p $(dir $@)
- $(hide) cat > $@ $^
-
-endif
diff --git a/services/mediacodec/minijail/minijail.cpp b/services/mediacodec/minijail/minijail.cpp
deleted file mode 100644
index 463f161..0000000
--- a/services/mediacodec/minijail/minijail.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
-**
-** Copyright 2016, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-#define LOG_TAG "minijail"
-
-#include <unistd.h>
-
-#include <log/log.h>
-
-#include <libminijail.h>
-
-#include "minijail.h"
-
-namespace android {
-
-/* Must match location in Android.mk */
-static const char kSeccompFilePath[] = "/system/etc/seccomp_policy/mediacodec-seccomp.policy";
-
-int MiniJail()
-{
- /* no seccomp policy for this architecture */
- if (access(kSeccompFilePath, R_OK) == -1) {
- ALOGW("No seccomp filter defined for this architecture.");
- return 0;
- }
-
- struct minijail *jail = minijail_new();
- if (jail == NULL) {
- ALOGW("Failed to create minijail.");
- return -1;
- }
-
- minijail_no_new_privs(jail);
- minijail_log_seccomp_filter_failures(jail);
- minijail_use_seccomp_filter(jail);
- minijail_parse_seccomp_filters(jail, kSeccompFilePath);
- minijail_enter(jail);
- minijail_destroy(jail);
- return 0;
-}
-}
diff --git a/services/mediacodec/minijail/minijail.h b/services/mediacodec/minijail/minijail.h
deleted file mode 100644
index ae01470..0000000
--- a/services/mediacodec/minijail/minijail.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
-**
-** Copyright 2016, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-namespace android {
-int MiniJail();
-}
diff --git a/services/mediacodec/minijail/seccomp_policy/mediacodec-seccomp-arm.policy b/services/mediacodec/minijail/seccomp_policy/mediacodec-seccomp-arm.policy
deleted file mode 100644
index 1a0e76d..0000000
--- a/services/mediacodec/minijail/seccomp_policy/mediacodec-seccomp-arm.policy
+++ /dev/null
@@ -1,52 +0,0 @@
-# Organized by frequency of systemcall - in descending order for
-# best performance.
-futex: 1
-ioctl: 1
-write: 1
-prctl: 1
-clock_gettime: 1
-getpriority: 1
-read: 1
-close: 1
-writev: 1
-dup: 1
-ppoll: 1
-mmap2: 1
-munmap: 1
-mprotect: 1
-madvise: 1
-openat: 1
-sigaltstack: 1
-clone: 1
-setpriority: 1
-getuid32: 1
-fstat64: 1
-fstatfs64: 1
-pread64: 1
-faccessat: 1
-readlinkat: 1
-exit: 1
-rt_sigprocmask: 1
-set_tid_address: 1
-restart_syscall: 1
-exit_group: 1
-rt_sigreturn: 1
-pipe2: 1
-gettimeofday: 1
-sched_yield: 1
-nanosleep: 1
-lseek: 1
-sched_get_priority_max: 1
-sched_get_priority_min: 1
-statfs64: 1
-sched_setscheduler: 1
-fstatat64: 1
-ugetrlimit: 1
-
-# for attaching to debuggerd on process crash
-sigaction: 1
-tgkill: 1
-socket: 1
-connect: 1
-fcntl64: 1
-rt_tgsigqueueinfo: 1
diff --git a/services/mediacodec/seccomp_policy/mediacodec-arm.policy b/services/mediacodec/seccomp_policy/mediacodec-arm.policy
new file mode 100644
index 0000000..73857f8
--- /dev/null
+++ b/services/mediacodec/seccomp_policy/mediacodec-arm.policy
@@ -0,0 +1,70 @@
+# Organized by frequency of system call - in descending order for
+# best performance.
+futex: 1
+ioctl: 1
+write: 1
+prctl: 1
+clock_gettime: 1
+getpriority: 1
+read: 1
+close: 1
+writev: 1
+dup: 1
+ppoll: 1
+mmap2: 1
+
+# mremap: Ensure |flags| are (MREMAP_MAYMOVE | MREMAP_FIXED) TODO: Once minijail
+# parser support for '<' is in this needs to be modified to also prevent
+# |old_address| and |new_address| from touching the exception vector page, which
+# on ARM is statically loaded at 0xffff 0000. See
+# http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0211h/Babfeega.html
+# for more details.
+mremap: arg3 == 3
+munmap: 1
+mprotect: 1
+madvise: 1
+openat: 1
+sigaltstack: 1
+clone: 1
+setpriority: 1
+getuid32: 1
+fstat64: 1
+fstatfs64: 1
+pread64: 1
+faccessat: 1
+readlinkat: 1
+exit: 1
+rt_sigprocmask: 1
+set_tid_address: 1
+restart_syscall: 1
+exit_group: 1
+rt_sigreturn: 1
+pipe2: 1
+gettimeofday: 1
+sched_yield: 1
+nanosleep: 1
+lseek: 1
+_llseek: 1
+sched_get_priority_max: 1
+sched_get_priority_min: 1
+statfs64: 1
+sched_setscheduler: 1
+fstatat64: 1
+ugetrlimit: 1
+getdents64: 1
+
+# for attaching to debuggerd on process crash
+sigaction: 1
+tgkill: 1
+socket: 1
+connect: 1
+fcntl64: 1
+rt_tgsigqueueinfo: 1
+geteuid32: 1
+getgid32: 1
+getegid32: 1
+getgroups32: 1
+recvmsg: 1
+getpid: 1
+gettid: 1
+process_vm_readv: 1
diff --git a/services/mediadrm/Android.mk b/services/mediadrm/Android.mk
index 38aa472..6b30db6 100644
--- a/services/mediadrm/Android.mk
+++ b/services/mediadrm/Android.mk
@@ -16,22 +16,32 @@
include $(CLEAR_VARS)
-
LOCAL_SRC_FILES:= \
+ MediaCasService.cpp \
MediaDrmService.cpp \
main_mediadrmserver.cpp
LOCAL_SHARED_LIBRARIES:= \
libbinder \
- libcutils \
liblog \
libmediadrm \
libutils \
+ libhidlbase \
+ libhidlmemory \
+ libhidltransport \
+ android.hardware.drm@1.0
LOCAL_CFLAGS += -Wall -Wextra -Werror
LOCAL_MODULE:= mediadrmserver
+
+# TODO: Some legacy DRM plugins only support 32-bit. They need to be migrated to
+# 64-bit. (b/18948909) Once all of a device's legacy DRM plugins support 64-bit,
+# that device can turn on TARGET_ENABLE_MEDIADRM_64 to build this service as
+# 64-bit.
+ifneq ($(TARGET_ENABLE_MEDIADRM_64), true)
LOCAL_32_BIT_ONLY := true
+endif
LOCAL_INIT_RC := mediadrmserver.rc
diff --git a/services/mediadrm/FactoryLoader.h b/services/mediadrm/FactoryLoader.h
new file mode 100644
index 0000000..d7f1118
--- /dev/null
+++ b/services/mediadrm/FactoryLoader.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_CAS_LOADER_H_
+#define MEDIA_CAS_LOADER_H_
+
+#include <dirent.h>
+#include <dlfcn.h>
+#include <media/SharedLibrary.h>
+#include <utils/KeyedVector.h>
+#include <utils/Mutex.h>
+
+namespace android {
+using namespace std;
+using namespace media;
+using namespace MediaCas;
+
+template <class T>
+class FactoryLoader {
+public:
+ FactoryLoader(const char *name) :
+ mFactory(NULL), mCreateFactoryFuncName(name) {}
+
+ virtual ~FactoryLoader() { closeFactory(); }
+
+ bool findFactoryForScheme(
+ int32_t CA_system_id,
+ sp<SharedLibrary> *library = NULL,
+ T** factory = NULL);
+
+ bool enumeratePlugins(vector<ParcelableCasPluginDescriptor>* results);
+
+private:
+ typedef T*(*CreateFactoryFunc)();
+
+ Mutex mMapLock;
+ T* mFactory;
+ const char *mCreateFactoryFuncName;
+ sp<SharedLibrary> mLibrary;
+ KeyedVector<int32_t, String8> mCASystemIdToLibraryPathMap;
+ KeyedVector<String8, wp<SharedLibrary> > mLibraryPathToOpenLibraryMap;
+
+ bool loadFactoryForSchemeFromPath(
+ const String8 &path,
+ int32_t CA_system_id,
+ sp<SharedLibrary> *library,
+ T** factory);
+
+ bool queryPluginsFromPath(
+ const String8 &path,
+ vector<ParcelableCasPluginDescriptor>* results);
+
+ bool openFactory(const String8 &path);
+ void closeFactory();
+};
+
+template <class T>
+bool FactoryLoader<T>::findFactoryForScheme(
+ int32_t CA_system_id, sp<SharedLibrary> *library, T** factory) {
+ if (library != NULL) {
+ library->clear();
+ }
+ if (factory != NULL) {
+ *factory = NULL;
+ }
+
+ Mutex::Autolock autoLock(mMapLock);
+
+ // first check cache
+ ssize_t index = mCASystemIdToLibraryPathMap.indexOfKey(CA_system_id);
+ if (index >= 0) {
+ return loadFactoryForSchemeFromPath(
+ mCASystemIdToLibraryPathMap[index],
+ CA_system_id, library, factory);
+ }
+
+ // no luck, have to search
+ String8 dirPath("/system/lib/mediacas");
+ DIR* pDir = opendir(dirPath.string());
+
+ if (pDir == NULL) {
+ ALOGE("Failed to open plugin directory %s", dirPath.string());
+ return false;
+ }
+
+ struct dirent* pEntry;
+ while ((pEntry = readdir(pDir))) {
+ String8 pluginPath = dirPath + "/" + pEntry->d_name;
+ if (pluginPath.getPathExtension() == ".so") {
+ if (loadFactoryForSchemeFromPath(
+ pluginPath, CA_system_id, library, factory)) {
+ mCASystemIdToLibraryPathMap.add(CA_system_id, pluginPath);
+ closedir(pDir);
+
+ return true;
+ }
+ }
+ }
+
+ closedir(pDir);
+
+ ALOGE("Failed to find plugin");
+ return false;
+}
+
+template <class T>
+bool FactoryLoader<T>::enumeratePlugins(
+ vector<ParcelableCasPluginDescriptor>* results) {
+ ALOGI("enumeratePlugins");
+
+ results->clear();
+
+ String8 dirPath("/system/lib/mediacas");
+ DIR* pDir = opendir(dirPath.string());
+
+ if (pDir == NULL) {
+ ALOGE("Failed to open plugin directory %s", dirPath.string());
+ return false;
+ }
+
+ Mutex::Autolock autoLock(mMapLock);
+
+ struct dirent* pEntry;
+ while ((pEntry = readdir(pDir))) {
+ String8 pluginPath = dirPath + "/" + pEntry->d_name;
+ if (pluginPath.getPathExtension() == ".so") {
+ queryPluginsFromPath(pluginPath, results);
+ }
+ }
+ return true;
+}
+
+template <class T>
+bool FactoryLoader<T>::loadFactoryForSchemeFromPath(
+ const String8 &path, int32_t CA_system_id,
+ sp<SharedLibrary> *library, T** factory) {
+ closeFactory();
+
+ if (!openFactory(path) || !mFactory->isSystemIdSupported(CA_system_id)) {
+ closeFactory();
+ return false;
+ }
+
+ if (library != NULL) {
+ *library = mLibrary;
+ }
+ if (factory != NULL) {
+ *factory = mFactory;
+ }
+ return true;
+}
+
+template <class T>
+bool FactoryLoader<T>::queryPluginsFromPath(
+ const String8 &path, vector<ParcelableCasPluginDescriptor>* results) {
+ closeFactory();
+
+ vector<CasPluginDescriptor> descriptors;
+ if (!openFactory(path) || mFactory->queryPlugins(&descriptors) != OK) {
+ closeFactory();
+ return false;
+ }
+
+ for (auto it = descriptors.begin(); it != descriptors.end(); it++) {
+ results->push_back(ParcelableCasPluginDescriptor(
+ it->CA_system_id, it->name));
+ }
+ return true;
+}
+
+template <class T>
+bool FactoryLoader<T>::openFactory(const String8 &path) {
+ // get strong pointer to open shared library
+ ssize_t index = mLibraryPathToOpenLibraryMap.indexOfKey(path);
+ if (index >= 0) {
+ mLibrary = mLibraryPathToOpenLibraryMap[index].promote();
+ } else {
+ index = mLibraryPathToOpenLibraryMap.add(path, NULL);
+ }
+
+ if (!mLibrary.get()) {
+ mLibrary = new SharedLibrary(path);
+ if (!*mLibrary) {
+ return false;
+ }
+
+ mLibraryPathToOpenLibraryMap.replaceValueAt(index, mLibrary);
+ }
+
+ CreateFactoryFunc createFactory =
+ (CreateFactoryFunc)mLibrary->lookup(mCreateFactoryFuncName);
+ if (createFactory == NULL || (mFactory = createFactory()) == NULL) {
+ return false;
+ }
+ return true;
+}
+
+template <class T>
+void FactoryLoader<T>::closeFactory() {
+ delete mFactory;
+ mFactory = NULL;
+ mLibrary.clear();
+}
+
+} // namespace android
+
+#endif // MEDIA_CAS_LOADER_H_
diff --git a/services/mediadrm/MediaCasService.cpp b/services/mediadrm/MediaCasService.cpp
new file mode 100644
index 0000000..c111283
--- /dev/null
+++ b/services/mediadrm/MediaCasService.cpp
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaCasService"
+
+#include <binder/IServiceManager.h>
+#include <media/cas/CasAPI.h>
+#include <media/cas/DescramblerAPI.h>
+#include <media/CasImpl.h>
+#include <media/DescramblerImpl.h>
+#include <utils/Log.h>
+#include <utils/List.h>
+#include "MediaCasService.h"
+#include <android/media/ICasListener.h>
+
+namespace android {
+
+//static
+void MediaCasService::instantiate() {
+ defaultServiceManager()->addService(
+ String16("media.cas"), new MediaCasService());
+}
+
+MediaCasService::MediaCasService() :
+ mCasLoader(new FactoryLoader<CasFactory>("createCasFactory")),
+ mDescramblerLoader(new FactoryLoader<DescramblerFactory>(
+ "createDescramblerFactory")) {
+}
+
+MediaCasService::~MediaCasService() {
+ delete mCasLoader;
+ delete mDescramblerLoader;
+}
+
+Status MediaCasService::enumeratePlugins(
+ vector<ParcelableCasPluginDescriptor>* results) {
+ ALOGV("enumeratePlugins");
+
+ mCasLoader->enumeratePlugins(results);
+
+ return Status::ok();
+}
+
+Status MediaCasService::isSystemIdSupported(
+ int32_t CA_system_id, bool* result) {
+ ALOGV("isSystemIdSupported: CA_system_id=%d", CA_system_id);
+
+ *result = mCasLoader->findFactoryForScheme(CA_system_id);
+
+ return Status::ok();
+}
+
+Status MediaCasService::createPlugin(
+ int32_t CA_system_id,
+ const sp<ICasListener> &listener,
+ sp<ICas>* result) {
+ ALOGV("createPlugin: CA_system_id=%d", CA_system_id);
+
+ result->clear();
+
+ CasFactory *factory;
+ sp<SharedLibrary> library;
+ if (mCasLoader->findFactoryForScheme(CA_system_id, &library, &factory)) {
+ CasPlugin *plugin = NULL;
+ sp<CasImpl> casImpl = new CasImpl(listener);
+ if (factory->createPlugin(CA_system_id, (uint64_t)casImpl.get(),
+ &CasImpl::OnEvent, &plugin) == OK && plugin != NULL) {
+ casImpl->init(library, plugin);
+ *result = casImpl;
+ }
+ }
+
+ return Status::ok();
+}
+
+Status MediaCasService::isDescramblerSupported(
+ int32_t CA_system_id, bool* result) {
+ ALOGV("isDescramblerSupported: CA_system_id=%d", CA_system_id);
+
+ *result = mDescramblerLoader->findFactoryForScheme(CA_system_id);
+
+ return Status::ok();
+}
+
+Status MediaCasService::createDescrambler(
+ int32_t CA_system_id, sp<IDescrambler>* result) {
+ ALOGV("createDescrambler: CA_system_id=%d", CA_system_id);
+
+ result->clear();
+
+ DescramblerFactory *factory;
+ sp<SharedLibrary> library;
+ if (mDescramblerLoader->findFactoryForScheme(
+ CA_system_id, &library, &factory)) {
+ DescramblerPlugin *plugin = NULL;
+ if (factory->createPlugin(CA_system_id, &plugin) == OK
+ && plugin != NULL) {
+ *result = new DescramblerImpl(library, plugin);
+ }
+ }
+
+ return Status::ok();
+}
+
+} // namespace android
diff --git a/services/mediadrm/MediaCasService.h b/services/mediadrm/MediaCasService.h
new file mode 100644
index 0000000..cb828f2
--- /dev/null
+++ b/services/mediadrm/MediaCasService.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_CAS_SERVICE_H_
+#define MEDIA_CAS_SERVICE_H_
+
+#include <android/media/BnMediaCasService.h>
+
+#include "FactoryLoader.h"
+
+namespace android {
+using binder::Status;
+struct CasFactory;
+struct DescramblerFactory;
+
+class MediaCasService : public BnMediaCasService {
+public:
+ static void instantiate();
+
+ virtual Status enumeratePlugins(
+ vector<ParcelableCasPluginDescriptor>* results) override;
+
+ virtual Status isSystemIdSupported(
+ int32_t CA_system_id, bool* result) override;
+
+ virtual Status createPlugin(
+ int32_t CA_system_id,
+ const sp<ICasListener> &listener,
+ sp<ICas>* result) override;
+
+ virtual Status isDescramblerSupported(
+ int32_t CA_system_id, bool* result) override;
+
+ virtual Status createDescrambler(
+ int32_t CA_system_id, sp<IDescrambler>* result) override;
+
+private:
+ FactoryLoader<CasFactory> *mCasLoader;
+ FactoryLoader<DescramblerFactory> *mDescramblerLoader;
+
+ MediaCasService();
+ virtual ~MediaCasService();
+};
+
+} // namespace android
+
+#endif // MEDIA_CAS_SERVICE_H_
diff --git a/services/mediadrm/MediaDrmService.cpp b/services/mediadrm/MediaDrmService.cpp
index 331c568..a368c11 100644
--- a/services/mediadrm/MediaDrmService.cpp
+++ b/services/mediadrm/MediaDrmService.cpp
@@ -21,12 +21,12 @@
#define LOG_TAG "MediaDrmService"
#include "MediaDrmService.h"
-
#include <binder/IServiceManager.h>
-#include <media/Crypto.h>
-#include <media/Drm.h>
#include <utils/Log.h>
+#include <media/CryptoHal.h>
+#include <media/DrmHal.h>
+
namespace android {
void MediaDrmService::instantiate() {
@@ -35,11 +35,11 @@
}
sp<ICrypto> MediaDrmService::makeCrypto() {
- return new Crypto;
+ return new CryptoHal;
}
sp<IDrm> MediaDrmService::makeDrm() {
- return new Drm;
+ return new DrmHal;
}
} // namespace android
diff --git a/services/mediadrm/main_mediadrmserver.cpp b/services/mediadrm/main_mediadrmserver.cpp
index b767b8c..b685ae0 100644
--- a/services/mediadrm/main_mediadrmserver.cpp
+++ b/services/mediadrm/main_mediadrmserver.cpp
@@ -27,6 +27,7 @@
#include <cutils/properties.h>
#include <utils/Log.h>
#include "MediaDrmService.h"
+#include "MediaCasService.h"
using namespace android;
@@ -38,6 +39,7 @@
sp<IServiceManager> sm = defaultServiceManager();
ALOGI("ServiceManager: %p", sm.get());
MediaDrmService::instantiate();
+ MediaCasService::instantiate();
ProcessState::self()->startThreadPool();
IPCThreadState::self()->joinThreadPool();
}
diff --git a/services/mediaextractor/Android.mk b/services/mediaextractor/Android.mk
index a9a2d3c..1ebb7ff 100644
--- a/services/mediaextractor/Android.mk
+++ b/services/mediaextractor/Android.mk
@@ -5,21 +5,30 @@
LOCAL_SRC_FILES := MediaExtractorService.cpp
LOCAL_SHARED_LIBRARIES := libmedia libstagefright libbinder libutils liblog
LOCAL_MODULE:= libmediaextractorservice
-LOCAL_32_BIT_ONLY := true
include $(BUILD_SHARED_LIBRARY)
# service executable
include $(CLEAR_VARS)
-LOCAL_REQUIRED_MODULES_arm := mediaextractor-seccomp.policy
-LOCAL_REQUIRED_MODULES_x86 := mediaextractor-seccomp.policy
-LOCAL_SRC_FILES := main_extractorservice.cpp minijail/minijail.cpp
-LOCAL_SHARED_LIBRARIES := libmedia libmediaextractorservice libbinder libutils liblog libicuuc libminijail
+# seccomp filters are defined for the following architectures:
+LOCAL_REQUIRED_MODULES_arm := mediaextractor.policy
+LOCAL_REQUIRED_MODULES_arm64 := mediaextractor.policy
+LOCAL_REQUIRED_MODULES_x86 := mediaextractor.policy
+LOCAL_SRC_FILES := main_extractorservice.cpp
+LOCAL_SHARED_LIBRARIES := libmedia libmediaextractorservice libbinder libutils \
+ liblog libbase libicuuc libavservices_minijail
LOCAL_STATIC_LIBRARIES := libicuandroid_utils
LOCAL_MODULE:= mediaextractor
-LOCAL_32_BIT_ONLY := true
LOCAL_INIT_RC := mediaextractor.rc
LOCAL_C_INCLUDES := frameworks/av/media/libmedia
include $(BUILD_EXECUTABLE)
-include $(call all-makefiles-under, $(LOCAL_PATH))
+# service seccomp filter
+ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), arm arm64 x86))
+include $(CLEAR_VARS)
+LOCAL_MODULE := mediaextractor.policy
+LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_PATH := $(TARGET_OUT)/etc/seccomp_policy
+LOCAL_SRC_FILES := seccomp_policy/mediaextractor-$(TARGET_ARCH).policy
+include $(BUILD_PREBUILT)
+endif
diff --git a/services/mediaextractor/MediaExtractorService.cpp b/services/mediaextractor/MediaExtractorService.cpp
index 4a80166..08cbef6 100644
--- a/services/mediaextractor/MediaExtractorService.cpp
+++ b/services/mediaextractor/MediaExtractorService.cpp
@@ -22,6 +22,7 @@
#include <media/stagefright/DataSource.h>
#include <media/stagefright/MediaExtractor.h>
+#include <media/stagefright/RemoteDataSource.h>
#include "MediaExtractorService.h"
namespace android {
@@ -45,6 +46,12 @@
return ret;
}
+sp<IDataSource> MediaExtractorService::makeIDataSource(int fd, int64_t offset, int64_t length)
+{
+ sp<DataSource> source = DataSource::CreateFromFd(fd, offset, length);
+ return source.get() != nullptr ? source->asIDataSource() : nullptr;
+}
+
status_t MediaExtractorService::dump(int fd, const Vector<String16>& args) {
return dumpExtractors(fd, args);
}
diff --git a/services/mediaextractor/MediaExtractorService.h b/services/mediaextractor/MediaExtractorService.h
index 078af0c..9df3ecd 100644
--- a/services/mediaextractor/MediaExtractorService.h
+++ b/services/mediaextractor/MediaExtractorService.h
@@ -34,6 +34,9 @@
static const char* getServiceName() { return "media.extractor"; }
virtual sp<IMediaExtractor> makeExtractor(const sp<IDataSource> &source, const char *mime);
+
+ virtual sp<IDataSource> makeIDataSource(int fd, int64_t offset, int64_t length);
+
virtual status_t dump(int fd, const Vector<String16>& args);
virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply,
diff --git a/services/mediaextractor/main_extractorservice.cpp b/services/mediaextractor/main_extractorservice.cpp
index 245489e..6a5320d 100644
--- a/services/mediaextractor/main_extractorservice.cpp
+++ b/services/mediaextractor/main_extractorservice.cpp
@@ -15,25 +15,31 @@
** limitations under the License.
*/
-#define LOG_TAG "mediaextractor"
-//#define LOG_NDEBUG 0
-
#include <fcntl.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <binder/IPCThreadState.h>
#include <binder/ProcessState.h>
#include <binder/IServiceManager.h>
-#include <utils/Log.h>
+
+#include <string>
+
+#include <android-base/logging.h>
+#include <utils/misc.h>
// from LOCAL_C_INCLUDES
#include "IcuUtils.h"
#include "MediaExtractorService.h"
#include "MediaUtils.h"
-#include "minijail/minijail.h"
+#include "minijail.h"
using namespace android;
+static const char kSystemSeccompPolicyPath[] =
+ "/system/etc/seccomp_policy/mediaextractor.policy";
+static const char kVendorSeccompPolicyPath[] =
+ "/vendor/etc/seccomp_policy/mediaextractor.policy";
+
int main(int argc __unused, char** argv)
{
limitProcessMemory(
@@ -42,7 +48,14 @@
20 /* upper limit as percentage of physical RAM */);
signal(SIGPIPE, SIG_IGN);
- MiniJail();
+
+    //b/62255959: this forces libutils.so to dlopen the vendor version of
+    //libutils.so before minijail is on. This is dirty but required since some
+    //syscalls such as pread64 are used by the linker but aren't allowed in the
+    //minijail. By calling the function before entering minijail, we can force dlopen.
+ android::report_sysprop_change();
+
+ SetUpMinijail(kSystemSeccompPolicyPath, kVendorSeccompPolicyPath);
InitializeIcuOrDie();
diff --git a/services/mediaextractor/minijail/Android.mk b/services/mediaextractor/minijail/Android.mk
deleted file mode 100644
index 0cf8eff..0000000
--- a/services/mediaextractor/minijail/Android.mk
+++ /dev/null
@@ -1,27 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), arm arm64 x86 x86_64))
-include $(CLEAR_VARS)
-LOCAL_MODULE := mediaextractor-seccomp.policy
-LOCAL_MODULE_CLASS := ETC
-LOCAL_MODULE_PATH := $(TARGET_OUT)/etc/seccomp_policy
-
-# mediaextractor runs in 32-bit combatibility mode. For 64 bit architectures,
-# use the 32 bit policy
-ifdef TARGET_2ND_ARCH
- LOCAL_SRC_FILES := $(LOCAL_PATH)/seccomp_policy/mediaextractor-seccomp-$(TARGET_2ND_ARCH).policy
-else
- LOCAL_SRC_FILES := $(LOCAL_PATH)/seccomp_policy/mediaextractor-seccomp-$(TARGET_ARCH).policy
-endif
-
-# allow device specific additions to the syscall whitelist
-LOCAL_SRC_FILES += $(wildcard $(foreach dir, $(BOARD_SECCOMP_POLICY), \
- $(dir)/mediaextractor-seccomp.policy))
-
-include $(BUILD_SYSTEM)/base_rules.mk
-
-$(LOCAL_BUILT_MODULE): $(LOCAL_SRC_FILES)
- @mkdir -p $(dir $@)
- $(hide) cat > $@ $^
-
-endif
diff --git a/services/mediaextractor/minijail/minijail.cpp b/services/mediaextractor/minijail/minijail.cpp
deleted file mode 100644
index c44d00d..0000000
--- a/services/mediaextractor/minijail/minijail.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
-**
-** Copyright 2015, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-#define LOG_TAG "minijail"
-
-#include <unistd.h>
-
-#include <log/log.h>
-
-#include <libminijail.h>
-
-#include "minijail.h"
-
-namespace android {
-
-/* Must match location in Android.mk */
-static const char kSeccompFilePath[] = "/system/etc/seccomp_policy/mediaextractor-seccomp.policy";
-
-int MiniJail()
-{
- /* no seccomp policy for this architecture */
- if (access(kSeccompFilePath, R_OK) == -1) {
- ALOGW("No seccomp filter defined for this architecture.");
- return 0;
- }
-
- struct minijail *jail = minijail_new();
- if (jail == NULL) {
- ALOGW("Failed to create minijail.");
- return -1;
- }
-
- minijail_no_new_privs(jail);
- minijail_log_seccomp_filter_failures(jail);
- minijail_use_seccomp_filter(jail);
- minijail_parse_seccomp_filters(jail, kSeccompFilePath);
- minijail_enter(jail);
- minijail_destroy(jail);
- return 0;
-}
-}
diff --git a/services/mediaextractor/minijail/minijail.h b/services/mediaextractor/minijail/minijail.h
deleted file mode 100644
index 6ea4487..0000000
--- a/services/mediaextractor/minijail/minijail.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
-**
-** Copyright 2015, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-namespace android {
-int MiniJail();
-}
diff --git a/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-arm.policy b/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-arm.policy
deleted file mode 100644
index cb2ac1a..0000000
--- a/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-arm.policy
+++ /dev/null
@@ -1,48 +0,0 @@
-# Organized by frequency of systemcall - in descending order for
-# best performance.
-ioctl: 1
-futex: 1
-prctl: 1
-write: 1
-getpriority: 1
-mmap2: 1
-close: 1
-munmap: 1
-dup: 1
-mprotect: 1
-getuid32: 1
-setpriority: 1
-sigaltstack: 1
-openat: 1
-clone: 1
-read: 1
-clock_gettime: 1
-lseek: 1
-writev: 1
-fstatat64: 1
-fstatfs64: 1
-fstat64: 1
-restart_syscall: 1
-exit: 1
-exit_group: 1
-rt_sigreturn: 1
-faccessat: 1
-madvise: 1
-brk: 1
-sched_setscheduler: 1
-gettid: 1
-rt_sigprocmask: 1
-sched_yield: 1
-ugetrlimit: 1
-geteuid32: 1
-getgid32: 1
-getegid32: 1
-getgroups32: 1
-
-# for attaching to debuggerd on process crash
-sigaction: 1
-tgkill: 1
-socket: 1
-connect: 1
-fcntl64: 1
-rt_tgsigqueueinfo: 1
diff --git a/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-x86.policy b/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-x86.policy
deleted file mode 100644
index 25d9786..0000000
--- a/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-x86.policy
+++ /dev/null
@@ -1,46 +0,0 @@
-# Organized by frequency of systemcall - in descending order for
-# best performance.
-ioctl: 1
-futex: 1
-prctl: 1
-write: 1
-getpriority: 1
-close: 1
-dup: 1
-munmap: 1
-mmap2: 1
-madvise: 1
-openat: 1
-clock_gettime: 1
-writev: 1
-brk: 1
-mprotect: 1
-read: 1
-lseek: 1
-getuid32: 1
-clone: 1
-setpriority: 1
-sigaltstack: 1
-fstatat64: 1
-fstatfs64: 1
-fstat64: 1
-restart_syscall: 1
-exit: 1
-exit_group: 1
-rt_sigreturn: 1
-faccessat: 1
-sched_setscheduler: 1
-ugetrlimit: 1
-getrlimit: 1
-geteuid32: 1
-getgid32: 1
-getegid32: 1
-getgroups32: 1
-
-# for attaching to debuggerd on process crash
-socketcall: 1
-sigaction: 1
-tgkill: 1
-rt_sigprocmask: 1
-fcntl64: 1
-rt_tgsigqueueinfo: 1
diff --git a/services/mediaextractor/seccomp_policy/mediaextractor-arm.policy b/services/mediaextractor/seccomp_policy/mediaextractor-arm.policy
new file mode 100644
index 0000000..e06ac8c
--- /dev/null
+++ b/services/mediaextractor/seccomp_policy/mediaextractor-arm.policy
@@ -0,0 +1,64 @@
+# Organized by frequency of system call - in descending order for
+# best performance.
+ioctl: 1
+futex: 1
+prctl: 1
+write: 1
+getpriority: 1
+mmap2: 1
+close: 1
+munmap: 1
+dup: 1
+mprotect: 1
+getuid32: 1
+setpriority: 1
+sigaltstack: 1
+openat: 1
+clone: 1
+read: 1
+clock_gettime: 1
+lseek: 1
+writev: 1
+fstatat64: 1
+fstatfs64: 1
+fstat64: 1
+restart_syscall: 1
+exit: 1
+exit_group: 1
+rt_sigreturn: 1
+faccessat: 1
+madvise: 1
+brk: 1
+sched_setscheduler: 1
+gettid: 1
+rt_sigprocmask: 1
+sched_yield: 1
+ugetrlimit: 1
+geteuid32: 1
+getgid32: 1
+getegid32: 1
+getgroups32: 1
+nanosleep: 1
+
+# for FileSource
+readlinkat: 1
+_llseek: 1
+
+# for attaching to debuggerd on process crash
+sigaction: 1
+tgkill: 1
+socket: 1
+connect: 1
+recvmsg: 1
+fcntl64: 1
+rt_tgsigqueueinfo: 1
+geteuid32: 1
+getgid32: 1
+getegid32: 1
+getgroups32: 1
+getdents64: 1
+pipe2: 1
+ppoll: 1
+getpid: 1
+gettid: 1
+process_vm_readv: 1
diff --git a/services/mediaextractor/seccomp_policy/mediaextractor-arm64.policy b/services/mediaextractor/seccomp_policy/mediaextractor-arm64.policy
new file mode 100644
index 0000000..4b51457
--- /dev/null
+++ b/services/mediaextractor/seccomp_policy/mediaextractor-arm64.policy
@@ -0,0 +1,55 @@
+# Organized by frequency of system call - in descending order for
+# best performance.
+ioctl: 1
+futex: 1
+prctl: 1
+write: 1
+getpriority: 1
+close: 1
+dup: 1
+mmap: 1
+munmap: 1
+openat: 1
+mprotect: 1
+madvise: 1
+getuid: 1
+fstat: 1
+fstatfs: 1
+read: 1
+setpriority: 1
+sigaltstack: 1
+clone: 1
+lseek: 1
+newfstatat: 1
+faccessat: 1
+restart_syscall: 1
+exit: 1
+exit_group: 1
+rt_sigreturn: 1
+getrlimit: 1
+nanosleep: 1
+
+# for FileSource
+readlinkat: 1
+_llseek: 1
+
+# for attaching to debuggerd on process crash
+tgkill: 1
+rt_sigprocmask: 1
+rt_sigaction: 1
+# socket: arg0 == AF_LOCAL
+socket: arg0 == 1
+connect: 1
+recvmsg: 1
+rt_tgsigqueueinfo: 1
+writev: 1
+geteuid: 1
+getgid: 1
+getegid: 1
+getgroups: 1
+getdents64: 1
+pipe2: 1
+ppoll: 1
+getpid: 1
+gettid: 1
+process_vm_readv: 1
diff --git a/services/mediaextractor/seccomp_policy/mediaextractor-x86.policy b/services/mediaextractor/seccomp_policy/mediaextractor-x86.policy
new file mode 100644
index 0000000..cdff4db
--- /dev/null
+++ b/services/mediaextractor/seccomp_policy/mediaextractor-x86.policy
@@ -0,0 +1,65 @@
+# Organized by frequency of system call - in descending order for
+# best performance.
+ioctl: 1
+futex: 1
+prctl: 1
+write: 1
+getpriority: 1
+close: 1
+dup: 1
+munmap: 1
+mmap2: 1
+madvise: 1
+openat: 1
+clock_gettime: 1
+writev: 1
+brk: 1
+mprotect: 1
+read: 1
+lseek: 1
+getuid32: 1
+clone: 1
+setpriority: 1
+sigaltstack: 1
+fstatat64: 1
+fstatfs64: 1
+fstat64: 1
+restart_syscall: 1
+exit: 1
+exit_group: 1
+rt_sigreturn: 1
+faccessat: 1
+sched_setscheduler: 1
+ugetrlimit: 1
+getrlimit: 1
+geteuid32: 1
+getgid32: 1
+getegid32: 1
+getgroups32: 1
+nanosleep: 1
+
+# for FileSource
+readlinkat: 1
+_llseek: 1
+
+# for attaching to debuggerd on process crash
+socketcall: 1
+sigaction: 1
+tgkill: 1
+rt_sigprocmask: 1
+fcntl64: 1
+rt_tgsigqueueinfo: 1
+geteuid32: 1
+getgid32: 1
+getegid32: 1
+getgroups32: 1
+getdents64: 1
+pipe2: 1
+ppoll: 1
+process_vm_readv: 1
+
+# Required by AddressSanitizer
+gettid: 1
+sched_yield: 1
+getpid: 1
+gettid: 1
diff --git a/services/medialog/Android.mk b/services/medialog/Android.mk
index a1da63d..423b186 100644
--- a/services/medialog/Android.mk
+++ b/services/medialog/Android.mk
@@ -4,7 +4,7 @@
LOCAL_SRC_FILES := MediaLogService.cpp IMediaLogService.cpp
-LOCAL_SHARED_LIBRARIES := libbinder libutils liblog libnbaio
+LOCAL_SHARED_LIBRARIES := libbinder libutils liblog libnbaio libaudioutils
LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
diff --git a/services/medialog/IMediaLogService.cpp b/services/medialog/IMediaLogService.cpp
index bc445ff..0e9b01e 100644
--- a/services/medialog/IMediaLogService.cpp
+++ b/services/medialog/IMediaLogService.cpp
@@ -29,6 +29,7 @@
enum {
REGISTER_WRITER = IBinder::FIRST_CALL_TRANSACTION,
UNREGISTER_WRITER,
+ REQUEST_MERGE_WAKEUP,
};
class BpMediaLogService : public BpInterface<IMediaLogService>
@@ -57,6 +58,13 @@
// FIXME ignores status
}
+ virtual void requestMergeWakeup() {
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaLogService::getInterfaceDescriptor());
+ status_t status __unused = remote()->transact(REQUEST_MERGE_WAKEUP, data, &reply);
+ // FIXME ignores status
+ }
+
};
IMPLEMENT_META_INTERFACE(MediaLogService, "android.media.IMediaLogService");
@@ -84,6 +92,12 @@
return NO_ERROR;
}
+ case REQUEST_MERGE_WAKEUP: {
+ CHECK_INTERFACE(IMediaLogService, data, reply);
+ requestMergeWakeup();
+ return NO_ERROR;
+ }
+
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/services/medialog/MediaLogService.cpp b/services/medialog/MediaLogService.cpp
index f85aa13..aaf1018 100644
--- a/services/medialog/MediaLogService.cpp
+++ b/services/medialog/MediaLogService.cpp
@@ -26,7 +26,24 @@
namespace android {
-static const char kDeadlockedString[] = "MediaLogService may be deadlocked\n";
+// static const char kDeadlockedString[] = "MediaLogService may be deadlocked\n";
+MediaLogService::MediaLogService() :
+ BnMediaLogService(),
+ mMergerShared((NBLog::Shared*) malloc(NBLog::Timeline::sharedSize(kMergeBufferSize))),
+ mMerger(mMergerShared, kMergeBufferSize),
+ mMergeReader(mMergerShared, kMergeBufferSize, mMerger),
+ mMergeThread(new NBLog::MergeThread(mMerger))
+{
+ mMergeThread->run("MergeThread");
+}
+
+MediaLogService::~MediaLogService()
+{
+ mMergeThread->requestExit();
+ mMergeThread->setTimeoutUs(0);
+ mMergeThread->join();
+ free(mMergerShared);
+}
void MediaLogService::registerWriter(const sp<IMemory>& shared, size_t size, const char *name)
{
@@ -35,10 +52,11 @@
shared->size() < NBLog::Timeline::sharedSize(size)) {
return;
}
- sp<NBLog::Reader> reader(new NBLog::Reader(size, shared));
- NamedReader namedReader(reader, name);
+ sp<NBLog::Reader> reader(new NBLog::Reader(shared, size));
+ NBLog::NamedReader namedReader(reader, name);
Mutex::Autolock _l(mLock);
mNamedReaders.add(namedReader);
+ mMerger.addReader(namedReader);
}
void MediaLogService::unregisterWriter(const sp<IMemory>& shared)
@@ -81,7 +99,8 @@
return NO_ERROR;
}
- Vector<NamedReader> namedReaders;
+#if 0
+ Vector<NBLog::NamedReader> namedReaders;
{
bool locked = dumpTryLock(mLock);
@@ -95,19 +114,22 @@
}
return NO_ERROR;
}
- namedReaders = mNamedReaders;
+ // namedReaders = mNamedReaders;
+ // for (size_t i = 0; i < namedReaders.size(); i++) {
+ // const NBLog::NamedReader& namedReader = namedReaders[i];
+ // if (fd >= 0) {
+ // dprintf(fd, "\n%s:\n", namedReader.name());
+ // } else {
+ // ALOGI("%s:", namedReader.name());
+ // }
+ // namedReader.reader()->dump(fd, 0 /*indent*/);
+ // }
+
mLock.unlock();
}
-
- for (size_t i = 0; i < namedReaders.size(); i++) {
- const NamedReader& namedReader = namedReaders[i];
- if (fd >= 0) {
- dprintf(fd, "\n%s:\n", namedReader.name());
- } else {
- ALOGI("%s:", namedReader.name());
- }
- namedReader.reader()->dump(fd, 0 /*indent*/);
- }
+#endif
+ // FIXME request merge to make sure log is up to date
+ mMergeReader.dump(fd);
return NO_ERROR;
}
@@ -117,4 +139,8 @@
return BnMediaLogService::onTransact(code, data, reply, flags);
}
+void MediaLogService::requestMergeWakeup() {
+ mMergeThread->wakeup();
+}
+
} // namespace android
diff --git a/services/medialog/MediaLogService.h b/services/medialog/MediaLogService.h
index c9bf2eb..c6b99f1 100644
--- a/services/medialog/MediaLogService.h
+++ b/services/medialog/MediaLogService.h
@@ -27,8 +27,8 @@
{
friend class BinderService<MediaLogService>; // for MediaLogService()
public:
- MediaLogService() : BnMediaLogService() { }
- virtual ~MediaLogService() { }
+ MediaLogService();
+ virtual ~MediaLogService() override;
virtual void onFirstRef() { }
static const char* getServiceName() { return "media.log"; }
@@ -42,28 +42,23 @@
virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply,
uint32_t flags);
+ virtual void requestMergeWakeup() override;
+
private:
// Internal dump
static const int kDumpLockRetries = 50;
static const int kDumpLockSleepUs = 20000;
+ static const size_t kMergeBufferSize = 16 * 1024; // TODO determine good value for this
static bool dumpTryLock(Mutex& mutex);
Mutex mLock;
- class NamedReader {
- public:
- NamedReader() : mReader(0) { mName[0] = '\0'; } // for Vector
- NamedReader(const sp<NBLog::Reader>& reader, const char *name) : mReader(reader)
- { strlcpy(mName, name, sizeof(mName)); }
- ~NamedReader() { }
- const sp<NBLog::Reader>& reader() const { return mReader; }
- const char* name() const { return mName; }
- private:
- sp<NBLog::Reader> mReader;
- static const size_t kMaxName = 32;
- char mName[kMaxName];
- };
- Vector<NamedReader> mNamedReaders;
+
+ Vector<NBLog::NamedReader> mNamedReaders;
+ NBLog::Shared *mMergerShared;
+ NBLog::Merger mMerger;
+ NBLog::MergeReader mMergeReader;
+ const sp<NBLog::MergeThread> mMergeThread;
};
} // namespace android
diff --git a/services/mediaresourcemanager/ResourceManagerService.cpp b/services/mediaresourcemanager/ResourceManagerService.cpp
index 7346f51..78bb587 100644
--- a/services/mediaresourcemanager/ResourceManagerService.cpp
+++ b/services/mediaresourcemanager/ResourceManagerService.cpp
@@ -34,6 +34,31 @@
namespace android {
+namespace {
+
+class DeathNotifier : public IBinder::DeathRecipient {
+public:
+ DeathNotifier(const wp<ResourceManagerService> &service, int pid, int64_t clientId)
+ : mService(service), mPid(pid), mClientId(clientId) {}
+
+ virtual void binderDied(const wp<IBinder> & /* who */) override {
+ // Don't check for pid validity since we know it's already dead.
+ sp<ResourceManagerService> service = mService.promote();
+ if (service == nullptr) {
+ ALOGW("ResourceManagerService is dead as well.");
+ return;
+ }
+ service->removeResource(mPid, mClientId, false);
+ }
+
+private:
+ wp<ResourceManagerService> mService;
+ int mPid;
+ int64_t mClientId;
+};
+
+} // namespace
+
template <typename T>
static String8 getString(const Vector<T> &items) {
String8 itemsStr;
@@ -214,17 +239,25 @@
ResourceInfo& info = getResourceInfoForEdit(clientId, client, infos);
// TODO: do the merge instead of append.
info.resources.appendVector(resources);
+ if (info.deathNotifier == nullptr) {
+ info.deathNotifier = new DeathNotifier(this, pid, clientId);
+ IInterface::asBinder(client)->linkToDeath(info.deathNotifier);
+ }
notifyResourceGranted(pid, resources);
}
void ResourceManagerService::removeResource(int pid, int64_t clientId) {
+ removeResource(pid, clientId, true);
+}
+
+void ResourceManagerService::removeResource(int pid, int64_t clientId, bool checkValid) {
String8 log = String8::format(
"removeResource(pid %d, clientId %lld)",
pid, (long long) clientId);
mServiceLog->add(log);
Mutex::Autolock lock(mLock);
- if (!mProcessInfo->isValidPid(pid)) {
+ if (checkValid && !mProcessInfo->isValidPid(pid)) {
ALOGE("Rejected removeResource call with invalid pid.");
return;
}
@@ -237,6 +270,7 @@
ResourceInfos &infos = mMap.editValueAt(index);
for (size_t j = 0; j < infos.size(); ++j) {
if (infos[j].clientId == clientId) {
+ IInterface::asBinder(infos[j].client)->unlinkToDeath(infos[j].deathNotifier);
j = infos.removeAt(j);
found = true;
break;
diff --git a/services/mediaresourcemanager/ResourceManagerService.h b/services/mediaresourcemanager/ResourceManagerService.h
index 2a4a6b2..9e97ac0 100644
--- a/services/mediaresourcemanager/ResourceManagerService.h
+++ b/services/mediaresourcemanager/ResourceManagerService.h
@@ -36,6 +36,7 @@
struct ResourceInfo {
int64_t clientId;
sp<IResourceManagerClient> client;
+ sp<IBinder::DeathRecipient> deathNotifier;
Vector<MediaResource> resources;
};
@@ -70,6 +71,8 @@
// Returns true if any resource has been reclaimed, otherwise returns false.
virtual bool reclaimResource(int callingPid, const Vector<MediaResource> &resources);
+ void removeResource(int pid, int64_t clientId, bool checkValid);
+
protected:
virtual ~ResourceManagerService();
diff --git a/services/minijail/Android.mk b/services/minijail/Android.mk
new file mode 100644
index 0000000..3e63f97
--- /dev/null
+++ b/services/minijail/Android.mk
@@ -0,0 +1,17 @@
+LOCAL_PATH := $(call my-dir)
+
+# Small library for media.extractor and media.codec sandboxing.
+include $(CLEAR_VARS)
+LOCAL_MODULE := libavservices_minijail
+LOCAL_SRC_FILES := minijail.cpp
+LOCAL_SHARED_LIBRARIES := libbase libminijail
+LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)
+include $(BUILD_SHARED_LIBRARY)
+
+
+# Unit tests.
+include $(CLEAR_VARS)
+LOCAL_MODULE := libavservices_minijail_unittest
+LOCAL_SRC_FILES := minijail.cpp av_services_minijail_unittest.cpp
+LOCAL_SHARED_LIBRARIES := libbase libminijail
+include $(BUILD_NATIVE_TEST)
diff --git a/services/minijail/av_services_minijail_unittest.cpp b/services/minijail/av_services_minijail_unittest.cpp
new file mode 100644
index 0000000..31313f8
--- /dev/null
+++ b/services/minijail/av_services_minijail_unittest.cpp
@@ -0,0 +1,58 @@
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <string>
+
+#include <android-base/file.h>
+#include <android-base/unique_fd.h>
+
+#include <gtest/gtest.h>
+
+#include "minijail.h"
+
+class WritePolicyTest : public ::testing::Test
+{
+ protected:
+ const std::string base_policy_ =
+ "read: 1\n"
+ "write: 1\n"
+ "rt_sigreturn: 1\n"
+ "exit: 1\n";
+
+ const std::string additional_policy_ =
+ "mmap: 1\n"
+ "munmap: 1\n";
+
+ const std::string full_policy_ = base_policy_ + std::string("\n") + additional_policy_;
+};
+
+TEST_F(WritePolicyTest, OneFile)
+{
+ std::string final_string;
+ android::base::unique_fd fd(android::WritePolicyToPipe(base_policy_, std::string()));
+ EXPECT_LE(0, fd.get());
+ bool success = android::base::ReadFdToString(fd.get(), &final_string);
+ EXPECT_TRUE(success);
+ EXPECT_EQ(final_string, base_policy_);
+}
+
+TEST_F(WritePolicyTest, TwoFiles)
+{
+ std::string final_string;
+ android::base::unique_fd fd(android::WritePolicyToPipe(base_policy_, additional_policy_));
+ EXPECT_LE(0, fd.get());
+ bool success = android::base::ReadFdToString(fd.get(), &final_string);
+ EXPECT_TRUE(success);
+ EXPECT_EQ(final_string, full_policy_);
+}
diff --git a/services/minijail/minijail.cpp b/services/minijail/minijail.cpp
new file mode 100644
index 0000000..f213287
--- /dev/null
+++ b/services/minijail/minijail.cpp
@@ -0,0 +1,95 @@
+// Copyright 2015, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <android-base/file.h>
+#include <android-base/logging.h>
+#include <android-base/unique_fd.h>
+
+#include <libminijail.h>
+#include <scoped_minijail.h>
+
+#include "minijail.h"
+
+namespace android {
+
+int WritePolicyToPipe(const std::string& base_policy_content,
+ const std::string& additional_policy_content)
+{
+ int pipefd[2];
+ if (pipe(pipefd) == -1) {
+ PLOG(ERROR) << "pipe() failed";
+ return -1;
+ }
+
+ base::unique_fd write_end(pipefd[1]);
+ std::string content = base_policy_content;
+
+ if (additional_policy_content.length() > 0) {
+ content += "\n";
+ content += additional_policy_content;
+ }
+
+ if (!base::WriteStringToFd(content, write_end.get())) {
+ LOG(ERROR) << "Could not write policy to fd";
+ return -1;
+ }
+
+ return pipefd[0];
+}
+
+void SetUpMinijail(const std::string& base_policy_path, const std::string& additional_policy_path)
+{
+ // No seccomp policy defined for this architecture.
+ if (access(base_policy_path.c_str(), R_OK) == -1) {
+ LOG(WARNING) << "No seccomp policy defined for this architecture.";
+ return;
+ }
+
+ std::string base_policy_content;
+ std::string additional_policy_content;
+ if (!base::ReadFileToString(base_policy_path, &base_policy_content,
+ false /* follow_symlinks */)) {
+ LOG(FATAL) << "Could not read base policy file '" << base_policy_path << "'";
+ }
+
+ if (additional_policy_path.length() > 0 &&
+ !base::ReadFileToString(additional_policy_path, &additional_policy_content,
+ false /* follow_symlinks */)) {
+ LOG(WARNING) << "Could not read additional policy file '" << additional_policy_path << "'";
+ additional_policy_content = std::string();
+ }
+
+ base::unique_fd policy_fd(WritePolicyToPipe(base_policy_content, additional_policy_content));
+ if (policy_fd.get() == -1) {
+ LOG(FATAL) << "Could not write seccomp policy to fd";
+ }
+
+ ScopedMinijail jail{minijail_new()};
+ if (!jail) {
+ LOG(FATAL) << "Failed to create minijail.";
+ }
+
+ minijail_no_new_privs(jail.get());
+ minijail_log_seccomp_filter_failures(jail.get());
+ minijail_use_seccomp_filter(jail.get());
+ // Transfer ownership of |policy_fd|.
+ minijail_parse_seccomp_filters_from_fd(jail.get(), policy_fd.release());
+ minijail_enter(jail.get());
+}
+}
diff --git a/services/minijail/minijail.h b/services/minijail/minijail.h
new file mode 100644
index 0000000..c8a2149
--- /dev/null
+++ b/services/minijail/minijail.h
@@ -0,0 +1,26 @@
+// Copyright 2015, The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef AV_SERVICES_MINIJAIL_MINIJAIL
+#define AV_SERVICES_MINIJAIL_MINIJAIL
+
+#include <string>
+
+namespace android {
+int WritePolicyToPipe(const std::string& base_policy_content,
+ const std::string& additional_policy_content);
+void SetUpMinijail(const std::string& base_policy_path, const std::string& additional_policy_path);
+}
+
+#endif // AV_SERVICES_MINIJAIL_MINIJAIL
diff --git a/services/oboeservice/AAudioEndpointManager.cpp b/services/oboeservice/AAudioEndpointManager.cpp
new file mode 100644
index 0000000..3dc1feb
--- /dev/null
+++ b/services/oboeservice/AAudioEndpointManager.cpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <assert.h>
+#include <map>
+#include <mutex>
+
+#include "AAudioEndpointManager.h"
+
+using namespace android;
+using namespace aaudio;
+
+ANDROID_SINGLETON_STATIC_INSTANCE(AAudioEndpointManager);
+
+AAudioEndpointManager::AAudioEndpointManager()
+ : Singleton<AAudioEndpointManager>()
+ , mInputs()
+ , mOutputs() {
+}
+
+AAudioServiceEndpoint *AAudioEndpointManager::openEndpoint(AAudioService &audioService, int32_t deviceId,
+ aaudio_direction_t direction) {
+ AAudioServiceEndpoint *endpoint = nullptr;
+ std::lock_guard<std::mutex> lock(mLock);
+
+ // Try to find an existing endpoint.
+ switch (direction) {
+ case AAUDIO_DIRECTION_INPUT:
+ endpoint = mInputs[deviceId];
+ break;
+ case AAUDIO_DIRECTION_OUTPUT:
+ endpoint = mOutputs[deviceId];
+ break;
+ default:
+ assert(false); // There are only two possible directions.
+ break;
+ }
+ ALOGD("AAudioEndpointManager::openEndpoint(), found %p for device = %d, dir = %d",
+ endpoint, deviceId, (int)direction);
+
+ // If we can't find an existing one then open a new one.
+ if (endpoint == nullptr) {
+ if (direction == AAUDIO_DIRECTION_INPUT) {
+ AAudioServiceEndpointCapture *capture = new AAudioServiceEndpointCapture(audioService);
+ if (capture->open(deviceId) != AAUDIO_OK) {
+ ALOGE("AAudioEndpointManager::openEndpoint(), open failed");
+ delete capture;
+ } else {
+ mInputs[deviceId] = capture;
+ endpoint = capture;
+ }
+ } else if (direction == AAUDIO_DIRECTION_OUTPUT) {
+ AAudioServiceEndpointPlay *player = new AAudioServiceEndpointPlay(audioService);
+ if (player->open(deviceId) != AAUDIO_OK) {
+ ALOGE("AAudioEndpointManager::openEndpoint(), open failed");
+ delete player;
+ } else {
+ mOutputs[deviceId] = player;
+ endpoint = player;
+ }
+ }
+
+ }
+
+ if (endpoint != nullptr) {
+ // Increment the reference count under this lock.
+ endpoint->setReferenceCount(endpoint->getReferenceCount() + 1);
+ }
+ return endpoint;
+}
+
+void AAudioEndpointManager::closeEndpoint(AAudioServiceEndpoint *serviceEndpoint) {
+ std::lock_guard<std::mutex> lock(mLock);
+ if (serviceEndpoint == nullptr) {
+ return;
+ }
+
+ // Decrement the reference count under this lock.
+ int32_t newRefCount = serviceEndpoint->getReferenceCount() - 1;
+ serviceEndpoint->setReferenceCount(newRefCount);
+ if (newRefCount <= 0) {
+ aaudio_direction_t direction = serviceEndpoint->getDirection();
+ int32_t deviceId = serviceEndpoint->getDeviceId();
+
+ switch (direction) {
+ case AAUDIO_DIRECTION_INPUT:
+ mInputs.erase(deviceId);
+ break;
+ case AAUDIO_DIRECTION_OUTPUT:
+ mOutputs.erase(deviceId);
+ break;
+ }
+
+ serviceEndpoint->close();
+ delete serviceEndpoint;
+ }
+}
diff --git a/services/oboeservice/AAudioEndpointManager.h b/services/oboeservice/AAudioEndpointManager.h
new file mode 100644
index 0000000..db1103d
--- /dev/null
+++ b/services/oboeservice/AAudioEndpointManager.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_ENDPOINT_MANAGER_H
+#define AAUDIO_AAUDIO_ENDPOINT_MANAGER_H
+
+#include <map>
+#include <mutex>
+#include <utils/Singleton.h>
+
+#include "binding/AAudioServiceMessage.h"
+#include "AAudioServiceEndpoint.h"
+#include "AAudioServiceEndpointCapture.h"
+#include "AAudioServiceEndpointPlay.h"
+
+namespace aaudio {
+
+class AAudioEndpointManager : public android::Singleton<AAudioEndpointManager>{
+public:
+ AAudioEndpointManager();
+ ~AAudioEndpointManager() = default;
+
+ /**
+ * Find a service endpoint for the given deviceId and direction.
+ * If an endpoint does not already exist then it will try to create one.
+ *
+ * @param deviceId
+ * @param direction
+ * @return endpoint or nullptr
+ */
+ AAudioServiceEndpoint *openEndpoint(android::AAudioService &audioService,
+ int32_t deviceId,
+ aaudio_direction_t direction);
+
+ void closeEndpoint(AAudioServiceEndpoint *serviceEndpoint);
+
+private:
+
+ std::mutex mLock;
+
+ std::map<int32_t, AAudioServiceEndpointCapture *> mInputs;
+ std::map<int32_t, AAudioServiceEndpointPlay *> mOutputs;
+
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_AAUDIO_ENDPOINT_MANAGER_H
diff --git a/services/oboeservice/AAudioMixer.cpp b/services/oboeservice/AAudioMixer.cpp
new file mode 100644
index 0000000..43203d4
--- /dev/null
+++ b/services/oboeservice/AAudioMixer.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <cstring>
+#include "AAudioMixer.h"
+
+using android::WrappingBuffer;
+using android::FifoBuffer;
+using android::fifo_frames_t;
+
+AAudioMixer::~AAudioMixer() {
+ delete[] mOutputBuffer;
+}
+
+void AAudioMixer::allocate(int32_t samplesPerFrame, int32_t framesPerBurst) {
+ mSamplesPerFrame = samplesPerFrame;
+ mFramesPerBurst = framesPerBurst;
+ int32_t samplesPerBuffer = samplesPerFrame * framesPerBurst;
+ mOutputBuffer = new float[samplesPerBuffer];
+ mBufferSizeInBytes = samplesPerBuffer * sizeof(float);
+}
+
+void AAudioMixer::clear() {
+ memset(mOutputBuffer, 0, mBufferSizeInBytes);
+}
+
+bool AAudioMixer::mix(FifoBuffer *fifo, float volume) {
+ WrappingBuffer wrappingBuffer;
+ float *destination = mOutputBuffer;
+ fifo_frames_t framesLeft = mFramesPerBurst;
+
+ // Gather the data from the client. May be in two parts.
+ fifo->getFullDataAvailable(&wrappingBuffer);
+
+ // Mix data in one or two parts.
+ int partIndex = 0;
+ while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
+ fifo_frames_t framesToMix = framesLeft;
+ fifo_frames_t framesAvailable = wrappingBuffer.numFrames[partIndex];
+ if (framesAvailable > 0) {
+ if (framesToMix > framesAvailable) {
+ framesToMix = framesAvailable;
+ }
+ mixPart(destination, (float *)wrappingBuffer.data[partIndex], framesToMix, volume);
+
+ destination += framesToMix * mSamplesPerFrame;
+ framesLeft -= framesToMix;
+ }
+ partIndex++;
+ }
+ fifo->getFifoControllerBase()->advanceReadIndex(mFramesPerBurst - framesLeft);
+ if (framesLeft > 0) {
+ //ALOGW("AAudioMixer::mix() UNDERFLOW by %d / %d frames ----- UNDERFLOW !!!!!!!!!!",
+ // framesLeft, mFramesPerBurst);
+ }
+ return (framesLeft > 0); // did not get all the frames we needed, ie. "underflow"
+}
+
+void AAudioMixer::mixPart(float *destination, float *source, int32_t numFrames, float volume) {
+ int32_t numSamples = numFrames * mSamplesPerFrame;
+ // TODO maybe optimize using SIMD
+ for (int sampleIndex = 0; sampleIndex < numSamples; sampleIndex++) {
+ *destination++ += *source++ * volume;
+ }
+}
+
+float *AAudioMixer::getOutputBuffer() {
+ return mOutputBuffer;
+}
diff --git a/services/oboeservice/AAudioMixer.h b/services/oboeservice/AAudioMixer.h
new file mode 100644
index 0000000..9155fec
--- /dev/null
+++ b/services/oboeservice/AAudioMixer.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_MIXER_H
+#define AAUDIO_AAUDIO_MIXER_H
+
+#include <stdint.h>
+
+#include <aaudio/AAudio.h>
+#include <fifo/FifoBuffer.h>
+
+class AAudioMixer {
+public:
+ AAudioMixer() {}
+ ~AAudioMixer();
+
+ void allocate(int32_t samplesPerFrame, int32_t framesPerBurst);
+
+ void clear();
+
+ /**
+ * Mix from this FIFO
+ * @param fifo
+ * @param volume
+ * @return true if underflowed
+ */
+ bool mix(android::FifoBuffer *fifo, float volume);
+
+ void mixPart(float *destination, float *source, int32_t numFrames, float volume);
+
+ float *getOutputBuffer();
+
+private:
+ float *mOutputBuffer = nullptr;
+ int32_t mSamplesPerFrame = 0;
+ int32_t mFramesPerBurst = 0;
+ int32_t mBufferSizeInBytes = 0;
+};
+
+
+#endif //AAUDIO_AAUDIO_MIXER_H
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
new file mode 100644
index 0000000..c9b9065
--- /dev/null
+++ b/services/oboeservice/AAudioService.cpp
@@ -0,0 +1,212 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+//#include <time.h>
+//#include <pthread.h>
+
+#include <aaudio/AAudio.h>
+#include <mediautils/SchedulingPolicyService.h>
+#include <utils/String16.h>
+
+#include "binding/AAudioServiceMessage.h"
+#include "AAudioService.h"
+#include "AAudioServiceStreamMMAP.h"
+#include "AAudioServiceStreamShared.h"
+#include "AAudioServiceStreamMMAP.h"
+#include "binding/IAAudioService.h"
+#include "utility/HandleTracker.h"
+
+using namespace android;
+using namespace aaudio;
+
+typedef enum
+{
+ AAUDIO_HANDLE_TYPE_STREAM
+} aaudio_service_handle_type_t;
+static_assert(AAUDIO_HANDLE_TYPE_STREAM < HANDLE_TRACKER_MAX_TYPES, "Too many handle types.");
+
+android::AAudioService::AAudioService()
+ : BnAAudioService() {
+}
+
+AAudioService::~AAudioService() {
+}
+
+aaudio_handle_t AAudioService::openStream(const aaudio::AAudioStreamRequest &request,
+ aaudio::AAudioStreamConfiguration &configurationOutput) {
+ aaudio_result_t result = AAUDIO_OK;
+ AAudioServiceStreamBase *serviceStream = nullptr;
+ const AAudioStreamConfiguration &configurationInput = request.getConstantConfiguration();
+ bool sharingModeMatchRequired = request.isSharingModeMatchRequired();
+ aaudio_sharing_mode_t sharingMode = configurationInput.getSharingMode();
+
+ if (sharingMode != AAUDIO_SHARING_MODE_EXCLUSIVE && sharingMode != AAUDIO_SHARING_MODE_SHARED) {
+ ALOGE("AAudioService::openStream(): unrecognized sharing mode = %d", sharingMode);
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ }
+
+ if (sharingMode == AAUDIO_SHARING_MODE_EXCLUSIVE) {
+ serviceStream = new AAudioServiceStreamMMAP();
+ result = serviceStream->open(request, configurationOutput);
+ if (result != AAUDIO_OK) {
+ // fall back to using a shared stream
+ ALOGD("AAudioService::openStream(), EXCLUSIVE mode failed");
+ delete serviceStream;
+ serviceStream = nullptr;
+ } else {
+ configurationOutput.setSharingMode(AAUDIO_SHARING_MODE_EXCLUSIVE);
+ }
+ }
+
+ // if SHARED requested or if EXCLUSIVE failed
+ if (sharingMode == AAUDIO_SHARING_MODE_SHARED
+ || (serviceStream == nullptr && !sharingModeMatchRequired)) {
+ serviceStream = new AAudioServiceStreamShared(*this);
+ result = serviceStream->open(request, configurationOutput);
+ configurationOutput.setSharingMode(AAUDIO_SHARING_MODE_SHARED);
+ }
+
+ if (result != AAUDIO_OK) {
+ delete serviceStream;
+ ALOGE("AAudioService::openStream(): failed, return %d", result);
+ return result;
+ } else {
+ aaudio_handle_t handle = mHandleTracker.put(AAUDIO_HANDLE_TYPE_STREAM, serviceStream);
+ ALOGV("AAudioService::openStream(): handle = 0x%08X", handle);
+ if (handle < 0) {
+ ALOGE("AAudioService::openStream(): handle table full");
+ delete serviceStream;
+ }
+ return handle;
+ }
+}
+
+aaudio_result_t AAudioService::closeStream(aaudio_handle_t streamHandle) {
+ AAudioServiceStreamBase *serviceStream = (AAudioServiceStreamBase *)
+ mHandleTracker.remove(AAUDIO_HANDLE_TYPE_STREAM,
+ streamHandle);
+ ALOGV("AAudioService.closeStream(0x%08X)", streamHandle);
+ if (serviceStream != nullptr) {
+ serviceStream->close();
+ delete serviceStream;
+ return AAUDIO_OK;
+ }
+ return AAUDIO_ERROR_INVALID_HANDLE;
+}
+
+AAudioServiceStreamBase *AAudioService::convertHandleToServiceStream(
+ aaudio_handle_t streamHandle) const {
+ return (AAudioServiceStreamBase *) mHandleTracker.get(AAUDIO_HANDLE_TYPE_STREAM,
+ (aaudio_handle_t)streamHandle);
+}
+
+aaudio_result_t AAudioService::getStreamDescription(
+ aaudio_handle_t streamHandle,
+ aaudio::AudioEndpointParcelable &parcelable) {
+ AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
+ if (serviceStream == nullptr) {
+ ALOGE("AAudioService::getStreamDescription(), illegal stream handle = 0x%0x", streamHandle);
+ return AAUDIO_ERROR_INVALID_HANDLE;
+ }
+ aaudio_result_t result = serviceStream->getDescription(parcelable);
+ // parcelable.dump();
+ return result;
+}
+
+aaudio_result_t AAudioService::startStream(aaudio_handle_t streamHandle) {
+ AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
+ if (serviceStream == nullptr) {
+ ALOGE("AAudioService::startStream(), illegal stream handle = 0x%0x", streamHandle);
+ return AAUDIO_ERROR_INVALID_HANDLE;
+ }
+ aaudio_result_t result = serviceStream->start();
+ return result;
+}
+
+aaudio_result_t AAudioService::pauseStream(aaudio_handle_t streamHandle) {
+ AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
+ if (serviceStream == nullptr) {
+ ALOGE("AAudioService::pauseStream(), illegal stream handle = 0x%0x", streamHandle);
+ return AAUDIO_ERROR_INVALID_HANDLE;
+ }
+ aaudio_result_t result = serviceStream->pause();
+ return result;
+}
+
+aaudio_result_t AAudioService::stopStream(aaudio_handle_t streamHandle) {
+ AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
+ if (serviceStream == nullptr) {
+ ALOGE("AAudioService::pauseStream(), illegal stream handle = 0x%0x", streamHandle);
+ return AAUDIO_ERROR_INVALID_HANDLE;
+ }
+ aaudio_result_t result = serviceStream->stop();
+ return result;
+}
+
+aaudio_result_t AAudioService::flushStream(aaudio_handle_t streamHandle) {
+ AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
+ if (serviceStream == nullptr) {
+ ALOGE("AAudioService::flushStream(), illegal stream handle = 0x%0x", streamHandle);
+ return AAUDIO_ERROR_INVALID_HANDLE;
+ }
+ return serviceStream->flush();
+}
+
+aaudio_result_t AAudioService::registerAudioThread(aaudio_handle_t streamHandle,
+ pid_t clientProcessId,
+ pid_t clientThreadId,
+ int64_t periodNanoseconds) {
+ AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
+ if (serviceStream == nullptr) {
+ ALOGE("AAudioService::registerAudioThread(), illegal stream handle = 0x%0x", streamHandle);
+ return AAUDIO_ERROR_INVALID_HANDLE;
+ }
+ if (serviceStream->getRegisteredThread() != AAudioServiceStreamBase::ILLEGAL_THREAD_ID) {
+ ALOGE("AAudioService::registerAudioThread(), thread already registered");
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ serviceStream->setRegisteredThread(clientThreadId);
+ int err = android::requestPriority(clientProcessId, clientThreadId,
+ DEFAULT_AUDIO_PRIORITY, true /* isForApp */);
+ if (err != 0){
+ ALOGE("AAudioService::registerAudioThread() failed, errno = %d, priority = %d",
+ errno, DEFAULT_AUDIO_PRIORITY);
+ return AAUDIO_ERROR_INTERNAL;
+ } else {
+ return AAUDIO_OK;
+ }
+}
+
+aaudio_result_t AAudioService::unregisterAudioThread(aaudio_handle_t streamHandle,
+ pid_t clientProcessId,
+ pid_t clientThreadId) {
+ AAudioServiceStreamBase *serviceStream = convertHandleToServiceStream(streamHandle);
+ if (serviceStream == nullptr) {
+ ALOGE("AAudioService::unregisterAudioThread(), illegal stream handle = 0x%0x",
+ streamHandle);
+ return AAUDIO_ERROR_INVALID_HANDLE;
+ }
+ if (serviceStream->getRegisteredThread() != clientThreadId) {
+ ALOGE("AAudioService::unregisterAudioThread(), wrong thread");
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ }
+ serviceStream->setRegisteredThread(0);
+ return AAUDIO_OK;
+}
diff --git a/services/oboeservice/AAudioService.h b/services/oboeservice/AAudioService.h
new file mode 100644
index 0000000..f5a7d2f
--- /dev/null
+++ b/services/oboeservice/AAudioService.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_SERVICE_H
+#define AAUDIO_AAUDIO_SERVICE_H
+
+#include <time.h>
+#include <pthread.h>
+
+#include <binder/BinderService.h>
+
+#include <aaudio/AAudio.h>
+#include "utility/HandleTracker.h"
+#include "binding/IAAudioService.h"
+#include "binding/AAudioServiceInterface.h"
+
+#include "AAudioServiceStreamBase.h"
+
+namespace android {
+
+class AAudioService :
+ public BinderService<AAudioService>,
+ public BnAAudioService,
+ public aaudio::AAudioServiceInterface
+{
+ friend class BinderService<AAudioService>;
+
+public:
+ AAudioService();
+ virtual ~AAudioService();
+
+ static const char* getServiceName() { return AAUDIO_SERVICE_NAME; }
+
+ virtual aaudio_handle_t openStream(const aaudio::AAudioStreamRequest &request,
+ aaudio::AAudioStreamConfiguration &configuration);
+
+ virtual aaudio_result_t closeStream(aaudio_handle_t streamHandle);
+
+ virtual aaudio_result_t getStreamDescription(
+ aaudio_handle_t streamHandle,
+ aaudio::AudioEndpointParcelable &parcelable);
+
+ virtual aaudio_result_t startStream(aaudio_handle_t streamHandle);
+
+ virtual aaudio_result_t pauseStream(aaudio_handle_t streamHandle);
+
+ virtual aaudio_result_t stopStream(aaudio_handle_t streamHandle);
+
+ virtual aaudio_result_t flushStream(aaudio_handle_t streamHandle);
+
+ virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle,
+ pid_t pid, pid_t tid,
+ int64_t periodNanoseconds) ;
+
+ virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle,
+ pid_t pid, pid_t tid);
+
+private:
+
+ aaudio::AAudioServiceStreamBase *convertHandleToServiceStream(aaudio_handle_t streamHandle) const;
+
+ HandleTracker mHandleTracker;
+
+ enum constants {
+ DEFAULT_AUDIO_PRIORITY = 2
+ };
+};
+
+} /* namespace android */
+
+#endif //AAUDIO_AAUDIO_SERVICE_H
diff --git a/services/oboeservice/AAudioServiceEndpoint.cpp b/services/oboeservice/AAudioServiceEndpoint.cpp
new file mode 100644
index 0000000..d8ae284
--- /dev/null
+++ b/services/oboeservice/AAudioServiceEndpoint.cpp
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <assert.h>
+#include <map>
+#include <mutex>
+#include <utils/Singleton.h>
+
+#include "AAudioEndpointManager.h"
+#include "AAudioServiceEndpoint.h"
+#include <algorithm>
+#include <mutex>
+#include <vector>
+
+#include "core/AudioStreamBuilder.h"
+#include "AAudioServiceEndpoint.h"
+#include "AAudioServiceStreamShared.h"
+
+using namespace android; // TODO just import names needed
+using namespace aaudio; // TODO just import names needed
+
+#define MIN_TIMEOUT_NANOS (1000 * AAUDIO_NANOS_PER_MILLISECOND)
+
+// Wait at least this many times longer than the operation should take.
+#define MIN_TIMEOUT_OPERATIONS 4
+
+// This is the maximum size in frames. The effective size can be tuned smaller at runtime.
+#define DEFAULT_BUFFER_CAPACITY (48 * 8)
+
+// Set up an EXCLUSIVE MMAP stream that will be shared.
+aaudio_result_t AAudioServiceEndpoint::open(int32_t deviceId) {
+ mStreamInternal = getStreamInternal();
+
+ AudioStreamBuilder builder;
+ builder.setSharingMode(AAUDIO_SHARING_MODE_EXCLUSIVE);
+ // Don't fall back to SHARED because that would cause recursion.
+ builder.setSharingModeMatchRequired(true);
+ builder.setDeviceId(deviceId);
+ builder.setDirection(getDirection());
+ builder.setBufferCapacity(DEFAULT_BUFFER_CAPACITY);
+
+ return getStreamInternal()->open(builder);
+}
+
+aaudio_result_t AAudioServiceEndpoint::close() {
+ return getStreamInternal()->close();
+}
+
+// TODO, maybe use an interface to reduce exposure
+aaudio_result_t AAudioServiceEndpoint::registerStream(AAudioServiceStreamShared *sharedStream) {
+ std::lock_guard<std::mutex> lock(mLockStreams);
+ mRegisteredStreams.push_back(sharedStream);
+ return AAUDIO_OK;
+}
+
+aaudio_result_t AAudioServiceEndpoint::unregisterStream(AAudioServiceStreamShared *sharedStream) {
+ std::lock_guard<std::mutex> lock(mLockStreams);
+ mRegisteredStreams.erase(std::remove(mRegisteredStreams.begin(), mRegisteredStreams.end(), sharedStream),
+ mRegisteredStreams.end());
+ return AAUDIO_OK;
+}
+
+aaudio_result_t AAudioServiceEndpoint::startStream(AAudioServiceStreamShared *sharedStream) {
+ // TODO use real-time technique to avoid mutex, eg. atomic command FIFO
+ std::lock_guard<std::mutex> lock(mLockStreams);
+ mRunningStreams.push_back(sharedStream);
+ if (mRunningStreams.size() == 1) {
+ startSharingThread_l();
+ }
+ return AAUDIO_OK;
+}
+
+aaudio_result_t AAudioServiceEndpoint::stopStream(AAudioServiceStreamShared *sharedStream) {
+ int numRunningStreams = 0;
+ {
+ std::lock_guard<std::mutex> lock(mLockStreams);
+ mRunningStreams.erase(
+ std::remove(mRunningStreams.begin(), mRunningStreams.end(), sharedStream),
+ mRunningStreams.end());
+ numRunningStreams = mRunningStreams.size();
+ }
+ if (numRunningStreams == 0) {
+ // Don't call this under a lock because the callbackLoop also uses the lock.
+ stopSharingThread();
+ }
+ return AAUDIO_OK;
+}
+
+static void *aaudio_endpoint_thread_proc(void *context) {
+ AAudioServiceEndpoint *endpoint = (AAudioServiceEndpoint *) context;
+ if (endpoint != NULL) {
+ return endpoint->callbackLoop();
+ } else {
+ return NULL;
+ }
+}
+
+aaudio_result_t AAudioServiceEndpoint::startSharingThread_l() {
+ // Launch the callback loop thread.
+ int64_t periodNanos = getStreamInternal()->getFramesPerBurst()
+ * AAUDIO_NANOS_PER_SECOND
+ / getSampleRate();
+ mCallbackEnabled.store(true);
+ return getStreamInternal()->createThread(periodNanos, aaudio_endpoint_thread_proc, this);
+}
+
+aaudio_result_t AAudioServiceEndpoint::stopSharingThread() {
+ mCallbackEnabled.store(false);
+ aaudio_result_t result = getStreamInternal()->joinThread(NULL);
+ return result;
+}
+
+void AAudioServiceEndpoint::disconnectRegisteredStreams() {
+ std::lock_guard<std::mutex> lock(mLockStreams);
+ for(AAudioServiceStreamShared *sharedStream : mRunningStreams) {
+ sharedStream->onStop();
+ }
+ mRunningStreams.clear();
+ for(AAudioServiceStreamShared *sharedStream : mRegisteredStreams) {
+ sharedStream->onDisconnect();
+ }
+ mRegisteredStreams.clear();
+}
diff --git a/services/oboeservice/AAudioServiceEndpoint.h b/services/oboeservice/AAudioServiceEndpoint.h
new file mode 100644
index 0000000..50bf049
--- /dev/null
+++ b/services/oboeservice/AAudioServiceEndpoint.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_SERVICE_ENDPOINT_H
+#define AAUDIO_SERVICE_ENDPOINT_H
+
+#include <atomic>
+#include <functional>
+#include <mutex>
+#include <vector>
+
+#include "client/AudioStreamInternal.h"
+#include "client/AudioStreamInternalPlay.h"
+#include "binding/AAudioServiceMessage.h"
+#include "AAudioServiceStreamShared.h"
+#include "AAudioServiceStreamMMAP.h"
+#include "AAudioMixer.h"
+#include "AAudioService.h"
+
+namespace aaudio {
+
+class AAudioServiceEndpoint {
+public:
+ virtual ~AAudioServiceEndpoint() = default;
+
+ virtual aaudio_result_t open(int32_t deviceId);
+
+ int32_t getSampleRate() const { return mStreamInternal->getSampleRate(); }
+ int32_t getSamplesPerFrame() const { return mStreamInternal->getSamplesPerFrame(); }
+ int32_t getFramesPerBurst() const { return mStreamInternal->getFramesPerBurst(); }
+
+ aaudio_result_t registerStream(AAudioServiceStreamShared *sharedStream);
+ aaudio_result_t unregisterStream(AAudioServiceStreamShared *sharedStream);
+ aaudio_result_t startStream(AAudioServiceStreamShared *sharedStream);
+ aaudio_result_t stopStream(AAudioServiceStreamShared *sharedStream);
+ aaudio_result_t close();
+
+ int32_t getDeviceId() const { return mStreamInternal->getDeviceId(); }
+
+ aaudio_direction_t getDirection() const { return mStreamInternal->getDirection(); }
+
+ void disconnectRegisteredStreams();
+
+ virtual void *callbackLoop() = 0;
+
+ // This should only be called from the AAudioEndpointManager under a mutex.
+ int32_t getReferenceCount() const {
+ return mReferenceCount;
+ }
+
+ // This should only be called from the AAudioEndpointManager under a mutex.
+ void setReferenceCount(int32_t count) {
+ mReferenceCount = count;
+ }
+
+ virtual AudioStreamInternal *getStreamInternal() = 0;
+
+ std::atomic<bool> mCallbackEnabled;
+
+ std::mutex mLockStreams;
+
+ std::vector<AAudioServiceStreamShared *> mRegisteredStreams;
+ std::vector<AAudioServiceStreamShared *> mRunningStreams;
+
+private:
+ aaudio_result_t startSharingThread_l();
+ aaudio_result_t stopSharingThread();
+
+ AudioStreamInternal *mStreamInternal = nullptr;
+ int32_t mReferenceCount = 0;
+};
+
+} /* namespace aaudio */
+
+
+#endif //AAUDIO_SERVICE_ENDPOINT_H
diff --git a/services/oboeservice/AAudioServiceEndpointCapture.cpp b/services/oboeservice/AAudioServiceEndpointCapture.cpp
new file mode 100644
index 0000000..29d6cb9
--- /dev/null
+++ b/services/oboeservice/AAudioServiceEndpointCapture.cpp
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <assert.h>
+#include <map>
+#include <mutex>
+#include <utils/Singleton.h>
+
+#include "AAudioEndpointManager.h"
+#include "AAudioServiceEndpoint.h"
+
+#include "core/AudioStreamBuilder.h"
+#include "AAudioServiceEndpoint.h"
+#include "AAudioServiceStreamShared.h"
+#include "AAudioServiceEndpointCapture.h"
+
+using namespace android; // TODO just import names needed
+using namespace aaudio; // TODO just import names needed
+
+AAudioServiceEndpointCapture::AAudioServiceEndpointCapture(AAudioService &audioService)
+ : mStreamInternalCapture(audioService, true) {
+}
+
+AAudioServiceEndpointCapture::~AAudioServiceEndpointCapture() {
+    delete[] mDistributionBuffer;
+}
+
+aaudio_result_t AAudioServiceEndpointCapture::open(int32_t deviceId) {
+ aaudio_result_t result = AAudioServiceEndpoint::open(deviceId);
+ if (result == AAUDIO_OK) {
+        delete[] mDistributionBuffer;
+ int distributionBufferSizeBytes = getStreamInternal()->getFramesPerBurst()
+ * getStreamInternal()->getBytesPerFrame();
+ mDistributionBuffer = new uint8_t[distributionBufferSizeBytes];
+ }
+ return result;
+}
+
+// Read data from the shared MMAP stream and then distribute it to the client streams.
+void *AAudioServiceEndpointCapture::callbackLoop() {
+ ALOGD("AAudioServiceEndpointCapture(): callbackLoop() entering");
+ int32_t underflowCount = 0;
+
+ aaudio_result_t result = getStreamInternal()->requestStart();
+
+ int64_t timeoutNanos = getStreamInternal()->calculateReasonableTimeout();
+
+ // result might be a frame count
+ while (mCallbackEnabled.load() && getStreamInternal()->isActive() && (result >= 0)) {
+ // Read audio data from stream using a blocking read.
+ result = getStreamInternal()->read(mDistributionBuffer, getFramesPerBurst(), timeoutNanos);
+ if (result == AAUDIO_ERROR_DISCONNECTED) {
+ disconnectRegisteredStreams();
+ break;
+ } else if (result != getFramesPerBurst()) {
+ ALOGW("AAudioServiceEndpointCapture(): callbackLoop() read %d / %d",
+ result, getFramesPerBurst());
+ break;
+ }
+
+ // Distribute data to each active stream.
+ { // use lock guard
+ std::lock_guard <std::mutex> lock(mLockStreams);
+ for (AAudioServiceStreamShared *sharedStream : mRunningStreams) {
+ FifoBuffer *fifo = sharedStream->getDataFifoBuffer();
+ if (fifo->getFifoControllerBase()->getEmptyFramesAvailable() <
+ getFramesPerBurst()) {
+ underflowCount++;
+ } else {
+ fifo->write(mDistributionBuffer, getFramesPerBurst());
+ }
+ sharedStream->markTransferTime(AudioClock::getNanoseconds());
+ }
+ }
+ }
+
+ result = getStreamInternal()->requestStop();
+
+ ALOGD("AAudioServiceEndpointCapture(): callbackLoop() exiting, %d underflows", underflowCount);
+ return NULL; // TODO review
+}
diff --git a/services/oboeservice/AAudioServiceEndpointCapture.h b/services/oboeservice/AAudioServiceEndpointCapture.h
new file mode 100644
index 0000000..35857d1
--- /dev/null
+++ b/services/oboeservice/AAudioServiceEndpointCapture.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_SERVICE_ENDPOINT_CAPTURE_H
+#define AAUDIO_SERVICE_ENDPOINT_CAPTURE_H
+
+#include "client/AudioStreamInternal.h"
+#include "client/AudioStreamInternalCapture.h"
+
+namespace aaudio {
+
+class AAudioServiceEndpointCapture : public AAudioServiceEndpoint {
+public:
+ explicit AAudioServiceEndpointCapture(android::AAudioService &audioService);
+ virtual ~AAudioServiceEndpointCapture();
+
+ aaudio_result_t open(int32_t deviceId) override;
+
+ AudioStreamInternal *getStreamInternal() override {
+ return &mStreamInternalCapture;
+ }
+
+ void *callbackLoop() override;
+
+private:
+ AudioStreamInternalCapture mStreamInternalCapture;
+ uint8_t *mDistributionBuffer = nullptr;
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_SERVICE_ENDPOINT_CAPTURE_H
diff --git a/services/oboeservice/AAudioServiceEndpointPlay.cpp b/services/oboeservice/AAudioServiceEndpointPlay.cpp
new file mode 100644
index 0000000..cc09cc3
--- /dev/null
+++ b/services/oboeservice/AAudioServiceEndpointPlay.cpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <assert.h>
+#include <map>
+#include <mutex>
+#include <utils/Singleton.h>
+
+#include "AAudioEndpointManager.h"
+#include "AAudioServiceEndpoint.h"
+#include <algorithm>
+#include <mutex>
+#include <vector>
+
+#include "core/AudioStreamBuilder.h"
+#include "AAudioServiceEndpoint.h"
+#include "AAudioServiceStreamShared.h"
+#include "AAudioServiceEndpointPlay.h"
+
+using namespace android; // TODO just import names needed
+using namespace aaudio; // TODO just import names needed
+
+#define BURSTS_PER_BUFFER_DEFAULT 2
+
+AAudioServiceEndpointPlay::AAudioServiceEndpointPlay(AAudioService &audioService)
+ : mStreamInternalPlay(audioService, true) {
+}
+
+AAudioServiceEndpointPlay::~AAudioServiceEndpointPlay() {
+}
+
+aaudio_result_t AAudioServiceEndpointPlay::open(int32_t deviceId) {
+ aaudio_result_t result = AAudioServiceEndpoint::open(deviceId);
+ if (result == AAUDIO_OK) {
+ mMixer.allocate(getStreamInternal()->getSamplesPerFrame(),
+ getStreamInternal()->getFramesPerBurst());
+
+ int32_t burstsPerBuffer = AAudioProperty_getMixerBursts();
+ if (burstsPerBuffer == 0) {
+ mLatencyTuningEnabled = true;
+ burstsPerBuffer = BURSTS_PER_BUFFER_DEFAULT;
+ }
+ ALOGD("AAudioServiceEndpoint(): burstsPerBuffer = %d", burstsPerBuffer);
+ int32_t desiredBufferSize = burstsPerBuffer * getStreamInternal()->getFramesPerBurst();
+ getStreamInternal()->setBufferSize(desiredBufferSize);
+ }
+ return result;
+}
+
+// Mix data from each application stream and write result to the shared MMAP stream.
+void *AAudioServiceEndpointPlay::callbackLoop() {
+ ALOGD("AAudioServiceEndpointPlay(): callbackLoop() entering");
+ int32_t underflowCount = 0;
+
+ aaudio_result_t result = getStreamInternal()->requestStart();
+
+ int64_t timeoutNanos = getStreamInternal()->calculateReasonableTimeout();
+
+ // result might be a frame count
+ while (mCallbackEnabled.load() && getStreamInternal()->isActive() && (result >= 0)) {
+ // Mix data from each active stream.
+ mMixer.clear();
+ { // use lock guard
+ std::lock_guard <std::mutex> lock(mLockStreams);
+ for (AAudioServiceStreamShared *sharedStream : mRunningStreams) {
+ FifoBuffer *fifo = sharedStream->getDataFifoBuffer();
+ float volume = 0.5; // TODO get from system
+ bool underflowed = mMixer.mix(fifo, volume);
+ underflowCount += underflowed ? 1 : 0;
+ // TODO log underflows in each stream
+ sharedStream->markTransferTime(AudioClock::getNanoseconds());
+ }
+ }
+
+ // Write mixer output to stream using a blocking write.
+ result = getStreamInternal()->write(mMixer.getOutputBuffer(),
+ getFramesPerBurst(), timeoutNanos);
+ if (result == AAUDIO_ERROR_DISCONNECTED) {
+ disconnectRegisteredStreams();
+ break;
+ } else if (result != getFramesPerBurst()) {
+ ALOGW("AAudioServiceEndpoint(): callbackLoop() wrote %d / %d",
+ result, getFramesPerBurst());
+ break;
+ }
+ }
+
+ result = getStreamInternal()->requestStop();
+
+ ALOGD("AAudioServiceEndpointPlay(): callbackLoop() exiting, %d underflows", underflowCount);
+ return NULL; // TODO review
+}
diff --git a/services/oboeservice/AAudioServiceEndpointPlay.h b/services/oboeservice/AAudioServiceEndpointPlay.h
new file mode 100644
index 0000000..b977960
--- /dev/null
+++ b/services/oboeservice/AAudioServiceEndpointPlay.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_SERVICE_ENDPOINT_PLAY_H
+#define AAUDIO_SERVICE_ENDPOINT_PLAY_H
+
+#include <atomic>
+#include <functional>
+#include <mutex>
+#include <vector>
+
+#include "client/AudioStreamInternal.h"
+#include "client/AudioStreamInternalPlay.h"
+#include "binding/AAudioServiceMessage.h"
+#include "AAudioServiceStreamShared.h"
+#include "AAudioServiceStreamMMAP.h"
+#include "AAudioMixer.h"
+#include "AAudioService.h"
+
+namespace aaudio {
+
+class AAudioServiceEndpointPlay : public AAudioServiceEndpoint {
+public:
+ explicit AAudioServiceEndpointPlay(android::AAudioService &audioService);
+ virtual ~AAudioServiceEndpointPlay();
+
+ aaudio_result_t open(int32_t deviceId) override;
+
+ AudioStreamInternal *getStreamInternal() override {
+ return &mStreamInternalPlay;
+ }
+
+ void *callbackLoop() override;
+
+private:
+ AudioStreamInternalPlay mStreamInternalPlay; // for playing output of mixer
+ bool mLatencyTuningEnabled = false; // TODO implement tuning
+ AAudioMixer mMixer; //
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_SERVICE_ENDPOINT_PLAY_H
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
new file mode 100644
index 0000000..8f0abc2
--- /dev/null
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <mutex>
+
+#include "binding/IAAudioService.h"
+#include "binding/AAudioServiceMessage.h"
+#include "utility/AudioClock.h"
+
+#include "AAudioServiceStreamBase.h"
+#include "TimestampScheduler.h"
+
+using namespace android; // TODO just import names needed
+using namespace aaudio; // TODO just import names needed
+
+/**
+ * Base class for streams in the service.
+ * @return
+ */
+
+AAudioServiceStreamBase::AAudioServiceStreamBase()
+ : mUpMessageQueue(nullptr)
+ , mAAudioThread() {
+}
+
+AAudioServiceStreamBase::~AAudioServiceStreamBase() {
+ close();
+}
+
+aaudio_result_t AAudioServiceStreamBase::open(const aaudio::AAudioStreamRequest &request,
+ aaudio::AAudioStreamConfiguration &configurationOutput) {
+ std::lock_guard<std::mutex> lock(mLockUpMessageQueue);
+ if (mUpMessageQueue != nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ } else {
+ mUpMessageQueue = new SharedRingBuffer();
+ return mUpMessageQueue->allocate(sizeof(AAudioServiceMessage), QUEUE_UP_CAPACITY_COMMANDS);
+ }
+}
+
+aaudio_result_t AAudioServiceStreamBase::close() {
+ std::lock_guard<std::mutex> lock(mLockUpMessageQueue);
+ delete mUpMessageQueue;
+ mUpMessageQueue = nullptr;
+
+ return AAUDIO_OK;
+}
+
+aaudio_result_t AAudioServiceStreamBase::start() {
+ sendServiceEvent(AAUDIO_SERVICE_EVENT_STARTED);
+ mState = AAUDIO_STREAM_STATE_STARTED;
+ mThreadEnabled.store(true);
+ return mAAudioThread.start(this);
+}
+
+aaudio_result_t AAudioServiceStreamBase::pause() {
+
+ sendCurrentTimestamp();
+ mThreadEnabled.store(false);
+ aaudio_result_t result = mAAudioThread.stop();
+ if (result != AAUDIO_OK) {
+ processError();
+ return result;
+ }
+ sendServiceEvent(AAUDIO_SERVICE_EVENT_PAUSED);
+ mState = AAUDIO_STREAM_STATE_PAUSED;
+ return result;
+}
+
+aaudio_result_t AAudioServiceStreamBase::stop() {
+ // TODO wait for data to be played out
+ sendCurrentTimestamp();
+ mThreadEnabled.store(false);
+ aaudio_result_t result = mAAudioThread.stop();
+ if (result != AAUDIO_OK) {
+ processError();
+ return result;
+ }
+ sendServiceEvent(AAUDIO_SERVICE_EVENT_STOPPED);
+ mState = AAUDIO_STREAM_STATE_STOPPED;
+ return result;
+}
+
+aaudio_result_t AAudioServiceStreamBase::flush() {
+ sendServiceEvent(AAUDIO_SERVICE_EVENT_FLUSHED);
+ mState = AAUDIO_STREAM_STATE_FLUSHED;
+ return AAUDIO_OK;
+}
+
+// implement Runnable, periodically send timestamps to client
+void AAudioServiceStreamBase::run() {
+ ALOGD("AAudioServiceStreamBase::run() entering ----------------");
+ TimestampScheduler timestampScheduler;
+ timestampScheduler.setBurstPeriod(mFramesPerBurst, mSampleRate);
+ timestampScheduler.start(AudioClock::getNanoseconds());
+ int64_t nextTime = timestampScheduler.nextAbsoluteTime();
+ while(mThreadEnabled.load()) {
+ if (AudioClock::getNanoseconds() >= nextTime) {
+ aaudio_result_t result = sendCurrentTimestamp();
+ if (result != AAUDIO_OK) {
+ break;
+ }
+ nextTime = timestampScheduler.nextAbsoluteTime();
+ } else {
+ // Sleep until it is time to send the next timestamp.
+ AudioClock::sleepUntilNanoTime(nextTime);
+ }
+ }
+ ALOGD("AAudioServiceStreamBase::run() exiting ----------------");
+}
+
+void AAudioServiceStreamBase::processError() {
+ sendServiceEvent(AAUDIO_SERVICE_EVENT_DISCONNECTED);
+}
+
+aaudio_result_t AAudioServiceStreamBase::sendServiceEvent(aaudio_service_event_t event,
+ double dataDouble,
+ int64_t dataLong) {
+ AAudioServiceMessage command;
+ command.what = AAudioServiceMessage::code::EVENT;
+ command.event.event = event;
+ command.event.dataDouble = dataDouble;
+ command.event.dataLong = dataLong;
+ return writeUpMessageQueue(&command);
+}
+
+aaudio_result_t AAudioServiceStreamBase::writeUpMessageQueue(AAudioServiceMessage *command) {
+ std::lock_guard<std::mutex> lock(mLockUpMessageQueue);
+ if (mUpMessageQueue == nullptr) {
+ ALOGE("writeUpMessageQueue(): mUpMessageQueue null! - stream not open");
+ return AAUDIO_ERROR_NULL;
+ }
+ int32_t count = mUpMessageQueue->getFifoBuffer()->write(command, 1);
+ if (count != 1) {
+ ALOGE("writeUpMessageQueue(): Queue full. Did client die?");
+ return AAUDIO_ERROR_WOULD_BLOCK;
+ } else {
+ return AAUDIO_OK;
+ }
+}
+
+aaudio_result_t AAudioServiceStreamBase::sendCurrentTimestamp() {
+ AAudioServiceMessage command;
+ aaudio_result_t result = getFreeRunningPosition(&command.timestamp.position,
+ &command.timestamp.timestamp);
+ if (result == AAUDIO_OK) {
+ // ALOGD("sendCurrentTimestamp(): position = %lld, nanos = %lld",
+ // (long long) command.timestamp.position,
+ // (long long) command.timestamp.timestamp);
+ command.what = AAudioServiceMessage::code::TIMESTAMP;
+ result = writeUpMessageQueue(&command);
+ }
+ return result;
+}
+
+/**
+ * Get an immutable description of the in-memory queues
+ * used to communicate with the underlying HAL or Service.
+ */
+aaudio_result_t AAudioServiceStreamBase::getDescription(AudioEndpointParcelable &parcelable) {
+ // Gather information on the message queue.
+ mUpMessageQueue->fillParcelable(parcelable,
+ parcelable.mUpMessageQueueParcelable);
+ return getDownDataDescription(parcelable);
+}
\ No newline at end of file
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
new file mode 100644
index 0000000..ee52c39
--- /dev/null
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_SERVICE_STREAM_BASE_H
+#define AAUDIO_AAUDIO_SERVICE_STREAM_BASE_H
+
+#include <assert.h>
+#include <mutex>
+
+#include "fifo/FifoBuffer.h"
+#include "binding/IAAudioService.h"
+#include "binding/AudioEndpointParcelable.h"
+#include "binding/AAudioServiceMessage.h"
+#include "utility/AAudioUtilities.h"
+
+#include "SharedRingBuffer.h"
+#include "AAudioThread.h"
+
+namespace aaudio {
+
+// We expect the queue to only have a few commands.
+// This should be way more than we need.
+#define QUEUE_UP_CAPACITY_COMMANDS (128)
+
+/**
+ * Base class for a stream in the AAudio service.
+ */
+class AAudioServiceStreamBase
+ : public Runnable {
+
+public:
+ AAudioServiceStreamBase();
+ virtual ~AAudioServiceStreamBase();
+
+ enum {
+ ILLEGAL_THREAD_ID = 0
+ };
+
+ // -------------------------------------------------------------------
+ /**
+ * Open the device.
+ */
+ virtual aaudio_result_t open(const aaudio::AAudioStreamRequest &request,
+ aaudio::AAudioStreamConfiguration &configurationOutput) = 0;
+
+ virtual aaudio_result_t close();
+
+ /**
+ * Start the flow of data.
+ */
+ virtual aaudio_result_t start();
+
+ /**
+ * Stop the flow of data such that start() can resume with loss of data.
+ */
+ virtual aaudio_result_t pause();
+
+ /**
+ * Stop the flow of data after data in buffer has played.
+ */
+ virtual aaudio_result_t stop();
+
+ /**
+ * Discard any data held by the underlying HAL or Service.
+ */
+ virtual aaudio_result_t flush();
+
+ // -------------------------------------------------------------------
+
+ /**
+ * Send a message to the client.
+ */
+ aaudio_result_t sendServiceEvent(aaudio_service_event_t event,
+ double dataDouble = 0.0,
+ int64_t dataLong = 0);
+
+ /**
+ * Fill in a parcelable description of stream.
+ */
+ aaudio_result_t getDescription(AudioEndpointParcelable &parcelable);
+
+
+ void setRegisteredThread(pid_t pid) {
+ mRegisteredClientThread = pid;
+ }
+
+ pid_t getRegisteredThread() const {
+ return mRegisteredClientThread;
+ }
+
+ int32_t getFramesPerBurst() const {
+ return mFramesPerBurst;
+ }
+
+ int32_t calculateBytesPerFrame() const {
+ return mSamplesPerFrame * AAudioConvert_formatToSizeInBytes(mAudioFormat);
+ }
+
+ void run() override; // to implement Runnable
+
+ void processError();
+
+protected:
+ aaudio_result_t writeUpMessageQueue(AAudioServiceMessage *command);
+
+ aaudio_result_t sendCurrentTimestamp();
+
+ virtual aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) = 0;
+
+ virtual aaudio_result_t getDownDataDescription(AudioEndpointParcelable &parcelable) = 0;
+
+ aaudio_stream_state_t mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
+
+ pid_t mRegisteredClientThread = ILLEGAL_THREAD_ID;
+
+ SharedRingBuffer* mUpMessageQueue;
+ std::mutex mLockUpMessageQueue;
+
+ AAudioThread mAAudioThread;
+ // This is used by one thread to tell another thread to exit. So it must be atomic.
+ std::atomic<bool> mThreadEnabled;
+
+ aaudio_format_t mAudioFormat = AAUDIO_FORMAT_UNSPECIFIED;
+ int32_t mFramesPerBurst = 0;
+ int32_t mSamplesPerFrame = AAUDIO_UNSPECIFIED;
+ int32_t mSampleRate = AAUDIO_UNSPECIFIED;
+ int32_t mCapacityInFrames = AAUDIO_UNSPECIFIED;
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_AAUDIO_SERVICE_STREAM_BASE_H
diff --git a/services/oboeservice/AAudioServiceStreamExclusive.h b/services/oboeservice/AAudioServiceStreamExclusive.h
new file mode 100644
index 0000000..db382a3
--- /dev/null
+++ b/services/oboeservice/AAudioServiceStreamExclusive.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_SERVICE_STREAM_EXCLUSIVE_H
+#define AAUDIO_AAUDIO_SERVICE_STREAM_EXCLUSIVE_H
+
+#include "AAudioServiceStreamMMAP.h"
+
+namespace aaudio {
+
+/**
+ * Exclusive mode stream in the AAudio service.
+ *
+ * This is currently a stub.
+ * We may move code from AAudioServiceStreamMMAP into this class.
+ * If not, then it will be removed.
+ */
+class AAudioServiceStreamExclusive : public AAudioServiceStreamMMAP {
+
+public:
+ AAudioServiceStreamExclusive() {};
+ virtual ~AAudioServiceStreamExclusive() = default;
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_AAUDIO_SERVICE_STREAM_EXCLUSIVE_H
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.cpp b/services/oboeservice/AAudioServiceStreamMMAP.cpp
new file mode 100644
index 0000000..97b9937
--- /dev/null
+++ b/services/oboeservice/AAudioServiceStreamMMAP.cpp
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <atomic>
+#include <stdint.h>
+
+#include <utils/String16.h>
+#include <media/nbaio/AudioStreamOutSink.h>
+#include <media/MmapStreamInterface.h>
+
+#include "AAudioServiceStreamBase.h"
+#include "AAudioServiceStreamMMAP.h"
+#include "binding/AudioEndpointParcelable.h"
+#include "SharedMemoryProxy.h"
+#include "utility/AAudioUtilities.h"
+
+using namespace android;
+using namespace aaudio;
+
+#define AAUDIO_BUFFER_CAPACITY_MIN 4 * 512
+#define AAUDIO_SAMPLE_RATE_DEFAULT 48000
+
+/**
+ * Stream that uses an MMAP buffer.
+ */
+
+AAudioServiceStreamMMAP::AAudioServiceStreamMMAP()
+ : AAudioServiceStreamBase()
+ , mMmapStreamCallback(new MyMmapStreamCallback(*this))
+ , mPreviousFrameCounter(0)
+ , mMmapStream(nullptr) {
+}
+
+AAudioServiceStreamMMAP::~AAudioServiceStreamMMAP() {
+ close();
+}
+
+aaudio_result_t AAudioServiceStreamMMAP::close() {
+ mMmapStream.clear(); // TODO review. Is that all we have to do?
+ // Apparently the above close is asynchronous. An attempt to open a new device
+ // right after a close can fail. Also some callbacks may still be in flight!
+ // FIXME Make closing synchronous.
+ AudioClock::sleepForNanos(100 * AAUDIO_NANOS_PER_MILLISECOND);
+
+ if (mAudioDataFileDescriptor != -1) {
+ ::close(mAudioDataFileDescriptor);
+ mAudioDataFileDescriptor = -1;
+ }
+
+ return AAudioServiceStreamBase::close();
+}
+
+// Open stream on HAL and pass information about the shared memory buffer back to the client.
+aaudio_result_t AAudioServiceStreamMMAP::open(const aaudio::AAudioStreamRequest &request,
+ aaudio::AAudioStreamConfiguration &configurationOutput) {
+ const audio_attributes_t attributes = {
+ .content_type = AUDIO_CONTENT_TYPE_MUSIC,
+ .usage = AUDIO_USAGE_MEDIA,
+ .source = AUDIO_SOURCE_VOICE_RECOGNITION,
+ .flags = AUDIO_FLAG_LOW_LATENCY,
+ .tags = ""
+ };
+ audio_config_base_t config;
+
+ aaudio_result_t result = AAudioServiceStreamBase::open(request, configurationOutput);
+ if (result != AAUDIO_OK) {
+ ALOGE("AAudioServiceStreamBase open returned %d", result);
+ return result;
+ }
+
+ const AAudioStreamConfiguration &configurationInput = request.getConstantConfiguration();
+ audio_port_handle_t deviceId = configurationInput.getDeviceId();
+
+ mMmapClient.clientUid = request.getUserId();
+ mMmapClient.clientPid = request.getProcessId();
+ aaudio_direction_t direction = request.getDirection();
+
+ // Fill in config
+ aaudio_format_t aaudioFormat = configurationInput.getAudioFormat();
+ if (aaudioFormat == AAUDIO_UNSPECIFIED || aaudioFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+ aaudioFormat = AAUDIO_FORMAT_PCM_I16;
+ }
+ config.format = AAudioConvert_aaudioToAndroidDataFormat(aaudioFormat);
+
+ int32_t aaudioSampleRate = configurationInput.getSampleRate();
+ if (aaudioSampleRate == AAUDIO_UNSPECIFIED) {
+ aaudioSampleRate = AAUDIO_SAMPLE_RATE_DEFAULT;
+ }
+ config.sample_rate = aaudioSampleRate;
+
+ int32_t aaudioSamplesPerFrame = configurationInput.getSamplesPerFrame();
+
+ if (direction == AAUDIO_DIRECTION_OUTPUT) {
+ config.channel_mask = (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
+ ? AUDIO_CHANNEL_OUT_STEREO
+ : audio_channel_out_mask_from_count(aaudioSamplesPerFrame);
+ } else if (direction == AAUDIO_DIRECTION_INPUT) {
+ config.channel_mask = (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
+ ? AUDIO_CHANNEL_IN_STEREO
+ : audio_channel_in_mask_from_count(aaudioSamplesPerFrame);
+ } else {
+ ALOGE("openMmapStream - invalid direction = %d", direction);
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ }
+
+ mMmapClient.packageName.setTo(String16("aaudio_service")); // FIXME what should we do here?
+
+ MmapStreamInterface::stream_direction_t streamDirection = (direction == AAUDIO_DIRECTION_OUTPUT)
+ ? MmapStreamInterface::DIRECTION_OUTPUT : MmapStreamInterface::DIRECTION_INPUT;
+
+ // Open HAL stream.
+ status_t status = MmapStreamInterface::openMmapStream(streamDirection,
+ &attributes,
+ &config,
+ mMmapClient,
+ &deviceId,
+ mMmapStreamCallback,
+ mMmapStream);
+ if (status != OK) {
+ ALOGE("openMmapStream returned status %d", status);
+ return AAUDIO_ERROR_UNAVAILABLE;
+ }
+
+ // Create MMAP/NOIRQ buffer.
+ int32_t minSizeFrames = configurationInput.getBufferCapacity();
+ if (minSizeFrames == 0) { // zero will get rejected
+ minSizeFrames = AAUDIO_BUFFER_CAPACITY_MIN;
+ }
+ status = mMmapStream->createMmapBuffer(minSizeFrames, &mMmapBufferinfo);
+ if (status != OK) {
+ ALOGE("%s: createMmapBuffer() returned status %d, return AAUDIO_ERROR_UNAVAILABLE",
+ __FILE__, status);
+ return AAUDIO_ERROR_UNAVAILABLE;
+ } else {
+ ALOGD("createMmapBuffer status %d shared_address = %p buffer_size %d burst_size %d",
+ status, mMmapBufferinfo.shared_memory_address,
+ mMmapBufferinfo.buffer_size_frames,
+ mMmapBufferinfo.burst_size_frames);
+ }
+
+ // Get information about the stream and pass it back to the caller.
+ mSamplesPerFrame = (direction == AAUDIO_DIRECTION_OUTPUT)
+ ? audio_channel_count_from_out_mask(config.channel_mask)
+ : audio_channel_count_from_in_mask(config.channel_mask);
+
+ mAudioDataFileDescriptor = mMmapBufferinfo.shared_memory_fd;
+ mFramesPerBurst = mMmapBufferinfo.burst_size_frames;
+ mCapacityInFrames = mMmapBufferinfo.buffer_size_frames;
+ mAudioFormat = AAudioConvert_androidToAAudioDataFormat(config.format);
+ mSampleRate = config.sample_rate;
+
+ // Scale up the burst size to meet the minimum equivalent in microseconds.
+ // This is to avoid waking the CPU too often when the HW burst is very small
+ // or at high sample rates.
+ int32_t burstMinMicros = AAudioProperty_getHardwareBurstMinMicros();
+ int32_t burstMicros = 0;
+ do {
+ if (burstMicros > 0) { // skip first loop
+ mFramesPerBurst *= 2;
+ }
+ burstMicros = mFramesPerBurst * static_cast<int64_t>(1000000) / mSampleRate;
+ } while (burstMicros < burstMinMicros);
+
+ ALOGD("AAudioServiceStreamMMAP::open() original burst = %d, minMicros = %d, final burst = %d\n",
+ mMmapBufferinfo.burst_size_frames, burstMinMicros, mFramesPerBurst);
+
+ // Fill in AAudioStreamConfiguration
+ configurationOutput.setSampleRate(mSampleRate);
+ configurationOutput.setSamplesPerFrame(mSamplesPerFrame);
+ configurationOutput.setAudioFormat(mAudioFormat);
+ configurationOutput.setDeviceId(deviceId);
+
+ return AAUDIO_OK;
+}
+
+/**
+ * Start the flow of data.
+ */
+aaudio_result_t AAudioServiceStreamMMAP::start() {
+ if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
+ aaudio_result_t result;
+ status_t status = mMmapStream->start(mMmapClient, &mPortHandle);
+ if (status != OK) {
+ ALOGE("AAudioServiceStreamMMAP::start() mMmapStream->start() returned %d", status);
+ processError();
+ result = AAudioConvert_androidToAAudioResult(status);
+ } else {
+ result = AAudioServiceStreamBase::start();
+ }
+ return result;
+}
+
+/**
+ * Stop the flow of data such that start() can resume with loss of data.
+ */
+aaudio_result_t AAudioServiceStreamMMAP::pause() {
+ if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
+
+ aaudio_result_t result1 = AAudioServiceStreamBase::pause();
+ status_t status = mMmapStream->stop(mPortHandle);
+ mFramesRead.reset32();
+ return (result1 != AAUDIO_OK) ? result1 : AAudioConvert_androidToAAudioResult(status);
+}
+
+aaudio_result_t AAudioServiceStreamMMAP::stop() {
+ if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
+
+ aaudio_result_t result1 = AAudioServiceStreamBase::stop();
+ status_t status = mMmapStream->stop(mPortHandle); // status_t: matches pause() and the HAL return type
+ mFramesRead.reset32();
+ return (result1 != AAUDIO_OK) ? result1 : AAudioConvert_androidToAAudioResult(status);
+}
+
+/**
+ * Discard any data held by the underlying HAL or Service.
+ */
+aaudio_result_t AAudioServiceStreamMMAP::flush() {
+ if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
+ // TODO how do we flush an MMAP/NOIRQ buffer? sync pointers?
+ sendServiceEvent(AAUDIO_SERVICE_EVENT_FLUSHED);
+ mState = AAUDIO_STREAM_STATE_FLUSHED;
+ return AAudioServiceStreamBase::flush(); // was "flush();;" — stray extra semicolon removed
+}
+
+
+aaudio_result_t AAudioServiceStreamMMAP::getFreeRunningPosition(int64_t *positionFrames,
+ int64_t *timeNanos) {
+ struct audio_mmap_position position;
+ if (mMmapStream == nullptr) {
+ processError();
+ return AAUDIO_ERROR_NULL;
+ }
+ status_t status = mMmapStream->getMmapPosition(&position);
+ if (status != OK) {
+ ALOGE("sendCurrentTimestamp(): getMmapPosition() returned %d", status);
+ processError();
+ return AAudioConvert_androidToAAudioResult(status);
+ } else {
+ mFramesRead.update32(position.position_frames);
+ *positionFrames = mFramesRead.get();
+ *timeNanos = position.time_nanoseconds;
+ }
+ return AAUDIO_OK;
+}
+
+void AAudioServiceStreamMMAP::onTearDown() {
+ ALOGE("AAudioServiceStreamMMAP::onTearDown() called - TODO");
+};
+
+void AAudioServiceStreamMMAP::onVolumeChanged(audio_channel_mask_t channels,
+ android::Vector<float> values) {
+ // TODO do we really need a different volume for each channel?
+ float volume = values[0];
+ ALOGD("AAudioServiceStreamMMAP::onVolumeChanged() volume[0] = %f", volume);
+ sendServiceEvent(AAUDIO_SERVICE_EVENT_VOLUME, volume);
+};
+
+void AAudioServiceStreamMMAP::onRoutingChanged(audio_port_handle_t deviceId) {
+ ALOGD("AAudioServiceStreamMMAP::onRoutingChanged() called with %d, old = %d",
+ deviceId, mPortHandle);
+ if (mPortHandle > 0 && mPortHandle != deviceId) {
+ sendServiceEvent(AAUDIO_SERVICE_EVENT_DISCONNECTED);
+ }
+ mPortHandle = deviceId;
+};
+
+/**
+ * Get an immutable description of the data queue from the HAL.
+ */
+aaudio_result_t AAudioServiceStreamMMAP::getDownDataDescription(AudioEndpointParcelable &parcelable)
+{
+ // Gather information on the data queue based on HAL info.
+ int32_t bytesPerFrame = calculateBytesPerFrame();
+ int32_t capacityInBytes = mCapacityInFrames * bytesPerFrame;
+ int fdIndex = parcelable.addFileDescriptor(mAudioDataFileDescriptor, capacityInBytes);
+ parcelable.mDownDataQueueParcelable.setupMemory(fdIndex, 0, capacityInBytes);
+ parcelable.mDownDataQueueParcelable.setBytesPerFrame(bytesPerFrame);
+ parcelable.mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
+ parcelable.mDownDataQueueParcelable.setCapacityInFrames(mCapacityInFrames);
+ return AAUDIO_OK;
+}
\ No newline at end of file
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.h b/services/oboeservice/AAudioServiceStreamMMAP.h
new file mode 100644
index 0000000..fe75a10
--- /dev/null
+++ b/services/oboeservice/AAudioServiceStreamMMAP.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_SERVICE_STREAM_MMAP_H
+#define AAUDIO_AAUDIO_SERVICE_STREAM_MMAP_H
+
+#include <atomic>
+
+#include <media/audiohal/StreamHalInterface.h>
+#include <media/MmapStreamCallback.h>
+#include <media/MmapStreamInterface.h>
+#include <utils/RefBase.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
+
+#include "binding/AAudioServiceMessage.h"
+#include "AAudioServiceStreamBase.h"
+#include "binding/AudioEndpointParcelable.h"
+#include "SharedMemoryProxy.h"
+#include "TimestampScheduler.h"
+#include "utility/MonotonicCounter.h"
+
+namespace aaudio {
+
+ /**
+ * Manage one memory mapped buffer that originated from a HAL.
+ */
+class AAudioServiceStreamMMAP
+ : public AAudioServiceStreamBase
+ , public android::MmapStreamCallback {
+
+public:
+ AAudioServiceStreamMMAP();
+ virtual ~AAudioServiceStreamMMAP();
+
+
+ aaudio_result_t open(const aaudio::AAudioStreamRequest &request,
+ aaudio::AAudioStreamConfiguration &configurationOutput) override;
+
+ /**
+ * Start the flow of audio data.
+ *
+ * This is not guaranteed to be synchronous but it currently is.
+ * An AAUDIO_SERVICE_EVENT_STARTED will be sent to the client when complete.
+ */
+ aaudio_result_t start() override;
+
+ /**
+ * Stop the flow of data so that start() can resume without loss of data.
+ *
+ * This is not guaranteed to be synchronous but it currently is.
+ * An AAUDIO_SERVICE_EVENT_PAUSED will be sent to the client when complete.
+ */
+ aaudio_result_t pause() override;
+
+ aaudio_result_t stop() override;
+
+ /**
+ * Discard any data held by the underlying HAL or Service.
+ *
+ * This is not guaranteed to be synchronous but it currently is.
+ * An AAUDIO_SERVICE_EVENT_FLUSHED will be sent to the client when complete.
+ */
+ aaudio_result_t flush() override;
+
+ aaudio_result_t close() override;
+
+ /**
+ * Send a MMAP/NOIRQ buffer timestamp to the client.
+ */
+ aaudio_result_t sendCurrentTimestamp();
+
+ // -------------- Callback functions ---------------------
+ void onTearDown() override;
+
+ void onVolumeChanged(audio_channel_mask_t channels,
+ android::Vector<float> values) override;
+
+ void onRoutingChanged(audio_port_handle_t deviceId) override;
+
+protected:
+
+ aaudio_result_t getDownDataDescription(AudioEndpointParcelable &parcelable) override;
+
+ aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) override;
+
+private:
+ // This proxy class was needed to prevent a crash in AudioFlinger
+ // when the stream was closed.
+ class MyMmapStreamCallback : public android::MmapStreamCallback {
+ public:
+ explicit MyMmapStreamCallback(android::MmapStreamCallback &serviceCallback)
+ : mServiceCallback(serviceCallback){}
+ virtual ~MyMmapStreamCallback() = default;
+
+ void onTearDown() override {
+ mServiceCallback.onTearDown();
+ };
+
+ void onVolumeChanged(audio_channel_mask_t channels, android::Vector<float> values) override
+ {
+ mServiceCallback.onVolumeChanged(channels, values);
+ };
+
+ void onRoutingChanged(audio_port_handle_t deviceId) override {
+ mServiceCallback.onRoutingChanged(deviceId);
+ };
+
+ private:
+ android::MmapStreamCallback &mServiceCallback;
+ };
+
+ android::sp<MyMmapStreamCallback> mMmapStreamCallback;
+ MonotonicCounter mFramesWritten;
+ MonotonicCounter mFramesRead;
+ int32_t mPreviousFrameCounter = 0; // from HAL
+ int mAudioDataFileDescriptor = -1;
+
+ // Interface to the AudioFlinger MMAP support.
+ android::sp<android::MmapStreamInterface> mMmapStream;
+ struct audio_mmap_buffer_info mMmapBufferinfo;
+ android::MmapStreamInterface::Client mMmapClient;
+ audio_port_handle_t mPortHandle = -1; // TODO review best default
+};
+
+} // namespace aaudio
+
+#endif //AAUDIO_AAUDIO_SERVICE_STREAM_MMAP_H
diff --git a/services/oboeservice/AAudioServiceStreamShared.cpp b/services/oboeservice/AAudioServiceStreamShared.cpp
new file mode 100644
index 0000000..494b18e
--- /dev/null
+++ b/services/oboeservice/AAudioServiceStreamShared.cpp
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <mutex>
+
+#include <aaudio/AAudio.h>
+
+#include "binding/IAAudioService.h"
+
+#include "binding/AAudioServiceMessage.h"
+#include "AAudioServiceStreamBase.h"
+#include "AAudioServiceStreamShared.h"
+#include "AAudioEndpointManager.h"
+#include "AAudioService.h"
+#include "AAudioServiceEndpoint.h"
+
+using namespace android;
+using namespace aaudio;
+
+#define MIN_BURSTS_PER_BUFFER 2
+#define MAX_BURSTS_PER_BUFFER 32
+
+AAudioServiceStreamShared::AAudioServiceStreamShared(AAudioService &audioService)
+ : mAudioService(audioService)
+ {
+}
+
+AAudioServiceStreamShared::~AAudioServiceStreamShared() {
+ close();
+}
+
+aaudio_result_t AAudioServiceStreamShared::open(const aaudio::AAudioStreamRequest &request,
+ aaudio::AAudioStreamConfiguration &configurationOutput) {
+
+ aaudio_result_t result = AAudioServiceStreamBase::open(request, configurationOutput);
+ if (result != AAUDIO_OK) {
+ ALOGE("AAudioServiceStreamBase open returned %d", result);
+ return result;
+ }
+
+ const AAudioStreamConfiguration &configurationInput = request.getConstantConfiguration();
+ int32_t deviceId = configurationInput.getDeviceId();
+ aaudio_direction_t direction = request.getDirection();
+
+ AAudioEndpointManager &mEndpointManager = AAudioEndpointManager::getInstance();
+ mServiceEndpoint = mEndpointManager.openEndpoint(mAudioService, deviceId, direction);
+ if (mServiceEndpoint == nullptr) {
+ ALOGE("AAudioServiceStreamShared::open(), mServiceEndPoint = %p", mServiceEndpoint);
+ return AAUDIO_ERROR_UNAVAILABLE;
+ }
+
+ // Is the request compatible with the shared endpoint?
+ mAudioFormat = configurationInput.getAudioFormat();
+ if (mAudioFormat == AAUDIO_FORMAT_UNSPECIFIED) {
+ mAudioFormat = AAUDIO_FORMAT_PCM_FLOAT;
+ } else if (mAudioFormat != AAUDIO_FORMAT_PCM_FLOAT) {
+ ALOGE("AAudioServiceStreamShared::open(), mAudioFormat = %d, need FLOAT", mAudioFormat);
+ return AAUDIO_ERROR_INVALID_FORMAT;
+ }
+
+ mSampleRate = configurationInput.getSampleRate();
+ if (mSampleRate == AAUDIO_UNSPECIFIED) {
+ mSampleRate = mServiceEndpoint->getSampleRate();
+ } else if (mSampleRate != mServiceEndpoint->getSampleRate()) {
+ ALOGE("AAudioServiceStreamShared::open(), mAudioFormat = %d, need %d",
+ mSampleRate, mServiceEndpoint->getSampleRate());
+ return AAUDIO_ERROR_INVALID_RATE;
+ }
+
+ mSamplesPerFrame = configurationInput.getSamplesPerFrame();
+ if (mSamplesPerFrame == AAUDIO_UNSPECIFIED) {
+ mSamplesPerFrame = mServiceEndpoint->getSamplesPerFrame();
+ } else if (mSamplesPerFrame != mServiceEndpoint->getSamplesPerFrame()) {
+ ALOGE("AAudioServiceStreamShared::open(), mSamplesPerFrame = %d, need %d",
+ mSamplesPerFrame, mServiceEndpoint->getSamplesPerFrame());
+ return AAUDIO_ERROR_OUT_OF_RANGE;
+ }
+
+ // Determine this stream's shared memory buffer capacity.
+ mFramesPerBurst = mServiceEndpoint->getFramesPerBurst();
+ int32_t minCapacityFrames = configurationInput.getBufferCapacity();
+ int32_t numBursts = MAX_BURSTS_PER_BUFFER;
+ if (minCapacityFrames != AAUDIO_UNSPECIFIED) {
+ numBursts = (minCapacityFrames + mFramesPerBurst - 1) / mFramesPerBurst;
+ if (numBursts < MIN_BURSTS_PER_BUFFER) {
+ numBursts = MIN_BURSTS_PER_BUFFER;
+ } else if (numBursts > MAX_BURSTS_PER_BUFFER) {
+ numBursts = MAX_BURSTS_PER_BUFFER;
+ }
+ }
+ mCapacityInFrames = numBursts * mFramesPerBurst;
+ ALOGD("AAudioServiceStreamShared::open(), mCapacityInFrames = %d", mCapacityInFrames);
+
+ // Create audio data shared memory buffer for client.
+ mAudioDataQueue = new SharedRingBuffer();
+ mAudioDataQueue->allocate(calculateBytesPerFrame(), mCapacityInFrames);
+
+ // Fill in configuration for client.
+ configurationOutput.setSampleRate(mSampleRate);
+ configurationOutput.setSamplesPerFrame(mSamplesPerFrame);
+ configurationOutput.setAudioFormat(mAudioFormat);
+ configurationOutput.setDeviceId(deviceId);
+
+ mServiceEndpoint->registerStream(this);
+
+ return AAUDIO_OK;
+}
+
+/**
+ * Start the flow of audio data.
+ *
+ * An AAUDIO_SERVICE_EVENT_STARTED will be sent to the client when complete.
+ */
+aaudio_result_t AAudioServiceStreamShared::start() {
+ AAudioServiceEndpoint *endpoint = mServiceEndpoint;
+ if (endpoint == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ // For output streams, this will add the stream to the mixer.
+ aaudio_result_t result = endpoint->startStream(this);
+ if (result != AAUDIO_OK) {
+ ALOGE("AAudioServiceStreamShared::start() mServiceEndpoint returned %d", result);
+ processError();
+ } else {
+ result = AAudioServiceStreamBase::start();
+ }
+ return result; // propagate the error; previously always returned AAUDIO_OK, hiding startStream() failures
+}
+
+/**
+ * Stop the flow of data so that start() can resume without loss of data.
+ *
+ * An AAUDIO_SERVICE_EVENT_PAUSED will be sent to the client when complete.
+*/
+aaudio_result_t AAudioServiceStreamShared::pause() {
+ AAudioServiceEndpoint *endpoint = mServiceEndpoint;
+ if (endpoint == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ // Tell the endpoint to stop pulling data for this stream (removes it from the mix).
+ aaudio_result_t result = endpoint->stopStream(this);
+ if (result != AAUDIO_OK) {
+ ALOGE("AAudioServiceStreamShared::pause() mServiceEndpoint returned %d", result);
+ processError();
+ }
+ return AAudioServiceStreamBase::pause();
+}
+
+aaudio_result_t AAudioServiceStreamShared::stop() {
+ AAudioServiceEndpoint *endpoint = mServiceEndpoint;
+ if (endpoint == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ // Tell the endpoint to stop pulling data for this stream (removes it from the mix).
+ aaudio_result_t result = endpoint->stopStream(this);
+ if (result != AAUDIO_OK) {
+ ALOGE("AAudioServiceStreamShared::stop() mServiceEndpoint returned %d", result);
+ processError();
+ }
+ return AAudioServiceStreamBase::stop();
+}
+
+/**
+ * Discard any data held by the underlying HAL or Service.
+ *
+ * An AAUDIO_SERVICE_EVENT_FLUSHED will be sent to the client when complete.
+ */
+aaudio_result_t AAudioServiceStreamShared::flush() {
+ // TODO make sure we are paused
+ // TODO actually flush the data
+ return AAudioServiceStreamBase::flush() ;
+}
+
+aaudio_result_t AAudioServiceStreamShared::close() {
+ pause();
+ // TODO wait for pause() to synchronize
+ AAudioServiceEndpoint *endpoint = mServiceEndpoint;
+ if (endpoint != nullptr) {
+ endpoint->unregisterStream(this);
+
+ AAudioEndpointManager &mEndpointManager = AAudioEndpointManager::getInstance();
+ mEndpointManager.closeEndpoint(endpoint);
+ mServiceEndpoint = nullptr;
+ }
+ if (mAudioDataQueue != nullptr) {
+ delete mAudioDataQueue;
+ mAudioDataQueue = nullptr;
+ }
+ return AAudioServiceStreamBase::close();
+}
+
+/**
+ * Get an immutable description of the data queue created by this service.
+ */
+aaudio_result_t AAudioServiceStreamShared::getDownDataDescription(AudioEndpointParcelable &parcelable)
+{
+ // Gather information on the data queue.
+ mAudioDataQueue->fillParcelable(parcelable,
+ parcelable.mDownDataQueueParcelable);
+ parcelable.mDownDataQueueParcelable.setFramesPerBurst(getFramesPerBurst());
+ return AAUDIO_OK;
+}
+
+void AAudioServiceStreamShared::onStop() {
+}
+
+void AAudioServiceStreamShared::onDisconnect() {
+ if (mServiceEndpoint != nullptr) mServiceEndpoint->close(); // guard: close()/pause() may already have cleared it
+ mServiceEndpoint = nullptr;
+}
+
+void AAudioServiceStreamShared::markTransferTime(int64_t nanoseconds) {
+ mMarkedPosition = mAudioDataQueue->getFifoBuffer()->getReadCounter();
+ mMarkedTime = nanoseconds;
+}
+
+aaudio_result_t AAudioServiceStreamShared::getFreeRunningPosition(int64_t *positionFrames,
+ int64_t *timeNanos) {
+ // TODO get these two numbers as an atomic pair
+ *positionFrames = mMarkedPosition;
+ *timeNanos = mMarkedTime;
+ return AAUDIO_OK;
+}
diff --git a/services/oboeservice/AAudioServiceStreamShared.h b/services/oboeservice/AAudioServiceStreamShared.h
new file mode 100644
index 0000000..dfdbbb3
--- /dev/null
+++ b/services/oboeservice/AAudioServiceStreamShared.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_AAUDIO_SERVICE_STREAM_SHARED_H
+#define AAUDIO_AAUDIO_SERVICE_STREAM_SHARED_H
+
+#include "fifo/FifoBuffer.h"
+#include "binding/AAudioServiceMessage.h"
+#include "binding/AAudioStreamRequest.h"
+#include "binding/AAudioStreamConfiguration.h"
+
+#include "AAudioService.h"
+#include "AAudioServiceStreamBase.h"
+
+namespace aaudio {
+
+// We expect the queue to only have a few commands.
+// This should be way more than we need.
+#define QUEUE_UP_CAPACITY_COMMANDS (128)
+
+class AAudioEndpointManager;
+class AAudioServiceEndpoint;
+class SharedRingBuffer;
+
+/**
+ * One of these is created for every MODE_SHARED stream in the AAudioService.
+ *
+ * Each Shared stream will register itself with an AAudioServiceEndpoint when it is opened.
+ */
+class AAudioServiceStreamShared : public AAudioServiceStreamBase {
+
+public:
+ AAudioServiceStreamShared(android::AAudioService &aAudioService);
+ virtual ~AAudioServiceStreamShared();
+
+ aaudio_result_t open(const aaudio::AAudioStreamRequest &request,
+ aaudio::AAudioStreamConfiguration &configurationOutput) override;
+
+ /**
+ * Start the flow of audio data.
+ *
+ * This is not guaranteed to be synchronous but it currently is.
+ * An AAUDIO_SERVICE_EVENT_STARTED will be sent to the client when complete.
+ */
+ aaudio_result_t start() override;
+
+ /**
+ * Stop the flow of data so that start() can resume without loss of data.
+ *
+ * This is not guaranteed to be synchronous but it currently is.
+ * An AAUDIO_SERVICE_EVENT_PAUSED will be sent to the client when complete.
+ */
+ aaudio_result_t pause() override;
+
+ /**
+ * Stop the flow of data after data in buffer has played.
+ */
+ aaudio_result_t stop() override;
+
+ /**
+ * Discard any data held by the underlying HAL or Service.
+ *
+ * This is not guaranteed to be synchronous but it currently is.
+ * An AAUDIO_SERVICE_EVENT_FLUSHED will be sent to the client when complete.
+ */
+ aaudio_result_t flush() override;
+
+ aaudio_result_t close() override;
+
+ android::FifoBuffer *getDataFifoBuffer() { return mAudioDataQueue->getFifoBuffer(); }
+
+ /* Keep a record of when a buffer transfer completed.
+ * This allows for a more accurate timing model.
+ */
+ void markTransferTime(int64_t nanoseconds);
+
+ void onStop();
+
+ void onDisconnect();
+
+protected:
+
+ aaudio_result_t getDownDataDescription(AudioEndpointParcelable &parcelable) override;
+
+ aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) override;
+
+private:
+ android::AAudioService &mAudioService;
+ AAudioServiceEndpoint *mServiceEndpoint = nullptr;
+ SharedRingBuffer *mAudioDataQueue = nullptr;
+
+ int64_t mMarkedPosition = 0;
+ int64_t mMarkedTime = 0;
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_AAUDIO_SERVICE_STREAM_SHARED_H
diff --git a/services/oboeservice/AAudioThread.cpp b/services/oboeservice/AAudioThread.cpp
new file mode 100644
index 0000000..ebb50f8
--- /dev/null
+++ b/services/oboeservice/AAudioThread.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <pthread.h>
+
+#include <aaudio/AAudio.h>
+#include <utility/AAudioUtilities.h>
+
+#include "AAudioThread.h"
+
+using namespace aaudio;
+
+
+AAudioThread::AAudioThread()
+ : mRunnable(nullptr)
+ , mHasThread(false) {
+ // mThread is a pthread_t of unknown size so we need memset().
+ memset(&mThread, 0, sizeof(mThread));
+}
+
+void AAudioThread::dispatch() {
+ if (mRunnable != nullptr) {
+ mRunnable->run();
+ } else {
+ run();
+ }
+}
+
+// This is the entry point for the new thread created by createThread().
+// It converts the 'C' function call to a C++ method call.
+static void * AAudioThread_internalThreadProc(void *arg) {
+ AAudioThread *aaudioThread = (AAudioThread *) arg;
+ aaudioThread->dispatch();
+ return nullptr;
+}
+
+aaudio_result_t AAudioThread::start(Runnable *runnable) {
+ if (mHasThread) {
+ ALOGE("AAudioThread::start() - mHasThread.load() already true");
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ // mRunnable will be read by the new thread when it starts.
+ // pthread_create() forces a memory synchronization so mRunnable does not need to be atomic.
+ mRunnable = runnable;
+ int err = pthread_create(&mThread, nullptr, AAudioThread_internalThreadProc, this);
+ if (err != 0) {
+ ALOGE("AAudioThread::start() - pthread_create() returned %d %s", err, strerror(err));
+ return AAudioConvert_androidToAAudioResult(-err);
+ } else {
+ mHasThread = true;
+ return AAUDIO_OK;
+ }
+}
+
+aaudio_result_t AAudioThread::stop() {
+ if (!mHasThread) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ int err = pthread_join(mThread, nullptr);
+ mHasThread = false;
+ if (err != 0) {
+ ALOGE("AAudioThread::stop() - pthread_join() returned %d %s", err, strerror(err));
+ return AAudioConvert_androidToAAudioResult(-err);
+ } else {
+ return AAUDIO_OK;
+ }
+}
+
diff --git a/services/oboeservice/AAudioThread.h b/services/oboeservice/AAudioThread.h
new file mode 100644
index 0000000..02f1459
--- /dev/null
+++ b/services/oboeservice/AAudioThread.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_THREAD_H
+#define AAUDIO_THREAD_H
+
+#include <atomic>
+#include <pthread.h>
+
+#include <aaudio/AAudio.h>
+
+namespace aaudio {
+
+/**
+ * Abstract class similar to Java Runnable.
+ */
+class Runnable {
+public:
+ Runnable() {};
+ virtual ~Runnable() = default;
+
+ virtual void run() = 0;
+};
+
+/**
+ * Abstraction for a host dependent thread.
+ * TODO Consider using Android "Thread" class or std::thread instead.
+ */
+class AAudioThread
+{
+public:
+ AAudioThread();
+ AAudioThread(Runnable *runnable);
+ virtual ~AAudioThread() = default;
+
+ /**
+ * Start the thread running.
+ */
+ aaudio_result_t start(Runnable *runnable = nullptr);
+
+ /**
+ * Join the thread.
+ * The caller must somehow tell the thread to exit before calling join().
+ */
+ aaudio_result_t stop();
+
+ /**
+ * This will get called in the thread.
+ * Override this or pass a Runnable to start().
+ */
+ virtual void run() {};
+
+ void dispatch(); // called internally from 'C' thread wrapper
+
+private:
+ Runnable *mRunnable;
+ bool mHasThread;
+ pthread_t mThread; // initialized in constructor
+
+};
+
+} /* namespace aaudio */
+
+#endif ///AAUDIO_THREAD_H
diff --git a/services/oboeservice/Android.mk b/services/oboeservice/Android.mk
new file mode 100644
index 0000000..b447725
--- /dev/null
+++ b/services/oboeservice/Android.mk
@@ -0,0 +1,57 @@
+LOCAL_PATH:= $(call my-dir)
+
+# AAudio Service
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := libaaudioservice
+LOCAL_MODULE_TAGS := optional
+
+LIBAAUDIO_DIR := ../../media/libaaudio
+LIBAAUDIO_SRC_DIR := $(LIBAAUDIO_DIR)/src
+
+LOCAL_C_INCLUDES := \
+ $(TOPDIR)frameworks/av/services/audioflinger \
+ $(call include-path-for, audio-utils) \
+ frameworks/native/include \
+ system/core/base/include \
+ $(TOP)/frameworks/native/media/libaaudio/include/include \
+ $(TOP)/frameworks/av/media/libaaudio/include \
+ $(TOP)/frameworks/av/media/utils/include \
+ frameworks/native/include \
+ $(TOP)/external/tinyalsa/include \
+ $(TOP)/frameworks/av/media/libaaudio/src
+
+LOCAL_SRC_FILES += \
+ $(LIBAAUDIO_SRC_DIR)/utility/HandleTracker.cpp \
+ SharedMemoryProxy.cpp \
+ SharedRingBuffer.cpp \
+ AAudioEndpointManager.cpp \
+ AAudioMixer.cpp \
+ AAudioService.cpp \
+ AAudioServiceEndpoint.cpp \
+ AAudioServiceEndpointCapture.cpp \
+ AAudioServiceEndpointPlay.cpp \
+ AAudioServiceStreamBase.cpp \
+ AAudioServiceStreamMMAP.cpp \
+ AAudioServiceStreamShared.cpp \
+ TimestampScheduler.cpp \
+ AAudioThread.cpp
+
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
+# LOCAL_CFLAGS += -fvisibility=hidden
+LOCAL_CFLAGS += -Wno-unused-parameter
+LOCAL_CFLAGS += -Wall -Werror
+
+LOCAL_SHARED_LIBRARIES := \
+ libaaudio \
+ libaudioflinger \
+ libbinder \
+ libcutils \
+ libmediautils \
+ libutils \
+ liblog
+
+include $(BUILD_SHARED_LIBRARY)
+
+
diff --git a/services/oboeservice/SharedMemoryProxy.cpp b/services/oboeservice/SharedMemoryProxy.cpp
new file mode 100644
index 0000000..c31557e
--- /dev/null
+++ b/services/oboeservice/SharedMemoryProxy.cpp
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <log/log.h>
+
+#include <aaudio/AAudio.h>
+#include "SharedMemoryProxy.h"
+
+using namespace aaudio;
+
+SharedMemoryProxy::~SharedMemoryProxy()
+{
+ if (mOriginalSharedMemory != nullptr) {
+ munmap(mOriginalSharedMemory, mSharedMemorySizeInBytes);
+ mOriginalSharedMemory = nullptr;
+ }
+ if (mProxySharedMemory != nullptr) {
+ munmap(mProxySharedMemory, mSharedMemorySizeInBytes);
+ close(mProxyFileDescriptor);
+ mProxySharedMemory = nullptr;
+ }
+}
+
+aaudio_result_t SharedMemoryProxy::open(int originalFD, int32_t capacityInBytes) {
+ mOriginalFileDescriptor = originalFD;
+ mSharedMemorySizeInBytes = capacityInBytes;
+
+ mProxyFileDescriptor = ashmem_create_region("AAudioProxyDataBuffer", mSharedMemorySizeInBytes);
+ if (mProxyFileDescriptor < 0) {
+ ALOGE("SharedMemoryProxy::open() ashmem_create_region() failed %d", errno);
+ return AAUDIO_ERROR_INTERNAL;
+ }
+ int err = ashmem_set_prot_region(mProxyFileDescriptor, PROT_READ|PROT_WRITE);
+ if (err < 0) {
+ ALOGE("SharedMemoryProxy::open() ashmem_set_prot_region() failed %d", errno);
+ close(mProxyFileDescriptor);
+ mProxyFileDescriptor = -1;
+ return AAUDIO_ERROR_INTERNAL; // TODO convert errno to a better AAUDIO_ERROR;
+ }
+
+ // Get original memory address.
+ mOriginalSharedMemory = (uint8_t *) mmap(0, mSharedMemorySizeInBytes,
+ PROT_READ|PROT_WRITE,
+ MAP_SHARED,
+ mOriginalFileDescriptor, 0);
+ if (mOriginalSharedMemory == MAP_FAILED) {
+ ALOGE("SharedMemoryProxy::open() original mmap(%d) failed %d (%s)",
+ mOriginalFileDescriptor, errno, strerror(errno));
+ return AAUDIO_ERROR_INTERNAL; // TODO convert errno to a better AAUDIO_ERROR;
+ }
+
+ // Map the fd to the same memory addresses.
+ mProxySharedMemory = (uint8_t *) mmap(mOriginalSharedMemory, mSharedMemorySizeInBytes,
+ PROT_READ|PROT_WRITE,
+ MAP_SHARED,
+ mProxyFileDescriptor, 0);
+ if (mProxySharedMemory != mOriginalSharedMemory) {
+ ALOGE("SharedMemoryProxy::open() proxy mmap(%d) failed %d", mProxyFileDescriptor, errno);
+ munmap(mOriginalSharedMemory, mSharedMemorySizeInBytes);
+ mOriginalSharedMemory = nullptr;
+ close(mProxyFileDescriptor);
+ mProxyFileDescriptor = -1;
+ return AAUDIO_ERROR_INTERNAL; // TODO convert errno to a better AAUDIO_ERROR;
+ }
+
+ return AAUDIO_OK;
+}
diff --git a/services/oboeservice/SharedMemoryProxy.h b/services/oboeservice/SharedMemoryProxy.h
new file mode 100644
index 0000000..89eeb4b
--- /dev/null
+++ b/services/oboeservice/SharedMemoryProxy.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_SHARED_MEMORY_PROXY_H
+#define AAUDIO_SHARED_MEMORY_PROXY_H
+
+#include <stdint.h>
+#include <cutils/ashmem.h>
+#include <sys/mman.h>
+
+#include <aaudio/AAudio.h>
+
+namespace aaudio {
+
+/**
+ * Proxy for sharing memory between two file descriptors.
+ */
+class SharedMemoryProxy {
+public:
+ SharedMemoryProxy() {}
+
+ ~SharedMemoryProxy();
+
+ aaudio_result_t open(int fd, int32_t capacityInBytes);
+
+ int getFileDescriptor() const {
+ return mProxyFileDescriptor;
+ }
+
+private:
+ int mOriginalFileDescriptor = -1;
+ int mProxyFileDescriptor = -1;
+ uint8_t *mOriginalSharedMemory = nullptr;
+ uint8_t *mProxySharedMemory = nullptr;
+ int32_t mSharedMemorySizeInBytes = 0;
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_SHARED_MEMORY_PROXY_H
diff --git a/services/oboeservice/SharedRingBuffer.cpp b/services/oboeservice/SharedRingBuffer.cpp
new file mode 100644
index 0000000..6b3fb4c
--- /dev/null
+++ b/services/oboeservice/SharedRingBuffer.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudioService"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <sys/mman.h>
+
+#include "binding/RingBufferParcelable.h"
+#include "binding/AudioEndpointParcelable.h"
+
+#include "SharedRingBuffer.h"
+
+using namespace android;
+using namespace aaudio;
+
+SharedRingBuffer::~SharedRingBuffer()
+{
+ if (mSharedMemory != nullptr) {
+ delete mFifoBuffer;
+ munmap(mSharedMemory, mSharedMemorySizeInBytes);
+ mSharedMemory = nullptr;
+ }
+ if (mFileDescriptor != -1) {
+ ALOGV("SharedRingBuffer: LEAK? close(mFileDescriptor = %d)\n", mFileDescriptor);
+ close(mFileDescriptor);
+ mFileDescriptor = -1;
+ }
+}
+
+aaudio_result_t SharedRingBuffer::allocate(fifo_frames_t bytesPerFrame,
+ fifo_frames_t capacityInFrames) {
+ mCapacityInFrames = capacityInFrames;
+
+ // Create shared memory large enough to hold the data and the read and write counters.
+ mDataMemorySizeInBytes = bytesPerFrame * capacityInFrames;
+ mSharedMemorySizeInBytes = mDataMemorySizeInBytes + (2 * (sizeof(fifo_counter_t)));
+ mFileDescriptor = ashmem_create_region("AAudioSharedRingBuffer", mSharedMemorySizeInBytes);
+ ALOGV("SharedRingBuffer::allocate() LEAK? mFileDescriptor = %d\n", mFileDescriptor);
+ if (mFileDescriptor < 0) {
+ ALOGE("SharedRingBuffer::allocate() ashmem_create_region() failed %d", errno);
+ return AAUDIO_ERROR_INTERNAL;
+ }
+
+ int err = ashmem_set_prot_region(mFileDescriptor, PROT_READ|PROT_WRITE); // TODO error handling?
+ if (err < 0) {
+ ALOGE("SharedRingBuffer::allocate() ashmem_set_prot_region() failed %d", errno);
+ close(mFileDescriptor);
+ return AAUDIO_ERROR_INTERNAL; // TODO convert errno to a better AAUDIO_ERROR;
+ }
+
+ // Map the fd to memory addresses.
+ mSharedMemory = (uint8_t *) mmap(0, mSharedMemorySizeInBytes,
+ PROT_READ|PROT_WRITE,
+ MAP_SHARED,
+ mFileDescriptor, 0);
+ if (mSharedMemory == MAP_FAILED) {
+ ALOGE("SharedRingBuffer::allocate() mmap() failed %d", errno);
+ close(mFileDescriptor);
+ return AAUDIO_ERROR_INTERNAL; // TODO convert errno to a better AAUDIO_ERROR;
+ }
+
+ // Get addresses for our counters and data from the shared memory.
+ fifo_counter_t *readCounterAddress =
+ (fifo_counter_t *) &mSharedMemory[SHARED_RINGBUFFER_READ_OFFSET];
+ fifo_counter_t *writeCounterAddress =
+ (fifo_counter_t *) &mSharedMemory[SHARED_RINGBUFFER_WRITE_OFFSET];
+ uint8_t *dataAddress = &mSharedMemory[SHARED_RINGBUFFER_DATA_OFFSET];
+
+ mFifoBuffer = new FifoBuffer(bytesPerFrame, capacityInFrames,
+ readCounterAddress, writeCounterAddress, dataAddress);
+ return AAUDIO_OK;
+}
+
+void SharedRingBuffer::fillParcelable(AudioEndpointParcelable &endpointParcelable,
+ RingBufferParcelable &ringBufferParcelable) {
+ int fdIndex = endpointParcelable.addFileDescriptor(mFileDescriptor, mSharedMemorySizeInBytes);
+ ringBufferParcelable.setupMemory(fdIndex,
+ SHARED_RINGBUFFER_DATA_OFFSET,
+ mDataMemorySizeInBytes,
+ SHARED_RINGBUFFER_READ_OFFSET,
+ SHARED_RINGBUFFER_WRITE_OFFSET,
+ sizeof(fifo_counter_t));
+ ringBufferParcelable.setBytesPerFrame(mFifoBuffer->getBytesPerFrame());
+ ringBufferParcelable.setFramesPerBurst(1);
+ ringBufferParcelable.setCapacityInFrames(mCapacityInFrames);
+}
diff --git a/services/oboeservice/SharedRingBuffer.h b/services/oboeservice/SharedRingBuffer.h
new file mode 100644
index 0000000..a2c3766
--- /dev/null
+++ b/services/oboeservice/SharedRingBuffer.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_SHARED_RINGBUFFER_H
+#define AAUDIO_SHARED_RINGBUFFER_H
+
+#include <stdint.h>
+#include <cutils/ashmem.h>
+#include <sys/mman.h>
+
+#include "fifo/FifoBuffer.h"
+#include "binding/RingBufferParcelable.h"
+#include "binding/AudioEndpointParcelable.h"
+
+namespace aaudio {
+
+// Determine the placement of the counters and data in shared memory.
+#define SHARED_RINGBUFFER_READ_OFFSET 0
+#define SHARED_RINGBUFFER_WRITE_OFFSET sizeof(fifo_counter_t)
+#define SHARED_RINGBUFFER_DATA_OFFSET (SHARED_RINGBUFFER_WRITE_OFFSET + sizeof(fifo_counter_t))
+
+/**
+ * Atomic FIFO that uses shared memory.
+ */
+class SharedRingBuffer {
+public:
+ SharedRingBuffer() {}
+
+ virtual ~SharedRingBuffer();
+
+ aaudio_result_t allocate(android::fifo_frames_t bytesPerFrame, android::fifo_frames_t capacityInFrames);
+
+ void fillParcelable(AudioEndpointParcelable &endpointParcelable,
+ RingBufferParcelable &ringBufferParcelable);
+
+ android::FifoBuffer * getFifoBuffer() {
+ return mFifoBuffer;
+ }
+
+private:
+ int mFileDescriptor = -1;
+ android::FifoBuffer *mFifoBuffer = nullptr;
+ uint8_t *mSharedMemory = nullptr;
+ int32_t mSharedMemorySizeInBytes = 0;
+ int32_t mDataMemorySizeInBytes = 0;
+ android::fifo_frames_t mCapacityInFrames = 0;
+};
+
+} /* namespace aaudio */
+
+#endif //AAUDIO_SHARED_RINGBUFFER_H
diff --git a/services/oboeservice/TimestampScheduler.cpp b/services/oboeservice/TimestampScheduler.cpp
new file mode 100644
index 0000000..d54996f
--- /dev/null
+++ b/services/oboeservice/TimestampScheduler.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// for random()
+#include <stdlib.h>
+
+#include "TimestampScheduler.h"
+
+using namespace aaudio;
+
+void TimestampScheduler::start(int64_t startTime) {
+ mStartTime = startTime;
+ mLastTime = startTime;
+}
+
+int64_t TimestampScheduler::nextAbsoluteTime() {
+ int64_t periodsElapsed = (mLastTime - mStartTime) / mBurstPeriod;
+ // This is an arbitrary schedule that could probably be improved.
+ // It starts out sending a timestamp on every period because we want to
+ // get an accurate picture when the stream starts. Then it slows down
+ // to the occasional timestamps needed to detect a slow drift.
+ int64_t minPeriodsToDelay = (periodsElapsed < 10) ? 1 :
+ (periodsElapsed < 100) ? 3 :
+ (periodsElapsed < 1000) ? 10 : 50;
+ int64_t sleepTime = minPeriodsToDelay * mBurstPeriod;
+ // Generate a random rectangular distribution one burst wide so that we get
+ // an uncorrelated sampling of the MMAP pointer.
+ sleepTime += (int64_t)(random() * mBurstPeriod / RAND_MAX);
+ mLastTime += sleepTime;
+ return mLastTime;
+}
diff --git a/services/oboeservice/TimestampScheduler.h b/services/oboeservice/TimestampScheduler.h
new file mode 100644
index 0000000..baa5c41
--- /dev/null
+++ b/services/oboeservice/TimestampScheduler.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_TIMESTAMP_SCHEDULER_H
+#define AAUDIO_TIMESTAMP_SCHEDULER_H
+
+#include <aaudio/AAudio.h>
+#include <utility/AudioClock.h>
+
+namespace aaudio {
+
+/**
+ * Schedule wakeup time for monitoring the position
+ * of an MMAP/NOIRQ buffer.
+ *
+ * Note that this object is not thread safe. Only call it from a single thread.
+ */
+class TimestampScheduler
+{
+public:
+ TimestampScheduler() {};
+ virtual ~TimestampScheduler() = default;
+
+ /**
+ * Start the schedule at the given time.
+ */
+ void start(int64_t startTime);
+
+ /**
+ * Calculate the next time that the read position should be measured.
+ */
+ int64_t nextAbsoluteTime();
+
+ void setBurstPeriod(int64_t burstPeriod) {
+ mBurstPeriod = burstPeriod;
+ }
+
+ void setBurstPeriod(int32_t framesPerBurst,
+ int32_t sampleRate) {
+ mBurstPeriod = AAUDIO_NANOS_PER_SECOND * framesPerBurst / sampleRate;
+ }
+
+ int64_t getBurstPeriod() {
+ return mBurstPeriod;
+ }
+
+private:
+ // Start with an arbitrary default so we do not divide by zero.
+ int64_t mBurstPeriod = AAUDIO_NANOS_PER_MILLISECOND;
+ int64_t mStartTime = 0;
+ int64_t mLastTime = 0;
+};
+
+} /* namespace aaudio */
+
+#endif /* AAUDIO_TIMESTAMP_SCHEDULER_H */
diff --git a/services/radio/Android.mk b/services/radio/Android.mk
index fc8f00c..1b50dc3 100644
--- a/services/radio/Android.mk
+++ b/services/radio/Android.mk
@@ -17,7 +17,7 @@
include $(CLEAR_VARS)
-LOCAL_SRC_FILES:= \
+LOCAL_SRC_FILES:= \
RadioService.cpp
LOCAL_SHARED_LIBRARIES:= \
@@ -30,6 +30,26 @@
libradio \
libradio_metadata
+ifeq ($(USE_LEGACY_LOCAL_AUDIO_HAL),true)
+# libhardware configuration
+LOCAL_SRC_FILES += \
+ RadioHalLegacy.cpp
+else
+# Treble configuration
+
+LOCAL_SRC_FILES += \
+ HidlUtils.cpp \
+ RadioHalHidl.cpp
+
+LOCAL_SHARED_LIBRARIES += \
+ libhwbinder \
+ libhidlbase \
+ libhidltransport \
+ libbase \
+ libaudiohal \
+ android.hardware.broadcastradio@1.0
+endif
+
LOCAL_CFLAGS += -Wall -Wextra -Werror
LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
diff --git a/services/radio/HidlUtils.cpp b/services/radio/HidlUtils.cpp
new file mode 100644
index 0000000..6895377
--- /dev/null
+++ b/services/radio/HidlUtils.cpp
@@ -0,0 +1,180 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_TAG "HidlUtils"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/misc.h>
+#include <system/radio_metadata.h>
+
+#include "HidlUtils.h"
+
+namespace android {
+
+using android::hardware::broadcastradio::V1_0::MetadataType;
+using android::hardware::broadcastradio::V1_0::Band;
+using android::hardware::broadcastradio::V1_0::Deemphasis;
+using android::hardware::broadcastradio::V1_0::Rds;
+
+//static
+int HidlUtils::convertHalResult(Result result)
+{
+ switch (result) {
+ case Result::OK:
+ return 0;
+ case Result::INVALID_ARGUMENTS:
+ return -EINVAL;
+ case Result::INVALID_STATE:
+ return -ENOSYS;
+ case Result::TIMEOUT:
+ return -ETIMEDOUT;
+ case Result::NOT_INITIALIZED:
+ default:
+ return -ENODEV;
+ }
+}
+
+
+//static
+void HidlUtils::convertBandConfigToHal(BandConfig *halConfig,
+ const radio_hal_band_config_t *config)
+{
+ halConfig->type = static_cast<Band>(config->type);
+ halConfig->antennaConnected = config->antenna_connected;
+ halConfig->lowerLimit = config->lower_limit;
+ halConfig->upperLimit = config->upper_limit;
+ halConfig->spacings.setToExternal(const_cast<unsigned int *>(&config->spacings[0]),
+ config->num_spacings * sizeof(uint32_t));
+ // FIXME: transfer buffer ownership. should have a method for that in hidl_vec
+ halConfig->spacings.resize(config->num_spacings);
+
+ if (halConfig->type == Band::FM) {
+ halConfig->ext.fm.deemphasis = static_cast<Deemphasis>(config->fm.deemphasis);
+ halConfig->ext.fm.stereo = config->fm.stereo;
+ halConfig->ext.fm.rds = static_cast<Rds>(config->fm.rds);
+ halConfig->ext.fm.ta = config->fm.ta;
+ halConfig->ext.fm.af = config->fm.af;
+ halConfig->ext.fm.ea = config->fm.ea;
+ } else {
+ halConfig->ext.am.stereo = config->am.stereo;
+ }
+}
+
+//static
+void HidlUtils::convertPropertiesFromHal(radio_hal_properties_t *properties,
+ const Properties *halProperties)
+{
+ properties->class_id = static_cast<radio_class_t>(halProperties->classId);
+ strlcpy(properties->implementor, halProperties->implementor.c_str(), RADIO_STRING_LEN_MAX);
+ strlcpy(properties->product, halProperties->product.c_str(), RADIO_STRING_LEN_MAX);
+ strlcpy(properties->version, halProperties->version.c_str(), RADIO_STRING_LEN_MAX);
+ strlcpy(properties->serial, halProperties->serial.c_str(), RADIO_STRING_LEN_MAX);
+ properties->num_tuners = halProperties->numTuners;
+ properties->num_audio_sources = halProperties->numAudioSources;
+ properties->supports_capture = halProperties->supportsCapture;
+ properties->num_bands = halProperties->bands.size();
+
+ for (size_t i = 0; i < halProperties->bands.size(); i++) {
+ convertBandConfigFromHal(&properties->bands[i], &halProperties->bands[i]);
+ }
+}
+
+//static
+void HidlUtils::convertBandConfigFromHal(radio_hal_band_config_t *config,
+ const BandConfig *halConfig)
+{
+ config->type = static_cast<radio_band_t>(halConfig->type);
+ config->antenna_connected = halConfig->antennaConnected;
+ config->lower_limit = halConfig->lowerLimit;
+ config->upper_limit = halConfig->upperLimit;
+ config->num_spacings = halConfig->spacings.size();
+ if (config->num_spacings > RADIO_NUM_SPACINGS_MAX) {
+ config->num_spacings = RADIO_NUM_SPACINGS_MAX;
+ }
+ memcpy(config->spacings, halConfig->spacings.data(),
+ sizeof(uint32_t) * config->num_spacings);
+
+ if (halConfig->type == Band::FM) {
+ config->fm.deemphasis = static_cast<radio_deemphasis_t>(halConfig->ext.fm.deemphasis);
+ config->fm.stereo = halConfig->ext.fm.stereo;
+ config->fm.rds = static_cast<radio_rds_t>(halConfig->ext.fm.rds);
+ config->fm.ta = halConfig->ext.fm.ta;
+ config->fm.af = halConfig->ext.fm.af;
+ config->fm.ea = halConfig->ext.fm.ea;
+ } else {
+ config->am.stereo = halConfig->ext.am.stereo;
+ }
+}
+
+
+//static
+void HidlUtils::convertProgramInfoFromHal(radio_program_info_t *info,
+ const ProgramInfo *halInfo)
+{
+ info->channel = halInfo->channel;
+ info->sub_channel = halInfo->subChannel;
+ info->tuned = halInfo->tuned;
+ info->stereo = halInfo->stereo;
+ info->digital = halInfo->digital;
+ info->signal_strength = halInfo->signalStrength;
+ convertMetaDataFromHal(&info->metadata, halInfo->metadata,
+ halInfo->channel, halInfo->subChannel);
+}
+
+// TODO(twasilczyk): drop unnecessary channel info
+//static
+void HidlUtils::convertMetaDataFromHal(radio_metadata_t **metadata,
+ const hidl_vec<MetaData>& halMetadata,
+ uint32_t channel __unused,
+ uint32_t subChannel __unused)
+{
+
+ if (metadata == nullptr || *metadata == nullptr) {
+ ALOGE("destination metadata buffer is a nullptr");
+ return;
+ }
+ for (size_t i = 0; i < halMetadata.size(); i++) {
+ radio_metadata_key_t key = static_cast<radio_metadata_key_t>(halMetadata[i].key);
+        radio_metadata_type_t type = static_cast<radio_metadata_type_t>(halMetadata[i].type);
+ radio_metadata_clock_t clock;
+
+ switch (type) {
+ case RADIO_METADATA_TYPE_INT:
+ radio_metadata_add_int(metadata, key, halMetadata[i].intValue);
+ break;
+ case RADIO_METADATA_TYPE_TEXT:
+ radio_metadata_add_text(metadata, key, halMetadata[i].stringValue.c_str());
+ break;
+ case RADIO_METADATA_TYPE_RAW:
+ radio_metadata_add_raw(metadata, key,
+ halMetadata[i].rawValue.data(),
+ halMetadata[i].rawValue.size());
+ break;
+ case RADIO_METADATA_TYPE_CLOCK:
+ clock.utc_seconds_since_epoch =
+ halMetadata[i].clockValue.utcSecondsSinceEpoch;
+ clock.timezone_offset_in_minutes =
+ halMetadata[i].clockValue.timezoneOffsetInMinutes;
+ radio_metadata_add_clock(metadata, key, &clock);
+ break;
+ default:
+ ALOGW("%s invalid metadata type %u",__FUNCTION__, halMetadata[i].type);
+ break;
+ }
+ }
+}
+
+} // namespace android
diff --git a/services/radio/HidlUtils.h b/services/radio/HidlUtils.h
new file mode 100644
index 0000000..c771060
--- /dev/null
+++ b/services/radio/HidlUtils.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ANDROID_HARDWARE_RADIO_HAL_HIDL_UTILS_H
+#define ANDROID_HARDWARE_RADIO_HAL_HIDL_UTILS_H
+
+#include <android/hardware/broadcastradio/1.0/types.h>
+#include <hardware/radio.h>
+
+namespace android {
+
+using android::hardware::hidl_vec;
+using android::hardware::broadcastradio::V1_0::Result;
+using android::hardware::broadcastradio::V1_0::Properties;
+using android::hardware::broadcastradio::V1_0::BandConfig;
+using android::hardware::broadcastradio::V1_0::ProgramInfo;
+using android::hardware::broadcastradio::V1_0::MetaData;
+
+class HidlUtils {
+public:
+ static int convertHalResult(Result result);
+ static void convertBandConfigFromHal(radio_hal_band_config_t *config,
+ const BandConfig *halConfig);
+ static void convertPropertiesFromHal(radio_hal_properties_t *properties,
+ const Properties *halProperties);
+ static void convertBandConfigToHal(BandConfig *halConfig,
+ const radio_hal_band_config_t *config);
+ static void convertProgramInfoFromHal(radio_program_info_t *info,
+ const ProgramInfo *halInfo);
+ static void convertMetaDataFromHal(radio_metadata_t **metadata,
+ const hidl_vec<MetaData>& halMetadata,
+ uint32_t channel,
+ uint32_t subChannel);
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_RADIO_HAL_HIDL_UTILS_H
diff --git a/services/radio/RadioHalHidl.cpp b/services/radio/RadioHalHidl.cpp
new file mode 100644
index 0000000..f637275
--- /dev/null
+++ b/services/radio/RadioHalHidl.cpp
@@ -0,0 +1,382 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "RadioHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <media/audiohal/hidl/HalDeathHandler.h>
+#include <utils/Log.h>
+#include <utils/misc.h>
+#include <system/RadioMetadataWrapper.h>
+#include <android/hardware/broadcastradio/1.0/IBroadcastRadioFactory.h>
+
+#include "RadioHalHidl.h"
+#include "HidlUtils.h"
+
+namespace android {
+
+using android::hardware::broadcastradio::V1_0::IBroadcastRadioFactory;
+using android::hardware::broadcastradio::V1_0::Class;
+using android::hardware::broadcastradio::V1_0::Direction;
+using android::hardware::broadcastradio::V1_0::Properties;
+
+
+/* static */
+sp<RadioInterface> RadioInterface::connectModule(radio_class_t classId)
+{
+ return new RadioHalHidl(classId);
+}
+
+int RadioHalHidl::getProperties(radio_hal_properties_t *properties)
+{
+ ALOGV("%s IN", __FUNCTION__);
+ sp<IBroadcastRadio> module = getService();
+ if (module == 0) {
+ return -ENODEV;
+ }
+ Properties halProperties;
+ Result halResult = Result::NOT_INITIALIZED;
+ Return<void> hidlReturn =
+ module->getProperties([&](Result result, const Properties& properties) {
+ halResult = result;
+ if (result == Result::OK) {
+ halProperties = properties;
+ }
+ });
+
+ if (halResult == Result::OK) {
+ HidlUtils::convertPropertiesFromHal(properties, &halProperties);
+ }
+ return HidlUtils::convertHalResult(halResult);
+}
+
+int RadioHalHidl::openTuner(const radio_hal_band_config_t *config,
+ bool audio,
+ sp<TunerCallbackInterface> callback,
+ sp<TunerInterface>& tuner)
+{
+ sp<IBroadcastRadio> module = getService();
+ if (module == 0) {
+ return -ENODEV;
+ }
+ sp<Tuner> tunerImpl = new Tuner(callback, this);
+
+ BandConfig halConfig;
+ Result halResult = Result::NOT_INITIALIZED;
+ sp<ITuner> halTuner;
+
+ HidlUtils::convertBandConfigToHal(&halConfig, config);
+ Return<void> hidlReturn =
+ module->openTuner(halConfig, audio, tunerImpl,
+ [&](Result result, const sp<ITuner>& tuner) {
+ halResult = result;
+ if (result == Result::OK) {
+ halTuner = tuner;
+ }
+ });
+
+ if (halResult == Result::OK) {
+ tunerImpl->setHalTuner(halTuner);
+ tuner = tunerImpl;
+ }
+
+ return HidlUtils::convertHalResult(halResult);
+}
+
+int RadioHalHidl::closeTuner(sp<TunerInterface>& tuner)
+{
+ sp<Tuner> tunerImpl = static_cast<Tuner *>(tuner.get());
+ sp<ITuner> clearTuner;
+ tunerImpl->setHalTuner(clearTuner);
+ return 0;
+}
+
+RadioHalHidl::RadioHalHidl(radio_class_t classId)
+ : mClassId(classId)
+{
+}
+
+RadioHalHidl::~RadioHalHidl()
+{
+}
+
+sp<IBroadcastRadio> RadioHalHidl::getService()
+{
+ if (mHalModule == 0) {
+ sp<IBroadcastRadioFactory> factory = IBroadcastRadioFactory::getService();
+ if (factory != 0) {
+ factory->connectModule(static_cast<Class>(mClassId),
+ [&](Result retval, const ::android::sp<IBroadcastRadio>& result) {
+ if (retval == Result::OK) {
+ mHalModule = result;
+ }
+ });
+ }
+ }
+ ALOGV("%s OUT module %p", __FUNCTION__, mHalModule.get());
+ return mHalModule;
+}
+
+void RadioHalHidl::clearService()
+{
+ ALOGV("%s IN module %p", __FUNCTION__, mHalModule.get());
+ mHalModule.clear();
+}
+
+
+int RadioHalHidl::Tuner::setConfiguration(const radio_hal_band_config_t *config)
+{
+ ALOGV("%s IN mHalTuner %p", __FUNCTION__, mHalTuner.get());
+
+ if (mHalTuner == 0) {
+ return -ENODEV;
+ }
+ BandConfig halConfig;
+ HidlUtils::convertBandConfigToHal(&halConfig, config);
+
+ Return<Result> hidlResult = mHalTuner->setConfiguration(halConfig);
+ return HidlUtils::convertHalResult(hidlResult);
+}
+
+int RadioHalHidl::Tuner::getConfiguration(radio_hal_band_config_t *config)
+{
+ ALOGV("%s IN mHalTuner %p", __FUNCTION__, mHalTuner.get());
+ if (mHalTuner == 0) {
+ return -ENODEV;
+ }
+ BandConfig halConfig;
+    Result halResult = Result::NOT_INITIALIZED;
+ Return<void> hidlReturn =
+ mHalTuner->getConfiguration([&](Result result, const BandConfig& config) {
+ halResult = result;
+ if (result == Result::OK) {
+ halConfig = config;
+ }
+ });
+ if (hidlReturn.isOk() && halResult == Result::OK) {
+ HidlUtils::convertBandConfigFromHal(config, &halConfig);
+ }
+ return HidlUtils::convertHalResult(halResult);
+}
+
+int RadioHalHidl::Tuner::scan(radio_direction_t direction, bool skip_sub_channel)
+{
+ ALOGV("%s IN mHalTuner %p", __FUNCTION__, mHalTuner.get());
+ if (mHalTuner == 0) {
+ return -ENODEV;
+ }
+ Return<Result> hidlResult =
+ mHalTuner->scan(static_cast<Direction>(direction), skip_sub_channel);
+ return HidlUtils::convertHalResult(hidlResult);
+}
+
+int RadioHalHidl::Tuner::step(radio_direction_t direction, bool skip_sub_channel)
+{
+ ALOGV("%s IN mHalTuner %p", __FUNCTION__, mHalTuner.get());
+ if (mHalTuner == 0) {
+ return -ENODEV;
+ }
+ Return<Result> hidlResult =
+ mHalTuner->step(static_cast<Direction>(direction), skip_sub_channel);
+ return HidlUtils::convertHalResult(hidlResult);
+}
+
+int RadioHalHidl::Tuner::tune(unsigned int channel, unsigned int sub_channel)
+{
+ ALOGV("%s IN mHalTuner %p", __FUNCTION__, mHalTuner.get());
+ if (mHalTuner == 0) {
+ return -ENODEV;
+ }
+ Return<Result> hidlResult =
+ mHalTuner->tune(channel, sub_channel);
+ return HidlUtils::convertHalResult(hidlResult);
+}
+
+int RadioHalHidl::Tuner::cancel()
+{
+ ALOGV("%s IN mHalTuner %p", __FUNCTION__, mHalTuner.get());
+ if (mHalTuner == 0) {
+ return -ENODEV;
+ }
+ Return<Result> hidlResult = mHalTuner->cancel();
+ return HidlUtils::convertHalResult(hidlResult);
+}
+
+int RadioHalHidl::Tuner::getProgramInformation(radio_program_info_t *info)
+{
+ ALOGV("%s IN mHalTuner %p", __FUNCTION__, mHalTuner.get());
+ if (mHalTuner == 0) {
+ return -ENODEV;
+ }
+ if (info == nullptr || info->metadata == nullptr) {
+ return BAD_VALUE;
+ }
+ ProgramInfo halInfo;
+ Result halResult;
+ Return<void> hidlReturn = mHalTuner->getProgramInformation(
+ [&](Result result, const ProgramInfo& info) {
+ halResult = result;
+ if (result == Result::OK) {
+ halInfo = info;
+ }
+ });
+ if (hidlReturn.isOk() && halResult == Result::OK) {
+ HidlUtils::convertProgramInfoFromHal(info, &halInfo);
+ }
+ return HidlUtils::convertHalResult(halResult);
+}
+
+Return<void> RadioHalHidl::Tuner::hardwareFailure()
+{
+ ALOGV("%s IN", __FUNCTION__);
+ handleHwFailure();
+ return Return<void>();
+}
+
+Return<void> RadioHalHidl::Tuner::configChange(Result result, const BandConfig& config)
+{
+ ALOGV("%s IN", __FUNCTION__);
+ radio_hal_event_t event;
+ memset(&event, 0, sizeof(radio_hal_event_t));
+ event.type = RADIO_EVENT_CONFIG;
+ event.status = HidlUtils::convertHalResult(result);
+ HidlUtils::convertBandConfigFromHal(&event.config, &config);
+ onCallback(&event);
+ return Return<void>();
+}
+
+Return<void> RadioHalHidl::Tuner::tuneComplete(Result result, const ProgramInfo& info)
+{
+ ALOGV("%s IN", __FUNCTION__);
+ radio_hal_event_t event = {};
+ RadioMetadataWrapper metadataWrapper(&event.info.metadata);
+
+ event.type = RADIO_EVENT_TUNED;
+ event.status = HidlUtils::convertHalResult(result);
+ HidlUtils::convertProgramInfoFromHal(&event.info, &info);
+ onCallback(&event);
+ return Return<void>();
+}
+
+Return<void> RadioHalHidl::Tuner::afSwitch(const ProgramInfo& info)
+{
+ ALOGV("%s IN", __FUNCTION__);
+ radio_hal_event_t event = {};
+ RadioMetadataWrapper metadataWrapper(&event.info.metadata);
+
+ event.type = RADIO_EVENT_AF_SWITCH;
+ HidlUtils::convertProgramInfoFromHal(&event.info, &info);
+ onCallback(&event);
+ return Return<void>();
+}
+
+Return<void> RadioHalHidl::Tuner::antennaStateChange(bool connected)
+{
+ ALOGV("%s IN", __FUNCTION__);
+ radio_hal_event_t event;
+ memset(&event, 0, sizeof(radio_hal_event_t));
+ event.type = RADIO_EVENT_ANTENNA;
+ event.on = connected;
+ onCallback(&event);
+ return Return<void>();
+}
+Return<void> RadioHalHidl::Tuner::trafficAnnouncement(bool active)
+{
+ ALOGV("%s IN", __FUNCTION__);
+ radio_hal_event_t event;
+ memset(&event, 0, sizeof(radio_hal_event_t));
+ event.type = RADIO_EVENT_TA;
+ event.on = active;
+ onCallback(&event);
+ return Return<void>();
+}
+Return<void> RadioHalHidl::Tuner::emergencyAnnouncement(bool active)
+{
+ ALOGV("%s IN", __FUNCTION__);
+ radio_hal_event_t event;
+ memset(&event, 0, sizeof(radio_hal_event_t));
+ event.type = RADIO_EVENT_EA;
+ event.on = active;
+ onCallback(&event);
+ return Return<void>();
+}
+Return<void> RadioHalHidl::Tuner::newMetadata(uint32_t channel, uint32_t subChannel,
+ const ::android::hardware::hidl_vec<MetaData>& metadata)
+{
+ ALOGV("%s IN", __FUNCTION__);
+ radio_hal_event_t event = {};
+ RadioMetadataWrapper metadataWrapper(&event.metadata);
+
+ event.type = RADIO_EVENT_METADATA;
+ HidlUtils::convertMetaDataFromHal(&event.metadata, metadata, channel, subChannel);
+ onCallback(&event);
+ return Return<void>();
+}
+
+
+RadioHalHidl::Tuner::Tuner(sp<TunerCallbackInterface> callback, sp<RadioHalHidl> module)
+ : TunerInterface(), mHalTuner(NULL), mCallback(callback), mParentModule(module)
+{
+ // Make sure the handler we are passing in only deals with const members,
+ // as it can be called on an arbitrary thread.
+ const auto& self = this;
+ HalDeathHandler::getInstance()->registerAtExitHandler(
+ this, [&self]() { self->sendHwFailureEvent(); });
+}
+
+
+RadioHalHidl::Tuner::~Tuner()
+{
+ HalDeathHandler::getInstance()->unregisterAtExitHandler(this);
+}
+
+void RadioHalHidl::Tuner::setHalTuner(sp<ITuner>& halTuner) {
+ if (mHalTuner != 0) {
+ mHalTuner->unlinkToDeath(HalDeathHandler::getInstance());
+ }
+ mHalTuner = halTuner;
+ if (mHalTuner != 0) {
+ mHalTuner->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
+ }
+}
+
+void RadioHalHidl::Tuner::handleHwFailure()
+{
+ ALOGV("%s IN", __FUNCTION__);
+ sp<RadioHalHidl> parentModule = mParentModule.promote();
+ if (parentModule != 0) {
+ parentModule->clearService();
+ }
+ sendHwFailureEvent();
+ mHalTuner.clear();
+}
+
+void RadioHalHidl::Tuner::sendHwFailureEvent() const
+{
+ radio_hal_event_t event;
+ memset(&event, 0, sizeof(radio_hal_event_t));
+ event.type = RADIO_EVENT_HW_FAILURE;
+ onCallback(&event);
+}
+
+void RadioHalHidl::Tuner::onCallback(radio_hal_event_t *halEvent) const
+{
+ if (mCallback != 0) {
+ mCallback->onEvent(halEvent);
+ }
+}
+
+} // namespace android
diff --git a/services/radio/RadioHalHidl.h b/services/radio/RadioHalHidl.h
new file mode 100644
index 0000000..f98420d
--- /dev/null
+++ b/services/radio/RadioHalHidl.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_RADIO_HAL_HIDL_H
+#define ANDROID_HARDWARE_RADIO_HAL_HIDL_H
+
+#include <utils/RefBase.h>
+#include <utils/threads.h>
+#include "RadioInterface.h"
+#include "TunerInterface.h"
+#include "TunerCallbackInterface.h"
+#include <android/hardware/broadcastradio/1.0/types.h>
+#include <android/hardware/broadcastradio/1.0/IBroadcastRadio.h>
+#include <android/hardware/broadcastradio/1.0/ITuner.h>
+#include <android/hardware/broadcastradio/1.0/ITunerCallback.h>
+
+namespace android {
+
+using android::hardware::Return;
+using android::hardware::broadcastradio::V1_0::Result;
+using android::hardware::broadcastradio::V1_0::IBroadcastRadio;
+using android::hardware::broadcastradio::V1_0::ITuner;
+using android::hardware::broadcastradio::V1_0::ITunerCallback;
+
+using android::hardware::broadcastradio::V1_0::BandConfig;
+using android::hardware::broadcastradio::V1_0::ProgramInfo;
+using android::hardware::broadcastradio::V1_0::MetaData;
+
+class RadioHalHidl : public RadioInterface
+{
+public:
+ RadioHalHidl(radio_class_t classId);
+
+ // RadioInterface
+ virtual int getProperties(radio_hal_properties_t *properties);
+ virtual int openTuner(const radio_hal_band_config_t *config,
+ bool audio,
+ sp<TunerCallbackInterface> callback,
+ sp<TunerInterface>& tuner);
+ virtual int closeTuner(sp<TunerInterface>& tuner);
+
+ class Tuner : public TunerInterface, public virtual ITunerCallback
+ {
+ public:
+ Tuner(sp<TunerCallbackInterface> callback, sp<RadioHalHidl> module);
+
+ // TunerInterface
+ virtual int setConfiguration(const radio_hal_band_config_t *config);
+ virtual int getConfiguration(radio_hal_band_config_t *config);
+ virtual int scan(radio_direction_t direction, bool skip_sub_channel);
+ virtual int step(radio_direction_t direction, bool skip_sub_channel);
+ virtual int tune(unsigned int channel, unsigned int sub_channel);
+ virtual int cancel();
+ virtual int getProgramInformation(radio_program_info_t *info);
+
+ // ITunerCallback
+ virtual Return<void> hardwareFailure();
+ virtual Return<void> configChange(Result result, const BandConfig& config);
+ virtual Return<void> tuneComplete(Result result, const ProgramInfo& info);
+ virtual Return<void> afSwitch(const ProgramInfo& info);
+ virtual Return<void> antennaStateChange(bool connected);
+ virtual Return<void> trafficAnnouncement(bool active);
+ virtual Return<void> emergencyAnnouncement(bool active);
+ virtual Return<void> newMetadata(uint32_t channel, uint32_t subChannel,
+ const ::android::hardware::hidl_vec<MetaData>& metadata);
+
+ void setHalTuner(sp<ITuner>& halTuner);
+ sp<ITuner> getHalTuner() { return mHalTuner; }
+
+ private:
+ virtual ~Tuner();
+
+ void onCallback(radio_hal_event_t *halEvent) const;
+ void handleHwFailure();
+ void sendHwFailureEvent() const;
+
+ sp<ITuner> mHalTuner;
+ const sp<TunerCallbackInterface> mCallback;
+ wp<RadioHalHidl> mParentModule;
+ };
+
+ sp<IBroadcastRadio> getService();
+ void clearService();
+
+private:
+ virtual ~RadioHalHidl();
+
+ radio_class_t mClassId;
+ sp<IBroadcastRadio> mHalModule;
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_RADIO_HAL_HIDL_H
diff --git a/services/radio/RadioHalLegacy.cpp b/services/radio/RadioHalLegacy.cpp
new file mode 100644
index 0000000..d50ccd4
--- /dev/null
+++ b/services/radio/RadioHalLegacy.cpp
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "RadioHalLegacy"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/misc.h>
+#include "RadioHalLegacy.h"
+
+namespace android {
+
+const char *RadioHalLegacy::sClassModuleNames[] = {
+ RADIO_HARDWARE_MODULE_ID_FM, /* corresponds to RADIO_CLASS_AM_FM */
+ RADIO_HARDWARE_MODULE_ID_SAT, /* corresponds to RADIO_CLASS_SAT */
+ RADIO_HARDWARE_MODULE_ID_DT, /* corresponds to RADIO_CLASS_DT */
+};
+
+/* static */
+sp<RadioInterface> RadioInterface::connectModule(radio_class_t classId)
+{
+ return new RadioHalLegacy(classId);
+}
+
+RadioHalLegacy::RadioHalLegacy(radio_class_t classId)
+ : RadioInterface(), mClassId(classId), mHwDevice(NULL)
+{
+}
+
+void RadioHalLegacy::onFirstRef()
+{
+ const hw_module_t *mod;
+ int rc;
+ ALOGI("%s mClassId %d", __FUNCTION__, mClassId);
+
+ mHwDevice = NULL;
+
+ if ((mClassId < 0) ||
+ (mClassId >= NELEM(sClassModuleNames))) {
+ ALOGE("invalid class ID %d", mClassId);
+ return;
+ }
+
+ ALOGI("%s RADIO_HARDWARE_MODULE_ID %s %s",
+ __FUNCTION__, RADIO_HARDWARE_MODULE_ID, sClassModuleNames[mClassId]);
+
+ rc = hw_get_module_by_class(RADIO_HARDWARE_MODULE_ID, sClassModuleNames[mClassId], &mod);
+ if (rc != 0) {
+ ALOGE("couldn't load radio module %s.%s (%s)",
+ RADIO_HARDWARE_MODULE_ID, sClassModuleNames[mClassId], strerror(-rc));
+ return;
+ }
+ rc = radio_hw_device_open(mod, &mHwDevice);
+ if (rc != 0) {
+ ALOGE("couldn't open radio hw device in %s.%s (%s)",
+ RADIO_HARDWARE_MODULE_ID, "primary", strerror(-rc));
+ mHwDevice = NULL;
+ return;
+ }
+ if (mHwDevice->common.version != RADIO_DEVICE_API_VERSION_CURRENT) {
+ ALOGE("wrong radio hw device version %04x", mHwDevice->common.version);
+ radio_hw_device_close(mHwDevice);
+ mHwDevice = NULL;
+ }
+}
+
+RadioHalLegacy::~RadioHalLegacy()
+{
+ if (mHwDevice != NULL) {
+ radio_hw_device_close(mHwDevice);
+ }
+}
+
+int RadioHalLegacy::getProperties(radio_hal_properties_t *properties)
+{
+ if (mHwDevice == NULL) {
+ return -ENODEV;
+ }
+
+ int rc = mHwDevice->get_properties(mHwDevice, properties);
+ if (rc != 0) {
+ ALOGE("could not read implementation properties");
+ }
+
+ return rc;
+}
+
+int RadioHalLegacy::openTuner(const radio_hal_band_config_t *config,
+ bool audio,
+ sp<TunerCallbackInterface> callback,
+ sp<TunerInterface>& tuner)
+{
+ if (mHwDevice == NULL) {
+ return -ENODEV;
+ }
+ sp<Tuner> tunerImpl = new Tuner(callback);
+
+ const struct radio_tuner *halTuner;
+ int rc = mHwDevice->open_tuner(mHwDevice, config, audio,
+ RadioHalLegacy::Tuner::callback, tunerImpl.get(),
+ &halTuner);
+ if (rc == 0) {
+ tunerImpl->setHalTuner(halTuner);
+ tuner = tunerImpl;
+ }
+ return rc;
+}
+
+int RadioHalLegacy::closeTuner(sp<TunerInterface>& tuner)
+{
+ if (mHwDevice == NULL) {
+ return -ENODEV;
+ }
+ if (tuner == 0) {
+ return -EINVAL;
+ }
+ sp<Tuner> tunerImpl = (Tuner *)tuner.get();
+ return mHwDevice->close_tuner(mHwDevice, tunerImpl->getHalTuner());
+}
+
+int RadioHalLegacy::Tuner::setConfiguration(const radio_hal_band_config_t *config)
+{
+ if (mHalTuner == NULL) {
+ return -ENODEV;
+ }
+ return mHalTuner->set_configuration(mHalTuner, config);
+}
+
+int RadioHalLegacy::Tuner::getConfiguration(radio_hal_band_config_t *config)
+{
+ if (mHalTuner == NULL) {
+ return -ENODEV;
+ }
+ return mHalTuner->get_configuration(mHalTuner, config);
+}
+
+int RadioHalLegacy::Tuner::scan(radio_direction_t direction, bool skip_sub_channel)
+{
+ if (mHalTuner == NULL) {
+ return -ENODEV;
+ }
+ return mHalTuner->scan(mHalTuner, direction, skip_sub_channel);
+}
+
+int RadioHalLegacy::Tuner::step(radio_direction_t direction, bool skip_sub_channel)
+{
+ if (mHalTuner == NULL) {
+ return -ENODEV;
+ }
+ return mHalTuner->step(mHalTuner, direction, skip_sub_channel);
+}
+
+int RadioHalLegacy::Tuner::tune(unsigned int channel, unsigned int sub_channel)
+{
+ if (mHalTuner == NULL) {
+ return -ENODEV;
+ }
+ return mHalTuner->tune(mHalTuner, channel, sub_channel);
+}
+
+int RadioHalLegacy::Tuner::cancel()
+{
+ if (mHalTuner == NULL) {
+ return -ENODEV;
+ }
+ return mHalTuner->cancel(mHalTuner);
+}
+
+int RadioHalLegacy::Tuner::getProgramInformation(radio_program_info_t *info)
+{
+ if (mHalTuner == NULL) {
+ return -ENODEV;
+ }
+ return mHalTuner->get_program_information(mHalTuner, info);
+}
+
+void RadioHalLegacy::Tuner::onCallback(radio_hal_event_t *halEvent)
+{
+ if (mCallback != 0) {
+ mCallback->onEvent(halEvent);
+ }
+}
+
+//static
+void RadioHalLegacy::Tuner::callback(radio_hal_event_t *halEvent, void *cookie)
+{
+ wp<RadioHalLegacy::Tuner> weak = wp<RadioHalLegacy::Tuner>((RadioHalLegacy::Tuner *)cookie);
+ sp<RadioHalLegacy::Tuner> tuner = weak.promote();
+ if (tuner != 0) {
+ tuner->onCallback(halEvent);
+ }
+}
+
+RadioHalLegacy::Tuner::Tuner(sp<TunerCallbackInterface> callback)
+ : TunerInterface(), mHalTuner(NULL), mCallback(callback)
+{
+}
+
+
+RadioHalLegacy::Tuner::~Tuner()
+{
+}
+
+
+} // namespace android
diff --git a/services/radio/RadioHalLegacy.h b/services/radio/RadioHalLegacy.h
new file mode 100644
index 0000000..7d4831b
--- /dev/null
+++ b/services/radio/RadioHalLegacy.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_RADIO_HAL_LEGACY_H
+#define ANDROID_HARDWARE_RADIO_HAL_LEGACY_H
+
+#include <utils/RefBase.h>
+#include <hardware/radio.h>
+#include "RadioInterface.h"
+#include "TunerInterface.h"
+#include "TunerCallbackInterface.h"
+
+namespace android {
+
+class RadioHalLegacy : public RadioInterface
+{
+public:
+ RadioHalLegacy(radio_class_t classId);
+
+ // RadioInterface
+ virtual int getProperties(radio_hal_properties_t *properties);
+ virtual int openTuner(const radio_hal_band_config_t *config,
+ bool audio,
+ sp<TunerCallbackInterface> callback,
+ sp<TunerInterface>& tuner);
+ virtual int closeTuner(sp<TunerInterface>& tuner);
+
+ // RefBase
+ virtual void onFirstRef();
+
+ class Tuner : public TunerInterface
+ {
+ public:
+ Tuner(sp<TunerCallbackInterface> callback);
+
+ virtual int setConfiguration(const radio_hal_band_config_t *config);
+ virtual int getConfiguration(radio_hal_band_config_t *config);
+ virtual int scan(radio_direction_t direction, bool skip_sub_channel);
+ virtual int step(radio_direction_t direction, bool skip_sub_channel);
+ virtual int tune(unsigned int channel, unsigned int sub_channel);
+ virtual int cancel();
+ virtual int getProgramInformation(radio_program_info_t *info);
+
+ static void callback(radio_hal_event_t *halEvent, void *cookie);
+ void onCallback(radio_hal_event_t *halEvent);
+
+ void setHalTuner(const struct radio_tuner *halTuner) { mHalTuner = halTuner; }
+ const struct radio_tuner *getHalTuner() { return mHalTuner; }
+
+ private:
+ virtual ~Tuner();
+
+ const struct radio_tuner *mHalTuner;
+ sp<TunerCallbackInterface> mCallback;
+ };
+
+protected:
+ virtual ~RadioHalLegacy();
+
+private:
+ static const char * sClassModuleNames[];
+
+ radio_class_t mClassId;
+ struct radio_hw_device *mHwDevice;
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_RADIO_HAL_LEGACY_H
diff --git a/services/radio/RadioInterface.h b/services/radio/RadioInterface.h
new file mode 100644
index 0000000..fcfb4d5
--- /dev/null
+++ b/services/radio/RadioInterface.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_RADIO_INTERFACE_H
+#define ANDROID_HARDWARE_RADIO_INTERFACE_H
+
+#include <utils/RefBase.h>
+#include <system/radio.h>
+#include "TunerInterface.h"
+#include "TunerCallbackInterface.h"
+
+namespace android {
+
+class RadioInterface : public virtual RefBase
+{
+public:
+ /* get a sound trigger HAL instance */
+ static sp<RadioInterface> connectModule(radio_class_t classId);
+
+ /*
+ * Retrieve implementation properties.
+ *
+ * arguments:
+ * - properties: where to return the module properties
+ *
+ * returns:
+ * 0 if no error
+ * -EINVAL if invalid arguments are passed
+ */
+ virtual int getProperties(radio_hal_properties_t *properties) = 0;
+
+ /*
+ * Open a tuner interface for the requested configuration.
+ * If no other tuner is opened, this will activate the radio module.
+ *
+ * arguments:
+ * - config: the band configuration to apply
+ * - audio: this tuner will be used for live radio listening and should be connected to
+ * the radio audio source.
+ * - callback: the event callback
+ * - cookie: the cookie to pass when calling the callback
+ * - tuner: where to return the tuner interface
+ *
+ * returns:
+ * 0 if HW was powered up and configuration could be applied
+ * -EINVAL if configuration requested is invalid
+ * -ENOSYS if called out of sequence
+ *
+ * Callback function with event RADIO_EVENT_CONFIG MUST be called once the
+ * configuration is applied or a failure occurs or after a time out.
+ */
+ virtual int openTuner(const radio_hal_band_config_t *config,
+ bool audio,
+ sp<TunerCallbackInterface> callback,
+ sp<TunerInterface>& tuner) = 0;
+
+ /*
+ * Close a tuner interface.
+ * If the last tuner is closed, the radio module is deactivated.
+ *
+ * arguments:
+ * - tuner: the tuner interface to close
+ *
+ * returns:
+ * 0 if powered down successfully.
+ * -EINVAL if an invalid argument is passed
+ * -ENOSYS if called out of sequence
+ */
+ virtual int closeTuner(sp<TunerInterface>& tuner) = 0;
+
+protected:
+ RadioInterface() {}
+ virtual ~RadioInterface() {}
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_RADIO_INTERFACE_H
diff --git a/services/radio/RadioService.cpp b/services/radio/RadioService.cpp
index 3448678..beb7c09 100644
--- a/services/radio/RadioService.cpp
+++ b/services/radio/RadioService.cpp
@@ -34,6 +34,7 @@
#include <binder/IServiceManager.h>
#include <binder/MemoryBase.h>
#include <binder/MemoryHeapBase.h>
+#include <binder/PermissionCache.h>
#include <hardware/radio.h>
#include <media/AudioSystem.h>
#include "RadioService.h"
@@ -43,6 +44,8 @@
static const char kRadioTunerAudioDeviceName[] = "Radio tuner source";
+static const String16 RADIO_PERMISSION("android.permission.ACCESS_FM_RADIO");
+
RadioService::RadioService()
: BnRadioService(), mNextUniqueId(1)
{
@@ -51,31 +54,15 @@
void RadioService::onFirstRef()
{
- const hw_module_t *mod;
- int rc;
- struct radio_hw_device *dev;
-
ALOGI("%s", __FUNCTION__);
- rc = hw_get_module_by_class(RADIO_HARDWARE_MODULE_ID, RADIO_HARDWARE_MODULE_ID_FM, &mod);
- if (rc != 0) {
- ALOGE("couldn't load radio module %s.%s (%s)",
- RADIO_HARDWARE_MODULE_ID, "primary", strerror(-rc));
- return;
- }
- rc = radio_hw_device_open(mod, &dev);
- if (rc != 0) {
- ALOGE("couldn't open radio hw device in %s.%s (%s)",
- RADIO_HARDWARE_MODULE_ID, "primary", strerror(-rc));
- return;
- }
- if (dev->common.version != RADIO_DEVICE_API_VERSION_CURRENT) {
- ALOGE("wrong radio hw device version %04x", dev->common.version);
- return;
- }
+ sp<RadioInterface> dev = RadioInterface::connectModule(RADIO_CLASS_AM_FM);
+ if (dev == 0) {
+ return;
+ }
struct radio_hal_properties halProperties;
- rc = dev->get_properties(dev, &halProperties);
+ int rc = dev->getProperties(&halProperties);
if (rc != 0) {
ALOGE("could not read implementation properties");
return;
@@ -95,21 +82,21 @@
RadioService::~RadioService()
{
- for (size_t i = 0; i < mModules.size(); i++) {
- radio_hw_device_close(mModules.valueAt(i)->hwDevice());
- }
}
status_t RadioService::listModules(struct radio_properties *properties,
uint32_t *numModules)
{
+ if (!PermissionCache::checkCallingPermission(RADIO_PERMISSION)) {
+ return PERMISSION_DENIED;
+ }
ALOGV("listModules");
AutoMutex lock(mServiceLock);
if (numModules == NULL || (*numModules != 0 && properties == NULL)) {
return BAD_VALUE;
}
- size_t maxModules = *numModules;
+ uint32_t maxModules = *numModules;
*numModules = mModules.size();
for (size_t i = 0; i < mModules.size() && i < maxModules; i++) {
properties[i] = mModules.valueAt(i)->properties();
@@ -123,6 +110,9 @@
bool withAudio,
sp<IRadio>& radio)
{
+ if (!PermissionCache::checkCallingPermission(RADIO_PERMISSION)) {
+ return PERMISSION_DENIED;
+ }
ALOGV("%s %d config %p withAudio %d", __FUNCTION__, handle, config, withAudio);
AutoMutex lock(mServiceLock);
@@ -193,16 +183,6 @@
}
-// static
-void RadioService::callback(radio_hal_event_t *halEvent, void *cookie)
-{
- CallbackThread *callbackThread = (CallbackThread *)cookie;
- if (callbackThread == NULL) {
- return;
- }
- callbackThread->sendEvent(halEvent);
-}
-
/* static */
void RadioService::convertProperties(radio_properties_t *properties,
const radio_hal_properties_t *halProperties)
@@ -306,32 +286,40 @@
{
sp<IMemory> eventMemory;
- size_t headerSize =
- (sizeof(struct radio_event) + sizeof(unsigned int) - 1) /sizeof(unsigned int);
- size_t metadataSize = 0;
+ // The event layout in shared memory is:
+ // sizeof(struct radio_event) bytes : the event itself
+ // 4 bytes : metadata size or 0
+ // N bytes : metadata if present
+ uint32_t metadataOffset = sizeof(struct radio_event) + sizeof(uint32_t);
+ uint32_t metadataSize = 0;
+
switch (halEvent->type) {
case RADIO_EVENT_TUNED:
case RADIO_EVENT_AF_SWITCH:
if (radio_metadata_check(halEvent->info.metadata) == 0) {
- metadataSize = radio_metadata_get_size(halEvent->info.metadata);
+ metadataSize = (uint32_t)radio_metadata_get_size(halEvent->info.metadata);
}
break;
case RADIO_EVENT_METADATA:
if (radio_metadata_check(halEvent->metadata) != 0) {
return eventMemory;
}
- metadataSize = radio_metadata_get_size(halEvent->metadata);
+ metadataSize = (uint32_t)radio_metadata_get_size(halEvent->metadata);
break;
default:
break;
}
- size_t size = headerSize + metadataSize;
- eventMemory = mMemoryDealer->allocate(size);
+
+ eventMemory = mMemoryDealer->allocate(metadataOffset + metadataSize);
if (eventMemory == 0 || eventMemory->pointer() == NULL) {
eventMemory.clear();
return eventMemory;
}
+
struct radio_event *event = (struct radio_event *)eventMemory->pointer();
+
+ *(uint32_t *)((uint8_t *)event + metadataOffset - sizeof(uint32_t)) = metadataSize;
+
event->type = halEvent->type;
event->status = halEvent->status;
@@ -343,10 +331,7 @@
case RADIO_EVENT_AF_SWITCH:
event->info = halEvent->info;
if (metadataSize != 0) {
- memcpy((char *)event + headerSize, halEvent->info.metadata, metadataSize);
- // replace meta data pointer by offset while in shared memory so that receiving side
- // can restore the pointer in destination process.
- event->info.metadata = (radio_metadata_t *)headerSize;
+ memcpy((uint8_t *)event + metadataOffset, halEvent->info.metadata, metadataSize);
}
break;
case RADIO_EVENT_TA:
@@ -356,10 +341,9 @@
event->on = halEvent->on;
break;
case RADIO_EVENT_METADATA:
- memcpy((char *)event + headerSize, halEvent->metadata, metadataSize);
- // replace meta data pointer by offset while in shared memory so that receiving side
- // can restore the pointer in destination process.
- event->metadata = (radio_metadata_t *)headerSize;
+ if (metadataSize != 0) {
+ memcpy((uint8_t *)event + metadataOffset, halEvent->metadata, metadataSize);
+ }
break;
case RADIO_EVENT_HW_FAILURE:
default:
@@ -386,12 +370,13 @@
#undef LOG_TAG
#define LOG_TAG "RadioService::Module"
-RadioService::Module::Module(radio_hw_device* hwDevice, radio_properties properties)
+RadioService::Module::Module(sp<RadioInterface> hwDevice, radio_properties properties)
: mHwDevice(hwDevice), mProperties(properties), mMute(true)
{
}
RadioService::Module::~Module() {
+ mHwDevice.clear();
mModuleClients.clear();
}
@@ -405,10 +390,15 @@
bool audio)
{
ALOGV("addClient() %p config %p product %s", this, config, mProperties.product);
+
AutoMutex lock(mLock);
sp<ModuleClient> moduleClient;
int ret;
+ if (mHwDevice == 0) {
+ return moduleClient;
+ }
+
for (size_t i = 0; i < mModuleClients.size(); i++) {
if (mModuleClients[i]->client() == client) {
// client already connected: reject
@@ -465,7 +455,7 @@
}
}
- const struct radio_tuner *halTuner;
+ sp<TunerInterface> halTuner;
sp<ModuleClient> preemtedClient;
if (audio) {
if (allocatedAudio >= mProperties.num_audio_sources) {
@@ -485,18 +475,19 @@
}
if (preemtedClient != 0) {
halTuner = preemtedClient->getTuner();
- preemtedClient->setTuner(NULL);
- mHwDevice->close_tuner(mHwDevice, halTuner);
+ sp<TunerInterface> clear;
+ preemtedClient->setTuner(clear);
+ mHwDevice->closeTuner(halTuner);
if (preemtedClient->audio()) {
notifyDeviceConnection(false, "");
}
}
- ret = mHwDevice->open_tuner(mHwDevice, &halConfig, audio,
- RadioService::callback, moduleClient->callbackThread().get(),
- &halTuner);
+ ret = mHwDevice->openTuner(&halConfig, audio,
+ moduleClient,
+ halTuner);
if (ret == 0) {
- ALOGV("addClient() setTuner %p", halTuner);
+ ALOGV("addClient() setTuner %p", halTuner.get());
moduleClient->setTuner(halTuner);
mModuleClients.add(moduleClient);
if (audio) {
@@ -528,12 +519,15 @@
}
mModuleClients.removeAt(index);
- const struct radio_tuner *halTuner = moduleClient->getTuner();
+ sp<TunerInterface> halTuner = moduleClient->getTuner();
if (halTuner == NULL) {
return;
}
- mHwDevice->close_tuner(mHwDevice, halTuner);
+ if (mHwDevice != 0) {
+ mHwDevice->closeTuner(halTuner);
+ }
+
if (moduleClient->audio()) {
notifyDeviceConnection(false, "");
}
@@ -544,6 +538,10 @@
return;
}
+ if (mHwDevice == 0) {
+ return;
+ }
+
// Tuner reallocation logic:
// When a client is removed and was controlling a tuner, this tuner will be allocated to a
// previously preempted client. This client will be notified by a callback with
@@ -592,9 +590,9 @@
ALOG_ASSERT(youngestClient != 0, "removeClient() removed client no candidate found for tuner");
struct radio_hal_band_config halConfig = youngestClient->halConfig();
- ret = mHwDevice->open_tuner(mHwDevice, &halConfig, youngestClient->audio(),
- RadioService::callback, moduleClient->callbackThread().get(),
- &halTuner);
+ ret = mHwDevice->openTuner(&halConfig, youngestClient->audio(),
+ moduleClient,
+ halTuner);
if (ret == 0) {
youngestClient->setTuner(halTuner);
@@ -647,7 +645,7 @@
const sp<IRadioClient>& client,
const struct radio_band_config *config,
bool audio)
- : mModule(module), mClient(client), mConfig(*config), mAudio(audio), mTuner(NULL)
+ : mModule(module), mClient(client), mConfig(*config), mAudio(audio), mTuner(0)
{
}
@@ -667,6 +665,11 @@
}
}
+void RadioService::ModuleClient::onEvent(radio_hal_event_t *halEvent)
+{
+ mCallbackThread->sendEvent(halEvent);
+}
+
status_t RadioService::ModuleClient::dump(int fd __unused,
const Vector<String16>& args __unused) {
String8 result;
@@ -697,14 +700,14 @@
return mConfig.band;
}
-const struct radio_tuner *RadioService::ModuleClient::getTuner() const
+sp<TunerInterface>& RadioService::ModuleClient::getTuner()
{
AutoMutex lock(mLock);
ALOGV("%s locked", __FUNCTION__);
return mTuner;
}
-void RadioService::ModuleClient::setTuner(const struct radio_tuner *tuner)
+void RadioService::ModuleClient::setTuner(sp<TunerInterface>& tuner)
{
ALOGV("%s %p", __FUNCTION__, this);
@@ -715,7 +718,7 @@
radio_hal_event_t event;
event.type = RADIO_EVENT_CONTROL;
event.status = 0;
- event.on = mTuner != NULL;
+ event.on = mTuner != 0;
mCallbackThread->sendEvent(&event);
ALOGV("%s DONE", __FUNCTION__);
@@ -723,14 +726,17 @@
status_t RadioService::ModuleClient::setConfiguration(const struct radio_band_config *config)
{
+ if (!PermissionCache::checkCallingPermission(RADIO_PERMISSION)) {
+ return PERMISSION_DENIED;
+ }
AutoMutex lock(mLock);
status_t status = NO_ERROR;
ALOGV("%s locked", __FUNCTION__);
- if (mTuner != NULL) {
+ if (mTuner != 0) {
struct radio_hal_band_config halConfig;
halConfig = config->band;
- status = (status_t)mTuner->set_configuration(mTuner, &halConfig);
+ status = (status_t)mTuner->setConfiguration(&halConfig);
if (status == NO_ERROR) {
mConfig = *config;
}
@@ -744,13 +750,16 @@
status_t RadioService::ModuleClient::getConfiguration(struct radio_band_config *config)
{
+ if (!PermissionCache::checkCallingPermission(RADIO_PERMISSION)) {
+ return PERMISSION_DENIED;
+ }
AutoMutex lock(mLock);
status_t status = NO_ERROR;
ALOGV("%s locked", __FUNCTION__);
- if (mTuner != NULL) {
+ if (mTuner != 0) {
struct radio_hal_band_config halConfig;
- status = (status_t)mTuner->get_configuration(mTuner, &halConfig);
+ status = (status_t)mTuner->getConfiguration(&halConfig);
if (status == NO_ERROR) {
mConfig.band = halConfig;
}
@@ -762,11 +771,14 @@
status_t RadioService::ModuleClient::setMute(bool mute)
{
+ if (!PermissionCache::checkCallingPermission(RADIO_PERMISSION)) {
+ return PERMISSION_DENIED;
+ }
sp<Module> module;
{
Mutex::Autolock _l(mLock);
ALOGV("%s locked", __FUNCTION__);
- if (mTuner == NULL || !mAudio) {
+ if (mTuner == 0 || !mAudio) {
return INVALID_OPERATION;
}
module = mModule.promote();
@@ -780,6 +792,9 @@
status_t RadioService::ModuleClient::getMute(bool *mute)
{
+ if (!PermissionCache::checkCallingPermission(RADIO_PERMISSION)) {
+ return PERMISSION_DENIED;
+ }
sp<Module> module;
{
Mutex::Autolock _l(mLock);
@@ -794,11 +809,14 @@
status_t RadioService::ModuleClient::scan(radio_direction_t direction, bool skipSubChannel)
{
+ if (!PermissionCache::checkCallingPermission(RADIO_PERMISSION)) {
+ return PERMISSION_DENIED;
+ }
AutoMutex lock(mLock);
ALOGV("%s locked", __FUNCTION__);
status_t status;
- if (mTuner != NULL) {
- status = (status_t)mTuner->scan(mTuner, direction, skipSubChannel);
+ if (mTuner != 0) {
+ status = (status_t)mTuner->scan(direction, skipSubChannel);
} else {
status = INVALID_OPERATION;
}
@@ -807,24 +825,30 @@
status_t RadioService::ModuleClient::step(radio_direction_t direction, bool skipSubChannel)
{
+ if (!PermissionCache::checkCallingPermission(RADIO_PERMISSION)) {
+ return PERMISSION_DENIED;
+ }
AutoMutex lock(mLock);
ALOGV("%s locked", __FUNCTION__);
status_t status;
- if (mTuner != NULL) {
- status = (status_t)mTuner->step(mTuner, direction, skipSubChannel);
+ if (mTuner != 0) {
+ status = (status_t)mTuner->step(direction, skipSubChannel);
} else {
status = INVALID_OPERATION;
}
return status;
}
-status_t RadioService::ModuleClient::tune(unsigned int channel, unsigned int subChannel)
+status_t RadioService::ModuleClient::tune(uint32_t channel, uint32_t subChannel)
{
+ if (!PermissionCache::checkCallingPermission(RADIO_PERMISSION)) {
+ return PERMISSION_DENIED;
+ }
AutoMutex lock(mLock);
ALOGV("%s locked", __FUNCTION__);
status_t status;
- if (mTuner != NULL) {
- status = (status_t)mTuner->tune(mTuner, channel, subChannel);
+ if (mTuner != 0) {
+ status = (status_t)mTuner->tune(channel, subChannel);
} else {
status = INVALID_OPERATION;
}
@@ -833,11 +857,14 @@
status_t RadioService::ModuleClient::cancel()
{
+ if (!PermissionCache::checkCallingPermission(RADIO_PERMISSION)) {
+ return PERMISSION_DENIED;
+ }
AutoMutex lock(mLock);
ALOGV("%s locked", __FUNCTION__);
status_t status;
- if (mTuner != NULL) {
- status = (status_t)mTuner->cancel(mTuner);
+ if (mTuner != 0) {
+ status = (status_t)mTuner->cancel();
} else {
status = INVALID_OPERATION;
}
@@ -846,22 +873,29 @@
status_t RadioService::ModuleClient::getProgramInformation(struct radio_program_info *info)
{
+ if (!PermissionCache::checkCallingPermission(RADIO_PERMISSION)) {
+ return PERMISSION_DENIED;
+ }
AutoMutex lock(mLock);
ALOGV("%s locked", __FUNCTION__);
status_t status;
if (mTuner != NULL) {
- status = (status_t)mTuner->get_program_information(mTuner, info);
+ status = (status_t)mTuner->getProgramInformation(info);
} else {
status = INVALID_OPERATION;
}
+
return status;
}
status_t RadioService::ModuleClient::hasControl(bool *hasControl)
{
+ if (!PermissionCache::checkCallingPermission(RADIO_PERMISSION)) {
+ return PERMISSION_DENIED;
+ }
Mutex::Autolock lock(mLock);
ALOGV("%s locked", __FUNCTION__);
- *hasControl = mTuner != NULL;
+ *hasControl = mTuner != 0;
return NO_ERROR;
}
diff --git a/services/radio/RadioService.h b/services/radio/RadioService.h
index ac3481e..444eb7a 100644
--- a/services/radio/RadioService.h
+++ b/services/radio/RadioService.h
@@ -27,6 +27,9 @@
#include <radio/IRadioClient.h>
#include <system/radio.h>
#include <hardware/radio.h>
+#include "RadioInterface.h"
+#include "TunerInterface.h"
+#include "TunerCallbackInterface.h"
namespace android {
@@ -66,7 +69,7 @@
class Module : public virtual RefBase {
public:
- Module(radio_hw_device* hwDevice,
+ Module(sp<RadioInterface> hwDevice,
struct radio_properties properties);
virtual ~Module();
@@ -83,7 +86,7 @@
virtual status_t dump(int fd, const Vector<String16>& args);
- const struct radio_hw_device *hwDevice() const { return mHwDevice; }
+ sp<RadioInterface> hwDevice() const { return mHwDevice; }
const struct radio_properties properties() const { return mProperties; }
const struct radio_band_config *getDefaultConfig() const ;
@@ -92,7 +95,7 @@
void notifyDeviceConnection(bool connected, const char *address);
Mutex mLock; // protects mModuleClients
- const struct radio_hw_device *mHwDevice; // HAL hardware device
+ sp<RadioInterface> mHwDevice; // HAL hardware device
const struct radio_properties mProperties; // cached hardware module properties
Vector< sp<ModuleClient> > mModuleClients; // list of attached clients
bool mMute; // radio audio source state
@@ -128,7 +131,8 @@
}; // class CallbackThread
class ModuleClient : public BnRadio,
- public IBinder::DeathRecipient {
+ public IBinder::DeathRecipient,
+ public TunerCallbackInterface {
public:
ModuleClient(const sp<Module>& module,
@@ -167,8 +171,8 @@
wp<Module> module() const { return mModule; }
radio_hal_band_config_t halConfig() const;
sp<CallbackThread> callbackThread() const { return mCallbackThread; }
- void setTuner(const struct radio_tuner *tuner);
- const struct radio_tuner *getTuner() const;
+ void setTuner(sp<TunerInterface>& tuner);
+ sp<TunerInterface>& getTuner();
bool audio() const { return mAudio; }
void onCallbackEvent(const sp<IMemory>& event);
@@ -179,6 +183,9 @@
// IBinder::DeathRecipient implementation
virtual void binderDied(const wp<IBinder> &who);
+ // TunerCallbackInterface
+ virtual void onEvent(radio_hal_event_t *event);
+
private:
mutable Mutex mLock; // protects mClient, mConfig and mTuner
@@ -187,7 +194,7 @@
radio_band_config_t mConfig; // current band configuration
sp<CallbackThread> mCallbackThread; // event callback thread
const bool mAudio;
- const struct radio_tuner *mTuner; // HAL tuner interface. NULL indicates that
+ sp<TunerInterface> mTuner; // HAL tuner interface. NULL indicates that
// this client does not have control on any
// tuner
}; // class ModuleClient
diff --git a/services/radio/TunerCallbackInterface.h b/services/radio/TunerCallbackInterface.h
new file mode 100644
index 0000000..4973cce
--- /dev/null
+++ b/services/radio/TunerCallbackInterface.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_TUNER_CALLBACK_INTERFACE_H
+#define ANDROID_HARDWARE_TUNER_CALLBACK_INTERFACE_H
+
+#include <utils/RefBase.h>
+#include <system/radio.h>
+
+namespace android {
+
+class TunerCallbackInterface : public virtual RefBase
+{
+public:
+ virtual void onEvent(radio_hal_event_t *event) = 0;
+
+protected:
+ TunerCallbackInterface() {}
+ virtual ~TunerCallbackInterface() {}
+
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_TUNER_CALLBACK_INTERFACE_H
diff --git a/services/radio/TunerInterface.h b/services/radio/TunerInterface.h
new file mode 100644
index 0000000..4e657d3
--- /dev/null
+++ b/services/radio/TunerInterface.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_TUNER_INTERFACE_H
+#define ANDROID_HARDWARE_TUNER_INTERFACE_H
+
+#include <utils/RefBase.h>
+#include <system/radio.h>
+
+namespace android {
+
+class TunerInterface : public virtual RefBase
+{
+public:
+ /*
+ * Apply current radio band configuration (band, range, channel spacing ...).
+ *
+ * arguments:
+ * - config: the band configuration to apply
+ *
+ * returns:
+ * 0 if configuration could be applied
+ * -EINVAL if configuration requested is invalid
+ *
+ * Automatically cancels pending scan, step or tune.
+ *
+ * Callback function with event RADIO_EVENT_CONFIG MUST be called once the
+ * configuration is applied or a failure occurs or after a time out.
+ */
+ virtual int setConfiguration(const radio_hal_band_config_t *config) = 0;
+
+ /*
+ * Retrieve current radio band configuration.
+ *
+ * arguments:
+ * - config: where to return the band configuration
+ *
+ * returns:
+ * 0 if valid configuration is returned
+ * -EINVAL if invalid arguments are passed
+ */
+ virtual int getConfiguration(radio_hal_band_config_t *config) = 0;
+
+ /*
+ * Start scanning up to next valid station.
+ * Must be called when a valid configuration has been applied.
+ *
+ * arguments:
+ * - direction: RADIO_DIRECTION_UP or RADIO_DIRECTION_DOWN
+ * - skip_sub_channel: valid for HD radio or digital radios only: ignore sub channels
+ * (e.g SPS for HD radio).
+ *
+ * returns:
+ * 0 if scan successfully started
+ * -ENOSYS if called out of sequence
+ * -ENODEV if another error occurs
+ *
+ * Automatically cancels pending scan, step or tune.
+ *
+ * Callback function with event RADIO_EVENT_TUNED MUST be called once
+ * locked on a station or after a time out or full frequency scan if
+ * no station found. The event status should indicate if a valid station
+ * is tuned or not.
+ */
+ virtual int scan(radio_direction_t direction, bool skip_sub_channel) = 0;
+
+ /*
+ * Move one channel spacing up or down.
+ * Must be called when a valid configuration has been applied.
+ *
+ * arguments:
+ * - direction: RADIO_DIRECTION_UP or RADIO_DIRECTION_DOWN
+ * - skip_sub_channel: valid for HD radio or digital radios only: ignore sub channels
+ * (e.g SPS for HD radio).
+ *
+ * returns:
+ * 0 if step successfully started
+ * -ENOSYS if called out of sequence
+ * -ENODEV if another error occurs
+ *
+ * Automatically cancels pending scan, step or tune.
+ *
+ * Callback function with event RADIO_EVENT_TUNED MUST be called once
+ * step completed or after a time out. The event status should indicate
+ * if a valid station is tuned or not.
+ */
+ virtual int step(radio_direction_t direction, bool skip_sub_channel) = 0;
+
+ /*
+ * Tune to specified frequency.
+ * Must be called when a valid configuration has been applied.
+ *
+ * arguments:
+ * - channel: channel to tune to. A frequency in kHz for AM/FM/HD Radio bands.
+ * - sub_channel: valid for HD radio or digital radios only: (e.g SPS number for HD radio).
+ *
+ * returns:
+ * 0 if tune successfully started
+ * -ENOSYS if called out of sequence
+ * -EINVAL if invalid arguments are passed
+ * -ENODEV if another error occurs
+ *
+ * Automatically cancels pending scan, step or tune.
+ *
+ * Callback function with event RADIO_EVENT_TUNED MUST be called once
+ * tuned or after a time out. The event status should indicate
+ * if a valid station is tuned or not.
+ */
+ virtual int tune(unsigned int channel, unsigned int sub_channel) = 0;
+
+ /*
+ * Cancel a scan, step or tune operation.
+ * Must be called while a scan, step or tune operation is pending
+ * (callback not yet sent).
+ *
+ * returns:
+ * 0 if successful
+ * -ENOSYS if called out of sequence
+ * -ENODEV if another error occurs
+ *
+ * The callback is not sent.
+ */
+ virtual int cancel() = 0;
+
+ /*
+ * Retrieve current station information.
+ *
+ * arguments:
+ * - info: where to return the program info.
+ * If info->metadata is NULL. no meta data should be returned.
+ * If meta data must be returned, they should be added to or cloned to
+ * info->metadata, not passed from a newly created meta data buffer.
+ *
+ * returns:
+ * 0 if tuned and information available
+ * -EINVAL if invalid arguments are passed
+ * -ENODEV if another error occurs
+ */
+ virtual int getProgramInformation(radio_program_info_t *info) = 0;
+
+protected:
+ TunerInterface() {}
+ virtual ~TunerInterface() {}
+
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_TUNER_INTERFACE_H
diff --git a/services/soundtrigger/Android.mk b/services/soundtrigger/Android.mk
index 8408c66..10ee141 100644
--- a/services/soundtrigger/Android.mk
+++ b/services/soundtrigger/Android.mk
@@ -16,8 +16,10 @@
include $(CLEAR_VARS)
-
ifeq ($(SOUND_TRIGGER_USE_STUB_MODULE), 1)
+ ifneq ($(USE_LEGACY_LOCAL_AUDIO_HAL), true)
+ $(error Requires building with USE_LEGACY_LOCAL_AUDIO_HAL=true)
+ endif
LOCAL_CFLAGS += -DSOUND_TRIGGER_USE_STUB_MODULE
endif
@@ -34,12 +36,33 @@
libaudioclient \
libserviceutility
+
+ifeq ($(USE_LEGACY_LOCAL_AUDIO_HAL),true)
+# libhardware configuration
+LOCAL_SRC_FILES += \
+ SoundTriggerHalLegacy.cpp
+else
+# Treble configuration
+LOCAL_SRC_FILES += \
+ SoundTriggerHalHidl.cpp
+
+LOCAL_SHARED_LIBRARIES += \
+ libhwbinder \
+ libhidlbase \
+ libhidltransport \
+ libbase \
+ libaudiohal \
+ android.hardware.soundtrigger@2.0 \
+ android.hardware.audio.common@2.0
+endif
+
+
LOCAL_C_INCLUDES += \
frameworks/av/services/audioflinger
LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
-LOCAL_CFLAGS := -Wall -Werror
+LOCAL_CFLAGS += -Wall -Werror
LOCAL_MODULE:= libsoundtriggerservice
diff --git a/services/soundtrigger/SoundTriggerHalHidl.cpp b/services/soundtrigger/SoundTriggerHalHidl.cpp
new file mode 100644
index 0000000..0cd5cf7
--- /dev/null
+++ b/services/soundtrigger/SoundTriggerHalHidl.cpp
@@ -0,0 +1,604 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "SoundTriggerHalHidl"
+//#define LOG_NDEBUG 0
+
+#include <media/audiohal/hidl/HalDeathHandler.h>
+#include <utils/Log.h>
+#include "SoundTriggerHalHidl.h"
+#include <hwbinder/IPCThreadState.h>
+#include <hwbinder/ProcessState.h>
+
+namespace android {
+
+using android::hardware::Return;
+using android::hardware::ProcessState;
+using android::hardware::audio::common::V2_0::AudioDevice;
+
+/* static */
+sp<SoundTriggerHalInterface> SoundTriggerHalInterface::connectModule(const char *moduleName)
+{
+ return new SoundTriggerHalHidl(moduleName);
+}
+
+int SoundTriggerHalHidl::getProperties(struct sound_trigger_properties *properties)
+{
+ sp<ISoundTriggerHw> soundtrigger = getService();
+ if (soundtrigger == 0) {
+ return -ENODEV;
+ }
+
+ ISoundTriggerHw::Properties halProperties;
+ Return<void> hidlReturn;
+ int ret;
+ {
+ AutoMutex lock(mHalLock);
+ hidlReturn = soundtrigger->getProperties([&](int rc, auto res) {
+ ret = rc;
+ halProperties = res;
+ ALOGI("getProperties res implementor %s", res.implementor.c_str());
+ });
+ }
+
+ if (hidlReturn.isOk()) {
+ if (ret == 0) {
+ convertPropertiesFromHal(properties, &halProperties);
+ }
+ } else {
+ ALOGE("getProperties error %s", hidlReturn.description().c_str());
+ return FAILED_TRANSACTION;
+ }
+ ALOGI("getProperties ret %d", ret);
+ return ret;
+}
+
+int SoundTriggerHalHidl::loadSoundModel(struct sound_trigger_sound_model *sound_model,
+ sound_model_callback_t callback,
+ void *cookie,
+ sound_model_handle_t *handle)
+{
+ if (handle == NULL) {
+ return -EINVAL;
+ }
+
+ sp<ISoundTriggerHw> soundtrigger = getService();
+ if (soundtrigger == 0) {
+ return -ENODEV;
+ }
+
+ uint32_t modelId;
+ {
+ AutoMutex lock(mLock);
+ do {
+ modelId = nextUniqueId();
+ ALOGI("loadSoundModel modelId %u", modelId);
+ sp<SoundModel> model = mSoundModels.valueFor(modelId);
+ ALOGI("loadSoundModel model %p", model.get());
+ } while (mSoundModels.valueFor(modelId) != 0 && modelId != 0);
+ }
+ LOG_ALWAYS_FATAL_IF(modelId == 0,
+ "loadSoundModel(): wrap around in sound model IDs, num loaded models %zd",
+ mSoundModels.size());
+
+ ISoundTriggerHw::SoundModel *halSoundModel =
+ convertSoundModelToHal(sound_model);
+ if (halSoundModel == NULL) {
+ return -EINVAL;
+ }
+
+ Return<void> hidlReturn;
+ int ret;
+ SoundModelHandle halHandle;
+ {
+ AutoMutex lock(mHalLock);
+ if (sound_model->type == SOUND_MODEL_TYPE_KEYPHRASE) {
+ hidlReturn = soundtrigger->loadPhraseSoundModel(
+ *(const ISoundTriggerHw::PhraseSoundModel *)halSoundModel,
+ this, modelId, [&](int32_t retval, auto res) {
+ ret = retval;
+ halHandle = res;
+ });
+
+ } else {
+ hidlReturn = soundtrigger->loadSoundModel(*halSoundModel,
+ this, modelId, [&](int32_t retval, auto res) {
+ ret = retval;
+ halHandle = res;
+ });
+ }
+ }
+
+ delete halSoundModel;
+
+ if (hidlReturn.isOk()) {
+ if (ret == 0) {
+ AutoMutex lock(mLock);
+ *handle = (sound_model_handle_t)modelId;
+ sp<SoundModel> model = new SoundModel(*handle, callback, cookie, halHandle);
+ mSoundModels.add(*handle, model);
+ }
+ } else {
+ ALOGE("loadSoundModel error %s", hidlReturn.description().c_str());
+ return FAILED_TRANSACTION;
+ }
+
+ return ret;
+}
+
+int SoundTriggerHalHidl::unloadSoundModel(sound_model_handle_t handle)
+{
+ sp<ISoundTriggerHw> soundtrigger = getService();
+ if (soundtrigger == 0) {
+ return -ENODEV;
+ }
+
+ sp<SoundModel> model = removeModel(handle);
+ if (model == 0) {
+ ALOGE("unloadSoundModel model not found for handle %u", handle);
+ return -EINVAL;
+ }
+
+ Return<int32_t> hidlReturn(0);
+ {
+ AutoMutex lock(mHalLock);
+ hidlReturn = soundtrigger->unloadSoundModel(model->mHalHandle);
+ }
+
+ if (!hidlReturn.isOk()) {
+ ALOGE("unloadSoundModel error %s", hidlReturn.description().c_str());
+ return FAILED_TRANSACTION;
+ }
+
+ return hidlReturn;
+}
+
+int SoundTriggerHalHidl::startRecognition(sound_model_handle_t handle,
+ const struct sound_trigger_recognition_config *config,
+ recognition_callback_t callback,
+ void *cookie)
+{
+ sp<ISoundTriggerHw> soundtrigger = getService();
+ if (soundtrigger == 0) {
+ return -ENODEV;
+ }
+
+ sp<SoundModel> model = getModel(handle);
+ if (model == 0) {
+ ALOGE("startRecognition model not found for handle %u", handle);
+ return -EINVAL;
+ }
+
+ model->mRecognitionCallback = callback;
+ model->mRecognitionCookie = cookie;
+
+ ISoundTriggerHw::RecognitionConfig *halConfig =
+ convertRecognitionConfigToHal(config);
+
+ Return<int32_t> hidlReturn(0);
+ {
+ AutoMutex lock(mHalLock);
+ hidlReturn = soundtrigger->startRecognition(model->mHalHandle, *halConfig, this, handle);
+ }
+
+ delete halConfig;
+
+ if (!hidlReturn.isOk()) {
+ ALOGE("startRecognition error %s", hidlReturn.description().c_str());
+ return FAILED_TRANSACTION;
+ }
+ return hidlReturn;
+}
+
+int SoundTriggerHalHidl::stopRecognition(sound_model_handle_t handle)
+{
+ sp<ISoundTriggerHw> soundtrigger = getService();
+ if (soundtrigger == 0) {
+ return -ENODEV;
+ }
+
+ sp<SoundModel> model = getModel(handle);
+ if (model == 0) {
+ ALOGE("stopRecognition model not found for handle %u", handle);
+ return -EINVAL;
+ }
+
+ Return<int32_t> hidlReturn(0);
+ {
+ AutoMutex lock(mHalLock);
+ hidlReturn = soundtrigger->stopRecognition(model->mHalHandle);
+ }
+
+ if (!hidlReturn.isOk()) {
+ ALOGE("stopRecognition error %s", hidlReturn.description().c_str());
+ return FAILED_TRANSACTION;
+ }
+ return hidlReturn;
+}
+
+int SoundTriggerHalHidl::stopAllRecognitions()
+{
+ sp<ISoundTriggerHw> soundtrigger = getService();
+ if (soundtrigger == 0) {
+ return -ENODEV;
+ }
+
+ Return<int32_t> hidlReturn(0);
+ {
+ AutoMutex lock(mHalLock);
+ hidlReturn = soundtrigger->stopAllRecognitions();
+ }
+
+ if (!hidlReturn.isOk()) {
+ ALOGE("stopAllRecognitions error %s", hidlReturn.description().c_str());
+ return FAILED_TRANSACTION;
+ }
+ return hidlReturn;
+}
+
+SoundTriggerHalHidl::SoundTriggerHalHidl(const char *moduleName)
+ : mModuleName(moduleName), mNextUniqueId(1)
+{
+ LOG_ALWAYS_FATAL_IF(strcmp(mModuleName, "primary") != 0,
+ "Treble soundtrigger only supports primary module");
+}
+
+SoundTriggerHalHidl::~SoundTriggerHalHidl()
+{
+}
+
+sp<ISoundTriggerHw> SoundTriggerHalHidl::getService()
+{
+ AutoMutex lock(mLock);
+ if (mISoundTrigger == 0) {
+ if (mModuleName == NULL) {
+ mModuleName = "primary";
+ }
+ mISoundTrigger = ISoundTriggerHw::getService();
+ if (mISoundTrigger != 0) {
+ mISoundTrigger->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
+ }
+ }
+ return mISoundTrigger;
+}
+
+sp<SoundTriggerHalHidl::SoundModel> SoundTriggerHalHidl::getModel(sound_model_handle_t handle)
+{
+ AutoMutex lock(mLock);
+ return mSoundModels.valueFor(handle);
+}
+
+sp<SoundTriggerHalHidl::SoundModel> SoundTriggerHalHidl::removeModel(sound_model_handle_t handle)
+{
+ AutoMutex lock(mLock);
+ sp<SoundModel> model = mSoundModels.valueFor(handle);
+ mSoundModels.removeItem(handle);
+ return model;
+}
+
+uint32_t SoundTriggerHalHidl::nextUniqueId()
+{
+ return (uint32_t) atomic_fetch_add_explicit(&mNextUniqueId,
+ (uint_fast32_t) 1, memory_order_acq_rel);
+}
+
+void SoundTriggerHalHidl::convertUuidToHal(Uuid *halUuid,
+ const sound_trigger_uuid_t *uuid)
+{
+ halUuid->timeLow = uuid->timeLow;
+ halUuid->timeMid = uuid->timeMid;
+ halUuid->versionAndTimeHigh = uuid->timeHiAndVersion;
+ halUuid->variantAndClockSeqHigh = uuid->clockSeq;
+ memcpy(halUuid->node.data(), &uuid->node[0], sizeof(uuid->node));
+}
+
+void SoundTriggerHalHidl::convertUuidFromHal(sound_trigger_uuid_t *uuid,
+ const Uuid *halUuid)
+{
+ uuid->timeLow = halUuid->timeLow;
+ uuid->timeMid = halUuid->timeMid;
+ uuid->timeHiAndVersion = halUuid->versionAndTimeHigh;
+ uuid->clockSeq = halUuid->variantAndClockSeqHigh;
+ memcpy(&uuid->node[0], halUuid->node.data(), sizeof(uuid->node));
+}
+
+void SoundTriggerHalHidl::convertPropertiesFromHal(
+ struct sound_trigger_properties *properties,
+ const ISoundTriggerHw::Properties *halProperties)
+{
+ strlcpy(properties->implementor,
+ halProperties->implementor.c_str(), SOUND_TRIGGER_MAX_STRING_LEN);
+ strlcpy(properties->description,
+ halProperties->description.c_str(), SOUND_TRIGGER_MAX_STRING_LEN);
+ properties->version = halProperties->version;
+ convertUuidFromHal(&properties->uuid, &halProperties->uuid);
+ properties->max_sound_models = halProperties->maxSoundModels;
+ properties->max_key_phrases = halProperties->maxKeyPhrases;
+ properties->max_users = halProperties->maxUsers;
+ properties->recognition_modes = halProperties->recognitionModes;
+ properties->capture_transition = (bool)halProperties->captureTransition;
+ properties->max_buffer_ms = halProperties->maxBufferMs;
+ properties->concurrent_capture = (bool)halProperties->concurrentCapture;
+ properties->trigger_in_event = (bool)halProperties->triggerInEvent;
+ properties->power_consumption_mw = halProperties->powerConsumptionMw;
+}
+
+void SoundTriggerHalHidl::convertTriggerPhraseToHal(
+ ISoundTriggerHw::Phrase *halTriggerPhrase,
+ const struct sound_trigger_phrase *triggerPhrase)
+{
+ halTriggerPhrase->id = triggerPhrase->id;
+ halTriggerPhrase->recognitionModes = triggerPhrase->recognition_mode;
+ halTriggerPhrase->users.setToExternal((uint32_t *)&triggerPhrase->users[0], triggerPhrase->num_users);
+ halTriggerPhrase->locale = triggerPhrase->locale;
+ halTriggerPhrase->text = triggerPhrase->text;
+}
+
+ISoundTriggerHw::SoundModel *SoundTriggerHalHidl::convertSoundModelToHal(
+ const struct sound_trigger_sound_model *soundModel)
+{
+ ISoundTriggerHw::SoundModel *halModel = NULL;
+ if (soundModel->type == SOUND_MODEL_TYPE_KEYPHRASE) {
+ ISoundTriggerHw::PhraseSoundModel *halKeyPhraseModel =
+ new ISoundTriggerHw::PhraseSoundModel();
+ struct sound_trigger_phrase_sound_model *keyPhraseModel =
+ (struct sound_trigger_phrase_sound_model *)soundModel;
+ ISoundTriggerHw::Phrase *halPhrases =
+ new ISoundTriggerHw::Phrase[keyPhraseModel->num_phrases];
+
+
+ for (unsigned int i = 0; i < keyPhraseModel->num_phrases; i++) {
+ convertTriggerPhraseToHal(&halPhrases[i],
+ &keyPhraseModel->phrases[i]);
+ }
+ halKeyPhraseModel->phrases.setToExternal(halPhrases, keyPhraseModel->num_phrases);
+ // FIXME: transfer buffer ownership. should have a method for that in hidl_vec
+ halKeyPhraseModel->phrases.resize(keyPhraseModel->num_phrases);
+
+ delete[] halPhrases;
+
+ halModel = (ISoundTriggerHw::SoundModel *)halKeyPhraseModel;
+ } else {
+ halModel = new ISoundTriggerHw::SoundModel();
+ }
+ halModel->type = (SoundModelType)soundModel->type;
+ convertUuidToHal(&halModel->uuid, &soundModel->uuid);
+ convertUuidToHal(&halModel->vendorUuid, &soundModel->vendor_uuid);
+ halModel->data.setToExternal((uint8_t *)soundModel + soundModel->data_offset, soundModel->data_size);
+ halModel->data.resize(soundModel->data_size);
+
+ return halModel;
+}
+
+void SoundTriggerHalHidl::convertPhraseRecognitionExtraToHal(
+ PhraseRecognitionExtra *halExtra,
+ const struct sound_trigger_phrase_recognition_extra *extra)
+{
+ halExtra->id = extra->id;
+ halExtra->recognitionModes = extra->recognition_modes;
+ halExtra->confidenceLevel = extra->confidence_level;
+ ConfidenceLevel *halLevels =
+ new ConfidenceLevel[extra->num_levels];
+ for (unsigned int i = 0; i < extra->num_levels; i++) {
+ halLevels[i].userId = extra->levels[i].user_id;
+ halLevels[i].levelPercent = extra->levels[i].level;
+ }
+ halExtra->levels.setToExternal(halLevels, extra->num_levels);
+ // FIXME: transfer buffer ownership. should have a method for that in hidl_vec
+ halExtra->levels.resize(extra->num_levels);
+
+ delete[] halLevels;
+}
+
+
+ISoundTriggerHw::RecognitionConfig *SoundTriggerHalHidl::convertRecognitionConfigToHal(
+ const struct sound_trigger_recognition_config *config)
+{
+ ISoundTriggerHw::RecognitionConfig *halConfig =
+ new ISoundTriggerHw::RecognitionConfig();
+
+ halConfig->captureHandle = config->capture_handle;
+ halConfig->captureDevice = (AudioDevice)config->capture_device;
+ halConfig->captureRequested = (uint32_t)config->capture_requested;
+
+ PhraseRecognitionExtra *halExtras =
+ new PhraseRecognitionExtra[config->num_phrases];
+
+ for (unsigned int i = 0; i < config->num_phrases; i++) {
+ convertPhraseRecognitionExtraToHal(&halExtras[i],
+ &config->phrases[i]);
+ }
+ halConfig->phrases.setToExternal(halExtras, config->num_phrases);
+ // FIXME: transfer buffer ownership. should have a method for that in hidl_vec
+ halConfig->phrases.resize(config->num_phrases);
+
+ delete[] halExtras;
+
+ halConfig->data.setToExternal((uint8_t *)config + config->data_offset, config->data_size);
+
+ return halConfig;
+}
+
+
+// ISoundTriggerHwCallback
+::android::hardware::Return<void> SoundTriggerHalHidl::recognitionCallback(
+ const ISoundTriggerHwCallback::RecognitionEvent& halEvent,
+ CallbackCookie cookie)
+{
+ sp<SoundModel> model;
+ {
+ AutoMutex lock(mLock);
+ model = mSoundModels.valueFor((SoundModelHandle)cookie);
+ if (model == 0) {
+ return Return<void>();
+ }
+ }
+ struct sound_trigger_recognition_event *event = convertRecognitionEventFromHal(&halEvent);
+ if (event == NULL) {
+ return Return<void>();
+ }
+ event->model = model->mHandle;
+ model->mRecognitionCallback(event, model->mRecognitionCookie);
+
+ free(event);
+
+ return Return<void>();
+}
+
+::android::hardware::Return<void> SoundTriggerHalHidl::phraseRecognitionCallback(
+ const ISoundTriggerHwCallback::PhraseRecognitionEvent& halEvent,
+ CallbackCookie cookie)
+{
+ sp<SoundModel> model;
+ {
+ AutoMutex lock(mLock);
+ model = mSoundModels.valueFor((SoundModelHandle)cookie);
+ if (model == 0) {
+ return Return<void>();
+ }
+ }
+
+ struct sound_trigger_recognition_event *event = convertRecognitionEventFromHal(
+ (const ISoundTriggerHwCallback::RecognitionEvent *)&halEvent);
+ if (event == NULL) {
+ return Return<void>();
+ }
+
+ event->model = model->mHandle;
+ model->mRecognitionCallback(event, model->mRecognitionCookie);
+
+ free(event);
+
+ return Return<void>();
+}
+
+::android::hardware::Return<void> SoundTriggerHalHidl::soundModelCallback(
+ const ISoundTriggerHwCallback::ModelEvent& halEvent,
+ CallbackCookie cookie)
+{
+ sp<SoundModel> model;
+ {
+ AutoMutex lock(mLock);
+ model = mSoundModels.valueFor((SoundModelHandle)cookie);
+ if (model == 0) {
+ return Return<void>();
+ }
+ }
+
+ struct sound_trigger_model_event *event = convertSoundModelEventFromHal(&halEvent);
+ if (event == NULL) {
+ return Return<void>();
+ }
+
+ event->model = model->mHandle;
+ model->mSoundModelCallback(event, model->mSoundModelCookie);
+
+ free(event);
+
+ return Return<void>();
+}
+
+
+struct sound_trigger_model_event *SoundTriggerHalHidl::convertSoundModelEventFromHal(
+ const ISoundTriggerHwCallback::ModelEvent *halEvent)
+{
+ struct sound_trigger_model_event *event = (struct sound_trigger_model_event *)malloc(
+ sizeof(struct sound_trigger_model_event) +
+ halEvent->data.size());
+ if (event == NULL) {
+ return NULL;
+ }
+
+ event->status = (int)halEvent->status;
+ // event->model to be set by caller
+ event->data_offset = sizeof(struct sound_trigger_model_event);
+ event->data_size = halEvent->data.size();
+ uint8_t *dst = (uint8_t *)event + event->data_offset;
+ uint8_t *src = (uint8_t *)&halEvent->data[0];
+ memcpy(dst, src, halEvent->data.size());
+
+ return event;
+}
+
+void SoundTriggerHalHidl::convertPhraseRecognitionExtraFromHal(
+ struct sound_trigger_phrase_recognition_extra *extra,
+ const PhraseRecognitionExtra *halExtra)
+{
+ extra->id = halExtra->id;
+ extra->recognition_modes = halExtra->recognitionModes;
+ extra->confidence_level = halExtra->confidenceLevel;
+
+ size_t i;
+ for (i = 0; i < halExtra->levels.size() && i < SOUND_TRIGGER_MAX_USERS; i++) {
+ extra->levels[i].user_id = halExtra->levels[i].userId;
+ extra->levels[i].level = halExtra->levels[i].levelPercent;
+ }
+ extra->num_levels = (unsigned int)i;
+}
+
+
+struct sound_trigger_recognition_event *SoundTriggerHalHidl::convertRecognitionEventFromHal(
+ const ISoundTriggerHwCallback::RecognitionEvent *halEvent)
+{
+ struct sound_trigger_recognition_event *event;
+
+ if (halEvent->type == SoundModelType::KEYPHRASE) {
+ struct sound_trigger_phrase_recognition_event *phraseEvent =
+ (struct sound_trigger_phrase_recognition_event *)malloc(
+ sizeof(struct sound_trigger_phrase_recognition_event) +
+ halEvent->data.size());
+ if (phraseEvent == NULL) {
+ return NULL;
+ }
+ const ISoundTriggerHwCallback::PhraseRecognitionEvent *halPhraseEvent =
+ (const ISoundTriggerHwCallback::PhraseRecognitionEvent *)halEvent;
+
+ for (unsigned int i = 0; i < halPhraseEvent->phraseExtras.size(); i++) {
+ convertPhraseRecognitionExtraFromHal(&phraseEvent->phrase_extras[i],
+ &halPhraseEvent->phraseExtras[i]);
+ }
+ phraseEvent->num_phrases = halPhraseEvent->phraseExtras.size();
+ event = (struct sound_trigger_recognition_event *)phraseEvent;
+ event->data_offset = sizeof(sound_trigger_phrase_recognition_event);
+ } else {
+ event = (struct sound_trigger_recognition_event *)malloc(
+ sizeof(struct sound_trigger_recognition_event) + halEvent->data.size());
+ if (event == NULL) {
+ return NULL;
+ }
+ event->data_offset = sizeof(sound_trigger_recognition_event);
+ }
+ event->status = (int)halEvent->status;
+ event->type = (sound_trigger_sound_model_type_t)halEvent->type;
+ // event->model to be set by caller
+ event->capture_available = (bool)halEvent->captureAvailable;
+ event->capture_session = halEvent->captureSession;
+ event->capture_delay_ms = halEvent->captureDelayMs;
+ event->capture_preamble_ms = halEvent->capturePreambleMs;
+ event->trigger_in_data = (bool)halEvent->triggerInData;
+ event->audio_config.sample_rate = halEvent->audioConfig.sampleRateHz;
+ event->audio_config.channel_mask = (audio_channel_mask_t)halEvent->audioConfig.channelMask;
+ event->audio_config.format = (audio_format_t)halEvent->audioConfig.format;
+
+ event->data_size = halEvent->data.size();
+ uint8_t *dst = (uint8_t *)event + event->data_offset;
+ uint8_t *src = (uint8_t *)&halEvent->data[0];
+ memcpy(dst, src, halEvent->data.size());
+
+ return event;
+}
+
+} // namespace android
diff --git a/services/soundtrigger/SoundTriggerHalHidl.h b/services/soundtrigger/SoundTriggerHalHidl.h
new file mode 100644
index 0000000..0c68cf1
--- /dev/null
+++ b/services/soundtrigger/SoundTriggerHalHidl.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_SOUNDTRIGGER_HAL_HIDL_H
+#define ANDROID_HARDWARE_SOUNDTRIGGER_HAL_HIDL_H
+
+#include <stdatomic.h>
+#include <utils/RefBase.h>
+#include <utils/KeyedVector.h>
+#include <utils/Vector.h>
+#include <utils/threads.h>
+#include "SoundTriggerHalInterface.h"
+#include <android/hardware/soundtrigger/2.0/types.h>
+#include <android/hardware/soundtrigger/2.0/ISoundTriggerHw.h>
+#include <android/hardware/soundtrigger/2.0/ISoundTriggerHwCallback.h>
+
+namespace android {
+
+using android::hardware::audio::common::V2_0::Uuid;
+using android::hardware::soundtrigger::V2_0::ConfidenceLevel;
+using android::hardware::soundtrigger::V2_0::PhraseRecognitionExtra;
+using android::hardware::soundtrigger::V2_0::SoundModelType;
+using android::hardware::soundtrigger::V2_0::SoundModelHandle;
+using android::hardware::soundtrigger::V2_0::ISoundTriggerHw;
+using android::hardware::soundtrigger::V2_0::ISoundTriggerHwCallback;
+
/*
 * SoundTriggerHalInterface implementation backed by the HIDL
 * android.hardware.soundtrigger@2.0 service.  The class also implements the
 * HIDL ISoundTriggerHwCallback interface so that events reported by the HAL
 * service can be routed back to the callbacks registered per sound model.
 */
class SoundTriggerHalHidl : public SoundTriggerHalInterface,
                            public virtual ISoundTriggerHwCallback
{
public:
    virtual int getProperties(struct sound_trigger_properties *properties);

    /*
     * Load a sound model. Once loaded, recognition of this model can be started and stopped.
     * Only one active recognition per model at a time. The SoundTrigger service will handle
     * concurrent recognition requests by different users/applications on the same model.
     * The implementation returns a unique handle used by other functions (unload_sound_model(),
     * start_recognition(), etc...)
     */
    virtual int loadSoundModel(struct sound_trigger_sound_model *sound_model,
                               sound_model_callback_t callback,
                               void *cookie,
                               sound_model_handle_t *handle);

    /*
     * Unload a sound model. A sound model can be unloaded to make room for a new one to overcome
     * implementation limitations.
     */
    virtual int unloadSoundModel(sound_model_handle_t handle);

    /* Start recognition on a given model. Only one recognition active at a time per model.
     * Once recognition succeeds or fails, the callback is called.
     * TODO: group recognition configuration parameters into one struct and add key phrase options.
     */
    virtual int startRecognition(sound_model_handle_t handle,
                                 const struct sound_trigger_recognition_config *config,
                                 recognition_callback_t callback,
                                 void *cookie);

    /* Stop recognition on a given model.
     * The implementation does not have to call the callback when stopped via this method.
     */
    virtual int stopRecognition(sound_model_handle_t handle);

    /* Stop recognition on all models.
     * Only supported for device api versions SOUND_TRIGGER_DEVICE_API_VERSION_1_1 or above.
     * If no implementation is provided, stop_recognition will be called for each running model.
     */
    virtual int stopAllRecognitions();

    // ISoundTriggerHwCallback: entry points invoked by the HAL service.
    virtual ::android::hardware::Return<void> recognitionCallback(
            const ISoundTriggerHwCallback::RecognitionEvent& event, CallbackCookie cookie);
    virtual ::android::hardware::Return<void> phraseRecognitionCallback(
            const ISoundTriggerHwCallback::PhraseRecognitionEvent& event, int32_t cookie);
    virtual ::android::hardware::Return<void> soundModelCallback(
            const ISoundTriggerHwCallback::ModelEvent& event, CallbackCookie cookie);
private:
    /* Bookkeeping for one loaded model: the framework-visible handle, the
     * handle returned by the HAL, and the callbacks/cookies registered for
     * sound model and recognition events. */
    class SoundModel : public RefBase {
    public:
        SoundModel(sound_model_handle_t handle, sound_model_callback_t callback,
                   void *cookie, android::hardware::soundtrigger::V2_0::SoundModelHandle halHandle)
            : mHandle(handle), mHalHandle(halHandle),
              mSoundModelCallback(callback), mSoundModelCookie(cookie),
              mRecognitionCallback(NULL), mRecognitionCookie(NULL) {}
        ~SoundModel() {}

        sound_model_handle_t mHandle;    // framework-visible handle
        android::hardware::soundtrigger::V2_0::SoundModelHandle mHalHandle;  // HAL-side handle
        sound_model_callback_t mSoundModelCallback;
        void * mSoundModelCookie;
        recognition_callback_t mRecognitionCallback;  // non-NULL while recognition is requested
        void * mRecognitionCookie;
    };

    friend class SoundTriggerHalInterface;

    explicit SoundTriggerHalHidl(const char *moduleName = NULL);
    virtual ~SoundTriggerHalHidl();

    // Conversion helpers between framework (system/sound_trigger.h) types and
    // the HIDL soundtrigger@2.0 types.
    void convertUuidToHal(Uuid *halUuid,
                          const sound_trigger_uuid_t *uuid);
    void convertUuidFromHal(sound_trigger_uuid_t *uuid,
                            const Uuid *halUuid);

    void convertPropertiesFromHal(
            struct sound_trigger_properties *properties,
            const ISoundTriggerHw::Properties *halProperties);

    void convertTriggerPhraseToHal(
            ISoundTriggerHw::Phrase *halTriggerPhrase,
            const struct sound_trigger_phrase *triggerPhrase);
    ISoundTriggerHw::SoundModel *convertSoundModelToHal(
            const struct sound_trigger_sound_model *soundModel);

    void convertPhraseRecognitionExtraToHal(
            PhraseRecognitionExtra *halExtra,
            const struct sound_trigger_phrase_recognition_extra *extra);
    ISoundTriggerHw::RecognitionConfig *convertRecognitionConfigToHal(
            const struct sound_trigger_recognition_config *config);

    struct sound_trigger_model_event *convertSoundModelEventFromHal(
            const ISoundTriggerHwCallback::ModelEvent *halEvent);
    void convertPhraseRecognitionExtraFromHal(
            struct sound_trigger_phrase_recognition_extra *extra,
            const PhraseRecognitionExtra *halExtra);
    struct sound_trigger_recognition_event *convertRecognitionEventFromHal(
            const ISoundTriggerHwCallback::RecognitionEvent *halEvent);

    uint32_t nextUniqueId();
    sp<ISoundTriggerHw> getService();
    sp<SoundModel> getModel(sound_model_handle_t handle);
    sp<SoundModel> removeModel(sound_model_handle_t handle);

    static pthread_once_t sOnceControl;
    static void sOnceInit();

    Mutex mLock;
    // NOTE(review): lock roles are not visible from this header; presumably
    // mHalLock serializes calls into the HAL service — confirm in the .cpp.
    Mutex mHalLock;
    const char *mModuleName;
    volatile atomic_uint_fast32_t mNextUniqueId;  // counter backing nextUniqueId()
    // Loaded sound models, keyed by their framework-visible handle.
    DefaultKeyedVector< sound_model_handle_t , sp<SoundModel> > mSoundModels;
    sp<::android::hardware::soundtrigger::V2_0::ISoundTriggerHw> mISoundTrigger;  // HAL proxy (see getService())
};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_SOUNDTRIGGER_HAL_HIDL_H
diff --git a/services/soundtrigger/SoundTriggerHalInterface.h b/services/soundtrigger/SoundTriggerHalInterface.h
new file mode 100644
index 0000000..c083195
--- /dev/null
+++ b/services/soundtrigger/SoundTriggerHalInterface.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_SOUNDTRIGGER_HAL_INTERFACE_H
+#define ANDROID_HARDWARE_SOUNDTRIGGER_HAL_INTERFACE_H
+
+#include <utils/RefBase.h>
+#include <system/sound_trigger.h>
+#include <hardware/sound_trigger.h>
+
+namespace android {
+
/*
 * Abstraction over the sound trigger HAL, hiding whether the backing
 * implementation is the legacy libhardware module or a HIDL service.
 * Methods return 0 on success or a negative errno value on error
 * (e.g. -ENODEV when no HAL device could be opened).
 */
class SoundTriggerHalInterface : public virtual RefBase
{
public:
    /* get a sound trigger HAL instance */
    static sp<SoundTriggerHalInterface> connectModule(const char *moduleName);

    virtual ~SoundTriggerHalInterface() {}

    virtual int getProperties(struct sound_trigger_properties *properties) = 0;

    /*
     * Load a sound model. Once loaded, recognition of this model can be started and stopped.
     * Only one active recognition per model at a time. The SoundTrigger service will handle
     * concurrent recognition requests by different users/applications on the same model.
     * The implementation returns a unique handle used by other functions (unload_sound_model(),
     * start_recognition(), etc...)
     */
    virtual int loadSoundModel(struct sound_trigger_sound_model *sound_model,
                               sound_model_callback_t callback,
                               void *cookie,
                               sound_model_handle_t *handle) = 0;

    /*
     * Unload a sound model. A sound model can be unloaded to make room for a new one to overcome
     * implementation limitations.
     */
    virtual int unloadSoundModel(sound_model_handle_t handle) = 0;

    /* Start recognition on a given model. Only one recognition active at a time per model.
     * Once recognition succeeds or fails, the callback is called.
     * TODO: group recognition configuration parameters into one struct and add key phrase options.
     */
    virtual int startRecognition(sound_model_handle_t handle,
                                 const struct sound_trigger_recognition_config *config,
                                 recognition_callback_t callback,
                                 void *cookie) = 0;

    /* Stop recognition on a given model.
     * The implementation does not have to call the callback when stopped via this method.
     */
    virtual int stopRecognition(sound_model_handle_t handle) = 0;

    /* Stop recognition on all models.
     * Only supported for device api versions SOUND_TRIGGER_DEVICE_API_VERSION_1_1 or above.
     * If no implementation is provided, stop_recognition will be called for each running model.
     */
    virtual int stopAllRecognitions() = 0;

protected:
    // Instances are only created through connectModule().
    SoundTriggerHalInterface() {}
};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_SOUNDTRIGGER_HAL_INTERFACE_H
diff --git a/services/soundtrigger/SoundTriggerHalLegacy.cpp b/services/soundtrigger/SoundTriggerHalLegacy.cpp
new file mode 100644
index 0000000..2b78818
--- /dev/null
+++ b/services/soundtrigger/SoundTriggerHalLegacy.cpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <utils/Log.h>
+#include "SoundTriggerHalLegacy.h"
+
+namespace android {
+
+/* static */
+sp<SoundTriggerHalInterface> SoundTriggerHalInterface::connectModule(const char *moduleName)
+{
+ return new SoundTriggerHalLegacy(moduleName);
+}
+
// Store the module name only; opening the hardware module is deferred to
// onFirstRef() so a failure can be detected after construction
// (mHwDevice stays NULL on error and all entry points return -ENODEV).
SoundTriggerHalLegacy::SoundTriggerHalLegacy(const char *moduleName)
    : mModuleName(moduleName), mHwDevice(NULL)
{
}
+
+void SoundTriggerHalLegacy::onFirstRef()
+{
+ const hw_module_t *mod;
+ int rc;
+
+ if (mModuleName == NULL) {
+ mModuleName = "primary";
+ }
+
+ rc = hw_get_module_by_class(SOUND_TRIGGER_HARDWARE_MODULE_ID, mModuleName, &mod);
+ if (rc != 0) {
+ ALOGE("couldn't load sound trigger module %s.%s (%s)",
+ SOUND_TRIGGER_HARDWARE_MODULE_ID, mModuleName, strerror(-rc));
+ return;
+ }
+ rc = sound_trigger_hw_device_open(mod, &mHwDevice);
+ if (rc != 0) {
+ ALOGE("couldn't open sound trigger hw device in %s.%s (%s)",
+ SOUND_TRIGGER_HARDWARE_MODULE_ID, mModuleName, strerror(-rc));
+ mHwDevice = NULL;
+ return;
+ }
+ if (mHwDevice->common.version < SOUND_TRIGGER_DEVICE_API_VERSION_1_0 ||
+ mHwDevice->common.version > SOUND_TRIGGER_DEVICE_API_VERSION_CURRENT) {
+ ALOGE("wrong sound trigger hw device version %04x", mHwDevice->common.version);
+ return;
+ }
+}
+
// Close the hardware device if onFirstRef() opened one successfully.
SoundTriggerHalLegacy::~SoundTriggerHalLegacy()
{
    if (mHwDevice != NULL) {
        sound_trigger_hw_device_close(mHwDevice);
    }
}
+
+int SoundTriggerHalLegacy::getProperties(struct sound_trigger_properties *properties)
+{
+ if (mHwDevice == NULL) {
+ return -ENODEV;
+ }
+ return mHwDevice->get_properties(mHwDevice, properties);
+}
+
+int SoundTriggerHalLegacy::loadSoundModel(struct sound_trigger_sound_model *sound_model,
+ sound_model_callback_t callback,
+ void *cookie,
+ sound_model_handle_t *handle)
+{
+ if (mHwDevice == NULL) {
+ return -ENODEV;
+ }
+ return mHwDevice->load_sound_model(mHwDevice, sound_model, callback, cookie, handle);
+}
+
+int SoundTriggerHalLegacy::unloadSoundModel(sound_model_handle_t handle)
+{
+ if (mHwDevice == NULL) {
+ return -ENODEV;
+ }
+ return mHwDevice->unload_sound_model(mHwDevice, handle);
+}
+
+int SoundTriggerHalLegacy::startRecognition(sound_model_handle_t handle,
+ const struct sound_trigger_recognition_config *config,
+ recognition_callback_t callback,
+ void *cookie)
+{
+ if (mHwDevice == NULL) {
+ return -ENODEV;
+ }
+ return mHwDevice->start_recognition(mHwDevice, handle, config, callback, cookie);
+}
+
+int SoundTriggerHalLegacy::stopRecognition(sound_model_handle_t handle)
+{
+ if (mHwDevice == NULL) {
+ return -ENODEV;
+ }
+ return mHwDevice->stop_recognition(mHwDevice, handle);
+}
+
+int SoundTriggerHalLegacy::stopAllRecognitions()
+{
+ if (mHwDevice == NULL) {
+ return -ENODEV;
+ }
+ if (mHwDevice->common.version >= SOUND_TRIGGER_DEVICE_API_VERSION_1_1 &&
+ mHwDevice->stop_all_recognitions) {
+ return mHwDevice->stop_all_recognitions(mHwDevice);
+ }
+ return -ENOSYS;
+}
+
+} // namespace android
diff --git a/services/soundtrigger/SoundTriggerHalLegacy.h b/services/soundtrigger/SoundTriggerHalLegacy.h
new file mode 100644
index 0000000..52488de
--- /dev/null
+++ b/services/soundtrigger/SoundTriggerHalLegacy.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_SOUNDTRIGGER_HAL_LEGACY_H
+#define ANDROID_HARDWARE_SOUNDTRIGGER_HAL_LEGACY_H
+
+#include "SoundTriggerHalInterface.h"
+
+namespace android {
+
+class SoundTriggerHalLegacy : public SoundTriggerHalInterface
+
+{
+public:
+ virtual ~SoundTriggerHalLegacy();
+
+ virtual int getProperties(struct sound_trigger_properties *properties);
+
+ /*
+ * Load a sound model. Once loaded, recognition of this model can be started and stopped.
+ * Only one active recognition per model at a time. The SoundTrigger service will handle
+ * concurrent recognition requests by different users/applications on the same model.
+ * The implementation returns a unique handle used by other functions (unload_sound_model(),
+ * start_recognition(), etc...
+ */
+ virtual int loadSoundModel(struct sound_trigger_sound_model *sound_model,
+ sound_model_callback_t callback,
+ void *cookie,
+ sound_model_handle_t *handle);
+
+ /*
+ * Unload a sound model. A sound model can be unloaded to make room for a new one to overcome
+ * implementation limitations.
+ */
+ virtual int unloadSoundModel(sound_model_handle_t handle);
+
+ /* Start recognition on a given model. Only one recognition active at a time per model.
+ * Once recognition succeeds of fails, the callback is called.
+ * TODO: group recognition configuration parameters into one struct and add key phrase options.
+ */
+ virtual int startRecognition(sound_model_handle_t handle,
+ const struct sound_trigger_recognition_config *config,
+ recognition_callback_t callback,
+ void *cookie);
+
+ /* Stop recognition on a given model.
+ * The implementation does not have to call the callback when stopped via this method.
+ */
+ virtual int stopRecognition(sound_model_handle_t handle);
+
+ /* Stop recognition on all models.
+ * Only supported for device api versions SOUND_TRIGGER_DEVICE_API_VERSION_1_1 or above.
+ * If no implementation is provided, stop_recognition will be called for each running model.
+ */
+ int stopAllRecognitions();
+
+ // RefBase
+ virtual void onFirstRef();
+
+private:
+
+ friend class SoundTriggerHalInterface;
+
+ explicit SoundTriggerHalLegacy(const char *moduleName = NULL);
+
+ const char *mModuleName;
+ struct sound_trigger_hw_device* mHwDevice;
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_SOUNDTRIGGER_HAL_LEGACY_H
diff --git a/services/soundtrigger/SoundTriggerHwService.cpp b/services/soundtrigger/SoundTriggerHwService.cpp
index 6a52b9c..5b8d990 100644
--- a/services/soundtrigger/SoundTriggerHwService.cpp
+++ b/services/soundtrigger/SoundTriggerHwService.cpp
@@ -32,17 +32,16 @@
#include <binder/IServiceManager.h>
#include <binder/MemoryBase.h>
#include <binder/MemoryHeapBase.h>
-#include <hardware/sound_trigger.h>
+#include <system/sound_trigger.h>
#include <ServiceUtilities.h>
#include "SoundTriggerHwService.h"
-namespace android {
-
#ifdef SOUND_TRIGGER_USE_STUB_MODULE
#define HW_MODULE_PREFIX "stub"
#else
#define HW_MODULE_PREFIX "primary"
#endif
+namespace android {
SoundTriggerHwService::SoundTriggerHwService()
: BnSoundTriggerHwService(),
@@ -54,30 +53,17 @@
void SoundTriggerHwService::onFirstRef()
{
- const hw_module_t *mod;
int rc;
- sound_trigger_hw_device *dev;
- rc = hw_get_module_by_class(SOUND_TRIGGER_HARDWARE_MODULE_ID, HW_MODULE_PREFIX, &mod);
- if (rc != 0) {
- ALOGE("couldn't load sound trigger module %s.%s (%s)",
- SOUND_TRIGGER_HARDWARE_MODULE_ID, HW_MODULE_PREFIX, strerror(-rc));
- return;
- }
- rc = sound_trigger_hw_device_open(mod, &dev);
- if (rc != 0) {
- ALOGE("couldn't open sound trigger hw device in %s.%s (%s)",
- SOUND_TRIGGER_HARDWARE_MODULE_ID, HW_MODULE_PREFIX, strerror(-rc));
- return;
- }
- if (dev->common.version < SOUND_TRIGGER_DEVICE_API_VERSION_1_0 ||
- dev->common.version > SOUND_TRIGGER_DEVICE_API_VERSION_CURRENT) {
- ALOGE("wrong sound trigger hw device version %04x", dev->common.version);
- return;
- }
+ sp<SoundTriggerHalInterface> halInterface =
+ SoundTriggerHalInterface::connectModule(HW_MODULE_PREFIX);
+ if (halInterface == 0) {
+ ALOGW("could not connect to HAL");
+ return;
+ }
sound_trigger_module_descriptor descriptor;
- rc = dev->get_properties(dev, &descriptor.properties);
+ rc = halInterface->getProperties(&descriptor.properties);
if (rc != 0) {
ALOGE("could not read implementation properties");
return;
@@ -87,8 +73,7 @@
ALOGI("loaded default module %s, handle %d", descriptor.properties.description,
descriptor.handle);
- sp<ISoundTriggerClient> client;
- sp<Module> module = new Module(this, dev, descriptor, client);
+ sp<Module> module = new Module(this, halInterface, descriptor);
mModules.add(descriptor.handle, module);
mCallbackThread = new CallbackThread(this);
}
@@ -98,9 +83,6 @@
if (mCallbackThread != 0) {
mCallbackThread->exit();
}
- for (size_t i = 0; i < mModules.size(); i++) {
- sound_trigger_hw_device_close(mModules.valueAt(i)->hwDevice());
- }
}
status_t SoundTriggerHwService::listModules(struct sound_trigger_module_descriptor *modules,
@@ -143,11 +125,13 @@
}
sp<Module> module = mModules.valueAt(index);
- module->setClient(client);
- IInterface::asBinder(client)->linkToDeath(module);
- moduleInterface = module;
+ sp<ModuleClient> moduleClient = module->addClient(client);
+ if (moduleClient == 0) {
+ return NO_INIT;
+ }
- module->setCaptureState_l(mCaptureState);
+ moduleClient->setCaptureState_l(mCaptureState);
+ moduleInterface = moduleClient;
return NO_ERROR;
}
@@ -164,14 +148,6 @@
}
-void SoundTriggerHwService::detachModule(const sp<Module>& module)
-{
- ALOGV("detachModule");
- AutoMutex lock(mServiceLock);
- module->clearClient();
-}
-
-
static const int kDumpLockRetries = 50;
static const int kDumpLockSleep = 60000;
@@ -293,8 +269,10 @@
return;
}
- sendCallbackEvent_l(new CallbackEvent(CallbackEvent::TYPE_RECOGNITION,
- eventMemory, strongModule));
+ sp<CallbackEvent> callbackEvent = new CallbackEvent(CallbackEvent::TYPE_RECOGNITION,
+ eventMemory);
+ callbackEvent->setModule(strongModule);
+ sendCallbackEvent_l(callbackEvent);
}
// static
@@ -346,8 +324,10 @@
if (strongModule == 0) {
return;
}
- sendCallbackEvent_l(new CallbackEvent(CallbackEvent::TYPE_SOUNDMODEL,
- eventMemory, strongModule));
+ sp<CallbackEvent> callbackEvent = new CallbackEvent(CallbackEvent::TYPE_SOUNDMODEL,
+ eventMemory);
+ callbackEvent->setModule(strongModule);
+ sendCallbackEvent_l(callbackEvent);
}
@@ -383,8 +363,23 @@
if (strongModule == 0) {
return;
}
- sendCallbackEvent_l(new CallbackEvent(CallbackEvent::TYPE_SERVICE_STATE,
- eventMemory, strongModule));
+ sp<CallbackEvent> callbackEvent = new CallbackEvent(CallbackEvent::TYPE_SERVICE_STATE,
+ eventMemory);
+ callbackEvent->setModule(strongModule);
+ sendCallbackEvent_l(callbackEvent);
+}
+
+void SoundTriggerHwService::sendServiceStateEvent_l(sound_trigger_service_state_t state,
+ ModuleClient *moduleClient)
+{
+ sp<IMemory> eventMemory = prepareServiceStateEvent_l(state);
+ if (eventMemory == 0) {
+ return;
+ }
+ sp<CallbackEvent> callbackEvent = new CallbackEvent(CallbackEvent::TYPE_SERVICE_STATE,
+ eventMemory);
+ callbackEvent->setModuleClient(moduleClient);
+ sendCallbackEvent_l(callbackEvent);
}
// call with mServiceLock held
@@ -397,14 +392,25 @@
{
ALOGV("onCallbackEvent");
sp<Module> module;
+ sp<ModuleClient> moduleClient;
{
AutoMutex lock(mServiceLock);
+ //CallbackEvent is either for Module or ModuleClient
module = event->mModule.promote();
if (module == 0) {
- return;
+ moduleClient = event->mModuleClient.promote();
+ if (moduleClient == 0) {
+ return;
+ }
}
}
- module->onCallbackEvent(event);
+ if (module != 0) {
+ ALOGV("onCallbackEvent for module");
+ module->onCallbackEvent(event);
+ } else if (moduleClient != 0) {
+ ALOGV("onCallbackEvent for moduleClient");
+ moduleClient->onCallbackEvent(event);
+ }
{
AutoMutex lock(mServiceLock);
// clear now to execute with mServiceLock locked
@@ -474,9 +480,8 @@
mCallbackCond.signal();
}
-SoundTriggerHwService::CallbackEvent::CallbackEvent(event_type type, sp<IMemory> memory,
- wp<Module> module)
- : mType(type), mMemory(memory), mModule(module)
+SoundTriggerHwService::CallbackEvent::CallbackEvent(event_type type, sp<IMemory> memory)
+ : mType(type), mMemory(memory)
{
}
@@ -489,52 +494,82 @@
#define LOG_TAG "SoundTriggerHwService::Module"
SoundTriggerHwService::Module::Module(const sp<SoundTriggerHwService>& service,
- sound_trigger_hw_device* hwDevice,
- sound_trigger_module_descriptor descriptor,
- const sp<ISoundTriggerClient>& client)
- : mService(service), mHwDevice(hwDevice), mDescriptor(descriptor),
- mClient(client), mServiceState(SOUND_TRIGGER_STATE_NO_INIT)
+ const sp<SoundTriggerHalInterface>& halInterface,
+ sound_trigger_module_descriptor descriptor)
+ : mService(service), mHalInterface(halInterface), mDescriptor(descriptor),
+ mServiceState(SOUND_TRIGGER_STATE_NO_INIT)
{
}
SoundTriggerHwService::Module::~Module() {
+ mModuleClients.clear();
}
-void SoundTriggerHwService::Module::detach() {
- ALOGV("detach()");
- if (!captureHotwordAllowed()) {
- return;
- }
- {
- AutoMutex lock(mLock);
- for (size_t i = 0; i < mModels.size(); i++) {
- sp<Model> model = mModels.valueAt(i);
- ALOGV("detach() unloading model %d", model->mHandle);
- if (model->mState == Model::STATE_ACTIVE) {
- mHwDevice->stop_recognition(mHwDevice, model->mHandle);
- }
- mHwDevice->unload_sound_model(mHwDevice, model->mHandle);
+sp<SoundTriggerHwService::ModuleClient>
+SoundTriggerHwService::Module::addClient(const sp<ISoundTriggerClient>& client)
+{
+ AutoMutex lock(mLock);
+ sp<ModuleClient> moduleClient;
+
+ for (size_t i = 0; i < mModuleClients.size(); i++) {
+ if (mModuleClients[i]->client() == client) {
+ // Client already present, reuse client
+ return moduleClient;
}
- mModels.clear();
}
- if (mClient != 0) {
- IInterface::asBinder(mClient)->unlinkToDeath(this);
+ moduleClient = new ModuleClient(this, client);
+
+ ALOGV("addClient() client %p", moduleClient.get());
+ mModuleClients.add(moduleClient);
+
+ return moduleClient;
+}
+
+void SoundTriggerHwService::Module::detach(const sp<ModuleClient>& moduleClient)
+{
+ ALOGV("Module::detach()");
+ AutoMutex lock(mLock);
+ ssize_t index = -1;
+
+ for (size_t i = 0; i < mModuleClients.size(); i++) {
+ if (mModuleClients[i] == moduleClient) {
+ index = i;
+ break;
+ }
}
- sp<SoundTriggerHwService> service = mService.promote();
- if (service == 0) {
+ if (index == -1) {
return;
}
- service->detachModule(this);
+
+ ALOGV("remove client %p", moduleClient.get());
+ mModuleClients.removeAt(index);
+
+ // Iterate in reverse order as models are removed from list inside the loop.
+ for (size_t i = mModels.size(); i > 0; i--) {
+ sp<Model> model = mModels.valueAt(i - 1);
+ if (moduleClient == model->mModuleClient) {
+ mModels.removeItemsAt(i - 1);
+ ALOGV("detach() unloading model %d", model->mHandle);
+ if (mHalInterface != 0) {
+ if (model->mState == Model::STATE_ACTIVE) {
+ mHalInterface->stopRecognition(model->mHandle);
+ }
+ mHalInterface->unloadSoundModel(model->mHandle);
+ }
+ AudioSystem::releaseSoundTriggerSession(model->mCaptureSession);
+ mHalInterface->unloadSoundModel(model->mHandle);
+ }
+ }
}
status_t SoundTriggerHwService::Module::loadSoundModel(const sp<IMemory>& modelMemory,
- sound_model_handle_t *handle)
+ sp<ModuleClient> moduleClient,
+ sound_model_handle_t *handle)
{
ALOGV("loadSoundModel() handle");
- if (!captureHotwordAllowed()) {
- return PERMISSION_DENIED;
+ if (mHalInterface == 0) {
+ return NO_INIT;
}
-
if (modelMemory == 0 || modelMemory->pointer() == NULL) {
ALOGE("loadSoundModel() modelMemory is 0 or has NULL pointer()");
return BAD_VALUE;
@@ -566,7 +601,7 @@
return INVALID_OPERATION;
}
- status_t status = mHwDevice->load_sound_model(mHwDevice, sound_model,
+ status_t status = mHalInterface->loadSoundModel(sound_model,
SoundTriggerHwService::soundModelCallback,
this, handle);
@@ -582,7 +617,8 @@
return status;
}
- sp<Model> model = new Model(*handle, session, ioHandle, device, sound_model->type);
+ sp<Model> model = new Model(*handle, session, ioHandle, device, sound_model->type,
+ moduleClient);
mModels.replaceValueFor(*handle, model);
return status;
@@ -591,16 +627,15 @@
status_t SoundTriggerHwService::Module::unloadSoundModel(sound_model_handle_t handle)
{
ALOGV("unloadSoundModel() model handle %d", handle);
- if (!captureHotwordAllowed()) {
- return PERMISSION_DENIED;
- }
-
AutoMutex lock(mLock);
return unloadSoundModel_l(handle);
}
status_t SoundTriggerHwService::Module::unloadSoundModel_l(sound_model_handle_t handle)
{
+ if (mHalInterface == 0) {
+ return NO_INIT;
+ }
ssize_t index = mModels.indexOfKey(handle);
if (index < 0) {
return BAD_VALUE;
@@ -608,21 +643,20 @@
sp<Model> model = mModels.valueAt(index);
mModels.removeItem(handle);
if (model->mState == Model::STATE_ACTIVE) {
- mHwDevice->stop_recognition(mHwDevice, model->mHandle);
+ mHalInterface->stopRecognition(model->mHandle);
model->mState = Model::STATE_IDLE;
}
AudioSystem::releaseSoundTriggerSession(model->mCaptureSession);
- return mHwDevice->unload_sound_model(mHwDevice, handle);
+ return mHalInterface->unloadSoundModel(handle);
}
status_t SoundTriggerHwService::Module::startRecognition(sound_model_handle_t handle,
const sp<IMemory>& dataMemory)
{
ALOGV("startRecognition() model handle %d", handle);
- if (!captureHotwordAllowed()) {
- return PERMISSION_DENIED;
+ if (mHalInterface == 0) {
+ return NO_INIT;
}
-
if (dataMemory == 0 || dataMemory->pointer() == NULL) {
ALOGE("startRecognition() dataMemory is 0 or has NULL pointer()");
return BAD_VALUE;
@@ -657,7 +691,7 @@
//TODO: get capture handle and device from audio policy service
config->capture_handle = model->mCaptureIOHandle;
config->capture_device = model->mCaptureDevice;
- status_t status = mHwDevice->start_recognition(mHwDevice, handle, config,
+ status_t status = mHalInterface->startRecognition(handle, config,
SoundTriggerHwService::recognitionCallback,
this);
@@ -672,10 +706,9 @@
status_t SoundTriggerHwService::Module::stopRecognition(sound_model_handle_t handle)
{
ALOGV("stopRecognition() model handle %d", handle);
- if (!captureHotwordAllowed()) {
- return PERMISSION_DENIED;
+ if (mHalInterface == 0) {
+ return NO_INIT;
}
-
AutoMutex lock(mLock);
sp<Model> model = getModel(handle);
if (model == 0) {
@@ -685,12 +718,11 @@
if (model->mState != Model::STATE_ACTIVE) {
return INVALID_OPERATION;
}
- mHwDevice->stop_recognition(mHwDevice, handle);
+ mHalInterface->stopRecognition(handle);
model->mState = Model::STATE_IDLE;
return NO_ERROR;
}
-
void SoundTriggerHwService::Module::onCallbackEvent(const sp<CallbackEvent>& event)
{
ALOGV("onCallbackEvent type %d", event->mType);
@@ -700,8 +732,8 @@
if (eventMemory == 0 || eventMemory->pointer() == NULL) {
return;
}
- if (mClient == 0) {
- ALOGI("%s mClient == 0", __func__);
+ if (mModuleClients.isEmpty()) {
+ ALOGI("%s no clients", __func__);
return;
}
@@ -724,7 +756,7 @@
recognitionEvent->capture_session = model->mCaptureSession;
model->mState = Model::STATE_IDLE;
- client = mClient;
+ client = model->mModuleClient->client();
}
if (client != 0) {
client->onRecognitionEvent(eventMemory);
@@ -741,20 +773,24 @@
ALOGW("%s model == 0", __func__);
return;
}
- client = mClient;
+ client = model->mModuleClient->client();
}
if (client != 0) {
client->onSoundModelEvent(eventMemory);
}
} break;
case CallbackEvent::TYPE_SERVICE_STATE: {
- sp<ISoundTriggerClient> client;
+ Vector< sp<ISoundTriggerClient> > clients;
{
AutoMutex lock(mLock);
- client = mClient;
+ for (size_t i = 0; i < mModuleClients.size(); i++) {
+ if (mModuleClients[i] != 0) {
+ clients.add(mModuleClients[i]->client());
+ }
+ }
}
- if (client != 0) {
- client->onServiceStateChange(eventMemory);
+ for (size_t i = 0; i < clients.size(); i++) {
+ clients[i]->onServiceStateChange(eventMemory);
}
} break;
default:
@@ -773,12 +809,6 @@
return model;
}
-void SoundTriggerHwService::Module::binderDied(
- const wp<IBinder> &who __unused) {
- ALOGW("client binder died for module %d", mDescriptor.handle);
- detach();
-}
-
// Called with mServiceLock held
void SoundTriggerHwService::Module::setCaptureState_l(bool active)
{
@@ -808,18 +838,13 @@
}
const bool supports_stop_all =
- (mHwDevice->common.version >= SOUND_TRIGGER_DEVICE_API_VERSION_1_1 &&
- mHwDevice->stop_all_recognitions);
-
- if (supports_stop_all) {
- mHwDevice->stop_all_recognitions(mHwDevice);
- }
+ (mHalInterface != 0) && (mHalInterface->stopAllRecognitions() == ENOSYS);
for (size_t i = 0; i < mModels.size(); i++) {
sp<Model> model = mModels.valueAt(i);
if (model->mState == Model::STATE_ACTIVE) {
- if (!supports_stop_all) {
- mHwDevice->stop_recognition(mHwDevice, model->mHandle);
+ if (mHalInterface != 0 && !supports_stop_all) {
+ mHalInterface->stopRecognition(model->mHandle);
}
// keep model in ACTIVE state so that event is processed by onCallbackEvent()
if (model->mType == SOUND_MODEL_TYPE_KEYPHRASE) {
@@ -867,8 +892,10 @@
}
for (size_t i = 0; i < events.size(); i++) {
- service->sendCallbackEvent_l(new CallbackEvent(CallbackEvent::TYPE_RECOGNITION, events[i],
- this));
+ sp<CallbackEvent> callbackEvent = new CallbackEvent(CallbackEvent::TYPE_RECOGNITION,
+ events[i]);
+ callbackEvent->setModule(this);
+ service->sendCallbackEvent_l(callbackEvent);
}
exit:
@@ -878,17 +905,173 @@
SoundTriggerHwService::Model::Model(sound_model_handle_t handle, audio_session_t session,
audio_io_handle_t ioHandle, audio_devices_t device,
- sound_trigger_sound_model_type_t type) :
+ sound_trigger_sound_model_type_t type,
+ sp<ModuleClient>& moduleClient) :
mHandle(handle), mState(STATE_IDLE), mCaptureSession(session),
- mCaptureIOHandle(ioHandle), mCaptureDevice(device), mType(type)
+ mCaptureIOHandle(ioHandle), mCaptureDevice(device), mType(type),
+ mModuleClient(moduleClient)
{
-
}
-status_t SoundTriggerHwService::Module::dump(int fd __unused,
- const Vector<String16>& args __unused) {
+#undef LOG_TAG
+#define LOG_TAG "SoundTriggerHwService::ModuleClient"
+
+SoundTriggerHwService::ModuleClient::ModuleClient(const sp<Module>& module,
+ const sp<ISoundTriggerClient>& client)
+ : mModule(module), mClient(client)
+{
+}
+
+void SoundTriggerHwService::ModuleClient::onFirstRef()
+{
+ sp<IBinder> binder = IInterface::asBinder(mClient);
+ if (binder != 0) {
+ binder->linkToDeath(this);
+ }
+}
+
+SoundTriggerHwService::ModuleClient::~ModuleClient()
+{
+}
+
+status_t SoundTriggerHwService::ModuleClient::dump(int fd __unused,
+ const Vector<String16>& args __unused) {
String8 result;
return NO_ERROR;
}
+void SoundTriggerHwService::ModuleClient::detach() {
+ ALOGV("detach()");
+ if (!captureHotwordAllowed()) {
+ return;
+ }
+
+ {
+ AutoMutex lock(mLock);
+ if (mClient != 0) {
+ IInterface::asBinder(mClient)->unlinkToDeath(this);
+ mClient.clear();
+ }
+ }
+
+ sp<Module> module = mModule.promote();
+ if (module == 0) {
+ return;
+ }
+ module->detach(this);
+}
+
+status_t SoundTriggerHwService::ModuleClient::loadSoundModel(const sp<IMemory>& modelMemory,
+ sound_model_handle_t *handle)
+{
+ ALOGV("loadSoundModel() handle");
+ if (!captureHotwordAllowed()) {
+ return PERMISSION_DENIED;
+ }
+
+ sp<Module> module = mModule.promote();
+ if (module == 0) {
+ return NO_INIT;
+ }
+ return module->loadSoundModel(modelMemory, this, handle);
+}
+
+status_t SoundTriggerHwService::ModuleClient::unloadSoundModel(sound_model_handle_t handle)
+{
+ ALOGV("unloadSoundModel() model handle %d", handle);
+ if (!captureHotwordAllowed()) {
+ return PERMISSION_DENIED;
+ }
+
+ sp<Module> module = mModule.promote();
+ if (module == 0) {
+ return NO_INIT;
+ }
+ return module->unloadSoundModel(handle);
+}
+
+status_t SoundTriggerHwService::ModuleClient::startRecognition(sound_model_handle_t handle,
+ const sp<IMemory>& dataMemory)
+{
+ ALOGV("startRecognition() model handle %d", handle);
+ if (!captureHotwordAllowed()) {
+ return PERMISSION_DENIED;
+ }
+
+ sp<Module> module = mModule.promote();
+ if (module == 0) {
+ return NO_INIT;
+ }
+ return module->startRecognition(handle, dataMemory);
+}
+
+status_t SoundTriggerHwService::ModuleClient::stopRecognition(sound_model_handle_t handle)
+{
+ ALOGV("stopRecognition() model handle %d", handle);
+ if (!captureHotwordAllowed()) {
+ return PERMISSION_DENIED;
+ }
+
+ sp<Module> module = mModule.promote();
+ if (module == 0) {
+ return NO_INIT;
+ }
+ return module->stopRecognition(handle);
+}
+
+void SoundTriggerHwService::ModuleClient::setCaptureState_l(bool active)
+{
+ ALOGV("ModuleClient::setCaptureState_l %d", active);
+ sp<SoundTriggerHwService> service;
+ sound_trigger_service_state_t state;
+
+ sp<Module> module = mModule.promote();
+ if (module == 0) {
+ return;
+ }
+ {
+ AutoMutex lock(mLock);
+ state = (active && !module->isConcurrentCaptureAllowed()) ?
+ SOUND_TRIGGER_STATE_DISABLED : SOUND_TRIGGER_STATE_ENABLED;
+
+ service = module->service().promote();
+ if (service == 0) {
+ return;
+ }
+ }
+ service->sendServiceStateEvent_l(state, this);
+}
+
+void SoundTriggerHwService::ModuleClient::onCallbackEvent(const sp<CallbackEvent>& event)
+{
+ ALOGV("ModuleClient onCallbackEvent type %d", event->mType);
+
+ sp<IMemory> eventMemory = event->mMemory;
+
+ if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+ return;
+ }
+
+ switch (event->mType) {
+ case CallbackEvent::TYPE_SERVICE_STATE: {
+ sp<ISoundTriggerClient> client;
+ {
+ AutoMutex lock(mLock);
+ client = mClient;
+ }
+        if (client != 0) {
+ client->onServiceStateChange(eventMemory);
+ }
+ } break;
+ default:
+ LOG_ALWAYS_FATAL("onCallbackEvent unknown event type %d", event->mType);
+ }
+}
+
+void SoundTriggerHwService::ModuleClient::binderDied(
+ const wp<IBinder> &who __unused) {
+ ALOGW("client binder died for client %p", this);
+ detach();
+}
+
}; // namespace android
diff --git a/services/soundtrigger/SoundTriggerHwService.h b/services/soundtrigger/SoundTriggerHwService.h
index 13a577a..60ebb35 100644
--- a/services/soundtrigger/SoundTriggerHwService.h
+++ b/services/soundtrigger/SoundTriggerHwService.h
@@ -26,7 +26,7 @@
#include <soundtrigger/ISoundTrigger.h>
#include <soundtrigger/ISoundTriggerClient.h>
#include <system/sound_trigger.h>
-#include <hardware/sound_trigger.h>
+#include "SoundTriggerHalInterface.h"
namespace android {
@@ -39,6 +39,7 @@
friend class BinderService<SoundTriggerHwService>;
public:
class Module;
+ class ModuleClient;
static char const* getServiceName() { return "media.sound_trigger_hw"; }
@@ -69,7 +70,8 @@
};
Model(sound_model_handle_t handle, audio_session_t session, audio_io_handle_t ioHandle,
- audio_devices_t device, sound_trigger_sound_model_type_t type);
+ audio_devices_t device, sound_trigger_sound_model_type_t type,
+ sp<ModuleClient>& moduleClient);
~Model() {}
sound_model_handle_t mHandle;
@@ -79,6 +81,7 @@
audio_devices_t mCaptureDevice;
sound_trigger_sound_model_type_t mType;
struct sound_trigger_recognition_config mConfig;
+ sp<ModuleClient> mModuleClient;
};
class CallbackEvent : public RefBase {
@@ -88,27 +91,76 @@
TYPE_SOUNDMODEL,
TYPE_SERVICE_STATE,
} event_type;
- CallbackEvent(event_type type, sp<IMemory> memory, wp<Module> module);
+ CallbackEvent(event_type type, sp<IMemory> memory);
virtual ~CallbackEvent();
+ void setModule(wp<Module> module) { mModule = module; }
+ void setModuleClient(wp<ModuleClient> moduleClient) { mModuleClient = moduleClient; }
+
event_type mType;
sp<IMemory> mMemory;
wp<Module> mModule;
+ wp<ModuleClient> mModuleClient;
};
- class Module : public virtual RefBase,
- public BnSoundTrigger,
- public IBinder::DeathRecipient {
+ class Module : public RefBase {
public:
Module(const sp<SoundTriggerHwService>& service,
- sound_trigger_hw_device* hwDevice,
- sound_trigger_module_descriptor descriptor,
- const sp<ISoundTriggerClient>& client);
+ const sp<SoundTriggerHalInterface>& halInterface,
+ sound_trigger_module_descriptor descriptor);
virtual ~Module();
+ virtual status_t loadSoundModel(const sp<IMemory>& modelMemory,
+ sp<ModuleClient> moduleClient,
+ sound_model_handle_t *handle);
+
+ virtual status_t unloadSoundModel(sound_model_handle_t handle);
+
+ virtual status_t startRecognition(sound_model_handle_t handle,
+ const sp<IMemory>& dataMemory);
+ virtual status_t stopRecognition(sound_model_handle_t handle);
+
+ sp<SoundTriggerHalInterface> halInterface() const { return mHalInterface; }
+ struct sound_trigger_module_descriptor descriptor() { return mDescriptor; }
+ wp<SoundTriggerHwService> service() const { return mService; }
+ bool isConcurrentCaptureAllowed() const { return mDescriptor.properties.concurrent_capture; }
+
+ sp<Model> getModel(sound_model_handle_t handle);
+
+ void setCaptureState_l(bool active);
+
+ sp<ModuleClient> addClient(const sp<ISoundTriggerClient>& client);
+
+ void detach(const sp<ModuleClient>& moduleClient);
+
+ void onCallbackEvent(const sp<CallbackEvent>& event);
+
+ private:
+
+ status_t unloadSoundModel_l(sound_model_handle_t handle);
+
+ Mutex mLock;
+ wp<SoundTriggerHwService> mService;
+ sp<SoundTriggerHalInterface> mHalInterface;
+ struct sound_trigger_module_descriptor mDescriptor;
+ Vector< sp<ModuleClient> > mModuleClients;
+ DefaultKeyedVector< sound_model_handle_t, sp<Model> > mModels;
+ sound_trigger_service_state_t mServiceState;
+ }; // class Module
+
+ class ModuleClient : public virtual RefBase,
+ public BnSoundTrigger,
+ public IBinder::DeathRecipient {
+ public:
+
+ ModuleClient(const sp<Module>& module,
+ const sp<ISoundTriggerClient>& client);
+
+ virtual ~ModuleClient();
+
virtual void detach();
virtual status_t loadSoundModel(const sp<IMemory>& modelMemory,
@@ -122,36 +174,23 @@
virtual status_t dump(int fd, const Vector<String16>& args);
-
- sound_trigger_hw_device *hwDevice() const { return mHwDevice; }
- struct sound_trigger_module_descriptor descriptor() { return mDescriptor; }
- void setClient(const sp<ISoundTriggerClient>& client) { mClient = client; }
- void clearClient() { mClient.clear(); }
- sp<ISoundTriggerClient> client() const { return mClient; }
- wp<SoundTriggerHwService> service() const { return mService; }
-
- void onCallbackEvent(const sp<CallbackEvent>& event);
-
- sp<Model> getModel(sound_model_handle_t handle);
-
- void setCaptureState_l(bool active);
+ virtual void onFirstRef();
// IBinder::DeathRecipient implementation
virtual void binderDied(const wp<IBinder> &who);
+ void onCallbackEvent(const sp<CallbackEvent>& event);
+
+ void setCaptureState_l(bool active);
+
+ sp<ISoundTriggerClient> client() const { return mClient; }
+
private:
- status_t unloadSoundModel_l(sound_model_handle_t handle);
-
-
- Mutex mLock;
- wp<SoundTriggerHwService> mService;
- struct sound_trigger_hw_device* mHwDevice;
- struct sound_trigger_module_descriptor mDescriptor;
- sp<ISoundTriggerClient> mClient;
- DefaultKeyedVector< sound_model_handle_t, sp<Model> > mModels;
- sound_trigger_service_state_t mServiceState;
- }; // class Module
+ mutable Mutex mLock;
+ wp<Module> mModule;
+ sp<ISoundTriggerClient> mClient;
+ }; // class ModuleClient
class CallbackThread : public Thread {
public:
@@ -176,8 +215,6 @@
Vector< sp<CallbackEvent> > mEventQueue;
};
- void detachModule(const sp<Module>& module);
-
static void recognitionCallback(struct sound_trigger_recognition_event *event, void *cookie);
sp<IMemory> prepareRecognitionEvent_l(struct sound_trigger_recognition_event *event);
void sendRecognitionEvent(struct sound_trigger_recognition_event *event, Module *module);
@@ -188,6 +225,8 @@
sp<IMemory> prepareServiceStateEvent_l(sound_trigger_service_state_t state);
void sendServiceStateEvent_l(sound_trigger_service_state_t state, Module *module);
+ void sendServiceStateEvent_l(sound_trigger_service_state_t state,
+ ModuleClient *moduleClient);
void sendCallbackEvent_l(const sp<CallbackEvent>& event);
void onCallbackEvent(const sp<CallbackEvent>& event);